code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
'''
Class for post-processing measurement data.
'''
from pdata._metadata import __version__
import os
import time
import numpy as np
import types
import re
import logging
import copy
import shutil
import gzip
import tarfile
import itertools
import json
import jsondiff
import datetime
import pytz
from dateutil import tz
from collections import OrderedDict
UNIX_EPOCH = datetime.datetime(1970, 1, 1, 0, 0, tzinfo = pytz.utc)
class PDataSingle():
    ''' Class for reading in the contents of a single pdata data directory.
        Almost always passed on to DataView for actual analysis. '''

    def __init__(self, path, convert_timestamps=True, parse_comments=False):
        '''Parse data stored in the specified directory path.

           convert_timestamps --> Convert values that look like time
             stamps into seconds since Unix epoch.

           parse_comments --> Parse comments placed between data
             rows. In the current implementation, parsing the comments
             requires a separate pass through the data.
        '''
        self._path = path

        def parse_initial_snapshot():
            # Read the initial snapshot, stored either as plain or gzipped JSON.
            self._snapshots = []
            if os.path.exists(os.path.join(path, 'snapshot.json')):
                with open(os.path.join(path, 'snapshot.json'), 'r') as f:
                    self._snapshots.append((0, json.load(f)))
            else:
                with gzip.open(os.path.join(path, 'snapshot.json.gz'), 'r') as f:
                    self._snapshots.append((0, json.load(f)))

        def add_snapshot_diff(row, f):
            # Deep copy the last snapshot -> VERY inefficient but easy & safe
            snap = json.loads(json.dumps(self._snapshots[-1][-1]))
            # Add the new copy with the changes
            self._snapshots.append((row, jsondiff.patch(snap, json.load(f))))

        def parse_snapshot_diff_names(fnames):
            """ Given a list of filenames, filter and sort the snapshot diffs. """
            diff_names = []
            for f in fnames:
                m = re.match(r'snapshot\.row-(\d+)\.diff(\d+)\.json', f)
                if m is not None:  # fixed: use identity comparison with None
                    diff_names.append((int(m.group(1)), int(m.group(2)), m.group(0)))
                    continue
            diff_names.sort(key=lambda x: x[1]) # secondary sort on .diff<n>
            diff_names.sort(key=lambda x: x[0]) # primary sort on .row-<n>
            return diff_names

        def parse_tabular_data(f):
            # First read comments in the data file
            # and determine the number of data rows and columns
            self._comments = []
            converters = {}
            rowno = 0
            comment = ""
            while True:
                line = f.readline()
                # gzip/tar file objects yield bytes; normalize to str
                if not isinstance(line, str): line = line.decode('utf-8')
                if len(line) == 0: break # EOF
                line = line.strip()
                if len(line) == 0: continue # empty line
                if line.startswith('#'):
                    comment += line[1:].strip() + '\n'
                    continue

                # Otherwise this is a data row
                comment = comment.strip()
                if len(comment) > 0:
                    self._comments.append((rowno, comment))
                    # The second to last comment row preceding the first data row is the table header
                    # that defines the column names
                    # Parse the columns
                    if rowno==0: self._table_header = comment

                # Determine the number of columns from the first data row
                if rowno==0: ncols = len(line.split('\t'))

                # Determine, based on the first data row, whether any columns contain
                # time stamps. Convert them into seconds since Unix epoch.
                if rowno==0 and convert_timestamps:
                    for i,c in enumerate(line.split('\t')):
                        try:
                            PDataSingle._parse_timestamp(c)
                            converters[i] = lambda x: PDataSingle._parse_timestamp(x.decode('utf-8'))
                            logging.info('Column %s appears to contain timestamps. Converting them to seconds since Unix epoch. (Disable by setting convert_timestamps=False.)', i)
                        except ValueError:
                            pass # Not a timestamp

                rowno += 1
                comment = ""

                # Done parsing the header. We can stop here if parsing comments is not needed.
                if not parse_comments: break

            if not hasattr(self, "_table_header"):
                # fixed: logging.warn() was removed in Python 3.13
                logging.warning(f"No header found in tabular data of {self._path}")
                self._column_names, self._units = [], []
            else:
                self._column_names, self._units = PDataSingle._parse_columns_from_header(self._table_header)
                assert len(self._column_names) == ncols, "The number of columns in the header and data do not seem to match."
            self._column_name_to_index = dict((self._column_names[i], i) for i in range(len(self._column_names)) )

            # Parse the actual numerical data
            f.seek(0)
            self._data = np.genfromtxt(f,
                                       delimiter="\t",
                                       comments="#",
                                       converters=converters,
                                       dtype=float) # Assume all columns contain floats

            # If the data contains just a single point, genfromtxt returns a 1D vector instead of a 2D array, so convert it to 2D
            if len(self._data.shape) == 1: self._data = np.array([ self._data ])

            if parse_comments:
                # rowno should equal the number of data rows, if comments were parsed and
                # no new data was added between the two passes through the file.
                assert len(self._data) >= rowno, 'Unexcepted number of data rows: %s vs %s' % (len(self._data), rowno)
            if len(self._data) > 0:
                assert len(self._data[0]) == ncols, 'Unexcepted number of data columns: %s vs %s' % (len(self._data[0]), ncols)

        ###########################################################
        # Actually parse the data using the helper functions above
        ###########################################################

        # Parse main data file (possibly compressed)
        if os.path.exists(os.path.join(path, "tabular_data.dat")):
            with open(os.path.join(path, "tabular_data.dat"), 'r') as f:
                parse_tabular_data(f)
        elif os.path.exists(os.path.join(path, "tabular_data.dat.gz")):
            with gzip.open(os.path.join(path, "tabular_data.dat.gz"), 'rb') as f:
                parse_tabular_data(f)
        else:
            # Fall back to any other .dat file in the directory
            other_dat_files = [ pp for pp in os.scandir(path) if pp.name.endswith(".dat") ]
            if len(other_dat_files) == 0: assert False, f'No .dat file found in {os.path.abspath(path)}'
            logging.info(f"No tabular_data.dat(.gz) found in {path}. Using {other_dat_files[0].name} instead.")
            with open(other_dat_files[0].path, 'r') as f:
                parse_tabular_data(f)

        # Parse initial snapshot
        parse_initial_snapshot()

        # Parse snapshot diffs
        tar_fname = os.path.join(path, 'snapshot_diffs.tar.gz')
        if os.path.exists(tar_fname):
            with tarfile.open(tar_fname) as tar:
                for row,j,fname in parse_snapshot_diff_names(tar.getnames()):
                    add_snapshot_diff(row, tar.extractfile(fname))
        else: # uncompressed snapshot diffs as separate files
            for row,j,fname in parse_snapshot_diff_names(os.listdir(path)):
                with open(os.path.join(path, fname)) as f:
                    add_snapshot_diff(row, f)

    def name(self):
        '''Identifier of the dataset: the last component of the directory path.'''
        return os.path.split(self._path)[-1]
    def filename(self):
        '''Path to the data directory (for identification/debugging).'''
        return self._path
    def dimension_names(self):
        '''List of column names parsed from the table header.'''
        return self._column_names
    def dimension_units(self):
        '''List of column units parsed from the table header.'''
        return self._units
    def npoints(self):
        '''Number of data rows.'''
        return len(self._data)
    def data(self):
        '''All data as a 2D ndarray (rows x columns).'''
        return self._data
    def comments(self):
        '''List of (data_row_no, comment string) tuples.'''
        return self._comments
    def settings(self):
        '''List of (data_row_no, snapshot dict) tuples.'''
        return self._snapshots
    def __getitem__(self, key):
        '''Return the column named key as a 1D ndarray.'''
        return self._data[:, self._column_name_to_index[key]]

    @staticmethod
    def _parse_timestamp(s):
        '''Convert a "YYYY-MM-DD HH:MM:SS.ffffff" string (assumed local time)
        into seconds since the Unix epoch. Raises ValueError if s does not
        match the format.'''
        t = datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S.%f')
        return (t.astimezone() - UNIX_EPOCH).total_seconds()

    @staticmethod
    def _parse_columns_from_header(s):
        '''Parse column names and units from the table header string.
        Returns (column_names, units) as two lists.'''
        try:
            # Try assuming the "Column name (unit)\t" format in pdata
            cols = []
            units = []
            for c in s.split('\t'):
                m = re.match(r'([\w\d\s]+)\s+\(([\w\d\s]*)\)', c.strip())
                cols.append(m.group(1))
                units.append(m.group(2))
        except AttributeError:
            # Try assuming the legacy format used in QCoDeS (qcodes/data/gnuplot_format.py)
            s = s.split('\n')[-2] # Second to last header row contains the tab separated column names
            cols = [ c.strip().strip('"') for c in s.split('\t') ]
            units = [ '' for i in range(len(cols))]
        return cols, units
class DataView():
    '''
    Class for post-processing measurement data. Main features are:

      * Concatenating multiple separate data objects
      * Creating "virtual" columns by parsing comments or snapshot files
        or by applying arbitrary functions to the data
      * Dividing the rows into "sweeps" based on various criteria.

    See docs/examples/Procedural Data and DataView.ipynb for example use.
    '''
def __init__(self, data, deep_copy=False, source_column_name='data_source', fill_value=None, **kwargs):
'''
Create a new view of existing data objects for post-processing.
The original data objects will not be modified.
args:
data -- Data object(s). Each data object needs to provide the following methods:
* name() # Arbitrary string identifier for the data object
* filename() # Specifies the path to the main datafile
# (for identification/debugging purpose only)
* dimension_names() # List of all data column names
* dimension_units() # List of all data column units
* npoints() # Number of data points/rows.
* data() # 2D ndarray containing all data rows and columns.
* comments() # List of tuples (data_row_no, comment string),
# where data_row_no indicated the index of
# the data point that the comment precedes.
* settings() # List of tuples (data_row_no, settings dict),
# where data_row_no indicated the index of
# the data point that the settings apply to.
kwargs input:
deep_copy -- specifies whether the underlying data is copied or
only referenced (more error prone, but memory efficient)
source_column_name -- specifies the name of the (virtual) column that tells which
data object the row originates from. Specify None, if
you don't want this column to be added.
fill_value -- fill value for columns that do not exist in all data objects.
Default is None, in which case the column is omitted entirely.
'''
self._virtual_dims = {}
if isinstance(data, DataView): # clone
# these private variables should be immutable so no need to deep copy
self._dimensions = data._dimensions
self._units = data._units
self._dimension_indices = data._dimension_indices
self._source_col = data._source_col
self._comments = data._comments
self._settings = data._settings
if deep_copy:
self._data = data._data.copy()
else:
self._data = data._data
# Always deep copy the mask
self._mask = data._mask.copy()
for name, fn in data._virtual_dims.items():
self._virtual_dims[name] = fn
return
try: # see if a single Data object
self._dimensions = data.dimension_names()
self._units = dict(zip(data.dimension_names(), data.dimension_units()))
unmasked = data.data().copy() if deep_copy else data.data()
if source_column_name != None:
n = data.name()
self._source_col = [n for i in range(data.npoints())]
else:
self._source_col = None
self._comments = data.comments()
try:
self._settings = data.settings()
except:
logging.exception("Could not parse the instrument settings file. Doesn't matter if you were not planning to add virtual columns based on values in the snapshot files.")
self._settings = None
except MemoryError as e:
raise
except Exception as e: # probably a sequence of Data objects then
self._dimensions = set(itertools.chain( *(dd.dimension_names() for dd in data) ))
unmasked = {}
for dim in self._dimensions:
unmasked[dim] = []
for dat in data:
if len(dat.dimension_names()) == 0:
logging.warn("Data object '%s' seems to contain zero columns. Skipping it..." % (str(dat)))
break
n_rows = dat.npoints()
if n_rows == 0:
logging.warn("Data object '%s' seems to contain zero rows. Skipping it..." % (str(dat)))
break
try:
unmasked[dim].append(dat[dim])
except:
msg = "Dimension '%s' does not exist in Data object '%s'. " % (dim, str(dat))
if fill_value == None:
# ignore dimensions that don't exist in all data objects
del unmasked[dim]
msg += ' Omitting the dimension.'
logging.warn(msg)
break
else:
unmasked[dim].append(fill_value + np.zeros(n_rows, dtype=type(fill_value)))
msg += ' Using fill_value = %s (for %d rows)' % (str(fill_value), len(unmasked[dim][-1]))
logging.warn(msg)
# concatenate rows from all files
if dim in unmasked.keys():
unmasked[dim] = np.concatenate(unmasked[dim])
# add a column that specifies the source data file
lens = [ dat.npoints() for dat in data ]
if source_column_name != None:
names = [ '%s_(%s)' % (dat.name(), dat.filename().strip('.dat')) for dat in data ]
self._source_col = [ [n for jj in range(l)] for n,l in zip(names,lens) ]
#self._source_col = [ jj for jj in itertools.chain.from_iterable(self._source_col) ] # flatten
self._source_col = list(itertools.chain.from_iterable(self._source_col)) # flatten
else:
self._source_col = None
# keep only dimensions that could be parsed from all files
self._dimensions = unmasked.keys()
unmasked = np.array([unmasked[k] for k in self._dimensions]).T
# take units from first data set
self._units = dict(zip(data[0].dimension_names(), data[0].dimension_units()))
# concatenate comments, adjusting row numbers from Data object rows to the corresponding dataview rows
lens = np.array(lens)
self._comments = [ dat.comments() for dat in data ]
all_comments = []
for jj,comments in enumerate(self._comments):
all_comments.append([ (rowno + lens[:jj].sum(), commentstr) for rowno,commentstr in comments ])
self._comments = list(itertools.chain.from_iterable(all_comments)) # flatten by one level
# concatenate settings (snapshot) files in the same way
self._settings = [ dat.settings() for dat in data ]
all_settings = []
for jj,settings in enumerate(self._settings):
all_settings.append([ (rowno + lens[:jj].sum(), sett) for rowno,sett in settings ])
self._settings = list(itertools.chain.from_iterable(all_settings)) # flatten by one level
self._data = unmasked
self._mask = np.zeros(len(unmasked), dtype=bool)
self._mask_stack = []
self._dimension_indices = dict([(n,i) for i,n in enumerate(self._dimensions)])
self.set_mask(False)
if source_column_name != None:
self.add_virtual_dimension(source_column_name, arr=np.array(self._source_col))
def __getitem__(self, index):
'''
Access the data.
index may be a slice or a string, in which case it is interpreted
as a dimension name.
'''
if isinstance(index, str):
return self.column(index)
else:
return self.data()[index]
    def copy(self, copy_data=False):
        '''
        Make a copy of the view. The returned copy will always have an independent mask.

        copy_data -- whether the underlying data is also deep copied.
        '''
        # Delegates to the DataView(DataView) "clone" constructor path.
        return DataView(self, deep_copy=copy_data)
def data_source(self):
'''
Returns a list of strings that tell which Data object each of the unmasked rows originated from.
'''
return [ i for i in itertools.compress(self._source_col, ~(self._mask)) ]
def clear_mask(self):
'''
Unmask all data (i.e. make all data in the initially
provided Data object visible again).
'''
self._mask[:] = False
self._mask_stack = []
    def mask(self):
        '''
        Get a vector of booleans indicating which rows are masked.
        '''
        # Return a copy so the caller cannot accidentally modify the internal mask.
        return self._mask.copy()
def dimensions(self):
'''
Returns a list of all dimensions, both real and virtual.
'''
return list(itertools.chain(self._dimension_indices.keys(), self._virtual_dims.keys()))
    def units(self, d):
        '''
        Returns the units for dimension d.
        Raises KeyError if d is not a known dimension.
        '''
        return self._units[d]
    def comments(self):
        '''
        Return the comments parsed from the data files.

        Returns tuples where the first item is an index to the
        first datarow that the comment applies to.
        '''
        return self._comments
    def settings(self):
        '''
        Return the settings parsed from the settings files.

        Returns tuples where the first item is an index to the
        first datarow that the settings apply to.
        May be None if the snapshot files could not be parsed.
        '''
        return self._settings
def continuous_ranges(self, masked_ranges=False):
'''
Returns a list of (start,stop) tuples that indicate continuous ranges of (un)masked data.
'''
m = self.mask() * (-1 if masked_ranges else 1)
dm = m[1:] - m[:-1]
starts = 1+np.where(dm < 0)[0]
stops = 1+np.where(dm > 0)[0]
if not m[0]:
starts = np.concatenate(( [0], starts ))
if not m[-1]:
stops = np.concatenate(( stops, [len(m)] ))
return zip(starts, stops)
    def set_mask(self, mask):
        '''
        Set an arbitrary mask for the data. Should be a vector of booleans of
        the same length as the number of data points.
        Alternatively, simply True/False masks/unmasks all data.

        See also mask_rows().
        '''
        try:
            # Scalar True/False: mask/unmask everything. Evaluating the truth
            # value of a numpy vector raises ValueError, which falls through
            # to the vector handling below (deliberate EAFP).
            if mask:
                self._mask[:] = True
            else:
                self._mask[:] = False
        except:
            # mask is a vector (boolean or index based): build a fresh boolean mask.
            m = np.zeros(len(self._mask), dtype=bool)
            m[mask] = True
            self._mask = m
    def mask_rows(self, row_mask, unmask_instead = False):
        '''
        Mask rows in the data. row_mask can be a slice or a boolean vector with
        length equal to the number of previously unmasked rows.

        The old mask is determined from the mask of the first column.

        Example:
          d = DataView(...)
          # ignore points where source current exceeds 1 uA.
          d.mask_rows(np.abs(d['I_source']) > 1e-6)
        '''
        old_mask = self._mask
        n = (~old_mask).astype(int).sum() # no. of previously unmasked entries
        #logging.debug("previously unmasked rows = %d" % n)

        # new mask for the previously unmasked rows
        new_mask = np.empty(n, dtype=bool); new_mask[:] = unmask_instead
        new_mask[row_mask] = (not unmask_instead)
        #logging.debug("new_mask.sum() = %d" % new_mask.sum())

        # combine the old and new masks: rows already masked stay masked,
        # the new mask is scattered into the previously unmasked positions
        full_mask = old_mask.copy()
        full_mask[~old_mask] = new_mask

        logging.debug("# of masked/unmasked rows = %d/%d" % (full_mask.astype(int).sum(), (~full_mask).astype(int).sum()))
        self.set_mask(full_mask)
def push_mask(self, mask, unmask_instead = False):
'''
Same as mask_rows(), but also pushes the mask to a 'mask stack'.
Handy for temporary masks e.g. inside loops.
See also pop_mask().
'''
self._mask_stack.append(self.mask())
self.mask_rows(mask, unmask_instead = unmask_instead)
def pop_mask(self):
'''
Pop the topmost mask from the mask stack,
set previous mask in the stack as current one
and return the popped mask.
Raises an exception if trying to pop an empty stack.
'''
try:
previous_mask = self._mask_stack.pop()
except IndexError as e:
raise Exception("Trying to pop empty mask stack: %s" % e)
self.set_mask(previous_mask)
return previous_mask
def remove_masked_rows_permanently(self):
'''
Removes the currently masked rows permanently.
This is typically unnecessary, but may be useful
before adding (cached) virtual columns to
huge data sets where most rows are masked (because
the cached virtual columns are computed for
masked rows as well.)
'''
# Removing the real data rows themselves is easy.
self._data = self._data[~(self._mask),:]
# but we have to also adjust the comment & settings line numbers
s = np.cumsum(self._mask.astype(int))
def n_masked_before_line(lineno): return s[max(0, min(len(s)-1, lineno-1))]
self._comments = [ (max(0,lineno-n_masked_before_line(lineno)), comment) for lineno,comment in self._comments ]
self._settings = [ (max(0,lineno-n_masked_before_line(lineno)), setting) for lineno,setting in self._settings ]
# as well as remove the masked rows from cached virtual columns.
# However, _virtual_dims is assumed to be immutable in copy() so
# we must copy it here!
old_dims = self._virtual_dims
self._virtual_dims = {}
for name, dim in old_dims.iteritems():
cached_arr = dim['cached_array']
if isinstance(cached_arr, np.ndarray):
cached_arr = cached_arr[~(self._mask)]
elif cached_arr != None:
cached_arr = [ val for i,val in enumerate(cached_arr) if not self._mask[i] ]
self._virtual_dims[name] = { 'fn': dim['fn'], 'cached_array': cached_arr }
# finally remove the obsolete mask(s)
self._mask = np.zeros(len(self._data), dtype=bool)
self._mask_stack = []
def single_valued_parameter(self, param):
''' If all values in the (virtual) dimension "param" are the same, return that value. '''
assert len(np.unique(self[param])) == 1 or (all(np.isnan(self[param])) and len(self[param]) > 0), \
'%s is not single valued for the current unmasked rows: %s' % (param, np.unique(self[param]))
return self[param][0]
def all_single_valued_parameters(self):
params = OrderedDict()
for p in self.dimensions():
try: params[p] = self.single_valued_parameter(p)
except: pass
return params
    def divide_into_sweeps(self, sweep_dimension, use_sweep_direction = None):
        '''Divide the rows into "sweeps" based on a monotonously increasing
        or decreasing value of column "sweep_dimension", if use_sweep_direction==True.

        If use_sweep_direction==False, sequences of points where
        "sweep_dimension" stays constant are considered sweeps. This
        is useful for splitting the data into sweeps based on a slowly
        varying parameter, e.g. a gate voltage set point that is
        changed between IV curve sweeps.

        If use_sweep_direction is None, this function tries to figure
        out which one is more reasonable.

        Returns a sequence of slices indicating the start and end of
        each sweep.

        Note that the indices are relative to the currently _unmasked_
        rows only.
        '''
        sdim = self[sweep_dimension]
        if isinstance(sdim[0], str):
            # string-valued column: "direction" is meaningless, detect changes only
            use_sweep_direction = False
            dx = np.array([ sdim[i+1] != sdim[i] for i in range(len(sdim)-1) ])
        else:
            # dx[i] in {-1, 0, +1}: direction of the step from row i to row i+1
            dx = np.sign(sdim[1:] - sdim[:-1])

        if use_sweep_direction == None:
            # Heuristic: if more than a quarter of the steps are nonzero,
            # the column is probably being actively swept.
            use_sweep_direction = ( np.abs(dx).astype(int).sum() > len(dx)/4. )

        if use_sweep_direction:
            logging.info("Assuming '%s' is swept." % sweep_dimension)
        else:
            logging.info("Assuming '%s' stays constant within a sweep." % sweep_dimension)

        if use_sweep_direction:
            for i in range(1,len(dx)):
                if i+1 < len(dx) and dx[i] == 0: dx[i]=dx[i+1] # this is necessary to detect changes in direction, when the end point is repeated
            # indices (into sdim) where the sweep direction flips sign
            change_in_sign = (2 + np.array(np.where(dx[1:] * dx[:-1] < 0),dtype=int).reshape((-1))).tolist()

            # the direction changing twice in a row means that sweeps are being done repeatedly
            # in the same direction.
            for i in range(len(change_in_sign)-1, 0, -1):
                if change_in_sign[i]-change_in_sign[i-1] == 1: del change_in_sign[i]

            if len(change_in_sign) == 0: return [ slice(0, len(sdim)) ]

            start_indices = np.concatenate(([0], change_in_sign))
            stop_indices  = np.concatenate((change_in_sign, [len(sdim)]))

            sweeps = np.concatenate((start_indices, stop_indices)).reshape((2,-1)).T
        else:
            # boundaries are wherever the (slow) parameter changes value
            change_in_sdim = 1 + np.array(np.where(dx != 0)).reshape((-1))
            if len(change_in_sdim) == 0: return [ slice(0, len(sdim)) ]

            start_indices = np.concatenate(([0], change_in_sdim))
            stop_indices = np.concatenate((change_in_sdim, [len(sdim)]))

            sweeps = np.concatenate((start_indices, stop_indices)).reshape((2,-1)).T
        return [ slice(max(s, 0), min(e, len(sdim))) for s,e in sweeps ]
def mask_sweeps(self, sweep_dimension, sl, unmask_instead=False):
'''
Mask entire sweeps (see divide_into_sweeps()).
sl can be a single integer or any slice object compatible with a 1D numpy.ndarray (list of sweeps).
unmask_instead -- unmask the specified sweeps instead, mask everything else
'''
sweeps = self.divide_into_sweeps(sweep_dimension)
row_mask = np.zeros(len(self[sweep_dimension]), dtype=bool)
for start,stop in ([sweeps[sl]] if isinstance(sl, int) else sweeps[sl]):
logging.debug("%smasking start: %d, stop %d" % ('un' if unmask_instead else '',start, stop))
row_mask[start:stop] = True
self.mask_rows(~row_mask if unmask_instead else row_mask)
    def unmask_sweeps(self, sweep_dimension, sl):
        '''
        Mask all rows except the specified sweeps (see divide_into_sweeps()).

        sl can be a single integer or any slice object compatible with a 1D numpy.ndarray (list of sweeps).
        '''
        # Equivalent to mask_sweeps() with the selection inverted.
        self.mask_sweeps(sweep_dimension, sl, unmask_instead=True)
def data(self, deep_copy=False):
'''
Get the non-masked data as a 2D ndarray.
kwargs:
deep_copy -- copy the returned data so that it is safe to modify it.
'''
d = self._data[~(self._mask)]
if deep_copy: d = d.copy()
return d
    def column(self, name, deep_copy=False):
        '''
        Get the non-masked entries of dimension 'name' as a 1D ndarray.
        name is the dimension name.

        kwargs:
          deep_copy -- copy the returned data so that it is safe to modify it.
        '''
        if name in self._virtual_dims.keys():
            d = self._virtual_dims[name]['cached_array']
            # no cached values -> compute on the fly from the generating function
            if d is None: d = self._virtual_dims[name]['fn'](self)
            if len(d) == len(self._mask): # The function may return masked or unmasked data...
                # The function returned unmasked data so apply the mask
                try:
                    d = d[~(self._mask)] # This works for ndarrays
                except:
                    # workaround to mask native python arrays
                    d = [ x for i,x in enumerate(d) if not self._mask[i] ]
            return d
        else:
            d = self._data[~(self._mask),self._dimension_indices[name]]
            if deep_copy: d = d.copy()
            return d
non_numpy_array_warning_given = []
def add_virtual_dimension(self, name, units="", fn=None, arr=None, comment_regex=None, from_set=None, dtype=float, preparser=None, cache_fn_values=True, return_result=False):
'''
Makes a computed vector accessible as self[name].
The computed vector depends on whether fn, arr or comment_regex is specified.
It is advisable that the computed vector is of the same length as
the real data columns.
kwargs:
Arguments for specifying how to parse the value:
fn -- the function applied to the DataView object, i.e self[name] returns fn(self)
arr -- specify the column directly as an array, i.e. self[name] returns arr
comment_regex -- for each row, take the value from the last match in a comment, otherwise np.NaN. Should be a regex string.
from_set -- for each row, take the value from the corresponding snapshot file. Specify as a tuple that indexes the settings dict ("instrument_name", "parameter_name", ...).
Other options:
dtype -- data type (default: float)
preparser -- optional preparser function that massages the value before it is passed to dtype
cache_fn_values -- evaluate fn(self) immediately for the entire (unmasked) array and cache the result
return_result -- return the result directly as an (nd)array instead of adding it as a virtual dimension
'''
logging.debug('adding virtual dimension "%s"' % name)
assert (fn != None) + (arr is not None) + (comment_regex != None) + (from_set != None) == 1, 'You must specify exactly one of "fn", "arr", or "comment_regex".'
if arr is not None:
assert len(arr) == len(self._mask), '"arr" must be a vector of the same length as the real data columns. If you want to do something fancier, specify your own fn.'
if from_set != None:
assert self._settings != None, 'snapshot files were not successfully parsed during dataview initialization.'
if comment_regex != None or from_set != None:
# construct the column by parsing the comments or snapshots
use_set = (from_set != None) # shorthand for convenience
# pre-allocate an array for the values
try:
if issubclass(dtype, str):
raise Exception('Do not store strings in numpy arrays (because it "works" but the behavior is unintuitive, i.e. only the first character is stored if you just specify dtype=str).')
vals = np.zeros(len(self._mask), dtype=dtype)
if dtype == float: vals += np.nan # initialize to NaN instead of zeros
except:
if not name in self.non_numpy_array_warning_given:
logging.info("%s does not seem to be a numpy data type. The virtual column '%s' will be a native python array instead, which may be slow." % (str(dtype), name))
self.non_numpy_array_warning_given.append(name)
vals = [None for jjj in range(len(self._mask))]
def set_vals(up_to_row, new_val):
"""
Helper that sets values up to the specified row, starting from where we last left off.
This is a little trickier than might seem at first because when we parse a new value,
we don't yet know the row up to which it applies. Instead, we always set the previous value
up to row where the new value appeared (and remember the new value for the next call).
"""
if up_to_row > set_vals.prev_match_on_row:
# Apply preparser() and dtype(() to the previously parsed value.
#
# It's good to do it only here because occasionally there may be multiple definitions for the
# same column and same row, usually on row zero.
# These might not all have valid syntax for preparser/dtype()
# so it's best to only parse the one that matters (the last one).
v = set_vals.prev_val
try:
if preparser != None: v = preparser(v)
v = dtype(v)
except:
#logging.exception('Could not convert the parsed value (%s) to the specifed data type (%s).'
# % (v, dtype))
raise
if isinstance(vals, np.ndarray): vals[set_vals.prev_match_on_row:up_to_row] = v
else: vals[set_vals.prev_match_on_row:up_to_row] = ( v for jjj in range(up_to_row-set_vals.prev_match_on_row) )
logging.debug('Setting value for rows %d:%d = %s' % (set_vals.prev_match_on_row, up_to_row, v))
set_vals.prev_match_on_row = up_to_row
set_vals.prev_val = new_val
set_vals.prev_match_on_row = 0
#logging.debug(self._comments)
for rowno,commentstr in (self._settings if use_set else self._comments):
if use_set:
# simply use the value from the snapshot file
assert from_set[0] in commentstr.keys(), '"%s" not found in settings.' % from_set[0]
new_val = commentstr
for k in from_set: new_val = new_val[k]
else:
# see if the comment matches the specified regex
m = re.search(comment_regex, commentstr)
if m == None: continue
#logging.debug('Match on row %d: "%s"' % (rowno, commentstr))
if len(m.groups()) != 1:
logging.warn('Did not get a unique match (%s) in comment (%d): %s'
% (str(groups), rowno, commentstr))
new_val = m.group(1)
set_vals(up_to_row=rowno, new_val=new_val)
logging.debug('Setting value for (remaining) rows %d: = %s' % (set_vals.prev_match_on_row, set_vals.prev_val))
set_vals(up_to_row=len(vals), new_val=None)
return self.add_virtual_dimension(name, units=units, arr=vals, return_result=return_result)
if cache_fn_values and arr is None:
old_mask = self.mask().copy() # backup the mask
self.clear_mask()
vals = fn(self)
self.mask_rows(old_mask) # restore the mask
return self.add_virtual_dimension(name, units=units, arr=vals, cache_fn_values=False, return_result=return_result)
if return_result:
return arr
else:
self._virtual_dims[name] = {'fn': fn, 'cached_array': arr}
self._units[name] = units
def remove_virtual_dimension(self, name):
if name in self._virtual_dims.keys():
del self._virtual_dims[name]
else:
logging.warn('Virtual dimension "%s" does not exist.' % name)
    def remove_virtual_dimensions(self):
        '''Remove all virtual dimensions.'''
        # NOTE(review): the corresponding entries in self._units are left in
        # place — presumably harmless, but confirm if units() is expected to
        # fail for removed dimensions.
        self._virtual_dims = {}
| [
"numpy.abs",
"numpy.empty",
"json.dumps",
"numpy.isnan",
"os.path.join",
"numpy.unique",
"os.path.abspath",
"os.path.exists",
"numpy.genfromtxt",
"tarfile.open",
"re.search",
"re.match",
"datetime.datetime",
"datetime.datetime.strptime",
"os.listdir",
"numpy.concatenate",
"os.scandir... | [((373, 425), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)', '(0)', '(0)'], {'tzinfo': 'pytz.utc'}), '(1970, 1, 1, 0, 0, tzinfo=pytz.utc)\n', (390, 425), False, 'import datetime\n'), ((6812, 6855), 'os.path.join', 'os.path.join', (['path', '"""snapshot_diffs.tar.gz"""'], {}), "(path, 'snapshot_diffs.tar.gz')\n", (6824, 6855), False, 'import os\n'), ((6865, 6890), 'os.path.exists', 'os.path.exists', (['tar_fname'], {}), '(tar_fname)\n', (6879, 6890), False, 'import os\n'), ((7842, 7895), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['s', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(s, '%Y-%m-%d %H:%M:%S.%f')\n", (7868, 7895), False, 'import datetime\n'), ((20222, 20245), 'numpy.empty', 'np.empty', (['n'], {'dtype': 'bool'}), '(n, dtype=bool)\n', (20230, 20245), True, 'import numpy as np\n'), ((23653, 23666), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (23664, 23666), False, 'from collections import OrderedDict\n'), ((30512, 30565), 'logging.debug', 'logging.debug', (['(\'adding virtual dimension "%s"\' % name)'], {}), '(\'adding virtual dimension "%s"\' % name)\n', (30525, 30565), False, 'import logging\n'), ((4789, 4876), 'numpy.genfromtxt', 'np.genfromtxt', (['f'], {'delimiter': '"""\t"""', 'comments': '"""#"""', 'converters': 'converters', 'dtype': 'float'}), "(f, delimiter='\\t', comments='#', converters=converters, dtype\n =float)\n", (4802, 4876), True, 'import numpy as np\n'), ((5982, 6020), 'os.path.join', 'os.path.join', (['path', '"""tabular_data.dat"""'], {}), "(path, 'tabular_data.dat')\n", (5994, 6020), False, 'import os\n'), ((7321, 7346), 'os.path.split', 'os.path.split', (['self._path'], {}), '(self._path)\n', (7334, 7346), False, 'import os\n'), ((18862, 18891), 'numpy.concatenate', 'np.concatenate', (['([0], starts)'], {}), '(([0], starts))\n', (18876, 18891), True, 'import numpy as np\n'), ((24889, 24918), 'numpy.sign', 'np.sign', (['(sdim[1:] - sdim[:-1])'], {}), '(sdim[1:] - 
sdim[:-1])\n', (24896, 24918), True, 'import numpy as np\n'), ((25081, 25138), 'logging.info', 'logging.info', (['("Assuming \'%s\' is swept." % sweep_dimension)'], {}), '("Assuming \'%s\' is swept." % sweep_dimension)\n', (25093, 25138), False, 'import logging\n'), ((25163, 25241), 'logging.info', 'logging.info', (['("Assuming \'%s\' stays constant within a sweep." % sweep_dimension)'], {}), '("Assuming \'%s\' stays constant within a sweep." % sweep_dimension)\n', (25175, 25241), False, 'import logging\n'), ((25928, 25965), 'numpy.concatenate', 'np.concatenate', (['([0], change_in_sign)'], {}), '(([0], change_in_sign))\n', (25942, 25965), True, 'import numpy as np\n'), ((26306, 26343), 'numpy.concatenate', 'np.concatenate', (['([0], change_in_sdim)'], {}), '(([0], change_in_sdim))\n', (26320, 26343), True, 'import numpy as np\n'), ((27145, 27242), 'logging.debug', 'logging.debug', (["('%smasking start: %d, stop %d' % ('un' if unmask_instead else '', start, stop)\n )"], {}), "('%smasking start: %d, stop %d' % ('un' if unmask_instead else\n '', start, stop))\n", (27158, 27242), False, 'import logging\n'), ((34917, 35032), 'logging.debug', 'logging.debug', (["('Setting value for (remaining) rows %d: = %s' % (set_vals.\n prev_match_on_row, set_vals.prev_val))"], {}), "('Setting value for (remaining) rows %d: = %s' % (set_vals.\n prev_match_on_row, set_vals.prev_val))\n", (34930, 35032), False, 'import logging\n'), ((35876, 35937), 'logging.warn', 'logging.warn', (['(\'Virtual dimension "%s" does not exist.\' % name)'], {}), '(\'Virtual dimension "%s" does not exist.\' % name)\n', (35888, 35937), False, 'import logging\n'), ((1157, 1192), 'os.path.join', 'os.path.join', (['path', '"""snapshot.json"""'], {}), "(path, 'snapshot.json')\n", (1169, 1192), False, 'import os\n'), ((1599, 1634), 'json.dumps', 'json.dumps', (['self._snapshots[-1][-1]'], {}), '(self._snapshots[-1][-1])\n', (1609, 1634), False, 'import json\n'), ((1943, 1999), 're.match', 're.match', 
(['"""snapshot\\\\.row-(\\\\d+)\\\\.diff(\\\\d+)\\\\.json"""', 'f'], {}), "('snapshot\\\\.row-(\\\\d+)\\\\.diff(\\\\d+)\\\\.json', f)\n", (1951, 1999), False, 'import re\n'), ((4243, 4307), 'logging.warn', 'logging.warn', (['f"""No header found in tabular data of {self._path}"""'], {}), "(f'No header found in tabular data of {self._path}')\n", (4255, 4307), False, 'import logging\n'), ((5227, 5249), 'numpy.array', 'np.array', (['[self._data]'], {}), '([self._data])\n', (5235, 5249), True, 'import numpy as np\n'), ((6151, 6192), 'os.path.join', 'os.path.join', (['path', '"""tabular_data.dat.gz"""'], {}), "(path, 'tabular_data.dat.gz')\n", (6163, 6192), False, 'import os\n'), ((6515, 6624), 'logging.info', 'logging.info', (['f"""No tabular_data.dat(.gz) found in {path}. Using {other_dat_files[0].name} instead."""'], {}), "(\n f'No tabular_data.dat(.gz) found in {path}. Using {other_dat_files[0].name} instead.'\n )\n", (6527, 6624), False, 'import logging\n'), ((6905, 6928), 'tarfile.open', 'tarfile.open', (['tar_fname'], {}), '(tar_fname)\n', (6917, 6928), False, 'import tarfile\n'), ((7182, 7198), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (7192, 7198), False, 'import os\n'), ((15295, 15309), 'numpy.array', 'np.array', (['lens'], {}), '(lens)\n', (15303, 15309), True, 'import numpy as np\n'), ((17221, 17270), 'itertools.compress', 'itertools.compress', (['self._source_col', '(~self._mask)'], {}), '(self._source_col, ~self._mask)\n', (17239, 17270), False, 'import itertools\n'), ((18761, 18777), 'numpy.where', 'np.where', (['(dm < 0)'], {}), '(dm < 0)\n', (18769, 18777), True, 'import numpy as np\n'), ((18799, 18815), 'numpy.where', 'np.where', (['(dm > 0)'], {}), '(dm > 0)\n', (18807, 18815), True, 'import numpy as np\n'), ((23537, 23559), 'numpy.unique', 'np.unique', (['self[param]'], {}), '(self[param])\n', (23546, 23559), True, 'import numpy as np\n'), ((6041, 6079), 'os.path.join', 'os.path.join', (['path', '"""tabular_data.dat"""'], {}), "(path, 
'tabular_data.dat')\n", (6053, 6079), False, 'import os\n'), ((12456, 12634), 'logging.exception', 'logging.exception', (['"""Could not parse the instrument settings file. Doesn\'t matter if you were not planning to add virtual columns based on values in the snapshot files."""'], {}), '(\n "Could not parse the instrument settings file. Doesn\'t matter if you were not planning to add virtual columns based on values in the snapshot files."\n )\n', (12473, 12634), False, 'import logging\n'), ((14980, 15029), 'numpy.array', 'np.array', (['[unmasked[k] for k in self._dimensions]'], {}), '([unmasked[k] for k in self._dimensions])\n', (14988, 15029), True, 'import numpy as np\n'), ((15599, 15642), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['all_comments'], {}), '(all_comments)\n', (15628, 15642), False, 'import itertools\n'), ((16010, 16053), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['all_settings'], {}), '(all_settings)\n', (16039, 16053), False, 'import itertools\n'), ((16414, 16440), 'numpy.array', 'np.array', (['self._source_col'], {}), '(self._source_col)\n', (16422, 16440), True, 'import numpy as np\n'), ((23366, 23388), 'numpy.unique', 'np.unique', (['self[param]'], {}), '(self[param])\n', (23375, 23388), True, 'import numpy as np\n'), ((23403, 23424), 'numpy.isnan', 'np.isnan', (['self[param]'], {}), '(self[param])\n', (23411, 23424), True, 'import numpy as np\n'), ((33708, 33808), 'logging.debug', 'logging.debug', (["('Setting value for rows %d:%d = %s' % (set_vals.prev_match_on_row,\n up_to_row, v))"], {}), "('Setting value for rows %d:%d = %s' % (set_vals.\n prev_match_on_row, up_to_row, v))\n", (33721, 33808), False, 'import logging\n'), ((34461, 34497), 're.search', 're.search', (['comment_regex', 'commentstr'], {}), '(comment_regex, commentstr)\n', (34470, 34497), False, 'import re\n'), ((1215, 1250), 'os.path.join', 'os.path.join', (['path', '"""snapshot.json"""'], {}), "(path, 'snapshot.json')\n", (1227, 
1250), False, 'import os\n'), ((1356, 1394), 'os.path.join', 'os.path.join', (['path', '"""snapshot.json.gz"""'], {}), "(path, 'snapshot.json.gz')\n", (1368, 1394), False, 'import os\n'), ((1738, 1750), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1747, 1750), False, 'import json\n'), ((6218, 6259), 'os.path.join', 'os.path.join', (['path', '"""tabular_data.dat.gz"""'], {}), "(path, 'tabular_data.dat.gz')\n", (6230, 6259), False, 'import os\n'), ((6359, 6375), 'os.scandir', 'os.scandir', (['path'], {}), '(path)\n', (6369, 6375), False, 'import os\n'), ((7221, 7246), 'os.path.join', 'os.path.join', (['path', 'fname'], {}), '(path, fname)\n', (7233, 7246), False, 'import os\n'), ((14206, 14235), 'numpy.concatenate', 'np.concatenate', (['unmasked[dim]'], {}), '(unmasked[dim])\n', (14220, 14235), True, 'import numpy as np\n'), ((14723, 14770), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['self._source_col'], {}), '(self._source_col)\n', (14752, 14770), False, 'import itertools\n'), ((26058, 26103), 'numpy.concatenate', 'np.concatenate', (['(start_indices, stop_indices)'], {}), '((start_indices, stop_indices))\n', (26072, 26103), True, 'import numpy as np\n'), ((26444, 26489), 'numpy.concatenate', 'np.concatenate', (['(start_indices, stop_indices)'], {}), '((start_indices, stop_indices))\n', (26458, 26489), True, 'import numpy as np\n'), ((1302, 1314), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1311, 1314), False, 'import json\n'), ((1446, 1458), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1455, 1458), False, 'import json\n'), ((3787, 3948), 'logging.info', 'logging.info', (['"""Column %s appears to contain timestamps. Converting them to seconds since Unix epoch. (Disable by setting convert_timestamps=False.)"""', 'i'], {}), "(\n 'Column %s appears to contain timestamps. Converting them to seconds since Unix epoch. 
(Disable by setting convert_timestamps=False.)'\n , i)\n", (3799, 3948), False, 'import logging\n'), ((6483, 6504), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (6498, 6504), False, 'import os\n'), ((26176, 26193), 'numpy.where', 'np.where', (['(dx != 0)'], {}), '(dx != 0)\n', (26184, 26193), True, 'import numpy as np\n'), ((24994, 25004), 'numpy.abs', 'np.abs', (['dx'], {}), '(dx)\n', (25000, 25004), True, 'import numpy as np\n'), ((13788, 13805), 'logging.warn', 'logging.warn', (['msg'], {}), '(msg)\n', (13800, 13805), False, 'import logging\n'), ((14072, 14089), 'logging.warn', 'logging.warn', (['msg'], {}), '(msg)\n', (14084, 14089), False, 'import logging\n'), ((25497, 25527), 'numpy.where', 'np.where', (['(dx[1:] * dx[:-1] < 0)'], {}), '(dx[1:] * dx[:-1] < 0)\n', (25505, 25527), True, 'import numpy as np\n')] |
import numbers
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from numpy import asarray
import numpy as np
from numpy.lib.stride_tricks import as_strided
from skimage import feature
from skimage.filters import threshold_otsu
from sklearn.utils import check_random_state, check_array
from torch import sqrt
from torch.utils.data import DataLoader, ConcatDataset, Dataset
from torchvision.datasets import ImageFolder
import matplotlib.pyplot as plt
from tqdm import tqdm
def train_val_split(data, train_ratio=0.9):
    """Split a sequence into (train, validation) partitions by ratio."""
    cutoff = int(train_ratio * len(data))
    return data[:cutoff], data[cutoff:]
def binary(img):
    """Binarize a PIL image via Otsu thresholding of its grayscale version.

    Pixels darker than the Otsu cutoff become 255, the rest 0, and the
    result is returned in 1-bit mode.
    """
    gray = img.convert('L')
    cutoff = threshold_otsu(asarray(gray))
    return gray.point(lambda px: 255 if px < cutoff else 0, '1')
class Binary(object):
    """Torchvision-style transform wrapping the module-level binary() helper."""

    def __call__(self, img):
        # Delegate to the standalone Otsu binarization function.
        return binary(img)
def squeeze_weights(m):
    """Collapse a layer's input channels to one by summing kernel weights in place."""
    summed = m.weight.data.sum(dim=1)
    m.weight.data = summed[:, None]
    m.in_channels = 1
def change_out_features(m, classes):
    """Set a layer's reported output feature count and return the layer.

    NOTE(review): only the ``out_features`` attribute is updated; the weight
    tensors are not resized -- confirm callers rebuild the layer if needed.
    """
    m.out_features = classes
    return m
def init_weights(m):
    """Xavier-initialize weights and set biases to 0.01 on Linear modules.

    Intended for use with ``model.apply(init_weights)``; non-Linear modules
    are left untouched.
    """
    # isinstance (rather than type(m) == nn.Linear) is the idiomatic type
    # check and also covers nn.Linear subclasses, which share weight/bias.
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.01)
def dataset_mean_and_std(train_path, test_path):
    """Compute per-channel mean and std over an ImageFolder train+test set.

    Both paths must follow the torchvision ImageFolder layout (one sub-folder
    per label).  The std returned is the sqrt of the average per-image
    variance, i.e. an approximation rather than the exact global std.
    """
    to_tensor = transforms.Compose([
        transforms.ToTensor(),
    ])
    combined = ConcatDataset([
        ImageFolder(train_path, transform=to_tensor),
        ImageFolder(test_path, transform=to_tensor),
    ])
    loader = DataLoader(combined, batch_size=1)

    count = 0
    mean_acc = 0.0
    var_acc = 0.0
    with tqdm(total=len(loader)) as progress:
        for item in loader:
            images = item[0]
            # Flatten spatial dims: [B, C, H, W] -> [B, C, H*W]
            images = images.view(images.size(0), images.size(1), -1)
            count += images.size(0)
            mean_acc += images.mean(2).sum(0)
            var_acc += images.var(2).sum(0)
            progress.update(1)

    mean_acc /= count
    var_acc /= count
    std = sqrt(var_acc)
    print(mean_acc)
    print(std)
    return mean_acc, std
def _extract_patches(arr, patch_shape=8, extraction_step=1):
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = tuple(slice(None, None, st) for st in extraction_step)
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
def extract_patches_2d(image, patch_size, max_patches=None,
                       random_state=None, stride=1, th=2000):
    """Extract (optionally strided / randomly sampled) 2-D patches from an image.

    Parameters
    ----------
    image : ndarray of shape (h, w) or (h, w, n_channels)
    patch_size : (patch_h, patch_w) tuple
    max_patches : int or float or None
        If given, sample this many patches at random instead of taking all.
    random_state : seed for the random sampler.
    stride : int or (stride_h, stride_w) step between patch origins.
    th : unused here; kept for the (commented-out) clean_patches filtering.

    Returns
    -------
    ndarray of patches, shape (n_patches, p_h, p_w[, n_channels]).
    """
    i_h, i_w = image.shape[:2]
    p_h, p_w = patch_size

    if p_h > i_h:
        raise ValueError("Height of the patch should be less than the height"
                         " of the image.")
    if p_w > i_w:
        raise ValueError("Width of the patch should be less than the width"
                         " of the image.")

    image = check_array(image, allow_nd=True)
    image = image.reshape((i_h, i_w, -1))
    n_colors = image.shape[-1]

    if isinstance(stride, numbers.Number):
        s_h = s_w = stride
    else:
        s_h, s_w = stride
    # Step stride over rows/cols; the color axis is always covered in full,
    # so each patch spans all channels.  (The original also had a dead
    # ``step = stride`` assignment that was unconditionally overwritten.)
    step = (s_h, s_w, n_colors)

    extracted_patches = _extract_patches(image,
                                         patch_shape=(p_h, p_w, n_colors),
                                         extraction_step=step)

    n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, stride, max_patches)
    if max_patches:
        rng = check_random_state(random_state)
        # BUGFIX: bound the sampler by the actual number of patch origins per
        # axis, (i - p) // s + 1.  The old bound (i - p + 1) // s under-counts
        # when the stride does not evenly divide the range, so the last
        # row/column of patches could never be sampled.
        i_s = rng.randint((i_h - p_h) // s_h + 1, size=n_patches)
        j_s = rng.randint((i_w - p_w) // s_w + 1, size=n_patches)
        patches = extracted_patches[i_s, j_s, 0]
    else:
        patches = extracted_patches

    patches = patches.reshape(-1, p_h, p_w, n_colors)
    # Drop the trailing color axis for single-channel images.
    if patches.shape[-1] == 1:
        patches = patches.reshape((n_patches, p_h, p_w))
    # return clean_patches(patches, th)
    return patches
def _compute_n_patches(i_h, i_w, p_h, p_w, stride, max_patches=None):
if isinstance(stride, numbers.Number):
s_h = stride
s_w = stride
else:
s_h, s_w = stride
n_h = (i_h - p_h) // s_h + 1
n_w = (i_w - p_w) // s_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, numbers.Integral)
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, numbers.Integral)
and max_patches >= all_patches):
return all_patches
elif (isinstance(max_patches, numbers.Real)
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def clean_patches(patches, th=2000):
    """Keep only patches whose Canny edge count exceeds ``th`` (drops blanks)."""
    keep = []
    for idx, patch in enumerate(patches):
        if patch.shape[-1] == 3:
            # Color patch: scale to [0, 1] and average channels before edge detection.
            edge_count = feature.canny((patch / 255).mean(axis=2), sigma=2).sum()
        else:
            edge_count = feature.canny(patch, sigma=2).sum()
        if edge_count > th:
            keep.append(idx)
    return patches[keep]
def get_labels_and_class_counts(labels_list):
    """Return (labels as ndarray, per-class counts) for a list of labels."""
    label_arr = np.array(labels_list)
    counts = np.unique(label_arr, return_counts=True)[1]
    return label_arr, counts
def plot_class_distributions(class_names, train_class_counts,
                             test_class_counts, validation_class_counts):
    """Bar-plot class distributions of train, test and validation sets on shared axes."""
    fig, axes = plt.subplots(3, 1, sharey=True, figsize=(15, 6))
    panels = [
        ('Training dataset distribution', train_class_counts),
        ('Test dataset distribution', test_class_counts),
        ('Validation dataset distribution', validation_class_counts),
    ]
    for axis, (title, counts) in zip(axes, panels):
        axis.bar(class_names, counts)
        axis.set_title(title)
        axis.set_xlabel('Classes')
        axis.set_ylabel('Class counts')
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
    """Samples elements randomly from a given list of indices for imbalanced dataset
    Arguments:
        indices (list, optional): a list of indices
        num_samples (int, optional): number of samples to draw
        callback_get_label func: a callback-like function which takes two arguments - dataset and index
    """

    def __init__(self, dataset, indices=None, num_samples=None, callback_get_label=None):
        # Default to every element of the dataset when no index subset is given.
        self.indices = indices if indices is not None else list(range(len(dataset)))

        # Optional user hook for extracting a label from (dataset, index).
        self.callback_get_label = callback_get_label

        # Draw len(indices) samples per iteration unless told otherwise.
        self.num_samples = num_samples if num_samples is not None else len(self.indices)

        # Count how often each label occurs among the selected indices.
        label_to_count = {}
        for idx in self.indices:
            label = self._get_label(dataset, idx)
            label_to_count[label] = label_to_count.get(label, 0) + 1

        # Weight each sample inversely to its class frequency.
        self.weights = torch.DoubleTensor(
            [1.0 / label_to_count[self._get_label(dataset, idx)]
             for idx in self.indices]
        )

    def _get_label(self, dataset, idx):
        # Resolve the label according to the concrete dataset type.
        if isinstance(dataset, torchvision.datasets.MNIST):
            return dataset.train_labels[idx].item()
        if isinstance(dataset, torchvision.datasets.ImageFolder):
            return dataset.imgs[idx][1]
        if isinstance(dataset, torch.utils.data.Subset):
            return dataset.dataset.imgs[idx][1]
        if self.callback_get_label:
            return self.callback_get_label(dataset, idx)
        if isinstance(dataset, BinColorDataset):
            return dataset.dataset.imgs[idx][1]
        raise NotImplementedError

    def __iter__(self):
        drawn = torch.multinomial(self.weights, self.num_samples, replacement=True)
        return (self.indices[i] for i in drawn)

    def __len__(self):
        return self.num_samples
class BinColorDataset(Dataset):
    """Dataset wrapper yielding both a color-transformed and a binarized view.

    Each item is ``(color_image, binary_image, label)``.  ``bin_transform``
    is applied to the raw image first; ``col_transform`` is then applied to
    the raw image for the color view.
    """

    def __init__(self, dataset, col_transform=None, bin_transform=None):
        self.dataset = dataset          # underlying (image, label) dataset
        self.col_transform = col_transform
        self.bin_transform = bin_transform

    def __getitem__(self, index):
        x1, y1 = self.dataset[index]
        # BUGFIX: x2 was previously left undefined (UnboundLocalError) when
        # bin_transform was None; fall back to the untransformed image.
        x2 = self.bin_transform(x1) if self.bin_transform else x1
        if self.col_transform:
            x1 = self.col_transform(x1)
        return x1, x2, y1

    def __len__(self):
        return len(self.dataset)
# Script entry point: plot class-balance of the firemaker-500 dataset splits.
if __name__ == '__main__':
    # Alternative normalization constants for the ICDAR17 dataset, kept for reference.
    # transform = transforms.Compose([
    #     transforms.ToTensor(),
    #     transforms.Normalize([0.7993, 0.7404, 0.6438], [0.1168, 0.1198, 0.1186]), # icdar17 norm
    #     ])
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.9706, 0.9706, 0.9706], [0.1448, 0.1448, 0.1448]), # firemaker norm
        ])
    # Hard-coded dataset locations (ImageFolder layout: one sub-folder per writer/class).
    train_path = '/home/akshay/PycharmProjects/TFG/datasets/firemaker-500/train'
    val_path = '/home/akshay/PycharmProjects/TFG/datasets/firemaker-500/validation'
    test_path = '/home/akshay/PycharmProjects/TFG/datasets/firemaker-500/test'
    train_data = ImageFolder(train_path, transform=transform)
    val_data = ImageFolder(val_path, transform=transform)
    test_data = ImageFolder(test_path, transform=transform)
    # Count samples per class in each split and visualize them side by side.
    labels, c1 = get_labels_and_class_counts(train_data.targets)
    labels1, c2 = get_labels_and_class_counts(test_data.targets)
    labels2, c3 = get_labels_and_class_counts(val_data.targets)
    plot_class_distributions(train_data.classes, c1, c2, c3)
| [
"torch.utils.data.ConcatDataset",
"sklearn.utils.check_random_state",
"torch.multinomial",
"torch.utils.data.DataLoader",
"torch.sqrt",
"sklearn.utils.check_array",
"numpy.asarray",
"torch.nn.init.xavier_uniform_",
"torch.DoubleTensor",
"torchvision.datasets.ImageFolder",
"numpy.lib.stride_trick... | [((1540, 1584), 'torchvision.datasets.ImageFolder', 'ImageFolder', (['train_path'], {'transform': 'transform'}), '(train_path, transform=transform)\n', (1551, 1584), False, 'from torchvision.datasets import ImageFolder\n'), ((1630, 1673), 'torchvision.datasets.ImageFolder', 'ImageFolder', (['test_path'], {'transform': 'transform'}), '(test_path, transform=transform)\n', (1641, 1673), False, 'from torchvision.datasets import ImageFolder\n'), ((1713, 1751), 'torch.utils.data.ConcatDataset', 'ConcatDataset', (['[train_data, test_data]'], {}), '([train_data, test_data])\n', (1726, 1751), False, 'from torch.utils.data import DataLoader, ConcatDataset, Dataset\n'), ((1765, 1795), 'torch.utils.data.DataLoader', 'DataLoader', (['data'], {'batch_size': '(1)'}), '(data, batch_size=1)\n', (1775, 1795), False, 'from torch.utils.data import DataLoader, ConcatDataset, Dataset\n'), ((2311, 2320), 'torch.sqrt', 'sqrt', (['var'], {}), '(var)\n', (2315, 2320), False, 'from torch import sqrt\n'), ((3096, 3141), 'numpy.lib.stride_tricks.as_strided', 'as_strided', (['arr'], {'shape': 'shape', 'strides': 'strides'}), '(arr, shape=shape, strides=strides)\n', (3106, 3141), False, 'from numpy.lib.stride_tricks import as_strided\n'), ((3633, 3666), 'sklearn.utils.check_array', 'check_array', (['image'], {'allow_nd': '(True)'}), '(image, allow_nd=True)\n', (3644, 3666), False, 'from sklearn.utils import check_random_state, check_array\n'), ((6103, 6124), 'numpy.array', 'np.array', (['labels_list'], {}), '(labels_list)\n', (6111, 6124), True, 'import numpy as np\n'), ((6147, 6184), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (6156, 6184), True, 'import numpy as np\n'), ((6473, 6521), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharey': '(True)', 'figsize': '(15, 6)'}), '(3, 1, sharey=True, figsize=(15, 6))\n', (6485, 6521), True, 'import matplotlib.pyplot as plt\n'), ((10462, 
10506), 'torchvision.datasets.ImageFolder', 'ImageFolder', (['train_path'], {'transform': 'transform'}), '(train_path, transform=transform)\n', (10473, 10506), False, 'from torchvision.datasets import ImageFolder\n'), ((10523, 10565), 'torchvision.datasets.ImageFolder', 'ImageFolder', (['val_path'], {'transform': 'transform'}), '(val_path, transform=transform)\n', (10534, 10565), False, 'from torchvision.datasets import ImageFolder\n'), ((10583, 10626), 'torchvision.datasets.ImageFolder', 'ImageFolder', (['test_path'], {'transform': 'transform'}), '(test_path, transform=transform)\n', (10594, 10626), False, 'from torchvision.datasets import ImageFolder\n'), ((790, 807), 'numpy.asarray', 'asarray', (['gray_img'], {}), '(gray_img)\n', (797, 807), False, 'from numpy import asarray\n'), ((1225, 1258), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['m.weight'], {}), '(m.weight)\n', (1248, 1258), True, 'import torch.nn as nn\n'), ((4218, 4250), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (4236, 4250), False, 'from sklearn.utils import check_random_state, check_array\n'), ((8445, 8472), 'torch.DoubleTensor', 'torch.DoubleTensor', (['weights'], {}), '(weights)\n', (8463, 8472), False, 'import torch\n'), ((1492, 1513), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1511, 1513), True, 'import torchvision.transforms as transforms\n'), ((2918, 2943), 'numpy.array', 'np.array', (['extraction_step'], {}), '(extraction_step)\n', (2926, 2943), True, 'import numpy as np\n'), ((10069, 10090), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (10088, 10090), True, 'import torchvision.transforms as transforms\n'), ((10100, 10172), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.9706, 0.9706, 0.9706]', '[0.1448, 0.1448, 0.1448]'], {}), '([0.9706, 0.9706, 0.9706], [0.1448, 0.1448, 0.1448])\n', (10120, 10172), True, 'import 
torchvision.transforms as transforms\n'), ((2843, 2862), 'numpy.array', 'np.array', (['arr.shape'], {}), '(arr.shape)\n', (2851, 2862), True, 'import numpy as np\n'), ((2865, 2886), 'numpy.array', 'np.array', (['patch_shape'], {}), '(patch_shape)\n', (2873, 2886), True, 'import numpy as np\n'), ((9153, 9220), 'torch.multinomial', 'torch.multinomial', (['self.weights', 'self.num_samples'], {'replacement': '(True)'}), '(self.weights, self.num_samples, replacement=True)\n', (9170, 9220), False, 'import torch\n'), ((5852, 5881), 'skimage.feature.canny', 'feature.canny', (['patch'], {'sigma': '(2)'}), '(patch, sigma=2)\n', (5865, 5881), False, 'from skimage import feature\n')] |
import numpy as np
import keras
from .. import backend
from ..utils import anchors as util_anchors
class Anchors(keras.layers.Layer):
    """Keras layer generating anchor boxes for one feature-pyramid level.

    For every spatial position of the input feature map it emits
    ``len(ratios) * len(scales)`` anchors, shifted according to ``stride``.
    """

    def __init__(self, size, stride, ratios=None, scales=None, *args, **kwargs):
        self.size = size
        self.stride = stride
        self.ratios = ratios
        self.scales = scales

        if ratios is None:
            self.ratios = np.array([0.5, 1, 2], keras.backend.floatx())
        else:
            self.ratios = np.array(self.ratios, keras.backend.floatx())
        if scales is None:
            # BUGFIX: a stray trailing comma previously wrapped this default in
            # a 1-tuple, so len(self.scales) was 1 (num_anchors came out as 3
            # instead of 9) and get_config() would crash on tuple.tolist().
            self.scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)], keras.backend.floatx())
        else:
            self.scales = np.array(self.scales, keras.backend.floatx())

        self.num_anchors = len(self.ratios) * len(self.scales)
        self.anchors = keras.backend.variable(util_anchors.generate_anchors(
            base_size = self.size,
            ratios = self.ratios,
            scales = self.scales
        ))

        super(Anchors, self).__init__(*args, **kwargs)

    def call(self, inputs, **kwargs):
        """Generate shifted anchors for the given feature-map batch."""
        # get height and width as well as number of images
        input_shape = keras.backend.shape(inputs)[:3]

        # generate proposals from bbox deltas and shifted anchors
        anchors = backend.shift(input_shape[1:3], self.stride, self.anchors)
        # Tile so every image in the batch gets the same anchor grid.
        anchors = keras.backend.tile(keras.backend.expand_dims(anchors, axis=0), (input_shape[0], 1, 1))

        return anchors

    def compute_output_shape(self, input_shape):
        """Output is (batch, num_positions * num_anchors, 4) when dims are known."""
        if None not in input_shape[1:]:
            total = np.prod(input_shape[1:3]) * self.num_anchors
            return (input_shape[0], total, 4)
        else:
            return (input_shape[0], None , 4)

    def get_config(self):
        """Serialize layer configuration (Keras layer protocol)."""
        config = super(Anchors, self).get_config()
        config.update({
            'size'   : self.size,
            'stride' : self.stride,
            'ratios' : self.ratios.tolist(),
            'scales' : self.scales.tolist(),
        })
        return config
class RegressBoxes(keras.layers.Layer):
    """Applies predicted regression deltas onto the generated anchor boxes."""

    @staticmethod
    def _coerce(value, name):
        # Accept list/tuple/ndarray; reject anything else with an explicit error.
        if isinstance(value, (list, tuple)):
            return np.array(value)
        if not isinstance(value, np.ndarray):
            raise ValueError('Expected {} to be a np.ndarray, list or tuple. Received: {}'.format(name, type(value)))
        return value

    def __init__(self, mean=None, std=None, *args, **kwargs):
        if mean is None:
            mean = np.array([0, 0, 0, 0])
        if std is None:
            std = np.array([0.2, 0.2, 0.2, 0.2])

        # Normalization constants used to de-standardize the regression output.
        self.mean = self._coerce(mean, 'mean')
        self.std = self._coerce(std, 'std')
        super(RegressBoxes, self).__init__(*args, **kwargs)

    def call(self, inputs, **kwargs):
        anchors, regression = inputs
        return backend.bbox_transform_inv(anchors, regression, mean=self.mean, std=self.std)

    def compute_output_shape(self, input_shape):
        # Output boxes have the same shape as the incoming anchors.
        return input_shape[0]

    def get_config(self):
        config = super(RegressBoxes, self).get_config()
        config.update({
            'mean': self.mean.tolist(),
            'std' : self.std.tolist(),
        })
        return config
class ClipBoxes(keras.layers.Layer):
    """Clips box coordinates to lie within the spatial bounds of the image."""

    def call(self, inputs, **kwargs):
        image, boxes = inputs
        shape = keras.backend.cast(keras.backend.shape(image), keras.backend.floatx())

        # Clamp x-coordinates to [0, width] and y-coordinates to [0, height].
        clipped = [
            backend.clip_by_value(boxes[:, :, 0], 0, shape[2]),
            backend.clip_by_value(boxes[:, :, 1], 0, shape[1]),
            backend.clip_by_value(boxes[:, :, 2], 0, shape[2]),
            backend.clip_by_value(boxes[:, :, 3], 0, shape[1]),
        ]
        return keras.backend.stack(clipped, axis=2)

    def compute_output_shape(self, input_shape):
        # Same shape as the incoming boxes tensor.
        return input_shape[1]
| [
"keras.backend.stack",
"keras.backend.expand_dims",
"keras.backend.floatx",
"keras.backend.shape",
"numpy.array",
"numpy.prod"
] | [((3927, 3972), 'keras.backend.stack', 'keras.backend.stack', (['[x1, y1, x2, y2]'], {'axis': '(2)'}), '([x1, y1, x2, y2], axis=2)\n', (3946, 3972), False, 'import keras\n'), ((1176, 1203), 'keras.backend.shape', 'keras.backend.shape', (['inputs'], {}), '(inputs)\n', (1195, 1203), False, 'import keras\n'), ((1469, 1511), 'keras.backend.expand_dims', 'keras.backend.expand_dims', (['anchors'], {'axis': '(0)'}), '(anchors, axis=0)\n', (1494, 1511), False, 'import keras\n'), ((2313, 2335), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (2321, 2335), True, 'import numpy as np\n'), ((2378, 2408), 'numpy.array', 'np.array', (['[0.2, 0.2, 0.2, 0.2]'], {}), '([0.2, 0.2, 0.2, 0.2])\n', (2386, 2408), True, 'import numpy as np\n'), ((2473, 2487), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (2481, 2487), True, 'import numpy as np\n'), ((2710, 2723), 'numpy.array', 'np.array', (['std'], {}), '(std)\n', (2718, 2723), True, 'import numpy as np\n'), ((3602, 3628), 'keras.backend.shape', 'keras.backend.shape', (['image'], {}), '(image)\n', (3621, 3628), False, 'import keras\n'), ((3630, 3652), 'keras.backend.floatx', 'keras.backend.floatx', ([], {}), '()\n', (3650, 3652), False, 'import keras\n'), ((407, 429), 'keras.backend.floatx', 'keras.backend.floatx', ([], {}), '()\n', (427, 429), False, 'import keras\n'), ((493, 515), 'keras.backend.floatx', 'keras.backend.floatx', ([], {}), '()\n', (513, 515), False, 'import keras\n'), ((713, 735), 'keras.backend.floatx', 'keras.backend.floatx', ([], {}), '()\n', (733, 735), False, 'import keras\n'), ((1671, 1696), 'numpy.prod', 'np.prod', (['input_shape[1:3]'], {}), '(input_shape[1:3])\n', (1678, 1696), True, 'import numpy as np\n'), ((626, 648), 'keras.backend.floatx', 'keras.backend.floatx', ([], {}), '()\n', (646, 648), False, 'import keras\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 13 17:13:23 2019
@author: mavro
"""
#%%
import sys
sys.path.remove ('/opt/ros/kinetic/lib/python2.7/dist-packages')
#%%
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Load the test image and display it with the four source corner points marked.
img = mpimg.imread('../test_images/stopsign.jpg')
plt.imshow(img)
#%%
plt.imshow(img)
# These four points are the src corners used by warp() below.
plt.plot(55, 70, '.')
plt.plot(150, 57, '.')
plt.plot(150, 100, '.')
plt.plot(55, 115, '.')
#%%
def warp(img):
    """Perspective-warp the stop-sign region of ``img`` to a fronto-parallel view.

    Source/destination corner points are hard-coded for the sample image.
    """
    img_size = (img.shape[1], img.shape[0])  # cv2 expects (width, height)
    src = np.float32([[55, 70], [150, 57], [150, 100], [55, 115]])
    dst = np.float32([[50, 30], [120, 30], [120, 100], [50, 100]])
    M = cv2.getPerspectiveTransform(src, dst)
    # NOTE: the inverse transform (dst -> src, ``Minv``) was computed here but
    # never used; removed as dead code.
    warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
    return warped
# Warp the image and show the source vs. warped result side by side.
warped_img=warp(img)
f, (ax1, ax2) = plt.subplots(2,1,figsize=(20,10))
ax1.set_title('source image')
ax1.imshow(img)
ax2.set_title('dest image')
ax2.imshow(warped_img)
"matplotlib.image.imread",
"sys.path.remove",
"cv2.warpPerspective",
"matplotlib.pyplot.plot",
"cv2.getPerspectiveTransform",
"matplotlib.pyplot.imshow",
"numpy.float32",
"matplotlib.pyplot.subplots"
] | [((122, 185), 'sys.path.remove', 'sys.path.remove', (['"""/opt/ros/kinetic/lib/python2.7/dist-packages"""'], {}), "('/opt/ros/kinetic/lib/python2.7/dist-packages')\n", (137, 185), False, 'import sys\n'), ((293, 336), 'matplotlib.image.imread', 'mpimg.imread', (['"""../test_images/stopsign.jpg"""'], {}), "('../test_images/stopsign.jpg')\n", (305, 336), True, 'import matplotlib.image as mpimg\n'), ((337, 352), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (347, 352), True, 'import matplotlib.pyplot as plt\n'), ((358, 373), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (368, 373), True, 'import matplotlib.pyplot as plt\n'), ((374, 395), 'matplotlib.pyplot.plot', 'plt.plot', (['(55)', '(70)', '"""."""'], {}), "(55, 70, '.')\n", (382, 395), True, 'import matplotlib.pyplot as plt\n'), ((396, 418), 'matplotlib.pyplot.plot', 'plt.plot', (['(150)', '(57)', '"""."""'], {}), "(150, 57, '.')\n", (404, 418), True, 'import matplotlib.pyplot as plt\n'), ((419, 442), 'matplotlib.pyplot.plot', 'plt.plot', (['(150)', '(100)', '"""."""'], {}), "(150, 100, '.')\n", (427, 442), True, 'import matplotlib.pyplot as plt\n'), ((443, 465), 'matplotlib.pyplot.plot', 'plt.plot', (['(55)', '(115)', '"""."""'], {}), "(55, 115, '.')\n", (451, 465), True, 'import matplotlib.pyplot as plt\n'), ((863, 899), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(20, 10)'}), '(2, 1, figsize=(20, 10))\n', (875, 899), True, 'import matplotlib.pyplot as plt\n'), ((535, 591), 'numpy.float32', 'np.float32', (['[[55, 70], [150, 57], [150, 100], [55, 115]]'], {}), '([[55, 70], [150, 57], [150, 100], [55, 115]])\n', (545, 591), True, 'import numpy as np\n'), ((593, 649), 'numpy.float32', 'np.float32', (['[[50, 30], [120, 30], [120, 100], [50, 100]]'], {}), '([[50, 30], [120, 30], [120, 100], [50, 100]])\n', (603, 649), True, 'import numpy as np\n'), ((649, 686), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src', 'dst'], 
{}), '(src, dst)\n', (676, 686), False, 'import cv2\n'), ((696, 733), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['dst', 'src'], {}), '(dst, src)\n', (723, 733), False, 'import cv2\n'), ((745, 806), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'M', 'img_size'], {'flags': 'cv2.INTER_LINEAR'}), '(img, M, img_size, flags=cv2.INTER_LINEAR)\n', (764, 806), False, 'import cv2\n')] |
##################################################
# <NAME> | CMU
# Python classifier
##################################################
# imports
from matplotlib import pyplot as plt
import numpy as np
import os
import csv
import math
##################################################
# Helper Functions
##################################################
# format number strings into integer values
def format_numbers(list, type = 'int'):
    """Convert numeric strings in ``list`` to int or float IN PLACE.

    ``type`` selects int conversion ('int', the default) or float (any other
    value).  Elements that fail conversion (e.g. header text) are left
    untouched.  Returns None; the list is mutated.

    NOTE: the parameter names shadow the ``list``/``type`` builtins; they are
    kept unchanged for backward compatibility with existing callers.
    """
    # Removed the unused ``result = []`` accumulator and the unused
    # ``as e`` binding from the original.
    for (i, elem) in enumerate(list):
        try:
            if type == 'int':
                list[i] = int(elem)
            else:
                list[i] = float(elem)
        except Exception:
            # Non-numeric entry -- leave it as-is and keep going.
            continue
# parse data into nested list format with number/type arguments
def parse_data(filename, header = False):
    """Load a CSV file into a transposed numpy array (one row per column).

    Values are converted to floats in place via format_numbers(); the
    rot90 + flip combination amounts to transposing the parsed table so each
    feature column becomes a row.  The table is printed before returning.
    """
    rows = []
    with open(filename) as fh:
        reader = csv.reader(fh)
        if header:
            next(reader, None)  # Skip the header row.
        for record in reader:
            format_numbers(record, 'float')
            rows.append(record)
    table = np.array(rows)
    table = np.rot90(table, axes=(0, 1))
    table = np.flip(table, 0)
    print(table)
    return table
# Loss functions for calculating cost
def squared_error(pred, target):
    """Half the squared difference between prediction and target."""
    diff = pred - target
    return diff * diff / 2
# TODO: placeholder -- intended to take predictions X and targets y.
def cross_entropy(X, y):
    """Cross-entropy loss (not implemented; currently returns None)."""
    pass
# generates target vectors
def target_vector(target, num_labels):
    """One-hot encode ``target`` as a float vector of length ``num_labels``."""
    encoding = np.zeros(num_labels)
    encoding[target] = 1
    return encoding
# activation function that returns a value between 0 and 1
def sigmoid(z):
    """Logistic activation squashing z into the open interval (0, 1)."""
    return 1.0 / (1.0 + np.exp(-z))
# activation function that returns a value between -1 and 1
def tanh(z):
    """Hyperbolic tangent activation, range (-1, 1); thin wrapper over numpy."""
    return np.tanh(z)
# activation function that returns the identity of input or 0 if less than 0
def relu(z):
    """Rectified linear unit: identity for positive z, zero otherwise (scalar)."""
    return z if z > 0 else 0
# activation function for multiclass outputs | returns probability measure
def softmax(args):
    """Numerically stable softmax: exponentiate and normalize to probabilities.

    BUGFIX: subtracting the max before exponentiating is mathematically a
    no-op but prevents np.exp overflow (inf/NaN output) for large logits.
    """
    shifted = np.exp(args - np.max(args))
    return shifted / np.sum(shifted)
# used for derivatives of softmax function
def kronecker_delta(i, j):
    """Return 1 when i equals j, else 0 (used in softmax derivatives)."""
    return 1 if i == j else 0
def predict(a1, w1, b):
    """Single-neuron forward pass: sigmoid(a1 * w1 + b)."""
    pre_activation = a1 * w1 + b
    return sigmoid(pre_activation)
# used for random generation of weights and biases (between -1 and 1)
def generate_weights(hidden_layers, layer_width, num_inputs):
    """Draw a random (layer_width x num_inputs) weight matrix per hidden layer.

    Returns an array of shape (hidden_layers, layer_width, num_inputs)
    sampled from the standard normal distribution.
    """
    return np.array([np.random.randn(layer_width, num_inputs)
                     for _ in range(hidden_layers)])
def generate_bias(hidden_layers, layer_width):
    """Draw a random (hidden_layers x layer_width) bias matrix from N(0, 1).

    BUGFIX: the original looped ``hidden_layers`` times, regenerating the
    same full matrix on every iteration, and raised NameError when
    ``hidden_layers == 0``.  A single draw is equivalent and robust.
    """
    return np.random.randn(hidden_layers, layer_width)
def forward_propagate(layer_width, features, weight_matrix, bias_matrix):
    """Compute one hidden layer: sigmoid(w . features + b) per neuron.

    Only the first layer of the weight and bias matrices is used.
    """
    activations = []
    for neuron in range(layer_width):
        w = weight_matrix[0][neuron]
        b = bias_matrix[0][neuron]
        z = np.add(np.dot(w, features), b)
        activations.append(sigmoid(z))
    return np.array(activations)
##################################################
# Main
##################################################
# initialize data from csv file
filename = 'iris.csv'
test_data = parse_data(filename, True)  # True: skip the CSV header row
# network hyper-parameters used by train_model below
hidden_layers = 1
num_labels = 3
layer_width = 3
# w1 = np.random.randn()
# b = np.random.randn()
#print(test_data)
def train_model(data):
    """One forward pass over *data*: draw random weights/biases, push every
    sample through the hidden layer, and print the total squared-error loss.

    Each row of *data* holds the features followed by the target label.
    Uses the module-level hyper-parameters hidden_layers / layer_width.
    """
    num_inputs = len(data[0]) - 1
    weight_matrix = generate_weights(hidden_layers, layer_width, num_inputs)
    print(weight_matrix, 'w')
    bias_matrix = generate_bias(hidden_layers, layer_width)
    # forward-propagate every sample, remembering its target label
    targets = []
    hidden_neurons = []
    for sample in data:
        targets.append(sample[-1])
        features = np.delete(sample, -1)
        hidden_neurons.append(
            forward_propagate(layer_width, features, weight_matrix, bias_matrix))
    hidden_neurons = np.array(hidden_neurons)
    # softmax probabilities and argmax predictions, one per sample
    result_softmax = []
    predictions = []
    for neurons in hidden_neurons:
        result_softmax.append(softmax(neurons))
        predictions.append(np.argmax(neurons))
    result_softmax = np.array(result_softmax)
    # accumulate the squared-error loss over all samples
    total_loss = 0
    for prediction, target in zip(predictions, targets):
        total_loss += squared_error(prediction, target)
    print(total_loss)
if __name__ == '__main__':
    # train on the iris data loaded at import time
    train_model(test_data)
| [
"numpy.flip",
"numpy.tanh",
"csv.reader",
"numpy.random.randn",
"numpy.argmax",
"numpy.zeros",
"numpy.rot90",
"numpy.array",
"numpy.exp",
"numpy.dot",
"numpy.add",
"numpy.delete"
] | [((1051, 1067), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (1059, 1067), True, 'import numpy as np\n'), ((1081, 1110), 'numpy.rot90', 'np.rot90', (['result'], {'axes': '(0, 1)'}), '(result, axes=(0, 1))\n', (1089, 1110), True, 'import numpy as np\n'), ((1123, 1141), 'numpy.flip', 'np.flip', (['result', '(0)'], {}), '(result, 0)\n', (1130, 1141), True, 'import numpy as np\n'), ((1412, 1432), 'numpy.zeros', 'np.zeros', (['num_labels'], {}), '(num_labels)\n', (1420, 1432), True, 'import numpy as np\n'), ((1664, 1674), 'numpy.tanh', 'np.tanh', (['z'], {}), '(z)\n', (1671, 1674), True, 'import numpy as np\n'), ((1889, 1901), 'numpy.exp', 'np.exp', (['args'], {}), '(args)\n', (1895, 1901), True, 'import numpy as np\n'), ((2436, 2452), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (2444, 2452), True, 'import numpy as np\n'), ((3026, 3042), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (3034, 3042), True, 'import numpy as np\n'), ((3889, 3913), 'numpy.array', 'np.array', (['hidden_neurons'], {}), '(hidden_neurons)\n', (3897, 3913), True, 'import numpy as np\n'), ((4162, 4186), 'numpy.array', 'np.array', (['result_softmax'], {}), '(result_softmax)\n', (4170, 4186), True, 'import numpy as np\n'), ((857, 873), 'csv.reader', 'csv.reader', (['data'], {}), '(data)\n', (867, 873), False, 'import csv\n'), ((1923, 1935), 'numpy.exp', 'np.exp', (['args'], {}), '(args)\n', (1929, 1935), True, 'import numpy as np\n'), ((2353, 2393), 'numpy.random.randn', 'np.random.randn', (['layer_width', 'num_inputs'], {}), '(layer_width, num_inputs)\n', (2368, 2393), True, 'import numpy as np\n'), ((2554, 2597), 'numpy.random.randn', 'np.random.randn', (['hidden_layers', 'layer_width'], {}), '(hidden_layers, layer_width)\n', (2569, 2597), True, 'import numpy as np\n'), ((2836, 2860), 'numpy.dot', 'np.dot', (['weight', 'features'], {}), '(weight, features)\n', (2842, 2860), True, 'import numpy as np\n'), ((2873, 2898), 'numpy.add', 'np.add', 
(['dot_product', 'bias'], {}), '(dot_product, bias)\n', (2879, 2898), True, 'import numpy as np\n'), ((3719, 3738), 'numpy.delete', 'np.delete', (['elem', '(-1)'], {}), '(elem, -1)\n', (3728, 3738), True, 'import numpy as np\n'), ((1567, 1577), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (1573, 1577), True, 'import numpy as np\n'), ((4110, 4138), 'numpy.argmax', 'np.argmax', (['hidden_neurons[i]'], {}), '(hidden_neurons[i])\n', (4119, 4138), True, 'import numpy as np\n')] |
import numpy as np
from skimage.filters import sobel
from skimage.measure import find_contours
from skimage.morphology import binary_closing, binary_opening, dilation
from skimage.transform import rescale
def check_intersection(segments):
    """Drop segments whose midpoint lies inside another segment's bounds.

    segments -- list of (image_fragment, (x_min, x_max, y_min, y_max)) pairs.
    Returns the surviving image fragments (coordinate tuples stripped).
    """
    def check_xy(a11, a12, a21, a22):
        # is the midpoint of interval (a11, a12) inside interval (a21, a22)?
        return a21 <= (a11 + a12) / 2 <= a22
    # smallest fragments first, so nested fragments get removed before
    # the fragments that contain them
    for s_checking in sorted(segments, key=lambda x: x[0].size):
        # NOTE(review): `segments` is mutated (remove) while the inner loop
        # iterates the live list -- confirm this is intentional
        for segment in segments:
            s1 = s_checking[1]
            s2 = segment[1]
            if s1 == s2:
                continue
            if check_xy(s1[0], s1[1], s2[0], s2[1]) and \
                    check_xy(s1[2], s1[3], s2[2], s2[3]):
                # TODO: (translated) "you know" -- original author's placeholder
                try:
                    segments.remove(s_checking)
                except ValueError:
                    # already removed on an earlier match
                    pass
    segments = list(map(lambda x: x[0], segments))
    return segments
def segments_extraction(image):
    """Find candidate text segments in a grayscale *image*.

    :param image: 2-D grayscale array with values in [0, 1]
    :return: list of (binary_fragment, (x_min, x_max, y_min, y_max)) pairs
             in the coordinates of the original (unscaled) image
    """
    # binarize the image (dark pixels become True)
    binary = image < .5
    # width and height
    w, h = image.shape
    # downscale factor so the segment map is at most 800 px on a side
    scale = 800 / max(w, h)
    scaled = rescale(binary, scale)
    w, h = scaled.shape
    window_o = np.ones((1, int(w / 100)))
    window = np.ones((8, 8))
    edges = sobel(scaled)
    open_image = binary_closing(edges, window_o)
    close_image = binary_opening(open_image, window_o)
    # thicken the cleaned-up edges before contour tracing
    dilate = dilation(close_image, window)
    # trace closed contours on the dilated edge map
    contours = find_contours(dilate, .8)
    # list that stores the extracted segments
    segments = []
    for contour in contours:
        # crop the fragment from the full-resolution binary image
        segment = binary[int(min(contour[:, 0]) / scale):
                         int(max(contour[:, 0]) / scale),
                         int(min(contour[:, 1]) / scale):
                         int(max(contour[:, 1]) / scale)]
        # (translated) if the fragment's mean intensity is tiny, there is
        # no useful information in it
        if segment.mean() <= .05:
            continue
        coordinates = min(contour[:, 0]) / scale, max(contour[:, 0]) / scale, \
                      min(contour[:, 1]) / scale, max(contour[:, 1]) / scale
        segments.append((segment, coordinates))
    return segments
def segmentation(img):
    """Extract text segments from *img* and filter out nested duplicates."""
    return check_intersection(segments_extraction(img))
def line_segmentation(segment):
    """Split a binary text *segment* into its individual lines.

    (Translated from the original Russian docstring:) The mean pixel
    intensity of every row is computed; a mean above the threshold means
    the row carries useful information.  Content rows (with one padding
    row above) are collected until a blank row closes the line.

    :param segment: 2-D binary array of one text block
    :return: list of 2-D arrays, one per detected line
    """
    # collected output lines
    lines = []
    # intensity threshold separating blank rows from content rows
    c = 0
    # segment width
    width = segment.shape[1]
    up = down = 0
    brights = [np.mean(line) for line in segment]
    for n, bright in enumerate(brights):
        if bright > c and not up:
            # first content row: the row above becomes the line top
            # NOTE(review): `not up` treats a top at row index 0 as unset,
            # so a line starting at the very first rows may be re-opened --
            # verify against real inputs
            up = n - 1
            down = 0
        if bright <= c and not down and up:
            # first blank row after content: close and store the line
            down = n
            lines.append(segment[up:down, 0:width])
            up = 0
    return lines
def symbol_segmentation(line):
    """Split a binary text *line* into symbols and inter-word spaces.

    Scans the mean brightness of each pixel column: bright runs are
    symbols, dark runs are gaps; gaps wider than the mean gap are treated
    as word separators.

    :param line: 2-D binary array of one text line
    :return: tuple of (list of symbol arrays with ' ' inserted before new
             words and a newline marker appended; accumulated symbol height)
    """
    # line width (number of pixel columns)
    line_width = line.shape[1]
    # mean brightness of each pixel column of the line fragment
    mean_bright_l = [np.mean(line[:, __]) for __ in range(line_width)]
    # threshold by which words and symbols are separated
    c = 0
    # temporary coordinates for tracking the current symbol and space
    space_l = space_r = symbol_l = symbol_r = 0
    # width of the most recent space
    space = None
    # accumulator for the mean space width
    mean_space = 0
    # detected symbols; list that gets returned
    symbols, ret_words = [], []
    # TODO: (translated) if there are no spaces, nothing is found
    for n, bright in enumerate(mean_bright_l):
        if not space_l and bright <= c:
            space_l, space_r = n, 0
        elif space_l and not space_r and bright > c:
            space = n - space_l
            mean_space += space
            space_l, space_r = 0, n
        if not symbol_l and not space_l and bright > c:
            symbol_l, symbol_r = n, 0
        elif symbol_l and not symbol_r and bright <= c:
            # (start of word, symbol width, space length before liter)
            symbols.append((symbol_l, n - symbol_l, space))
            symbol_l, symbol_r = 0, n
    # NOTE(review): raises ZeroDivisionError when fewer than three symbols
    # are found -- confirm inputs always contain several symbols
    mean_space /= len(symbols) - 2
    mean_height = 0
    for symbol in symbols:
        start, end, space = symbol
        if not space < mean_space:
            ret_words.append(' ')
        # NOTE(review): the row slice uses line_width (column count) as the
        # row bound; slicing clamps, so this takes all rows as long as the
        # line is not taller than it is wide -- verify intent
        symbol = allocate_symbol(line[0:line_width, start:start + end])
        mean_height += symbol.shape[0]
        ret_words.append(symbol)
    return ret_words + ['\n'], mean_height
def allocate_symbol(symbol):
    """Trim blank rows from the top and bottom of a symbol fragment.

    When a line is extracted, symbols sit in different vertical positions
    ("registers"), so each one is cropped vertically here, keeping one
    blank padding row above when available.

    Fixes over the original implementation:
    * when the very first row already contained content, ``up`` stayed 0
      and ``symbol[up - 1:...]`` silently became ``symbol[-1:...]``,
      returning only the last row -- the start index is now clamped to 0;
    * an all-blank fragment raised IndexError -- it now yields an empty
      slice;
    * the threshold variable was named with a Cyrillic letter.

    :param symbol: 2-D array of one symbol
    :return: vertically cropped view of *symbol*
    """
    # mean brightness of every row of the fragment
    row_means = [np.mean(row) for row in symbol]
    up, down = 0, len(symbol) - 1
    threshold = 0
    while up < len(symbol) and row_means[up] <= threshold:
        up += 1
    while down >= 0 and row_means[down] <= threshold:
        down -= 1
    # clamp so a symbol touching the top row keeps its first row
    return symbol[max(up - 1, 0):down + 1, :]
| [
"skimage.morphology.binary_opening",
"skimage.morphology.binary_closing",
"skimage.transform.rescale",
"numpy.ones",
"skimage.filters.sobel",
"numpy.mean",
"skimage.measure.find_contours",
"skimage.morphology.dilation"
] | [((1173, 1195), 'skimage.transform.rescale', 'rescale', (['binary', 'scale'], {}), '(binary, scale)\n', (1180, 1195), False, 'from skimage.transform import rescale\n'), ((1277, 1292), 'numpy.ones', 'np.ones', (['(8, 8)'], {}), '((8, 8))\n', (1284, 1292), True, 'import numpy as np\n'), ((1306, 1319), 'skimage.filters.sobel', 'sobel', (['scaled'], {}), '(scaled)\n', (1311, 1319), False, 'from skimage.filters import sobel\n'), ((1337, 1368), 'skimage.morphology.binary_closing', 'binary_closing', (['edges', 'window_o'], {}), '(edges, window_o)\n', (1351, 1368), False, 'from skimage.morphology import binary_closing, binary_opening, dilation\n'), ((1387, 1423), 'skimage.morphology.binary_opening', 'binary_opening', (['open_image', 'window_o'], {}), '(open_image, window_o)\n', (1401, 1423), False, 'from skimage.morphology import binary_closing, binary_opening, dilation\n'), ((1443, 1472), 'skimage.morphology.dilation', 'dilation', (['close_image', 'window'], {}), '(close_image, window)\n', (1451, 1472), False, 'from skimage.morphology import binary_closing, binary_opening, dilation\n'), ((1494, 1520), 'skimage.measure.find_contours', 'find_contours', (['dilate', '(0.8)'], {}), '(dilate, 0.8)\n', (1507, 1520), False, 'from skimage.measure import find_contours\n'), ((2879, 2892), 'numpy.mean', 'np.mean', (['line'], {}), '(line)\n', (2886, 2892), True, 'import numpy as np\n'), ((3395, 3415), 'numpy.mean', 'np.mean', (['line[:, __]'], {}), '(line[:, __])\n', (3402, 3415), True, 'import numpy as np\n'), ((5094, 5104), 'numpy.mean', 'np.mean', (['_'], {}), '(_)\n', (5101, 5104), True, 'import numpy as np\n')] |
# Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
import unittest
import numpy as np
from Orange.data import Table
from Orange.regression import MeanLearner
class TestMeanLearner(unittest.TestCase):
    """Tests for the MeanLearner baseline regressor."""

    @classmethod
    def setUpClass(cls):
        cls.learn = MeanLearner()

    def test_mean(self):
        # predictions must equal the plain average of the training targets
        nrows, ncols = 1000, 10
        features = np.random.randint(1, 4, (nrows, ncols))
        targets = np.random.randint(0, 5, (nrows, 1)) / 3.0
        model = self.learn(Table(features, targets))
        expected = np.average(targets)
        predictions = model(np.random.randint(1, 4, (nrows, ncols)))
        self.assertTrue(np.allclose(predictions, expected))

    def test_weights(self):
        # with instance weights, predictions follow the weighted average
        nrows, ncols = 100, 10
        features = np.random.randint(1, 4, (nrows, ncols))
        targets = np.random.randint(0, 5, (nrows, 1)) / 3.0
        heavy = 1
        weights = ((targets == heavy) * 123 + 1.0) / 124.0
        model = self.learn(Table(features, targets, W=weights))
        expected = np.average(targets, weights=weights)
        predictions = model(np.random.randint(1, 4, (nrows, ncols)))
        self.assertTrue(np.allclose(predictions, expected))

    def test_empty(self):
        # an empty training set falls back to predicting zero
        autompg = Table("auto-mpg")
        model = self.learn(autompg[:0])
        self.assertEqual(model(autompg[0]), 0)

    def test_discrete(self):
        # a discrete class variable is rejected by the regressor
        self.assertRaises(ValueError, self.learn, Table("iris"))
| [
"numpy.average",
"numpy.allclose",
"Orange.regression.MeanLearner",
"numpy.random.randint",
"Orange.data.Table"
] | [((314, 327), 'Orange.regression.MeanLearner', 'MeanLearner', ([], {}), '()\n', (325, 327), False, 'from Orange.regression import MeanLearner\n'), ((406, 445), 'numpy.random.randint', 'np.random.randint', (['(1)', '(4)', '(nrows, ncols)'], {}), '(1, 4, (nrows, ncols))\n', (423, 445), True, 'import numpy as np\n'), ((512, 523), 'Orange.data.Table', 'Table', (['x', 'y'], {}), '(x, y)\n', (517, 523), False, 'from Orange.data import Table\n'), ((573, 586), 'numpy.average', 'np.average', (['y'], {}), '(y)\n', (583, 586), True, 'import numpy as np\n'), ((600, 639), 'numpy.random.randint', 'np.random.randint', (['(1)', '(4)', '(nrows, ncols)'], {}), '(1, 4, (nrows, ncols))\n', (617, 639), True, 'import numpy as np\n'), ((793, 832), 'numpy.random.randint', 'np.random.randint', (['(1)', '(4)', '(nrows, ncols)'], {}), '(1, 4, (nrows, ncols))\n', (810, 832), True, 'import numpy as np\n'), ((964, 980), 'Orange.data.Table', 'Table', (['x', 'y'], {'W': 'w'}), '(x, y, W=w)\n', (969, 980), False, 'from Orange.data import Table\n'), ((1034, 1058), 'numpy.average', 'np.average', (['y'], {'weights': 'w'}), '(y, weights=w)\n', (1044, 1058), True, 'import numpy as np\n'), ((1072, 1111), 'numpy.random.randint', 'np.random.randint', (['(1)', '(4)', '(nrows, ncols)'], {}), '(1, 4, (nrows, ncols))\n', (1089, 1111), True, 'import numpy as np\n'), ((1234, 1251), 'Orange.data.Table', 'Table', (['"""auto-mpg"""'], {}), "('auto-mpg')\n", (1239, 1251), False, 'from Orange.data import Table\n'), ((1394, 1407), 'Orange.data.Table', 'Table', (['"""iris"""'], {}), "('iris')\n", (1399, 1407), False, 'from Orange.data import Table\n'), ((458, 493), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)', '(nrows, 1)'], {}), '(0, 5, (nrows, 1))\n', (475, 493), True, 'import numpy as np\n'), ((685, 711), 'numpy.allclose', 'np.allclose', (['y2', 'true_mean'], {}), '(y2, true_mean)\n', (696, 711), True, 'import numpy as np\n'), ((845, 880), 'numpy.random.randint', 'np.random.randint', (['(0)', 
'(5)', '(nrows, 1)'], {}), '(0, 5, (nrows, 1))\n', (862, 880), True, 'import numpy as np\n'), ((1157, 1187), 'numpy.allclose', 'np.allclose', (['y2', 'expected_mean'], {}), '(y2, expected_mean)\n', (1168, 1187), True, 'import numpy as np\n')] |
from physDBD import Params0Gauss, ImportHelper, Params0GaussTraj
import numpy as np
import os
import tensorflow as tf
class TestParams0Gauss:
    """Tests for Params0Gauss import/export and its TensorFlow inputs."""
    # Gillespie SSA trajectory files used as the shared test fixture
    fnames = [
        "../data_test/0000.txt",
        "../data_test/0001.txt",
        "../data_test/0002.txt",
        "../data_test/0003.txt",
        "../data_test/0004.txt"
    ]
    # species tracked in the SSA output
    species = ["ca2i","ip3"]
    # number of visible species
    nv = 2
    def import_params(self, time: float) -> Params0Gauss:
        """Load Params0Gauss from the fixture SSA files at *time*."""
        data = ImportHelper.import_gillespie_ssa_at_time(
            fnames=self.fnames,
            time=time,
            species=self.species
            )
        params = Params0Gauss.fromData(data)
        return params
    def create_params_traj(self) -> Params0GaussTraj:
        """Build a six-point trajectory from times 0.2 through 0.7."""
        return Params0GaussTraj(
            times=np.array([0.2,0.3,0.4,0.5,0.6,0.7]),
            params0_traj=[
                self.import_params(0.2),
                self.import_params(0.3),
                self.import_params(0.4),
                self.import_params(0.5),
                self.import_params(0.6),
                self.import_params(0.7)
                ]
            )
    def test_params(self):
        # smoke test: importing a single time point must not raise
        params = self.import_params(0.4)
    def test_export(self):
        # round-trip: export to file, re-import, compare element-wise
        pt = self.create_params_traj()
        fname = "cache_params.txt"
        pt.export(fname)
        # import back
        pt_back = Params0GaussTraj.fromFile(fname,nv=self.nv)
        # Check
        assert len(pt.params0_traj) == len(pt_back.params0_traj)
        for i in range(0,len(pt.params0_traj)):
            assert pt.params0_traj[i] == pt_back.params0_traj[i]
        # clean up the cache file written above
        if os.path.exists(fname):
            os.remove(fname)
    def test_tf_input(self):
        # single time point: TF input tensors must match the raw params
        params = self.import_params(0.4)
        input0 = params.get_tf_input(tpt=0)
        tf.debugging.assert_equal(tf.constant(params.mu_v, dtype="float32"), input0["mu_v"].astype("float32"))
        tf.debugging.assert_equal(tf.constant(params.chol_v, dtype="float32"), input0["chol_v"].astype("float32"))
        # full trajectory: one input per transition (len(times) - 1)
        pt = self.create_params_traj()
        inputs = pt.get_tf_inputs()
        assert len(inputs["mu_v"]) == len(pt.times)-1
        for i in range(0,len(inputs["mu_v"])):
            tf.debugging.assert_equal(tf.constant(pt.params0_traj[i].mu_v, dtype="float32"), inputs["mu_v"][i].astype("float32"))
tf.debugging.assert_equal(tf.constant(pt.params0_traj[i].chol_v, dtype="float32"), inputs["chol_v"][i].astype("float32")) | [
"os.remove",
"physDBD.Params0Gauss.fromData",
"os.path.exists",
"physDBD.Params0GaussTraj.fromFile",
"physDBD.ImportHelper.import_gillespie_ssa_at_time",
"tensorflow.constant",
"numpy.array"
] | [((447, 545), 'physDBD.ImportHelper.import_gillespie_ssa_at_time', 'ImportHelper.import_gillespie_ssa_at_time', ([], {'fnames': 'self.fnames', 'time': 'time', 'species': 'self.species'}), '(fnames=self.fnames, time=time,\n species=self.species)\n', (488, 545), False, 'from physDBD import Params0Gauss, ImportHelper, Params0GaussTraj\n'), ((606, 633), 'physDBD.Params0Gauss.fromData', 'Params0Gauss.fromData', (['data'], {}), '(data)\n', (627, 633), False, 'from physDBD import Params0Gauss, ImportHelper, Params0GaussTraj\n'), ((1340, 1384), 'physDBD.Params0GaussTraj.fromFile', 'Params0GaussTraj.fromFile', (['fname'], {'nv': 'self.nv'}), '(fname, nv=self.nv)\n', (1365, 1384), False, 'from physDBD import Params0Gauss, ImportHelper, Params0GaussTraj\n'), ((1591, 1612), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (1605, 1612), False, 'import os\n'), ((1626, 1642), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (1635, 1642), False, 'import os\n'), ((1801, 1842), 'tensorflow.constant', 'tf.constant', (['params.mu_v'], {'dtype': '"""float32"""'}), "(params.mu_v, dtype='float32')\n", (1812, 1842), True, 'import tensorflow as tf\n'), ((1912, 1955), 'tensorflow.constant', 'tf.constant', (['params.chol_v'], {'dtype': '"""float32"""'}), "(params.chol_v, dtype='float32')\n", (1923, 1955), True, 'import tensorflow as tf\n'), ((762, 802), 'numpy.array', 'np.array', (['[0.2, 0.3, 0.4, 0.5, 0.6, 0.7]'], {}), '([0.2, 0.3, 0.4, 0.5, 0.6, 0.7])\n', (770, 802), True, 'import numpy as np\n'), ((2217, 2270), 'tensorflow.constant', 'tf.constant', (['pt.params0_traj[i].mu_v'], {'dtype': '"""float32"""'}), "(pt.params0_traj[i].mu_v, dtype='float32')\n", (2228, 2270), True, 'import tensorflow as tf\n'), ((2347, 2402), 'tensorflow.constant', 'tf.constant', (['pt.params0_traj[i].chol_v'], {'dtype': '"""float32"""'}), "(pt.params0_traj[i].chol_v, dtype='float32')\n", (2358, 2402), True, 'import tensorflow as tf\n')] |
import csv
import cv2
import numpy as np
import pandas as pd
"""
转换 SCUT-FBP55000_v2 数据集到csv格式
参考:
https://bbs.huaweicloud.com/blogs/detail/278704
https://github.com/spytensor/prepare_detection_dataset
pip install opencv-python
wget https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt
wget https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20180205_fp16/res10_300x300_ssd_iter_140000_fp16.caffemodel
/path/to/image,xmin,ymin,xmax,ymax,label
/mfs/dataset/face/0d4c5e4f-fc3c-4d5a-906c-105.jpg,450,154,754,341,face
"""
# legacy Haar cascade (loaded but not used below) and the SSD Caffe face model
face_cascade = cv2.CascadeClassifier("haarcascade_fontalface_default.xml")
prototxt_path = "/opt/opencv/deploy.prototxt"
model_path = "/opt/opencv/res10_300x300_ssd_iter_140000_fp16.caffemodel"
cv_model = cv2.dnn.readNetFromCaffe(prototxt_path, model_path)
image_path = "/opt/data/SCUT-FBP5500_v2/Images/train/face/"
df_label = pd.read_csv("/opt/data/SCUT-FBP5500_v2/All_Ratings.csv")
# average the per-rater scores for each image
df_label = df_label.groupby("Filename").mean()
# (translated) only process women for now (female and male beauty differ a lot)
# NOTE(review): after groupby("Filename").mean() the filename becomes the
# index, so df_label["Filename"] below looks like it would raise KeyError --
# a reset_index() may be missing; verify
df_label = df_label[df_label["Filename"].str.find("F")>=0]
def gen_label(se):
    """Build one CSV label line "path,xmin,ymin,xmax,ymax,rating" for row *se*.

    Runs the SSD face detector on the image named by se["Filename"] and
    keeps the highest-confidence box; falls back to the whole image when
    the detector returns nothing.
    """
    global image_path, cv_model
    image = cv2.imread(image_path + str(se["Filename"]))
    h, w = image.shape[:2]
    # default box: the whole image
    start_x=start_y=0
    end_x = w
    end_y = h
    # (translated) note: here 104.0, 177.0, 123.0 mean b-104, g-177, r-123
    # (translated) strictly, the face-dataset mean image should be subtracted
    # here, not per-image values, when normalizing
    blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300),(104.0, 177.0, 123.0))
    cv_model.setInput(blob)
    output = np.squeeze(cv_model.forward())
    if len(output)>0:
        # (translated) pick the highest-confidence detection
        max_box = output[0, 3:7] * np.array([w, h, w, h])
        max_confidence = output[0, 2]
        for i in range(0, output.shape[0]):
            confidence = output[i, 2]
            if confidence > max_confidence:
                max_box = output[i, 3:7] * np.array([w, h, w, h])
                max_confidence = confidence
        start_x, start_y, end_x, end_y = max_box.astype(int)
    line_str = image_path + str(se["Filename"]) + "," + str(start_x) + "," + str(start_y) + "," + str(end_x) + "," + str(end_y) + "," + str(se["Rating"])
    return line_str
if __name__=="__main__":
    # one CSV line per image; deduplicate and write without quoting
    df_label["csv_str"] = df_label.apply(gen_label, axis=1)
    df_label["csv_str"].drop_duplicates().to_csv("/opt/data/SCUT-FBP5500_v2/Images/train/labels.csv", index=False, header=False, quoting=csv.QUOTE_NONE)
    print("gen label finished.")
| [
"pandas.read_csv",
"cv2.dnn.blobFromImage",
"numpy.array",
"cv2.dnn.readNetFromCaffe",
"cv2.CascadeClassifier"
] | [((612, 671), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_fontalface_default.xml"""'], {}), "('haarcascade_fontalface_default.xml')\n", (633, 671), False, 'import cv2\n'), ((802, 853), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (['prototxt_path', 'model_path'], {}), '(prototxt_path, model_path)\n', (826, 853), False, 'import cv2\n'), ((926, 982), 'pandas.read_csv', 'pd.read_csv', (['"""/opt/data/SCUT-FBP5500_v2/All_Ratings.csv"""'], {}), "('/opt/data/SCUT-FBP5500_v2/All_Ratings.csv')\n", (937, 982), True, 'import pandas as pd\n'), ((1403, 1471), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['image', '(1.0)', '(300, 300)', '(104.0, 177.0, 123.0)'], {}), '(image, 1.0, (300, 300), (104.0, 177.0, 123.0))\n', (1424, 1471), False, 'import cv2\n'), ((1621, 1643), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (1629, 1643), True, 'import numpy as np\n'), ((1851, 1873), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (1859, 1873), True, 'import numpy as np\n')] |
import numpy as np
from PolicyEvaluation import policy_eval
def policy_improvement(env, discount_factor=1.0):
    """
    Policy Improvement Algorithm. Iteratively evaluates and improves a policy
    until an optimal policy is found.
    Args:
        env: The OpenAI environment (expects env.nS states, env.nA actions
            and the transition dict env.P).
        discount_factor: Gamma discount factor applied to successor values.
    Returns:
        A tuple (policy, V).
        policy is the optimal policy, a matrix of shape [S, A] where each state s
        contains a valid probability distribution over actions.
        V is the value function for the optimal policy.
    """
    # Start with a random (uniform) policy
    Policy = np.ones([env.nS, env.nA]) / env.nA
    V = policy_eval(Policy, env, theta=0.01)
    while True:
        for StateIdx, StateName in enumerate(env.P.keys()):
            StateInfo = env.P[StateName]
            ActionValues = np.zeros(env.nA)
            for ActionIdx, ActionName in enumerate(StateInfo.keys()):
                # For now assume that all probabilities are 1
                ActionInfo = StateInfo[ActionName][0]
                Reward = ActionInfo[2]
                NextState = ActionInfo[1]
                NextStateValue = V[NextState]
                ActionValues[ActionIdx] = Reward + discount_factor*NextStateValue
            # greedy improvement: make the policy deterministic in the
            # best one-step-lookahead action
            MaxValueIdx = np.argmax(ActionValues)
            Policy[StateIdx,:] = 0
            Policy[StateIdx,MaxValueIdx] = 1
        VNew = policy_eval(Policy, env, theta=0.01)
        # NOTE(review): exact float equality between successive value
        # functions is the convergence test -- confirm policy_eval is
        # deterministic enough for this to terminate
        if np.all(VNew==V):
            V = VNew
            break
        else:
            V = VNew
    return Policy, V
| [
"numpy.argmax",
"numpy.zeros",
"numpy.ones",
"PolicyEvaluation.policy_eval",
"numpy.all"
] | [((813, 849), 'PolicyEvaluation.policy_eval', 'policy_eval', (['Policy', 'env'], {'theta': '(0.01)'}), '(Policy, env, theta=0.01)\n', (824, 849), False, 'from PolicyEvaluation import policy_eval\n'), ((770, 795), 'numpy.ones', 'np.ones', (['[env.nS, env.nA]'], {}), '([env.nS, env.nA])\n', (777, 795), True, 'import numpy as np\n'), ((1611, 1647), 'PolicyEvaluation.policy_eval', 'policy_eval', (['Policy', 'env'], {'theta': '(0.01)'}), '(Policy, env, theta=0.01)\n', (1622, 1647), False, 'from PolicyEvaluation import policy_eval\n'), ((1659, 1676), 'numpy.all', 'np.all', (['(VNew == V)'], {}), '(VNew == V)\n', (1665, 1676), True, 'import numpy as np\n'), ((997, 1013), 'numpy.zeros', 'np.zeros', (['env.nA'], {}), '(env.nA)\n', (1005, 1013), True, 'import numpy as np\n'), ((1483, 1506), 'numpy.argmax', 'np.argmax', (['ActionValues'], {}), '(ActionValues)\n', (1492, 1506), True, 'import numpy as np\n')] |
import requests, zipfile, io, os, re
import pandas as pd
import numpy as np
import geopandas, astral
import time
from astral.sun import sun
import tabulate
# local working directory and filename prefix for downloaded archives
METEO_FOLDER = r"C:/Users/48604/Documents/semestr5/PAG/pag2/Meteo/"
ZAPIS_ZIP = METEO_FOLDER + r"Meteo_"
# IMGW telemetry archive to download
url = "https://dane.imgw.pl/datastore/getfiledown/Arch/Telemetria/Meteo/2015/Meteo_2015-07.zip"
#!
#change: METEO_FOLDER, url
def get_data(url, pth):
    """Download a Meteo zip archive from *url* and unpack it under *pth*.

    The period string is taken from the archive name in the URL (e.g.
    ".../Meteo_2015-07.zip" -> "<pth>2015-07/").  Extraction happens only
    when the target directory does not exist yet.  Returns the target
    directory path.

    Fixes: the local archive variable previously shadowed the builtin
    ``zip``; the directory check used ``== 0`` instead of ``not``.
    """
    response = requests.get(url)
    archive = zipfile.ZipFile(io.BytesIO(response.content))
    # keep the extension so the pattern matches either .zip or .ZIP
    url_end = url[-4:]
    pattern = "Meteo_(.*?)" + url_end
    substring = re.search(pattern, url).group(1)
    # period substring names the new directory
    path = pth + substring + "/"
    # create the period directory and unpack only when it is missing
    if not os.path.isdir(path):
        os.mkdir(path)
        archive.extractall(path)
    return path
# download + unpack the archive, and remember the auxiliary input paths
path_data = get_data(url, ZAPIS_ZIP)
path_parametry = METEO_FOLDER + "kody_parametr.csv"
path_effacility = METEO_FOLDER + "effacility.geojson"
path_powiaty = METEO_FOLDER + "powiaty/powiaty.shp"
path_wojewodztwa = METEO_FOLDER + "woj/woj.shp"
def read_parametry(path_parametr):
    """Load the parameter-code dictionary CSV (';'-separated, cp1250)."""
    # index_col=False keeps every column as data instead of an index
    return pd.read_csv(path_parametr, sep=';', index_col=False,
                       encoding='cp1250')
def read_data(path_data):
    """Read every telemetry CSV under *path_data* into {filename: DataFrame}.

    Each frame has columns KodSH (int), ParametrSH, Date (tz-aware,
    Europe/Warsaw) and Wartosc (float, decimal comma converted to dot).
    """
    fields = ["KodSH", "ParametrSH", "Date", "Wartosc"]
    data = {}
    #column names; empty dictionary for data from separate csv files in folder
    for filename in os.listdir(path_data):
        #for every file in folder
        dataset_name = pd.read_csv(path_data + filename, sep=';', header=None, names=fields, index_col=False, low_memory=False, dtype={'KodSH': int, 'Wartosc': str}, parse_dates=['Date'])
        #applying value
        #separator=';' - by default ','
        #no header by default
        #names=fields - column names
        #index_col=False - store all data as columns not indexes
        #low_memory=false - way to get rid of different datatypes in columns warning
        dataset_name["Wartosc"] = dataset_name["Wartosc"].str.replace(',','.').astype('float64')
        #replace ',' with '.' and convert string to float
        dataset_name["Date"] = dataset_name["Date"].dt.tz_localize("Europe/Warsaw")
        #setting "Data" column to datetime64[ns, Europe/Warsaw] from datetime64[ns]
        data[filename] = dataset_name
    return data
def read_effacility(path_effacility):
    """Load station metadata (KodSH, City, Lon, Lat) from a GeoJSON file.

    Geometry is reprojected to EPSG:4258 before the coordinates are read.
    """
    # NOTE(review): the file handle is never closed -- a `with` block
    # would be safer
    path = open(path_effacility)
    effacility = geopandas.read_file(path)
    #read geojson
    effacility["geometry"] = effacility["geometry"].to_crs(epsg=4258)
    x = effacility["geometry"].x
    y = effacility["geometry"].y
    data = {"KodSH" : effacility["name"], "City" : effacility["name1"], "Lon" : x, "Lat" : y}
    effacility = pd.DataFrame(data)
    effacility["KodSH"] = effacility["KodSH"].astype('float64')
    #store KodSH as number not string
    return effacility
def f_init_mean(data):
    """Build per-station, per-day index frames for each dataset.

    The daily mean of "Wartosc" is computed only to obtain the
    (KodSH, date) grouping; the value column itself is then dropped,
    leaving an empty frame keyed by station and day.
    """
    init_mean = {}
    for key in data:
        init_mean[key] = data[key].groupby(["KodSH", data[key]["Date"].dt.date])["Wartosc"].mean()
        init_mean[key] = init_mean[key].to_frame()
        init_mean[key].drop(columns = ["Wartosc"], inplace=True)
    return init_mean
def f_sun_info(init_mean, effacility):
    """Attach station coordinates plus dawn/dusk times to each index frame.

    For every (station, day) row, astral computes the local dawn and dusk
    in the Europe/Warsaw timezone.  Returns {key: DataFrame} with columns
    KodSH, Date (tz-aware), City, Lon, Lat, Dawn, Dusk.
    """
    sun_info = {}
    for key in init_mean:
        init_mean[key] = init_mean[key].reset_index("Date")
        #Date as a non index value
        #init_mean[key] = init_mean[key].drop(["24h"], axis=1)
        sun_info[key] = pd.merge(init_mean[key], effacility, on = "KodSH", how = "left")
    astral_info = {}
    for key in sun_info:
        # one dawn/dusk pair per row, computed with astral per station+day
        shp = sun_info[key].shape[0]
        Dawn = list(range(shp))
        Dusk = list(range(shp))
        for k in sun_info[key].index:
            City = astral.LocationInfo(sun_info[key]["City"][k],"Poland", "Europe/Warsaw", sun_info[key]["Lat"][k], sun_info[key]["Lon"][k])
            Dawn[k] = (sun(City.observer, date=sun_info[key]["Date"][k], tzinfo=City.timezone))["dawn"]
            Dusk[k] = (sun(City.observer, date=sun_info[key]["Date"][k], tzinfo=City.timezone))["dusk"]
        data = {"KodSH" : sun_info[key]["KodSH"], "Dawn" : Dawn ,"Dusk" : Dusk}
        astral_info[key] = pd.DataFrame(data)
        sun_info[key] = pd.merge(sun_info[key], astral_info[key], left_index=True, right_index=True)
        # drop the duplicated key column introduced by the index merge
        sun_info[key].drop(["KodSH_y"], axis=1, inplace=True)
        sun_info[key].rename(columns = {"KodSH_x" : "KodSH", "Date" : "Date"}, inplace=True)
        sun_info[key]["Date"] = pd.to_datetime(sun_info[key]["Date"]).dt.tz_localize("Europe/Warsaw")
    return sun_info
def f_day_night(data, sun_info):
    """Label each measurement as day (1) or night (0) using dawn/dusk times.

    Merges the raw data with the per-day sun info and adds a "day/night"
    column; the original full timestamps are restored after the merge.
    """
    day_night = {}
    for key in data:
        date_time = data[key]["Date"]
        #save old datetime
        data[key]["Date"] = data[key]["Date"].dt.date
        #trim Date of time, which is necessary to merge(unwanted conv from datetime64 to object)
        data[key]["Date"] = pd.to_datetime(data[key]["Date"]).dt.tz_localize("Europe/Warsaw")
        #conversion from object to datetime64
        day_night[key] = pd.merge(data[key], sun_info[key], on=["KodSH", "Date"], how="inner")
        #merging data with info about dusk and dawn
        data[key].drop(["Date"], axis=1, inplace=True)
        data[key].insert(2, "Date", date_time)
        day_night[key].drop(["Date"], axis=1, inplace=True)
        day_night[key].insert(2, "Date", date_time)
        #bringing back proper "Date" VALUE
        day_night[key]["day/night"] = np.where((day_night[key]["Date"] >= day_night[key]["Dawn"]) & (day_night[key]["Date"] < day_night[key]["Dusk"]), 1, 0)
        #add column which determins if its day or night
    return day_night
def f_analysis_basic(sun_info, day_night):
    """Compute per-station daily mean/median split into day and night.

    Returns {key: DataFrame} with one row per (station, day) from
    *sun_info* and columns Mean_day, Mean_night, Median_day,
    Median_night (NaN where a station/day had no data for that period).
    """
    analysis_basic = {}
    mean = {}
    mean_day = {}
    mean_night = {}
    median = {}
    median_day = {}
    median_night = {}
    for key in day_night:
        mean[key] = day_night[key].groupby(["KodSH", day_night[key]["Date"].dt.date, day_night[key]["day/night"]], dropna=False)["Wartosc"].mean()
        # NOTE(review): `.to_frame` below lacks parentheses, so it is a
        # no-op attribute access -- reset_index() already yields a frame
        mean[key].to_frame
        mean[key] = mean[key].reset_index()
        #mean group by
        median[key] = day_night[key].groupby(["KodSH", day_night[key]["Date"].dt.date, day_night[key]["day/night"]], dropna=False)["Wartosc"].median()
        median[key].to_frame
        median[key] = median[key].reset_index()
        #median geoup by
        mean_day[key] = mean[key][mean[key]["day/night"] != 0]
        mean_night[key] = mean[key][mean[key]["day/night"] != 1]
        median_day[key] = median[key][median[key]["day/night"] != 0]
        median_night[key] = median[key][median[key]["day/night"] != 1]
        #selecting values for different time of day(loss of nan data)
        mean_day[key] = sun_info[key].merge(mean_day[key], how="left", right_on=["KodSH", "Date"], left_on=["KodSH", sun_info[key]["Date"].dt.date])
        mean_night[key] = sun_info[key].merge(mean_night[key], how="left", right_on=["KodSH", "Date"], left_on=["KodSH", sun_info[key]["Date"].dt.date])
        median_day[key] = sun_info[key].merge(median_day[key], how="left", right_on=["KodSH", "Date"], left_on=["KodSH", sun_info[key]["Date"].dt.date])
        median_night[key] = sun_info[key].merge(median_night[key], how="left", right_on=["KodSH", "Date"], left_on=["KodSH", sun_info[key]["Date"].dt.date])
        #bring nan data back
        mean_day[key].drop(["Date_x", "Dawn", "Dusk", "Date_y", "day/night"], axis=1, inplace=True)
        mean_night[key].drop(["Date_x", "Dawn", "Dusk", "Date_y", "day/night"], axis=1, inplace=True)
        median_day[key].drop(["Date_x", "Dawn", "Dusk", "Date_y", "day/night"], axis=1, inplace=True)
        median_night[key].drop(["Date_x", "Dawn", "Dusk", "Date_y", "day/night"], axis=1, inplace=True)
        mean_day[key].rename(columns = {"Wartosc" : "Mean_day"}, inplace=True)
        mean_night[key].rename(columns = {"Wartosc" : "Mean_night"}, inplace=True)
        median_day[key].rename(columns = {"Wartosc" : "Median_day"}, inplace=True)
        median_night[key].rename(columns = {"Wartosc" : "Median_night"}, inplace=True)
        #basic dataframe maintenance
        mean_day[key] = pd.concat([mean_day[key], mean_night[key]["Mean_night"], median_day[key]["Median_day"], median_night[key]["Median_night"]], axis=1)
        analysis_basic[key] = mean_day[key]
    return analysis_basic
def f_analysis_trim(sun_info, day_night):
    """Trimmed-statistics analysis -- stub, not implemented yet (TODO)."""
    analysis_trim = {}
    return analysis_trim
def f_display_analysis(analysis_basic):
    """Write each analysis frame as a psql-style table to a .txt file.

    One file "analysis_basic_<key-prefix>.txt" is written per dataset key.

    Fixes: the file imports the ``tabulate`` module (``import tabulate``),
    so the function must be called as ``tabulate.tabulate`` -- the
    original called the module object itself, a TypeError at runtime.
    The output file is now closed via a ``with`` block.
    """
    hdrs = ["KodSH", "Date", "City", "Lon", "Lat", "Mean value day", "Mean value night", "Median value day", "Median value night"]
    for key in analysis_basic:
        table = tabulate.tabulate(analysis_basic[key], headers = hdrs, tablefmt = 'psql')
        with open("analysis_basic_" + key[:15] + ".txt", "w") as result:
            result.write(table)
def read_powiaty(path_powiaty):
    """Load county (powiat) polygons and return a two-column GeoDataFrame."""
    raw = geopandas.read_file(path_powiaty)
    # Reproject to ETRS89 (EPSG:4258) so geometries match the station coordinates.
    geometry = raw["geometry"].to_crs(epsg=4258)
    return geopandas.GeoDataFrame({"Powiat": raw["name"], "geometry": geometry})
def read_wojewodztwa(path_wojewodztwa):
    """Load voivodeship polygons and return a two-column GeoDataFrame."""
    raw = geopandas.read_file(path_wojewodztwa)
    # Reproject to ETRS89 (EPSG:4258) so geometries match the station coordinates.
    geometry = raw["geometry"].to_crs(epsg=4258)
    return geopandas.GeoDataFrame({"Wojewodztwo": raw["name"], "geometry": geometry})
def f_merge_stacje_powiaty(effacility, powiaty):
    """Spatially assign every station to the county (powiat) that contains it.

    Returns a plain DataFrame with the station code and its county name.
    """
    stations = geopandas.GeoDataFrame(
        effacility,
        crs="EPSG:4258",
        geometry=geopandas.points_from_xy(effacility["Lon"], effacility["Lat"]),
    )
    joined = stations.sjoin(powiaty, how="inner", predicate="within")
    joined.drop(["geometry"], axis=1, inplace=True)
    return pd.DataFrame({"KodSH": joined["KodSH"], "Powiat": joined["Powiat"]})
def f_merge_stacje_wojewodztwa(effacility, wojewodztwa):
    """Spatially assign every station to the voivodeship that contains it.

    Returns a plain DataFrame with the station code and its voivodeship name.
    """
    stations = geopandas.GeoDataFrame(
        effacility,
        crs="EPSG:4258",
        geometry=geopandas.points_from_xy(effacility["Lon"], effacility["Lat"]),
    )
    joined = stations.sjoin(wojewodztwa, how="inner", predicate="within")
    joined.drop(["geometry"], axis=1, inplace=True)
    return pd.DataFrame({"KodSH": joined["KodSH"], "Wojewodztwo": joined["Wojewodztwo"]})
def f_which_powiat(analysis_basic, stacje_powiaty):
    """Attach the county name to every per-station frame.

    NOTE: mutates *analysis_basic* in place (each value is replaced by the
    merged frame) and returns the same dict object.
    """
    for key, frame in analysis_basic.items():
        analysis_basic[key] = pd.merge(frame, stacje_powiaty, on=["KodSH"], how="left", right_index=False)
    return analysis_basic
def f_analysis_basic_powiat(analysis_basic, which_powiat):
    """Average the day/night statistics per (Date, Powiat) group.

    which_powiat is accepted for interface compatibility but not read here.
    """
    stat_columns = ["Mean_day", "Mean_night", "Median_day", "Median_night"]
    per_powiat = {}
    for key, frame in analysis_basic.items():
        grouped = frame.groupby(["Date", "Powiat"])[stat_columns].mean()
        per_powiat[key] = grouped.reset_index()
    return per_powiat
def f_which_woj(analysis_basic, stacje_woj):
    """Attach the voivodeship name to every per-station frame.

    NOTE: mutates *analysis_basic* in place (each value is replaced by the
    merged frame) and returns the same dict object.
    """
    for key, frame in analysis_basic.items():
        analysis_basic[key] = pd.merge(frame, stacje_woj, on=["KodSH"], how="left", right_index=False)
    return analysis_basic
def f_analysis_basic_woj(analysis_basic, which_woj):
    """Average the day/night statistics per (Date, Wojewodztwo) group.

    which_woj is accepted for interface compatibility but not read here.
    """
    stat_columns = ["Mean_day", "Mean_night", "Median_day", "Median_night"]
    per_woj = {}
    for key, frame in analysis_basic.items():
        grouped = frame.groupby(["Date", "Wojewodztwo"])[stat_columns].mean()
        per_woj[key] = grouped.reset_index()
    return per_woj
def f_wykres_powiat(analysis_basic_powiat, powiat):
    """Plot the aggregated day/night statistics for each requested county.

    analysis_basic_powiat -- mapping key -> DataFrame with "Powiat", "Date"
        and the aggregated value columns.
    powiat -- iterable of county names to plot.
    Returns the plotted frames keyed like the input; when several counties
    match the same key, the last one wins (original behaviour preserved).
    """
    wykres_data = {}
    for p in powiat:
        for key in analysis_basic_powiat:
            data = analysis_basic_powiat[key].loc[analysis_basic_powiat[key]["Powiat"] == p].copy(deep=True)
            # Idiom fix: test emptiness directly instead of "== False".
            if not data.empty:
                data["Date"] = pd.to_datetime(data["Date"])
                # Index by day-of-month so the x axis shows days.
                data.index = data["Date"].dt.day
                data.drop(["Date"], axis=1, inplace=True)
                data.plot(ylabel="Values", title=p)
                wykres_data[key] = data
    return wykres_data
def f_wykres_woj(analysis_basic_woj, woj):
    """Plot the aggregated day/night statistics for each requested voivodeship.

    analysis_basic_woj -- mapping key -> DataFrame with "Wojewodztwo", "Date"
        and the aggregated value columns.
    woj -- iterable of voivodeship names to plot.
    Returns the plotted frames keyed like the input; when several regions
    match the same key, the last one wins (original behaviour preserved).
    """
    wykres_data = {}
    for w in woj:
        for key in analysis_basic_woj:
            data = analysis_basic_woj[key].loc[analysis_basic_woj[key]["Wojewodztwo"] == w].copy(deep=True)
            # Idiom fix: test emptiness directly instead of "== False".
            if not data.empty:
                data["Date"] = pd.to_datetime(data["Date"])
                # Index by day-of-month so the x axis shows days.
                data.index = data["Date"].dt.day
                data.drop(["Date"], axis=1, inplace=True)
                data.plot(xlabel="Dzień miesiąca", ylabel="Wartosci", title=w + " " + key)
                wykres_data[key] = data
    return wykres_data
def main():
    """Run the full pipeline: load inputs, compute day/night statistics,
    aggregate per county/voivodeship and plot a sample region.

    Relies on module-level path_* constants defined elsewhere in the file.
    Returns 0 on completion.
    """
    start_time = time.time()
    # Load inputs (paths are module-level globals defined elsewhere in the file).
    parametry = read_parametry(path_parametry)
    data = read_data(path_data)
    effacility = read_effacility(path_effacility)
    # Per-station sunrise/sunset context, then split measurements into day/night.
    init_mean = f_init_mean(data)
    sun_info = f_sun_info(init_mean, effacility)
    day_night = f_day_night(data, sun_info)
    analysis_basic = f_analysis_basic(sun_info, day_night)
    analysis_trim = f_analysis_trim(sun_info, day_night)
    # f_display_analysis(analysis_basic)
    # Administrative boundaries and station-to-region assignments.
    powiaty = read_powiaty(path_powiaty)
    wojewodztwa = read_wojewodztwa(path_wojewodztwa)
    stacje_powiaty = f_merge_stacje_powiaty(effacility, powiaty)
    stacje_woj = f_merge_stacje_wojewodztwa(effacility, wojewodztwa)
    # NOTE: f_which_powiat / f_which_woj assign into the analysis_basic dict in
    # place, which the f_analysis_basic_* aggregations below rely on.
    which_powiat = f_which_powiat(analysis_basic, stacje_powiaty)
    analysis_basic_powiat = f_analysis_basic_powiat(analysis_basic, which_powiat)
    which_woj = f_which_woj(analysis_basic, stacje_woj)
    analysis_basic_woj = f_analysis_basic_woj(analysis_basic, which_woj)
    #wykres_powiat = f_wykres_powiat(analysis_basic_woj, ["brzeziński"])
    wykres_woj = f_wykres_woj(analysis_basic_woj, ["łódzkie"])
    print("--- %s seconds ---" % (time.time() - start_time))
    return 0
if __name__ == "__main__":
main() | [
"os.mkdir",
"pandas.read_csv",
"astral.sun.sun",
"astral.LocationInfo",
"pandas.DataFrame",
"pandas.merge",
"geopandas.GeoDataFrame",
"requests.get",
"re.search",
"pandas.concat",
"geopandas.read_file",
"io.BytesIO",
"pandas.to_datetime",
"os.listdir",
"tabulate",
"os.path.isdir",
"t... | [((440, 457), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (452, 457), False, 'import requests, zipfile, io, os, re\n'), ((1343, 1414), 'pandas.read_csv', 'pd.read_csv', (['path_parametr'], {'sep': '""";"""', 'index_col': '(False)', 'encoding': '"""cp1250"""'}), "(path_parametr, sep=';', index_col=False, encoding='cp1250')\n", (1354, 1414), True, 'import pandas as pd\n'), ((1811, 1832), 'os.listdir', 'os.listdir', (['path_data'], {}), '(path_data)\n', (1821, 1832), False, 'import requests, zipfile, io, os, re\n'), ((2913, 2938), 'geopandas.read_file', 'geopandas.read_file', (['path'], {}), '(path)\n', (2932, 2938), False, 'import geopandas, astral\n'), ((3210, 3228), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (3222, 3228), True, 'import pandas as pd\n'), ((9553, 9586), 'geopandas.read_file', 'geopandas.read_file', (['path_powiaty'], {}), '(path_powiaty)\n', (9572, 9586), False, 'import geopandas, astral\n'), ((9742, 9770), 'geopandas.GeoDataFrame', 'geopandas.GeoDataFrame', (['data'], {}), '(data)\n', (9764, 9770), False, 'import geopandas, astral\n'), ((9853, 9890), 'geopandas.read_file', 'geopandas.read_file', (['path_wojewodztwa'], {}), '(path_wojewodztwa)\n', (9872, 9890), False, 'import geopandas, astral\n'), ((10071, 10099), 'geopandas.GeoDataFrame', 'geopandas.GeoDataFrame', (['data'], {}), '(data)\n', (10093, 10099), False, 'import geopandas, astral\n'), ((10625, 10643), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (10637, 10643), True, 'import pandas as pd\n'), ((11146, 11164), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (11158, 11164), True, 'import pandas as pd\n'), ((13670, 13681), 'time.time', 'time.time', ([], {}), '()\n', (13679, 13681), False, 'import time\n'), ((486, 510), 'io.BytesIO', 'io.BytesIO', (['file.content'], {}), '(file.content)\n', (496, 510), False, 'import requests, zipfile, io, os, re\n'), ((883, 902), 'os.path.isdir', 'os.path.isdir', (['path'], {}), 
'(path)\n', (896, 902), False, 'import requests, zipfile, io, os, re\n'), ((918, 932), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (926, 932), False, 'import requests, zipfile, io, os, re\n'), ((1893, 2065), 'pandas.read_csv', 'pd.read_csv', (['(path_data + filename)'], {'sep': '""";"""', 'header': 'None', 'names': 'fields', 'index_col': '(False)', 'low_memory': '(False)', 'dtype': "{'KodSH': int, 'Wartosc': str}", 'parse_dates': "['Date']"}), "(path_data + filename, sep=';', header=None, names=fields,\n index_col=False, low_memory=False, dtype={'KodSH': int, 'Wartosc': str},\n parse_dates=['Date'])\n", (1904, 2065), True, 'import pandas as pd\n'), ((3950, 4010), 'pandas.merge', 'pd.merge', (['init_mean[key]', 'effacility'], {'on': '"""KodSH"""', 'how': '"""left"""'}), "(init_mean[key], effacility, on='KodSH', how='left')\n", (3958, 4010), True, 'import pandas as pd\n'), ((4673, 4691), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (4685, 4691), True, 'import pandas as pd\n'), ((4717, 4793), 'pandas.merge', 'pd.merge', (['sun_info[key]', 'astral_info[key]'], {'left_index': '(True)', 'right_index': '(True)'}), '(sun_info[key], astral_info[key], left_index=True, right_index=True)\n', (4725, 4793), True, 'import pandas as pd\n'), ((5563, 5632), 'pandas.merge', 'pd.merge', (['data[key]', 'sun_info[key]'], {'on': "['KodSH', 'Date']", 'how': '"""inner"""'}), "(data[key], sun_info[key], on=['KodSH', 'Date'], how='inner')\n", (5571, 5632), True, 'import pandas as pd\n'), ((5989, 6112), 'numpy.where', 'np.where', (["((day_night[key]['Date'] >= day_night[key]['Dawn']) & (day_night[key][\n 'Date'] < day_night[key]['Dusk']))", '(1)', '(0)'], {}), "((day_night[key]['Date'] >= day_night[key]['Dawn']) & (day_night[\n key]['Date'] < day_night[key]['Dusk']), 1, 0)\n", (5997, 6112), True, 'import numpy as np\n'), ((8771, 8907), 'pandas.concat', 'pd.concat', (["[mean_day[key], mean_night[key]['Mean_night'], median_day[key]['Median_day'\n ], 
median_night[key]['Median_night']]"], {'axis': '(1)'}), "([mean_day[key], mean_night[key]['Mean_night'], median_day[key][\n 'Median_day'], median_night[key]['Median_night']], axis=1)\n", (8780, 8907), True, 'import pandas as pd\n'), ((9314, 9374), 'tabulate', 'tabulate', (['analysis_basic[key]'], {'headers': 'hdrs', 'tablefmt': '"""psql"""'}), "(analysis_basic[key], headers=hdrs, tablefmt='psql')\n", (9322, 9374), False, 'import tabulate\n'), ((11337, 11429), 'pandas.merge', 'pd.merge', (['which_powiat[key]', 'stacje_powiaty'], {'on': "['KodSH']", 'how': '"""left"""', 'right_index': '(False)'}), "(which_powiat[key], stacje_powiaty, on=['KodSH'], how='left',\n right_index=False)\n", (11345, 11429), True, 'import pandas as pd\n'), ((11978, 12064), 'pandas.merge', 'pd.merge', (['which_woj[key]', 'stacje_woj'], {'on': "['KodSH']", 'how': '"""left"""', 'right_index': '(False)'}), "(which_woj[key], stacje_woj, on=['KodSH'], how='left', right_index=\n False)\n", (11986, 12064), True, 'import pandas as pd\n'), ((689, 712), 're.search', 're.search', (['pattern', 'url'], {}), '(pattern, url)\n', (698, 712), False, 'import requests, zipfile, io, os, re\n'), ((4232, 4358), 'astral.LocationInfo', 'astral.LocationInfo', (["sun_info[key]['City'][k]", '"""Poland"""', '"""Europe/Warsaw"""', "sun_info[key]['Lat'][k]", "sun_info[key]['Lon'][k]"], {}), "(sun_info[key]['City'][k], 'Poland', 'Europe/Warsaw',\n sun_info[key]['Lat'][k], sun_info[key]['Lon'][k])\n", (4251, 4358), False, 'import geopandas, astral\n'), ((10300, 10370), 'geopandas.points_from_xy', 'geopandas.points_from_xy', (["stacje_powiaty['Lon']", "stacje_powiaty['Lat']"], {}), "(stacje_powiaty['Lon'], stacje_powiaty['Lat'])\n", (10324, 10370), False, 'import geopandas, astral\n'), ((10839, 10901), 'geopandas.points_from_xy', 'geopandas.points_from_xy', (["stacje_woj['Lon']", "stacje_woj['Lat']"], {}), "(stacje_woj['Lon'], stacje_woj['Lat'])\n", (10863, 10901), False, 'import geopandas, astral\n'), ((4378, 4449), 
'astral.sun.sun', 'sun', (['City.observer'], {'date': "sun_info[key]['Date'][k]", 'tzinfo': 'City.timezone'}), "(City.observer, date=sun_info[key]['Date'][k], tzinfo=City.timezone)\n", (4381, 4449), False, 'from astral.sun import sun\n'), ((4483, 4554), 'astral.sun.sun', 'sun', (['City.observer'], {'date': "sun_info[key]['Date'][k]", 'tzinfo': 'City.timezone'}), "(City.observer, date=sun_info[key]['Date'][k], tzinfo=City.timezone)\n", (4486, 4554), False, 'from astral.sun import sun\n'), ((12781, 12809), 'pandas.to_datetime', 'pd.to_datetime', (["data['Date']"], {}), "(data['Date'])\n", (12795, 12809), True, 'import pandas as pd\n'), ((13342, 13370), 'pandas.to_datetime', 'pd.to_datetime', (["data['Date']"], {}), "(data['Date'])\n", (13356, 13370), True, 'import pandas as pd\n'), ((14832, 14843), 'time.time', 'time.time', ([], {}), '()\n', (14841, 14843), False, 'import time\n'), ((4984, 5021), 'pandas.to_datetime', 'pd.to_datetime', (["sun_info[key]['Date']"], {}), "(sun_info[key]['Date'])\n", (4998, 5021), True, 'import pandas as pd\n'), ((5424, 5457), 'pandas.to_datetime', 'pd.to_datetime', (["data[key]['Date']"], {}), "(data[key]['Date'])\n", (5438, 5457), True, 'import pandas as pd\n')] |
import logging
from collections import OrderedDict
import numpy as np
from .. import tools
# Module-level logger for converter registration/conversion messages.
logger = logging.getLogger(__name__)
# Registry: format name -> {'validator': fn, <backend name>: converter fn, ...}.
CONVERTERS = OrderedDict()
@tools.profiling.timeing(f'{__name__}')
def list_converters():
    """Return a human-readable listing of the registered converters per format."""
    lines = []
    for name in CONVERTERS:
        lines.append(f'{name}:\n')
        for backend in CONVERTERS[name]:
            if backend == 'validator':
                lines.append('├Validator> present\n')
            else:
                lines.append(f'├Backend> {backend}\n')
    return ''.join(lines)
@tools.profiling.timeing(f'{__name__}')
def converter(name, backend):
    """Decorator factory: register the wrapped function as the converter from
    *name*-formatted input to the given *backend* format.
    #TODO: add descripiton of backend load function here (what it should return ect)
    """
    def converter_wrapper(func):
        logger.debug(f'Registering converter from {name} to {backend} backend')
        # setdefault creates the per-format dict on first registration.
        CONVERTERS.setdefault(name, {})[backend] = func
        return func
    return converter_wrapper
@tools.profiling.timeing(f'{__name__}')
def converter_validator(name):
    """Decorator factory: register the wrapped function as the validator for
    the *name* input format.
    #TODO: add descripiton of backend load function here (what it should return ect)
    """
    def converter_wrapper(func):
        logger.debug(f'Registering validator for {name} backend')
        # setdefault creates the per-format dict on first registration.
        CONVERTERS.setdefault(name, {})['validator'] = func
        return func
    return converter_wrapper
def check_if_convertable(path):
    """Return the registered input format whose validator accepts *path*.

    Formats without a validator are skipped; None is returned when no
    validator matches.
    """
    matches = (
        fmt
        for fmt, backends in CONVERTERS.items()
        if 'validator' in backends and backends['validator'](path)
    )
    return next(matches, None)
@tools.profiling.timeing(f'{__name__}')
def convert(input_files, output_location, backend=None, input_format=None, **kwargs):
    '''Convert given list of input files or input file to a supported
    backend format and returns the created data files.

    input_files -- paths to convert; an empty list is a no-op returning [].
    output_location -- passed through to the backend converter function.
    backend -- target backend name; when None, the last non-validator backend
        registered for the (detected or given) format is used.
    input_format -- skip auto-detection and use this format's converters.
    Raises ValueError when no converter matches the input.
    '''
    if len(input_files) == 0:
        return []
    files_created = None
    if input_format is None:
        # Auto-detect: try each registered format's validator against the files.
        format_found = False
        for fmt, backends in CONVERTERS.items():
            if 'validator' not in backends:
                continue
            validator = backends['validator']
            valid = [validator(file) for file in input_files]
            if np.any(valid):
                # Only the files this validator accepted are converted.
                sub_files = [input_files[x] for x in np.argwhere(valid).flatten()]
                if backend is None:
                    # Default to the last registered non-validator backend.
                    use_backend = None
                    for key in backends:
                        if key == 'validator':
                            continue
                        use_backend = key
                elif backend not in backends:
                    raise ValueError(
                        f'No converter to the given backend \
                            "{backend}" found for given input path type "{fmt}"\n \
                            Files affected: {sub_files}'
                    )
                else:
                    use_backend = backend
                logger.info(f'Converting {len(sub_files)} from {fmt} to {use_backend}...')
                func = backends[use_backend]
                files_created = func(sub_files, output_location, **kwargs)
                format_found = True
                break
            # (a redundant "else: continue" was removed here -- the loop advances anyway)
        if not format_found:
            raise ValueError('No converter found for given input path')
    else:
        # Explicit format: no validation pass, convert all files with it.
        if input_format not in CONVERTERS:
            raise ValueError(f'Given input format {input_format} has no converters')
        backends = CONVERTERS[input_format]
        if backend is None:
            # Default to the last registered non-validator backend.
            use_backend = None
            for key in backends:
                if key == 'validator':
                    continue
                use_backend = key
        else:
            use_backend = backend
        logger.info(f'Converting {len(input_files)} from {input_format} to {use_backend}...')
        func = CONVERTERS[input_format][use_backend]
        files_created = func(input_files, output_location, **kwargs)
    return files_created
| [
"collections.OrderedDict",
"numpy.any",
"logging.getLogger",
"numpy.argwhere"
] | [((103, 130), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (120, 130), False, 'import logging\n'), ((145, 158), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (156, 158), False, 'from collections import OrderedDict\n'), ((2576, 2589), 'numpy.any', 'np.any', (['valid'], {}), '(valid)\n', (2582, 2589), True, 'import numpy as np\n'), ((2644, 2662), 'numpy.argwhere', 'np.argwhere', (['valid'], {}), '(valid)\n', (2655, 2662), True, 'import numpy as np\n')] |
""" This example replicates the behaviour of legacy code that creates
data in arrays and then convert them into matrices in order to use in linear
algebra algorithms.
Imagine this implementation hidden inside 10k > lines of code with very
little documentation. Using a function memory monitor you can map the
behaviour of the code and spot possible memory issues before they become
severe. Because narrow peaks will always appear when temporary data are
created even when there is no apparent memory error.
.. note::
    Narrow peaks are the best place to look for correcting possible
memory issues in the usage of libraries such as numpy, scipy and
matplotlib.
"""
import argparse
import numpy as np
from pikos.api import memory_on_functions
@memory_on_functions()
def legacy(size):
    """Deliberately wasteful implementation kept as a memory-profiling demo.

    size -- (rows, cols) shape passed to np.random.random.
    Returns the inverse of the (rows x rows) product matrix a * b.
    """
    b = np.mat(np.random.random(size).T)
    # very bad example that makes copies of numpy arrays when converting them
    # to matrix
    a = np.matrix(np.random.random(size))
    final = a * b
    return final.I
@memory_on_functions()
def fixed(size):
    """Corrected variant of legacy() with the same contract.

    Uses np.mat for both operands, avoiding the extra array copy the
    module's comments attribute to np.matrix in legacy().
    """
    # more appropriate way using a numpy.mat
    b = np.mat(np.random.random(size).T)
    a = np.mat(np.random.random(size))
    final = a * b
    return final.I
if __name__ == '__main__':
    # Command-line entry point: pick the data size and which implementation to run.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--small',
        action="store_true",
        help='Use a smaller size for the data matrix '
             '(default -- use large size).')
    parser.add_argument(
        '--fixed',
        action="store_true",
        help='Run the corrected code (default -- run the faulty code).')
    args = parser.parse_args()
    size = (1000, 5000) if args.small else (1000, 20000)
    (fixed if args.fixed else legacy)(size)
| [
"numpy.random.random",
"pikos.api.memory_on_functions",
"argparse.ArgumentParser"
] | [((760, 781), 'pikos.api.memory_on_functions', 'memory_on_functions', ([], {}), '()\n', (779, 781), False, 'from pikos.api import memory_on_functions\n'), ((1017, 1038), 'pikos.api.memory_on_functions', 'memory_on_functions', ([], {}), '()\n', (1036, 1038), False, 'from pikos.api import memory_on_functions\n'), ((1260, 1285), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1283, 1285), False, 'import argparse\n'), ((953, 975), 'numpy.random.random', 'np.random.random', (['size'], {}), '(size)\n', (969, 975), True, 'import numpy as np\n'), ((1157, 1179), 'numpy.random.random', 'np.random.random', (['size'], {}), '(size)\n', (1173, 1179), True, 'import numpy as np\n'), ((815, 837), 'numpy.random.random', 'np.random.random', (['size'], {}), '(size)\n', (831, 837), True, 'import numpy as np\n'), ((1116, 1138), 'numpy.random.random', 'np.random.random', (['size'], {}), '(size)\n', (1132, 1138), True, 'import numpy as np\n')] |
from model import Word2Vec, ScoringLayer, EmbeddingLayer
from utils import constructBagOfWordsInWindowSize, contextPairToOneHot, OneHotOfAllInVocab
from keras.callbacks import TensorBoard
from dataloader import tokenizeData, performTokenization
import argparse
import datetime
from numpy import save, load
from evaluation import getSimilarity, getSimilarityByEmbedding, getTenClosestWords, analogy, plotEmbeddingsIn2D
from collections import OrderedDict
def parse_args():
    """Parse command-line options and bundle them into nested OrderedDicts.

    Returns an OrderedDict with 'optim_config', 'evaluation_config',
    'model_config' and 'mode' entries.
    """
    parser = argparse.ArgumentParser()
    # optimisation settings
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=2000)
    parser.add_argument('--optimizer', type=str, default="adam")
    # model settings
    parser.add_argument('--dim_embedding', type=int, default=100)
    # run mode
    parser.add_argument('--mode', default="train", type=str)
    # getSimilarity inputs
    parser.add_argument('--word1', type=str, default="window")
    parser.add_argument('--word2', type=str, default="hoouse")
    # getTenClosestWords input
    parser.add_argument('--word', type=str, default="window")
    # analogy inputs
    parser.add_argument('--word1_', type=str, default="window")
    parser.add_argument('--word2_', type=str, default="hoouse")
    parser.add_argument('--word3_', type=str, default="door")
    # wordIsInVocab input
    parser.add_argument('--word_', type=str)
    args = parser.parse_args()

    optim_config = OrderedDict(
        epochs=args.epochs,
        batch_size=args.batch_size,
        optimizer=args.optimizer,
    )
    model_config = OrderedDict(dim_embedding=args.dim_embedding)
    evaluation_config = OrderedDict(
        word1=args.word1,
        word2=args.word2,
        word=args.word,
        word1_=args.word1_,
        word2_=args.word2_,
        word3_=args.word3_,
        word_=args.word_,
    )
    return OrderedDict(
        optim_config=optim_config,
        evaluation_config=evaluation_config,
        model_config=model_config,
        mode=args.mode,
    )
# Top-level driver: parse CLI config, prepare the one-hot training pairs and
# dispatch on --mode (train / help / evaluation modes).
config = parse_args()
model_config = config['model_config']
optim_config = config['optim_config']
evaluation_config = config['evaluation_config']
mode = config['mode']
# log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
# tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)
tokenized_data = performTokenization()
context_tuple_list = constructBagOfWordsInWindowSize(tokenized_data)
oneHotNumpy, data = contextPairToOneHot(context_tuple_list, tokenized_data)
print("The total number of words in corpus size are: ",data["vocabSize"])
if(mode == "train"):
    def train():
        """Build, compile and fit the Word2Vec model, then save its embeddings."""
        dimensionality_of_embeddings = model_config['dim_embedding']
        optimizer = optim_config['optimizer']
        epochs = optim_config['epochs']
        batch_size = optim_config['batch_size']
        model = Word2Vec(input_dim=data['vocabSize'], units = int(dimensionality_of_embeddings))
        model.compile(loss='categorical_crossentropy',
                    optimizer= optimizer,
                    metrics= ['accuracy'])
        model.fit(oneHotNumpy[:,0,:],oneHotNumpy[:,1,:],
                epochs = epochs,
                batch_size = batch_size)
        emb = model.get_weights()[0]
        save("word2vec_embeddings.npy",emb)
    # Bug fix: train() was defined but never invoked, so "train" mode did nothing.
    train()
elif(mode == "help"):
    print("$ python3 main.py --epochs 100 --optimizer \"adam\" --batch_size 2000 --dim_embedding 100\n")
    print("$ python3 main.py --mode \"getSimilarity\" --word1 \"window\" --word2 \"house\"\n")
    print("$ python3 main.py --mode \"getTenClosestWords\" --word \"window\"\n")
    print("$ python3 main.py --mode \"analogy\" --word1_ \"window\" --word2_ \"house\" --word3_ \"door\"\n")
    print("$ python3 main.py --mode \"plot\"")
    print("$ python3 main.py --mode \"help\"")
    print("$ python3 main.py --mode \"wordIsInVocab\" --word_ \"window\"")
else:
    # NOTE(review): training saves to "word2vec_embeddings.npy" but evaluation
    # loads "embeddings.npy" -- confirm which filename is intended.
    emb = load("embeddings.npy")
    if(mode == "getSimilarity"):
        word1 = evaluation_config['word1']
        word2 = evaluation_config['word2']
        print(getSimilarity(word1, word2, data, emb))
    if(mode == "getTenClosestWords"):
        word = evaluation_config['word']
        print(getTenClosestWords(word, data['vocab'], data, emb))
    if(mode == "analogy"):
        word1_ = evaluation_config['word1_']
        word2_ = evaluation_config['word2_']
        word3_ = evaluation_config['word3_']
        print(analogy(word1_, word2_, word3_, data, data['vocab'], emb))
    if(mode == "wordIsInVocab"):
        word_ = evaluation_config['word_']
        vocabList = data['vocab'].tolist()
        if word_ in vocabList:
            print("YES")
        else:
            print("NO")
    if(mode == "plot"):
        plotEmbeddingsIn2D(emb, data)
| [
"numpy.load",
"numpy.save",
"utils.constructBagOfWordsInWindowSize",
"argparse.ArgumentParser",
"dataloader.performTokenization",
"evaluation.analogy",
"utils.contextPairToOneHot",
"evaluation.plotEmbeddingsIn2D",
"collections.OrderedDict",
"evaluation.getTenClosestWords",
"evaluation.getSimilar... | [((2478, 2499), 'dataloader.performTokenization', 'performTokenization', ([], {}), '()\n', (2497, 2499), False, 'from dataloader import tokenizeData, performTokenization\n'), ((2521, 2568), 'utils.constructBagOfWordsInWindowSize', 'constructBagOfWordsInWindowSize', (['tokenized_data'], {}), '(tokenized_data)\n', (2552, 2568), False, 'from utils import constructBagOfWordsInWindowSize, contextPairToOneHot, OneHotOfAllInVocab\n'), ((2589, 2644), 'utils.contextPairToOneHot', 'contextPairToOneHot', (['context_tuple_list', 'tokenized_data'], {}), '(context_tuple_list, tokenized_data)\n', (2608, 2644), False, 'from utils import constructBagOfWordsInWindowSize, contextPairToOneHot, OneHotOfAllInVocab\n'), ((487, 512), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (510, 512), False, 'import argparse\n'), ((1432, 1539), 'collections.OrderedDict', 'OrderedDict', (["[('epochs', args.epochs), ('batch_size', args.batch_size), ('optimizer',\n args.optimizer)]"], {}), "([('epochs', args.epochs), ('batch_size', args.batch_size), (\n 'optimizer', args.optimizer)])\n", (1443, 1539), False, 'from collections import OrderedDict\n'), ((1585, 1637), 'collections.OrderedDict', 'OrderedDict', (["[('dim_embedding', args.dim_embedding)]"], {}), "([('dim_embedding', args.dim_embedding)])\n", (1596, 1637), False, 'from collections import OrderedDict\n'), ((1681, 1868), 'collections.OrderedDict', 'OrderedDict', (["[('word1', args.word1), ('word2', args.word2), ('word', args.word), (\n 'word1_', args.word1_), ('word2_', args.word2_), ('word3_', args.word3_\n ), ('word_', args.word_)]"], {}), "([('word1', args.word1), ('word2', args.word2), ('word', args.\n word), ('word1_', args.word1_), ('word2_', args.word2_), ('word3_',\n args.word3_), ('word_', args.word_)])\n", (1692, 1868), False, 'from collections import OrderedDict\n'), ((1942, 2086), 'collections.OrderedDict', 'OrderedDict', (["[('optim_config', optim_config), ('evaluation_config', 
evaluation_config),\n ('model_config', model_config), ('mode', args.mode)]"], {}), "([('optim_config', optim_config), ('evaluation_config',\n evaluation_config), ('model_config', model_config), ('mode', args.mode)])\n", (1953, 2086), False, 'from collections import OrderedDict\n'), ((3379, 3415), 'numpy.save', 'save', (['"""word2vec_embeddings.npy"""', 'emb'], {}), "('word2vec_embeddings.npy', emb)\n", (3383, 3415), False, 'from numpy import save, load\n'), ((4014, 4036), 'numpy.load', 'load', (['"""embeddings.npy"""'], {}), "('embeddings.npy')\n", (4018, 4036), False, 'from numpy import save, load\n'), ((4848, 4877), 'evaluation.plotEmbeddingsIn2D', 'plotEmbeddingsIn2D', (['emb', 'data'], {}), '(emb, data)\n', (4866, 4877), False, 'from evaluation import getSimilarity, getSimilarityByEmbedding, getTenClosestWords, analogy, plotEmbeddingsIn2D\n'), ((4172, 4210), 'evaluation.getSimilarity', 'getSimilarity', (['word1', 'word2', 'data', 'emb'], {}), '(word1, word2, data, emb)\n', (4185, 4210), False, 'from evaluation import getSimilarity, getSimilarityByEmbedding, getTenClosestWords, analogy, plotEmbeddingsIn2D\n'), ((4307, 4357), 'evaluation.getTenClosestWords', 'getTenClosestWords', (['word', "data['vocab']", 'data', 'emb'], {}), "(word, data['vocab'], data, emb)\n", (4325, 4357), False, 'from evaluation import getSimilarity, getSimilarityByEmbedding, getTenClosestWords, analogy, plotEmbeddingsIn2D\n'), ((4537, 4594), 'evaluation.analogy', 'analogy', (['word1_', 'word2_', 'word3_', 'data', "data['vocab']", 'emb'], {}), "(word1_, word2_, word3_, data, data['vocab'], emb)\n", (4544, 4594), False, 'from evaluation import getSimilarity, getSimilarityByEmbedding, getTenClosestWords, analogy, plotEmbeddingsIn2D\n')] |
from pathlib import Path
import numpy as np
def _split_and_remove_whitespace(line):
return ' '.join(line.split()).split(' ')
def read_problem_specs(input_file):
    """Read the first line of *input_file* and return (size, start, stop, cells) as floats.

    Raises AssertionError when the file does not exist (kept for backward
    compatibility with existing callers).
    """
    input_file = Path(input_file)
    assert input_file.exists(), "Input file: {} does not exist.".format(input_file)
    # Context manager closes the handle; the original left the file open.
    with open(input_file, 'r') as file:
        line = file.readline()
    size, start, stop, cells = _split_and_remove_whitespace(line)
    return float(size), float(start), float(stop), float(cells)
def build_mesh(size, start, stop, cells):
    """Return the (cells*cells, 2) array of (x, y) grid points over [start, stop]^2.

    size -- accepted for interface compatibility but unused here.
    cells -- number of samples per axis; may arrive as a float (the spec
        reader returns floats), so it is cast to int because np.linspace
        requires an integral sample count.
    """
    num = int(cells)
    x = np.linspace(start, stop, num)
    y = np.linspace(start, stop, num)
    X, Y = np.meshgrid(x, y)
    # column_stack replaces the python-level zip/list round-trip of the original.
    points = np.column_stack((X.ravel(), Y.ravel()))
    return points
def main():
    """Interactively read a problem spec, build the mesh and save it to mesh.txt."""
    input_file = input("Enter path to input file: ")
    size, start, stop, cells = read_problem_specs(input_file)
    points = build_mesh(size, start, stop, cells)
    # Plain-text dump; each row is one "x y" grid point.
    np.savetxt("mesh.txt", points)
    return 0
if __name__ == "__main__":
    main()  # script entry point
| [
"numpy.savetxt",
"pathlib.Path",
"numpy.meshgrid",
"numpy.linspace"
] | [((185, 201), 'pathlib.Path', 'Path', (['input_file'], {}), '(input_file)\n', (189, 201), False, 'from pathlib import Path\n'), ((528, 559), 'numpy.linspace', 'np.linspace', (['start', 'stop', 'cells'], {}), '(start, stop, cells)\n', (539, 559), True, 'import numpy as np\n'), ((568, 599), 'numpy.linspace', 'np.linspace', (['start', 'stop', 'cells'], {}), '(start, stop, cells)\n', (579, 599), True, 'import numpy as np\n'), ((611, 628), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (622, 628), True, 'import numpy as np\n'), ((889, 919), 'numpy.savetxt', 'np.savetxt', (['"""mesh.txt"""', 'points'], {}), "('mesh.txt', points)\n", (899, 919), True, 'import numpy as np\n')] |
import os
import numpy as np
import sys, traceback
import pyqtgraph as pg
from matplotlib import cm
from scipy.stats import iqr
from collections import defaultdict
from PyQt5 import QtCore, QtWidgets, QtGui
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import QShortcut
from labelling.ui.css import css
from labelling.ui.layout_label import Ui_MainWindow
from lib.cmaps import default_cmap
from lib.labeling import save_pickle, load_pickle
from lib.windows import SEQUENCE_WINDOWS, window_numpy
from lib.labeling import get_studies_peter as get_studies
DATADIR_TO_LABEL = "./data/dicoms/test"
# LABEL_DIR = "./data/labels/test/"
# NOTE(review): presumably installed so exceptions raised inside Qt slots are
# printed and abort the app instead of being silently dropped -- confirm.
if QtCore.QT_VERSION >= 0x50501:
    def excepthook(type_, value, traceback_):
        # Print the full traceback, then terminate through Qt's fatal handler.
        traceback.print_exception(type_, value, traceback_)
        QtCore.qFatal('')
    sys.excepthook = excepthook
LABELS = ('endo', 'epi', 'myo')
class MainWindowUI(Ui_MainWindow):
    def __init__(self, mainwindow, data_root_dir=DATADIR_TO_LABEL):
        """Build the labelling UI: apply the layout, load studies and wire signals.

        mainwindow -- the QMainWindow instance the generated layout is applied to.
        data_root_dir -- root directory scanned for study sequences.
        """
        super(MainWindowUI, self).__init__()
        self.data_root_dir = data_root_dir
        self.mainwindow = mainwindow
        self.setupUi(mainwindow)
        self.plotWidget.setAspectLocked(True)
        self.imageItem = None
        # Currently selected label class (one of LABELS).
        self.activelabel_name = LABELS[0]
        # Index cycling through the window presets of the current sequence.
        self.i_window = 0
        # Paths and image data of the currently loaded study/sequence.
        self.numpy_path = None
        self.human_report_path = None
        self.auto_report_path = None
        self.img_array = None
        # Per-label ROI coordinates (defaultdict of lists), plus UI bookkeeping.
        self.roi_coords = defaultdict(list)
        self.roi_selectors = {}
        self.shortcuts = {}
        self.labelbuttons = {'endo': self.pushButton_endo,
                             'epi': self.pushButton_epi,
                             'myo': self.pushButton_myo}
        self.labelmode = 'add'
        self.sequences = get_studies(self.data_root_dir)
        self.refresh_studyselector()
        # Combo-box selections drive study/sequence loading.
        self.comboBox_studies.activated.connect(self.load_study)
        self.comboBox_sequences.activated.connect(self.load_sequence)
        self.create_shortcuts()
def refresh_studyselector(self):
self.comboBox_studies.clear()
self.comboBox_studies.addItem("Please select a study")
n_reported_human, n_reported_auto = 0, 0
for sequence_id, sequence_dict in self.sequences.items():
self.comboBox_studies.addItem(sequence_id)
if sequence_dict['reported'] == 'human':
colour = 'green'
n_reported_human += 1
elif sequence_dict['reported'] == 'auto':
colour = 'yellow'
n_reported_auto += 1
elif sequence_dict['reported'] == 'no':
colour = 'white'
elif sequence_dict['reported'] == 'invalid':
colour = 'red'
else:
raise ValueError()
self.comboBox_studies.setItemData(self.comboBox_studies.count()-1, QtGui.QColor(colour), QtCore.Qt.BackgroundRole)
print(f"{n_reported_auto+n_reported_human} of {len(self.sequences)} reported ({n_reported_human} human labelled, {n_reported_auto} auto labelled)")
    def load_study(self):
        """Load the study currently selected in the studies combo box.

        Resets the window preset, fills the sequence selector, restores any
        previously saved ROI coordinates and displays the default sequence.
        """
        self.i_window = 0
        self.comboBox_sequences.clear()
        try:
            sequence_id = self.comboBox_studies.currentText()
            self.numpy_path = self.sequences[sequence_id]['numpy_path']
            self.human_report_path = self.sequences[sequence_id]['human_report_path']
            self.auto_report_path = self.sequences[sequence_id]['auto_report_path']
            # One combo entry per known sequence type: "<index> - <name> - <path>".
            for i_seq, seq in enumerate(SEQUENCE_WINDOWS.keys()):
                self.comboBox_sequences.addItem(f"{i_seq} - {seq} - {self.numpy_path}")
            self.comboBox_sequences.setCurrentIndex(4) # Select T2w by default
            self.roi_coords = self.load_coords()
            print(f"self.roi_coords: {self.roi_coords}")
            self.load_sequence()
            self.activelabel_name = LABELS[0]
            self.draw_buttons()
        except KeyError as e: # Selected heading
            # The "Please select a study" header is not in self.sequences;
            # selecting it raises KeyError, which is deliberately swallowed.
            print(f"exception: {e}")
    def load_sequence(self):
        """Load, window and display the sequence selected in the combo box.

        The combo text is "<index> - <name> - <numpy path>". The numpy file
        is indexed as [:, :, i_seq], i.e. one 2-D slice per sequence.
        """
        i_seq, seq_name, numpy_path = self.comboBox_sequences.currentText().split(' - ', 2)
        print(numpy_path)
        i_seq = int(i_seq)
        img_array = np.load(numpy_path)[:,:,i_seq]
        # Transpose + horizontal flip to match the display orientation.
        img_array = img_array.T
        img_array = np.flip(img_array, axis=1)
        try:
            windows_for_class = SEQUENCE_WINDOWS[seq_name]
            # Cycle through the preset windows with the X shortcut (change_window).
            window = windows_for_class[self.i_window % len(windows_for_class)]
            window_centre = window['wc']
            window_width = window['ww']
            cmap = default_cmap
        except (KeyError, TypeError):
            # No wc/ww for this, so use median for wc and 2* IQR for WW
            window_centre = np.median(img_array)
            window_width = iqr(img_array) * 2
            cmap = cm.gray
        img_array = window_numpy(img_array,
                                 window_centre=window_centre,
                                 window_width=window_width,
                                 cmap=cmap)
        self.img_array = img_array
        self.draw_image_and_rois()
def load_coords(self, report_path=None):
if report_path is None:
if (not os.path.exists(self.human_report_path)) and os.path.exists(self.auto_report_path):
report_path = self.auto_report_path # Only use the auto path if exists and human doesn't
else:
report_path = self.human_report_path
if os.path.exists(report_path):
return load_pickle(report_path)
else:
return defaultdict(list)
    def create_shortcuts(self):
        """Wire keyboard shortcuts to the UI actions.

        Space toggles add/edit mode, number keys select labels, up/down and
        left/right change study/sequence, X cycles windowing presets and
        Del toggles the invalid marker.
        """
        # Space -> Edit
        shortcut_mode = QShortcut(QKeySequence("Space"), self.pushButton_mode)
        shortcut_mode.activated.connect(lambda: self.action_changemode())
        # Keep references so the shortcuts are not garbage collected.
        self.shortcuts['mode'] = shortcut_mode
        # Numbers -> Labels
        for i, label in enumerate(LABELS):
            shortcut_key = f"{i + 1}"
            shortcut = QShortcut(QKeySequence(shortcut_key), self.labelbuttons[label])
            # Default argument binds `label` at definition time (avoids the
            # classic late-binding closure bug in loops).
            shortcut.activated.connect(lambda labelname=label: self.action_labelbutton(labelname))
            self.shortcuts[label] = shortcut
        # Up/down -> Change study
        shortcut_prevstudy = QShortcut(QKeySequence("Up"), self.pushButton_prevstudy)
        shortcut_prevstudy.activated.connect(lambda: self.action_changestudy(-1))
        shortcut_nextstudy = QShortcut(QKeySequence("Down"), self.pushButton_nextstudy)
        shortcut_nextstudy.activated.connect(lambda: self.action_changestudy(1))
        # Left/right -> Change sequence
        shortcut_prevseq = QShortcut(QKeySequence("Left"), self.pushButton_prevseq)
        shortcut_prevseq.activated.connect(lambda: self.action_changeseq(-1))
        shortcut_nextseq = QShortcut(QKeySequence("Right"), self.pushButton_nextseq)
        shortcut_nextseq.activated.connect(lambda: self.action_changeseq(1))
        # x -> change window
        shortcut_changewindow = QShortcut(QKeySequence("X"), self.pushButton_changeWindow)
        shortcut_changewindow.activated.connect(lambda: self.change_window())
        # del -> invalidate
        shortcut_invalidate = QShortcut(QKeySequence("Del"), self.pushButton_invalidate)
        shortcut_invalidate.activated.connect(lambda: self.invalidate())
@pyqtSlot()
def action_changestudy(self, changeby):
current_id = self.comboBox_studies.currentIndex()
new_id = (current_id + changeby) % self.comboBox_studies.count()
self.comboBox_studies.setCurrentIndex(new_id)
self.load_study()
@pyqtSlot()
def action_changeseq(self, changeby):
current_id = self.comboBox_sequences.currentIndex()
new_id = (current_id + changeby) % self.comboBox_sequences.count()
self.comboBox_sequences.setCurrentIndex(new_id)
self.load_sequence()
@pyqtSlot()
def action_changemode(self):
if self.labelmode == 'add':
print(f"Add -> Edit")
self.labelmode = 'edit'
self.imageItem.mousePressEvent = None
elif self.labelmode == 'edit':
print(f"Edit -> Add")
self.labelmode = 'add'
self.imageItem.mousePressEvent = self.add_node_at_mouse
else:
raise ValueError()
self.draw_buttons()
@pyqtSlot()
def action_labelbutton(self, labelname):
self.activelabel_name = labelname
self.draw_buttons()
@pyqtSlot()
def change_window(self):
print("changing window")
self.i_window += 1
self.load_sequence()
    @pyqtSlot()
    def invalidate(self):
        """Toggle the '.invalid' marker on this slice's report file.

        If a previously invalidated report exists, rename it back
        (re-validate). Otherwise rename the existing report (human preferred
        over auto) to '<path>.invalid'; if no report exists at all, save the
        current coords directly under the invalid name.
        """
        if os.path.exists(self.human_report_path+'.invalid'):
            print(f"Revalidating old human label")
            os.rename(self.human_report_path+'.invalid', self.human_report_path)
        elif os.path.exists(self.auto_report_path+'.invalid'):
            print(f"Revalidating old auto label")
            os.rename(self.auto_report_path+'.invalid', self.auto_report_path)
        else:
            print(f"Invalidating slice")
            if os.path.exists(self.human_report_path):
                os.rename(self.human_report_path, self.human_report_path+'.invalid')
            elif os.path.exists(self.auto_report_path):
                os.rename(self.auto_report_path, self.auto_report_path+'.invalid')
            else:
                # Nothing saved yet: persist current (possibly empty) coords as invalid.
                self.save_coords(self.human_report_path+'.invalid')
def draw_buttons(self):
# Edit button
if self.labelmode == 'add':
mode_text = 'Add mode'
mode_style = css['modebutton_add']
elif self.labelmode == 'edit':
mode_text = 'Edit mode'
mode_style = css['modebutton_edit']
else:
raise ValueError()
self.pushButton_mode.setText(mode_text)
self.pushButton_mode.setStyleSheet(mode_style)
self.draw_image_and_rois(fix_roi=True)
# Label buttons
for name, button in self.labelbuttons.items():
reported = self.roi_selectors.get(name, None) is not None
if name == self.activelabel_name:
style = css['labelbutton_active_green'] if reported else css['labelbutton_active_red']
else:
style = css['labelbutton_inactive_green'] if reported else css['labelbutton_inactive_red']
self.labelbuttons[name].setStyleSheet(style)
    def draw_image_and_rois(self, fix_roi=False):
        """Rebuild the image item and all ROI selectors from scratch.

        Existing pyqtgraph items are removed and deleted before new ones are
        created; when fix_roi is True the previous axis range is restored so
        the view does not jump.
        """
        # Get current axis range
        x_range = self.plotWidget.getAxis('bottom').range
        y_range = self.plotWidget.getAxis('left').range
        # Remove imageItem (disconnect the click handler first so no events
        # arrive on a half-removed item)
        if self.imageItem:
            self.imageItem.mousePressEvent = None
            self.plotWidget.removeItem(self.imageItem)
            del self.imageItem
        # Remove ROIs from plot and delete
        for roi_name, roi_selector in self.roi_selectors.items():
            self.plotWidget.removeItem(roi_selector)
            self.roi_selectors[roi_name] = None
            del roi_selector
        self.roi_selectors = {}
        # Draw image
        self.imageItem = pg.ImageItem(self.img_array)
        self.plotWidget.addItem(self.imageItem)
        if self.labelmode == 'add':
            # Only add mode creates nodes on click.
            self.imageItem.mousePressEvent = self.add_node_at_mouse
        # Draw ROIs
        for roi_name, coords in self.roi_coords.items():
            if roi_name == 'dims': # Ignore the label containing the image size
                continue
            roi_selector = pg.PolyLineROI(coords, movable=False, closed=True, pen=QtGui.QPen(QtGui.QColor(115, 194, 251)))
            roi_selector.sigRegionChangeFinished.connect(lambda roi: self.update_coords())
            self.roi_selectors[roi_name] = roi_selector
            self.plotWidget.addItem(roi_selector)
        # Restore range
        if fix_roi:
            self.plotWidget.setRange(xRange=x_range, yRange=y_range, padding=0)
def update_coords(self):
if self.labelmode == "add":
# Do not allow adjustments in add mode, recipe for disaster
return None
for roi_name, roi in self.roi_selectors.items():
self.roi_coords[roi_name] = []
for segment in roi.segments:
point = segment.listPoints()[0]
self.roi_coords[roi_name].append([point.x(), point.y()])
# Finally save
if self.numpy_path:
self.save_coords()
def save_coords(self, report_path=None):
out_dict = self.roi_coords
out_dict['dims'] = self.img_array.shape
if report_path is None:
report_path = self.human_report_path
print(f"Saving as {report_path}")
save_pickle(report_path, out_dict)
def add_node_at_mouse(self, event):
self.labelbuttons[self.activelabel_name].setStyleSheet(css['labelbutton_active_green'])
x = round(event.pos().x())
y = round(event.pos().y())
self.roi_coords[self.activelabel_name].append([x, y])
print(self.roi_coords[self.activelabel_name])
self.draw_image_and_rois(fix_roi=True)
# Finally save
if self.numpy_path:
self.save_coords()
if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    # Keep a reference to the UI object so it is not garbage collected
    # while the event loop runs.
    ui = MainWindowUI(MainWindow)
    MainWindow.show()
    print('Showing')
    app.exec_()
| [
"numpy.load",
"PyQt5.QtGui.QKeySequence",
"PyQt5.QtGui.QColor",
"collections.defaultdict",
"PyQt5.QtWidgets.QApplication",
"os.path.exists",
"lib.labeling.get_studies_peter",
"traceback.print_exception",
"lib.labeling.load_pickle",
"scipy.stats.iqr",
"lib.windows.SEQUENCE_WINDOWS.keys",
"PyQt5... | [((7352, 7362), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (7360, 7362), False, 'from PyQt5.QtCore import pyqtSlot\n'), ((7624, 7634), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (7632, 7634), False, 'from PyQt5.QtCore import pyqtSlot\n'), ((7903, 7913), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (7911, 7913), False, 'from PyQt5.QtCore import pyqtSlot\n'), ((8359, 8369), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (8367, 8369), False, 'from PyQt5.QtCore import pyqtSlot\n'), ((8491, 8501), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (8499, 8501), False, 'from PyQt5.QtCore import pyqtSlot\n'), ((8626, 8636), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (8634, 8636), False, 'from PyQt5.QtCore import pyqtSlot\n'), ((13246, 13278), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (13268, 13278), False, 'from PyQt5 import QtCore, QtWidgets, QtGui\n'), ((13296, 13319), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (13317, 13319), False, 'from PyQt5 import QtCore, QtWidgets, QtGui\n'), ((766, 817), 'traceback.print_exception', 'traceback.print_exception', (['type_', 'value', 'traceback_'], {}), '(type_, value, traceback_)\n', (791, 817), False, 'import sys, traceback\n'), ((826, 843), 'PyQt5.QtCore.qFatal', 'QtCore.qFatal', (['""""""'], {}), "('')\n", (839, 843), False, 'from PyQt5 import QtCore, QtWidgets, QtGui\n'), ((1477, 1494), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1488, 1494), False, 'from collections import defaultdict\n'), ((1785, 1816), 'lib.labeling.get_studies_peter', 'get_studies', (['self.data_root_dir'], {}), '(self.data_root_dir)\n', (1796, 1816), True, 'from lib.labeling import get_studies_peter as get_studies\n'), ((4321, 4347), 'numpy.flip', 'np.flip', (['img_array'], {'axis': '(1)'}), '(img_array, axis=1)\n', (4328, 4347), True, 'import numpy as np\n'), ((4867, 4962), 
'lib.windows.window_numpy', 'window_numpy', (['img_array'], {'window_centre': 'window_centre', 'window_width': 'window_width', 'cmap': 'cmap'}), '(img_array, window_centre=window_centre, window_width=\n window_width, cmap=cmap)\n', (4879, 4962), False, 'from lib.windows import SEQUENCE_WINDOWS, window_numpy\n'), ((5497, 5524), 'os.path.exists', 'os.path.exists', (['report_path'], {}), '(report_path)\n', (5511, 5524), False, 'import os\n'), ((8674, 8725), 'os.path.exists', 'os.path.exists', (["(self.human_report_path + '.invalid')"], {}), "(self.human_report_path + '.invalid')\n", (8688, 8725), False, 'import os\n'), ((11144, 11172), 'pyqtgraph.ImageItem', 'pg.ImageItem', (['self.img_array'], {}), '(self.img_array)\n', (11156, 11172), True, 'import pyqtgraph as pg\n'), ((12720, 12754), 'lib.labeling.save_pickle', 'save_pickle', (['report_path', 'out_dict'], {}), '(report_path, out_dict)\n', (12731, 12754), False, 'from lib.labeling import save_pickle, load_pickle\n'), ((4238, 4257), 'numpy.load', 'np.load', (['numpy_path'], {}), '(numpy_path)\n', (4245, 4257), True, 'import numpy as np\n'), ((5545, 5569), 'lib.labeling.load_pickle', 'load_pickle', (['report_path'], {}), '(report_path)\n', (5556, 5569), False, 'from lib.labeling import save_pickle, load_pickle\n'), ((5603, 5620), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5614, 5620), False, 'from collections import defaultdict\n'), ((5712, 5733), 'PyQt5.QtGui.QKeySequence', 'QKeySequence', (['"""Space"""'], {}), "('Space')\n", (5724, 5733), False, 'from PyQt5.QtGui import QKeySequence\n'), ((6293, 6311), 'PyQt5.QtGui.QKeySequence', 'QKeySequence', (['"""Up"""'], {}), "('Up')\n", (6305, 6311), False, 'from PyQt5.QtGui import QKeySequence\n'), ((6461, 6481), 'PyQt5.QtGui.QKeySequence', 'QKeySequence', (['"""Down"""'], {}), "('Down')\n", (6473, 6481), False, 'from PyQt5.QtGui import QKeySequence\n'), ((6669, 6689), 'PyQt5.QtGui.QKeySequence', 'QKeySequence', (['"""Left"""'], {}), 
"('Left')\n", (6681, 6689), False, 'from PyQt5.QtGui import QKeySequence\n'), ((6831, 6852), 'PyQt5.QtGui.QKeySequence', 'QKeySequence', (['"""Right"""'], {}), "('Right')\n", (6843, 6852), False, 'from PyQt5.QtGui import QKeySequence\n'), ((7028, 7045), 'PyQt5.QtGui.QKeySequence', 'QKeySequence', (['"""X"""'], {}), "('X')\n", (7040, 7045), False, 'from PyQt5.QtGui import QKeySequence\n'), ((7224, 7243), 'PyQt5.QtGui.QKeySequence', 'QKeySequence', (['"""Del"""'], {}), "('Del')\n", (7236, 7243), False, 'from PyQt5.QtGui import QKeySequence\n'), ((8788, 8858), 'os.rename', 'os.rename', (["(self.human_report_path + '.invalid')", 'self.human_report_path'], {}), "(self.human_report_path + '.invalid', self.human_report_path)\n", (8797, 8858), False, 'import os\n'), ((8870, 8920), 'os.path.exists', 'os.path.exists', (["(self.auto_report_path + '.invalid')"], {}), "(self.auto_report_path + '.invalid')\n", (8884, 8920), False, 'import os\n'), ((2887, 2907), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['colour'], {}), '(colour)\n', (2899, 2907), False, 'from PyQt5 import QtCore, QtWidgets, QtGui\n'), ((3542, 3565), 'lib.windows.SEQUENCE_WINDOWS.keys', 'SEQUENCE_WINDOWS.keys', ([], {}), '()\n', (3563, 3565), False, 'from lib.windows import SEQUENCE_WINDOWS, window_numpy\n'), ((4752, 4772), 'numpy.median', 'np.median', (['img_array'], {}), '(img_array)\n', (4761, 4772), True, 'import numpy as np\n'), ((5270, 5307), 'os.path.exists', 'os.path.exists', (['self.auto_report_path'], {}), '(self.auto_report_path)\n', (5284, 5307), False, 'import os\n'), ((6021, 6047), 'PyQt5.QtGui.QKeySequence', 'QKeySequence', (['shortcut_key'], {}), '(shortcut_key)\n', (6033, 6047), False, 'from PyQt5.QtGui import QKeySequence\n'), ((8982, 9050), 'os.rename', 'os.rename', (["(self.auto_report_path + '.invalid')", 'self.auto_report_path'], {}), "(self.auto_report_path + '.invalid', self.auto_report_path)\n", (8991, 9050), False, 'import os\n'), ((9119, 9157), 'os.path.exists', 'os.path.exists', 
(['self.human_report_path'], {}), '(self.human_report_path)\n', (9133, 9157), False, 'import os\n'), ((4800, 4814), 'scipy.stats.iqr', 'iqr', (['img_array'], {}), '(img_array)\n', (4803, 4814), False, 'from scipy.stats import iqr\n'), ((5226, 5264), 'os.path.exists', 'os.path.exists', (['self.human_report_path'], {}), '(self.human_report_path)\n', (5240, 5264), False, 'import os\n'), ((9175, 9245), 'os.rename', 'os.rename', (['self.human_report_path', "(self.human_report_path + '.invalid')"], {}), "(self.human_report_path, self.human_report_path + '.invalid')\n", (9184, 9245), False, 'import os\n'), ((9261, 9298), 'os.path.exists', 'os.path.exists', (['self.auto_report_path'], {}), '(self.auto_report_path)\n', (9275, 9298), False, 'import os\n'), ((9316, 9384), 'os.rename', 'os.rename', (['self.auto_report_path', "(self.auto_report_path + '.invalid')"], {}), "(self.auto_report_path, self.auto_report_path + '.invalid')\n", (9325, 9384), False, 'import os\n'), ((11602, 11629), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(115)', '(194)', '(251)'], {}), '(115, 194, 251)\n', (11614, 11629), False, 'from PyQt5 import QtCore, QtWidgets, QtGui\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 15 16:42:48 2012
Demo of non-blocking matplotlib plotting: draw, keep working, draw again,
then block until the figure is closed.
"""
import sys
# Setting sys.ps1 presumably makes matplotlib believe it is running in an
# interactive shell so non-blocking show() keeps the window responsive —
# TODO confirm this is still needed with the chosen backend.
sys.ps1 = 'Ciao'
import time
import numpy as np
import matplotlib
# Select the Qt4 backend; must happen before pyplot is imported.
matplotlib.use('qt4agg')
import matplotlib.pyplot as plt
x = np.random.randn(10)
print('ready to plot')
plt.plot(x)
plt.draw()
plt.show(block=False)
print('starting to sleep (or working hard)')
time.sleep(1)
plt.plot(x + 2)
plt.draw()
plt.show(block=False)
print('sleeping again (or more work)')
time.sleep(1)
print('now blocking until the figure is closed')
plt.show(block=True)
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"time.sleep",
"matplotlib.pyplot.draw",
"matplotlib.use"
] | [((224, 248), 'matplotlib.use', 'matplotlib.use', (['"""qt4agg"""'], {}), "('qt4agg')\n", (238, 248), False, 'import matplotlib\n'), ((289, 308), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (304, 308), True, 'import numpy as np\n'), ((334, 345), 'matplotlib.pyplot.plot', 'plt.plot', (['x'], {}), '(x)\n', (342, 345), True, 'import matplotlib.pyplot as plt\n'), ((347, 357), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (355, 357), True, 'import matplotlib.pyplot as plt\n'), ((359, 380), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (367, 380), True, 'import matplotlib.pyplot as plt\n'), ((430, 443), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (440, 443), False, 'import time\n'), ((445, 460), 'matplotlib.pyplot.plot', 'plt.plot', (['(x + 2)'], {}), '(x + 2)\n', (453, 460), True, 'import matplotlib.pyplot as plt\n'), ((462, 472), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (470, 472), True, 'import matplotlib.pyplot as plt\n'), ((474, 495), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (482, 495), True, 'import matplotlib.pyplot as plt\n'), ((539, 552), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (549, 552), False, 'import time\n'), ((604, 624), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (612, 624), True, 'import matplotlib.pyplot as plt\n')] |
from unittest import TestCase
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.models import Sequential, clone_model
from tensorflow.python.keras.layers import Dense, Lambda, BatchNormalization
from tensorflow_fewshot.models.fast_gradients import take_n_gradient_step
class TestGradientUtils(TestCase):
    """Unit tests for take_n_gradient_step (MAML-style inner-loop updates)."""

    def setUp(self):
        # Fix both numpy and TF seeds so weight initialisations are reproducible.
        np.random.seed(37)
        tf.random.set_seed(37)

    def test_update_weights_creates_model_with_right_weights(self):
        """One step, alpha=1, loss=prediction: ones-initialised kernels go to zero."""
        # Given
        initial_model = create_2_layer_MLP()

        # When
        to_be_updated_model = clone_model(initial_model)
        take_n_gradient_step(
            initial_model,
            to_be_updated_model,
            n_step=1,
            alpha=1.0,
            loss=(lambda y, p: p),
            data_x=np.array([[1]]),
            data_y=np.array([[1]])
        )
        to_be_updated_model_weights = [layer.kernel for layer in to_be_updated_model.layers if layer.trainable]

        # Then
        np.testing.assert_equal(to_be_updated_model_weights[0].numpy(), np.zeros((1, 2)))
        np.testing.assert_equal(to_be_updated_model_weights[1].numpy(), np.zeros((2, 1)))

    def test_update_weights_creates_model_with_right_weights_with_alpha_2(self):
        """With a larger learning rate (alpha=4) the same step drives kernels to -3."""
        # Given
        initial_model = create_2_layer_MLP()

        # When
        updated_model = clone_model(initial_model)
        take_n_gradient_step(
            initial_model,
            updated_model,
            n_step=1,
            alpha=4.0,
            loss=(lambda y, p: p),
            data_x=np.array([[1]]),
            data_y=np.array([[1]])
        )
        updated_model_weights = [layer.kernel for layer in updated_model.layers if layer.trainable]

        # Then
        np.testing.assert_equal(updated_model_weights[0].numpy(), -3*np.ones((1, 2)))
        np.testing.assert_equal(updated_model_weights[1].numpy(), -3*np.ones((2, 1)))

    def test_2nd_order_gradient_through_updated_model(self):
        """Second-order gradients must flow through the inner update to the initial weights."""
        # Given
        initial_model = Sequential([
            Dense(1, use_bias=False, kernel_initializer='ones', input_shape=(1,)),
            Lambda(lambda x: x ** 2)
        ])
        x = np.array([[3]])
        updated_model = clone_model(initial_model)

        # When
        with tf.GradientTape() as outer_tape:
            take_n_gradient_step(
                initial_model,
                updated_model,
                n_step=1,
                alpha=1.0,
                loss=(lambda y, p: p),
                data_x=x,
                data_y=x
            )
            yp = updated_model(x)
        grad_of_grads = outer_tape.gradient(yp, initial_model.trainable_variables)

        # Then
        self.assertEqual(5202, grad_of_grads[0])

    def test_gradient_tape_doesnt_crash_when_model_has_non_trainable_variables(self):
        """Non-trainable variables (BatchNorm statistics) must be left untouched."""
        # Given
        initial_model = Sequential([
            tf.keras.layers.Input((1,)),
            Dense(3),
            BatchNormalization(),
            Dense(7)
        ])
        initial_weights = initial_model.get_weights()
        x = np.array([[1]])

        # When
        updated_model = clone_model(initial_model)
        take_n_gradient_step(
            initial_model,
            updated_model,
            n_step=1,
            alpha=1.0,
            loss=(lambda y, p: p),
            data_x=x,
            data_y=x
        )

        # Then
        np.testing.assert_equal(initial_weights[4], updated_model.get_weights()[4])  # Moving mean
        np.testing.assert_equal(initial_weights[5], updated_model.get_weights()[5])  # Moving Variance

    def test_take_5_gradient_steps(self):
        """Several chained inner steps should still produce a usable model."""
        # Given
        model = Sequential([
            tf.keras.layers.Input((1,)),
            Dense(1, use_bias=False, kernel_initializer='ones'),
        ])
        updated_model = clone_model(model)
        x = np.array([[1]])
        y = np.array([[4]])

        # When
        n_step = 5
        alpha = 1.0
        take_n_gradient_step(model, updated_model, n_step, alpha, tf.keras.losses.mse, x, y)

        # Then
        self.assertIsNotNone(updated_model(x))

    # TODO: test on different layers and models: convnets, batchnorm, conv2d, pooling, etc.
def create_2_layer_MLP():
    """Return a bias-free 1-2-1 MLP whose kernels are initialised to ones."""
    layers = [
        tf.keras.layers.Input((1,)),
        tf.keras.layers.Dense(2, use_bias=False, kernel_initializer='ones'),
        tf.keras.layers.Dense(1, use_bias=False, kernel_initializer='ones'),
    ]
    return tf.keras.models.Sequential(layers)
| [
"tensorflow.random.set_seed",
"tensorflow.python.keras.layers.Dense",
"numpy.random.seed",
"tensorflow.keras.layers.Dense",
"tensorflow.python.keras.layers.Lambda",
"numpy.zeros",
"numpy.ones",
"tensorflow_fewshot.models.fast_gradients.take_n_gradient_step",
"numpy.array",
"tensorflow.keras.layers... | [((359, 377), 'numpy.random.seed', 'np.random.seed', (['(37)'], {}), '(37)\n', (373, 377), True, 'import numpy as np\n'), ((386, 408), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(37)'], {}), '(37)\n', (404, 408), True, 'import tensorflow as tf\n'), ((629, 655), 'tensorflow.python.keras.models.clone_model', 'clone_model', (['initial_model'], {}), '(initial_model)\n', (640, 655), False, 'from tensorflow.python.keras.models import Sequential, clone_model\n'), ((1443, 1469), 'tensorflow.python.keras.models.clone_model', 'clone_model', (['initial_model'], {}), '(initial_model)\n', (1454, 1469), False, 'from tensorflow.python.keras.models import Sequential, clone_model\n'), ((2261, 2276), 'numpy.array', 'np.array', (['[[3]]'], {}), '([[3]])\n', (2269, 2276), True, 'import numpy as np\n'), ((2302, 2328), 'tensorflow.python.keras.models.clone_model', 'clone_model', (['initial_model'], {}), '(initial_model)\n', (2313, 2328), False, 'from tensorflow.python.keras.models import Sequential, clone_model\n'), ((3161, 3176), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (3169, 3176), True, 'import numpy as np\n'), ((3217, 3243), 'tensorflow.python.keras.models.clone_model', 'clone_model', (['initial_model'], {}), '(initial_model)\n', (3228, 3243), False, 'from tensorflow.python.keras.models import Sequential, clone_model\n'), ((3252, 3368), 'tensorflow_fewshot.models.fast_gradients.take_n_gradient_step', 'take_n_gradient_step', (['initial_model', 'updated_model'], {'n_step': '(1)', 'alpha': '(1.0)', 'loss': '(lambda y, p: p)', 'data_x': 'x', 'data_y': 'x'}), '(initial_model, updated_model, n_step=1, alpha=1.0,\n loss=lambda y, p: p, data_x=x, data_y=x)\n', (3272, 3368), False, 'from tensorflow_fewshot.models.fast_gradients import take_n_gradient_step\n'), ((3908, 3926), 'tensorflow.python.keras.models.clone_model', 'clone_model', (['model'], {}), '(model)\n', (3919, 3926), False, 'from tensorflow.python.keras.models import 
Sequential, clone_model\n'), ((3939, 3954), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (3947, 3954), True, 'import numpy as np\n'), ((3967, 3982), 'numpy.array', 'np.array', (['[[4]]'], {}), '([[4]])\n', (3975, 3982), True, 'import numpy as np\n'), ((4046, 4135), 'tensorflow_fewshot.models.fast_gradients.take_n_gradient_step', 'take_n_gradient_step', (['model', 'updated_model', 'n_step', 'alpha', 'tf.keras.losses.mse', 'x', 'y'], {}), '(model, updated_model, n_step, alpha, tf.keras.losses.\n mse, x, y)\n', (4066, 4135), False, 'from tensorflow_fewshot.models.fast_gradients import take_n_gradient_step\n'), ((1107, 1123), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {}), '((1, 2))\n', (1115, 1123), True, 'import numpy as np\n'), ((1197, 1213), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (1205, 1213), True, 'import numpy as np\n'), ((2358, 2375), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2373, 2375), True, 'import tensorflow as tf\n'), ((2403, 2519), 'tensorflow_fewshot.models.fast_gradients.take_n_gradient_step', 'take_n_gradient_step', (['initial_model', 'updated_model'], {'n_step': '(1)', 'alpha': '(1.0)', 'loss': '(lambda y, p: p)', 'data_x': 'x', 'data_y': 'x'}), '(initial_model, updated_model, n_step=1, alpha=1.0,\n loss=lambda y, p: p, data_x=x, data_y=x)\n', (2423, 2519), False, 'from tensorflow_fewshot.models.fast_gradients import take_n_gradient_step\n'), ((4364, 4391), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['(1,)'], {}), '((1,))\n', (4385, 4391), True, 'import tensorflow as tf\n'), ((4401, 4468), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'use_bias': '(False)', 'kernel_initializer': '"""ones"""'}), "(2, use_bias=False, kernel_initializer='ones')\n", (4422, 4468), True, 'import tensorflow as tf\n'), ((4478, 4545), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'use_bias': '(False)', 'kernel_initializer': '"""ones"""'}), "(1, use_bias=False, 
kernel_initializer='ones')\n", (4499, 4545), True, 'import tensorflow as tf\n'), ((845, 860), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (853, 860), True, 'import numpy as np\n'), ((881, 896), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (889, 896), True, 'import numpy as np\n'), ((1653, 1668), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (1661, 1668), True, 'import numpy as np\n'), ((1689, 1704), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (1697, 1704), True, 'import numpy as np\n'), ((1900, 1915), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (1907, 1915), True, 'import numpy as np\n'), ((1986, 2001), 'numpy.ones', 'np.ones', (['(2, 1)'], {}), '((2, 1))\n', (1993, 2001), True, 'import numpy as np\n'), ((2130, 2199), 'tensorflow.python.keras.layers.Dense', 'Dense', (['(1)'], {'use_bias': '(False)', 'kernel_initializer': '"""ones"""', 'input_shape': '(1,)'}), "(1, use_bias=False, kernel_initializer='ones', input_shape=(1,))\n", (2135, 2199), False, 'from tensorflow.python.keras.layers import Dense, Lambda, BatchNormalization\n'), ((2213, 2237), 'tensorflow.python.keras.layers.Lambda', 'Lambda', (['(lambda x: x ** 2)'], {}), '(lambda x: x ** 2)\n', (2219, 2237), False, 'from tensorflow.python.keras.layers import Dense, Lambda, BatchNormalization\n'), ((2978, 3005), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['(1,)'], {}), '((1,))\n', (2999, 3005), True, 'import tensorflow as tf\n'), ((3019, 3027), 'tensorflow.python.keras.layers.Dense', 'Dense', (['(3)'], {}), '(3)\n', (3024, 3027), False, 'from tensorflow.python.keras.layers import Dense, Lambda, BatchNormalization\n'), ((3041, 3061), 'tensorflow.python.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3059, 3061), False, 'from tensorflow.python.keras.layers import Dense, Lambda, BatchNormalization\n'), ((3075, 3083), 'tensorflow.python.keras.layers.Dense', 'Dense', (['(7)'], {}), '(7)\n', (3080, 3083), False, 
'from tensorflow.python.keras.layers import Dense, Lambda, BatchNormalization\n'), ((3779, 3806), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['(1,)'], {}), '((1,))\n', (3800, 3806), True, 'import tensorflow as tf\n'), ((3820, 3871), 'tensorflow.python.keras.layers.Dense', 'Dense', (['(1)'], {'use_bias': '(False)', 'kernel_initializer': '"""ones"""'}), "(1, use_bias=False, kernel_initializer='ones')\n", (3825, 3871), False, 'from tensorflow.python.keras.layers import Dense, Lambda, BatchNormalization\n')] |
from .global_settings import settings
from .utils import moving_mean
import numpy as np
class SpeedCalculator:
    """Reconstructs per-revolution speed profiles from timestamped points.

    Points are grouped into "periods" using an encoder tick counter; each
    completed full period is resampled onto a fixed grid, smoothed and
    differentiated, and the mean of the last three profiles is passed to
    the supplied callback.
    """

    def __init__(self, encoder_data_provider, callback):
        # (t, y, encoder_tick) triples for the revolution in progress.
        self._current_period = []
        self._periods = []  # NOTE(review): never read — candidate for removal
        self._encoder_data_provider = encoder_data_provider
        self._log = open("corrector_log.log", "w")
        self._speeds = []
        self._callback = callback
        # Line-buffered so points survive a crash.
        self._logfile = open('points.log', 'w', buffering=1)
        self._min_points = settings.get_minimal_full_period_points()
        self._ticks = settings.get_encoder_ticks()
        # A period counts as "full" only if its first encoder reading is below
        # _threshold_low and its last reading is above _threshold_high.
        self._threshold_low = 2*(self._ticks / self._min_points)
        self._threshold_high = self._ticks - self._threshold_low
        print(f"Thresholds are: LOW={self._threshold_low}, HIGH={self._threshold_high}")

    def __del__(self):
        # NOTE(review): relying on __del__ for file cleanup is fragile; an
        # explicit close()/context-manager API would be safer.
        self._logfile.close()
        self._log.close()

    def _add_new_speed(self, speed):
        """Store a speed profile and emit the mean of the last three via callback."""
        self._speeds.append(speed)
        if len(self._speeds) < 3:
            # Not enough history to average yet.
            return
        latest_speeds = np.array(self._speeds[-3:])
        average_speed = np.mean(latest_speeds, axis=0)
        print(f"Shape of average = {average_speed.shape}")
        self._callback(average_speed)

    def _calculate_current_period(self):
        """Resample, smooth and differentiate the current period into a speed profile."""
        start_reading = self._current_period[0][2]
        final_reading = self._current_period[-1][2]
        if start_reading > self._threshold_low or final_reading < self._threshold_high:
            print("Discarding period as not full for analysis <<<<<<<<<<<<<!")
            return # not full period
        # t, y, e
        ts = [i[0] for i in self._current_period]
        ys = [i[1] for i in self._current_period]
        # es = [i[2] for i in self._current_period]
        # Eliminate wrapped time: each wrap adds one full counter span.
        # 4294967 ~= 2**32 / 1000 — presumably a 32-bit microsecond counter
        # expressed in milliseconds; TODO confirm the units.
        previous_time = 0
        additional_time = 0
        for i, t in enumerate(ts):
            if previous_time > t:
                additional_time += 1
            previous_time = t
            ts[i] += 4294967 * additional_time
        # interpolation: resample onto a uniform grid of correction_bins + 1 points
        ts = np.subtract(ts, ts[0])
        t = np.linspace(0, ts[-1], num=settings.get_correction_bins() + 1)
        fs = np.interp(t, ts, ys)
        # smoothing
        smoothed = moving_mean(fs, 4)
        # Finite-difference derivative = speed profile.
        df = np.diff(smoothed)
        dt = np.diff(t)
        f_speed = np.divide(df, dt)
        self._add_new_speed(f_speed)

    def add_point(self, p):
        """Add a tracked point p = (x, y, timestamp) to the current period.

        Looks up the encoder reading for the timestamp; when the encoder
        counter wraps (tick decreases), the finished period is analysed and
        a new one is started.
        """
        x, y, t = p
        e = self._encoder_data_provider.find_readout_by_timestamp(t)
        if e is None:
            print(f"Could not find right encoder reading for timestamp {t}")
            e = 0
        else:
            print(f"Found encoder reading {e} for timestamp {t}")
        if len(self._current_period) > 0:
            previous_tick = self._current_period[-1][2]
            print(f"Previous tick = {previous_tick}")
            if previous_tick > e:
                # Encoder wrapped -> the previous period is complete.
                print("Ended reading full period <<<<<<<<<<<<<<<!")
                self._log.write(f"Cokolwiek {self._current_period}\n")
                self._calculate_current_period()
                self._current_period = []
        self._current_period.append((t, y, e)) # TODO: should be x or y depending on orientation
        print(f"Added point {p} to current period of length: {len(self._current_period)}")
        self._logfile.write(str(p)+"\n")
        # seek for t in reader encoder
| [
"numpy.divide",
"numpy.subtract",
"numpy.mean",
"numpy.array",
"numpy.diff",
"numpy.interp"
] | [((1034, 1061), 'numpy.array', 'np.array', (['self._speeds[-3:]'], {}), '(self._speeds[-3:])\n', (1042, 1061), True, 'import numpy as np\n'), ((1086, 1116), 'numpy.mean', 'np.mean', (['latest_speeds'], {'axis': '(0)'}), '(latest_speeds, axis=0)\n', (1093, 1116), True, 'import numpy as np\n'), ((2049, 2071), 'numpy.subtract', 'np.subtract', (['ts', 'ts[0]'], {}), '(ts, ts[0])\n', (2060, 2071), True, 'import numpy as np\n'), ((2160, 2180), 'numpy.interp', 'np.interp', (['t', 'ts', 'ys'], {}), '(t, ts, ys)\n', (2169, 2180), True, 'import numpy as np\n'), ((2253, 2270), 'numpy.diff', 'np.diff', (['smoothed'], {}), '(smoothed)\n', (2260, 2270), True, 'import numpy as np\n'), ((2284, 2294), 'numpy.diff', 'np.diff', (['t'], {}), '(t)\n', (2291, 2294), True, 'import numpy as np\n'), ((2313, 2330), 'numpy.divide', 'np.divide', (['df', 'dt'], {}), '(df, dt)\n', (2322, 2330), True, 'import numpy as np\n')] |
"""
This code is modified from the implementation of https://github.com/iyah4888/SIGGRAPH18SSS
Information about the original and unmodified code:
@author: <NAME> (http://taehyunoh.com, <EMAIL>)
@date: Jul 29, 2018
@description: This is a part of the semantic feature extraction implementation used in
[Semantic Soft Segmentation (Aksoy et al., 2018)] (project page: http://people.inf.ethz.ch/aksoyy/sss/).
This code is modified from the implementation by DrSleep (https://github.com/DrSleep/tensorflow-deeplab-resnet)
This code is for prototyping research ideas; thus, please use this code for non-commercial purposes only.
"""
from __future__ import print_function
import argparse
from datetime import datetime
import os
import sys
import time
import scipy.io as sio
from glob import glob
import sys
sys.path.insert(0,'./SIGGRAPH18SSS')
import torch
import tensorflow as tf
import numpy as np
import pdb
from parse_opt import get_arguments
from deeplab_resnet import HyperColumn_Deeplabv2, read_data_list
IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
import paths
'''
Helper functions
'''
def load_dir_structs(dataset_path):
    """Return the entries of ``dataset_path`` as a flat list.

    Despite the name, no directory walking or glob filtering happens here:
    the argument (an iterable of file paths) is simply copied into a new
    list.  The original implementation also defined ``types = ('*.png')``,
    an unused local that shadowed the stdlib ``types`` module and was a
    string rather than a tuple (missing trailing comma); it is removed.
    """
    return list(dataset_path)
def read_img(t_imgfname, input_size, img_mean): # optional pre-processing arguments
    """Read one PNG image and prepare it for the network.

    The image is decoded, converted from RGB to BGR channel order, cast to
    float32, mean-subtracted, then resized.

    Args:
        t_imgfname: tf string tensor holding the path of the image file.
        input_size: a tuple with (height, width) values, or None, in which
            case the image is resized to twice its original size.
        img_mean: vector of mean colour values to subtract
            (presumably in BGR order, matching IMG_MEAN above — TODO confirm).

    Returns:
        Two tensors: the resized image and the mean-subtracted original.
    """
    img_contents = tf.read_file(t_imgfname)
    img = tf.image.decode_png(img_contents, channels=3)
    # Reorder channels RGB -> BGR before mean subtraction.
    img_r, img_g, img_b = tf.split(axis=2, num_or_size_splits=3, value=img)
    img = tf.cast(tf.concat(axis=2, values=[img_b, img_g, img_r]), dtype=tf.float32)
    # Extract mean.
    img -= img_mean
    if input_size is not None:
        h, w = input_size
        # Build the (height, width) target shape tensor and resize.
        newshape = tf.squeeze(tf.stack([h, w]), squeeze_dims=[1])
        img2 = tf.image.resize_images(img, newshape)
    else:
        # No target size given: double the original spatial dimensions.
        img2 = tf.image.resize_images(img, tf.shape(img)[0:2,]*2)
    return img2, img
def main(image_name):
    """Extract hypercolumn features for one VOC image and save them as .pt.

    Loads the pretrained HyperColumn_Deeplabv2 model, reads the JPEG named
    ``image_name`` from the VOC JPEGImages directory, reflection-pads it,
    runs the network, crops the padding back off the embedding map and
    stores the result under the configured feature directory.
    """
    args = get_arguments()
    # Set up tf session and initialize variables.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        model = HyperColumn_Deeplabv2(sess, args)
        # Load variables if the checkpoint is provided.
        model.load(paths.get_path()["model_dir"])
        image_dir = os.path.join(paths.get_path()["root_voc_dir"], 'JPEGImages', image_name + '.jpg')
        local_imgflist = [image_dir]
        save_folder = paths.get_path()["feature_dir"]
        if not os.path.exists(save_folder):
            os.mkdir(save_folder)
        for i in range(len(local_imgflist)):
            # Skip entries without a file extension.
            if os.path.splitext(local_imgflist[i])[1] == '':
                continue
            print('{} Processing {}'.format(i, local_imgflist[i]))
            # Reflection-pad the image so the embedding has no border artefacts.
            padsize = 50
            _, ori_img = read_img(local_imgflist[i], input_size = None, img_mean = IMG_MEAN)
            pad_img = tf.pad(ori_img, [[padsize,padsize], [padsize,padsize], [0,0]], mode='REFLECT')
            cur_embed = model.test(pad_img.eval())
            cur_embed = np.squeeze(cur_embed)
            # File stem of the input image, used as the output file name.
            curfname = os.path.split(os.path.splitext(local_imgflist[i])[0])[1]
            save_folder = paths.get_path()["feature_dir"]
            cur_svpath = os.path.join(save_folder, curfname + '.pt')
            print(cur_svpath)
            # Crop the reflection padding back off before saving.
            res = {'embedmap': cur_embed[padsize:(cur_embed.shape[0]-padsize),padsize:(cur_embed.shape[1]-padsize),:]}
            torch.save(res,cur_svpath)
| [
"os.mkdir",
"tensorflow.image.decode_png",
"tensorflow.ConfigProto",
"tensorflow.split",
"os.path.join",
"tensorflow.pad",
"os.path.exists",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.image.resize_images",
"tensorflow.Session",
"numpy.squeeze",
"tensorflow.read_file",
"deeplab_re... | [((818, 855), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./SIGGRAPH18SSS"""'], {}), "(0, './SIGGRAPH18SSS')\n", (833, 855), False, 'import sys\n'), ((1036, 1106), 'numpy.array', 'np.array', (['(104.00698793, 116.66876762, 122.67891434)'], {'dtype': 'np.float32'}), '((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)\n', (1044, 1106), True, 'import numpy as np\n'), ((1957, 1981), 'tensorflow.read_file', 'tf.read_file', (['t_imgfname'], {}), '(t_imgfname)\n', (1969, 1981), True, 'import tensorflow as tf\n'), ((1991, 2036), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['img_contents'], {'channels': '(3)'}), '(img_contents, channels=3)\n', (2010, 2036), True, 'import tensorflow as tf\n'), ((2060, 2109), 'tensorflow.split', 'tf.split', ([], {'axis': '(2)', 'num_or_size_splits': '(3)', 'value': 'img'}), '(axis=2, num_or_size_splits=3, value=img)\n', (2068, 2109), True, 'import tensorflow as tf\n'), ((2546, 2561), 'parse_opt.get_arguments', 'get_arguments', ([], {}), '()\n', (2559, 2561), False, 'from parse_opt import get_arguments\n'), ((2621, 2637), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2635, 2637), True, 'import tensorflow as tf\n'), ((2125, 2172), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(2)', 'values': '[img_b, img_g, img_r]'}), '(axis=2, values=[img_b, img_g, img_r])\n', (2134, 2172), True, 'import tensorflow as tf\n'), ((2388, 2425), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['img', 'newshape'], {}), '(img, newshape)\n', (2410, 2425), True, 'import tensorflow as tf\n'), ((2684, 2709), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (2694, 2709), True, 'import tensorflow as tf\n'), ((2729, 2762), 'deeplab_resnet.HyperColumn_Deeplabv2', 'HyperColumn_Deeplabv2', (['sess', 'args'], {}), '(sess, args)\n', (2750, 2762), False, 'from deeplab_resnet import HyperColumn_Deeplabv2, read_data_list\n'), ((2343, 2359), 'tensorflow.stack', 
'tf.stack', (['[h, w]'], {}), '([h, w])\n', (2351, 2359), True, 'import tensorflow as tf\n'), ((3005, 3021), 'paths.get_path', 'paths.get_path', ([], {}), '()\n', (3019, 3021), False, 'import paths\n'), ((3046, 3073), 'os.path.exists', 'os.path.exists', (['save_folder'], {}), '(save_folder)\n', (3060, 3073), False, 'import os\n'), ((3078, 3099), 'os.mkdir', 'os.mkdir', (['save_folder'], {}), '(save_folder)\n', (3086, 3099), False, 'import os\n'), ((3381, 3467), 'tensorflow.pad', 'tf.pad', (['ori_img', '[[padsize, padsize], [padsize, padsize], [0, 0]]'], {'mode': '"""REFLECT"""'}), "(ori_img, [[padsize, padsize], [padsize, padsize], [0, 0]], mode=\n 'REFLECT')\n", (3387, 3467), True, 'import tensorflow as tf\n'), ((3517, 3538), 'numpy.squeeze', 'np.squeeze', (['cur_embed'], {}), '(cur_embed)\n', (3527, 3538), True, 'import numpy as np\n'), ((3679, 3722), 'os.path.join', 'os.path.join', (['save_folder', "(curfname + '.pt')"], {}), "(save_folder, curfname + '.pt')\n", (3691, 3722), False, 'import os\n'), ((3857, 3884), 'torch.save', 'torch.save', (['res', 'cur_svpath'], {}), '(res, cur_svpath)\n', (3867, 3884), False, 'import torch\n'), ((2827, 2843), 'paths.get_path', 'paths.get_path', ([], {}), '()\n', (2841, 2843), False, 'import paths\n'), ((2886, 2902), 'paths.get_path', 'paths.get_path', ([], {}), '()\n', (2900, 2902), False, 'import paths\n'), ((3631, 3647), 'paths.get_path', 'paths.get_path', ([], {}), '()\n', (3645, 3647), False, 'import paths\n'), ((2470, 2483), 'tensorflow.shape', 'tf.shape', (['img'], {}), '(img)\n', (2478, 2483), True, 'import tensorflow as tf\n'), ((3146, 3181), 'os.path.splitext', 'os.path.splitext', (['local_imgflist[i]'], {}), '(local_imgflist[i])\n', (3162, 3181), False, 'import os\n'), ((3567, 3602), 'os.path.splitext', 'os.path.splitext', (['local_imgflist[i]'], {}), '(local_imgflist[i])\n', (3583, 3602), False, 'import os\n')] |
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous helper methods."""
import multiprocessing
import multiprocessing.dummy
from typing import Callable, Mapping, Optional, Tuple, TypeVar
import numpy as np
import sklearn.utils
K = TypeVar("K")
V = TypeVar("V")
def bootstrap(
    *inputs, stat_fn, n_samples: int = 100, random_state: Optional[np.random.RandomState] = None
) -> np.ndarray:
    """Bootstrap distribution of ``stat_fn`` over ``inputs``.

    Draws ``n_samples`` resamples (with replacement) of the inputs and
    evaluates ``stat_fn`` on each.

    Args:
        inputs: The arrays to bootstrap over.
        stat_fn: A function computing a statistic on (resampled) inputs.
        n_samples: The number of bootstrapped samples to take.
        random_state: Random state passed to `sklearn.utils.resample`.
            NOTE(review): the same ``random_state`` is handed to every
            iteration; if an int seed is passed, all resamples would be
            identical — verify callers pass None or a RandomState instance.

    Returns:
        An array of ``n_samples`` evaluations of ``stat_fn``.
    """
    results = []
    for _ in range(n_samples):
        resampled = sklearn.utils.resample(*inputs, random_state=random_state)
        # resample returns the bare array for a single input, a list otherwise.
        stat = stat_fn(*resampled) if len(inputs) > 1 else stat_fn(resampled)
        results.append(stat)
    return np.array(results)
def empirical_ci(arr: np.ndarray, alpha: float = 95.0) -> np.ndarray:
    """Lower bound, median and upper bound of the central ``alpha``% interval.

    Args:
        arr: An array of values.
        alpha: Width of the confidence interval, in percent.

    Returns:
        A triple (lower bound, median, upper bound) of the empirical
        percentiles of ``arr``.
    """
    half_width = alpha / 2
    return np.percentile(arr, (50 - half_width, 50, 50 + half_width))
def cross_distance(
    rewxs: Mapping[K, np.ndarray],
    rewys: Mapping[K, np.ndarray],
    distance_fn: Callable[[np.ndarray, np.ndarray], V],
    parallelism: Optional[int] = None,
    threading: bool = True,
) -> Mapping[Tuple[K, K], V]:
    """Distance between every pair of rewards drawn from `rewxs` and `rewys`.

    Args:
        rewxs: A mapping from keys to NumPy arrays of shape `(n,)`.
        rewys: A mapping from keys to NumPy arrays of shape `(n,)`.
        distance_fn: A function computing the distance between two arrays.
        parallelism: Number of workers; defaults to `multiprocessing.cpu_count()`.
        threading: If true, use threads (cheap for GIL-releasing NumPy code);
            otherwise use processes.

    Returns:
        A mapping from (i, j) to `distance_fn(rewxs[i], rewys[j])`.
    """
    # All reward arrays must share one shape.
    all_shapes = {v.shape for v in rewxs.values()}
    all_shapes.update(v.shape for v in rewys.values())
    assert len(all_shapes) <= 1, "rewards differ in shape"

    pairs = {(kx, ky): (rewx, rewy) for kx, rewx in rewxs.items() for ky, rewy in rewys.items()}

    if parallelism == 1:
        # Pool setup costs more than it saves for a single worker.
        distances = [distance_fn(a, b) for a, b in pairs.values()]
    else:
        pool_module = multiprocessing.dummy if threading else multiprocessing
        with pool_module.Pool(processes=parallelism) as pool:
            distances = pool.starmap(distance_fn, pairs.values())

    return dict(zip(pairs.keys(), distances))
| [
"numpy.percentile",
"typing.TypeVar",
"numpy.array"
] | [((770, 782), 'typing.TypeVar', 'TypeVar', (['"""K"""'], {}), "('K')\n", (777, 782), False, 'from typing import Callable, Mapping, Optional, Tuple, TypeVar\n'), ((787, 799), 'typing.TypeVar', 'TypeVar', (['"""V"""'], {}), "('V')\n", (794, 799), False, 'from typing import Callable, Mapping, Optional, Tuple, TypeVar\n'), ((1725, 1739), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (1733, 1739), True, 'import numpy as np\n'), ((2153, 2184), 'numpy.percentile', 'np.percentile', (['arr', 'percentiles'], {}), '(arr, percentiles)\n', (2166, 2184), True, 'import numpy as np\n')] |
import torch
from typing import Union, List, Callable
from .map_metric_wrapper import MapMetricWrapper
import torch.nn.functional as F
import numpy as np
import math
def _ncc(x, y):
"""
This function is a torch implementation of the normalized cross correlation
Parameters
----------
x : torch.Tensor
y : torch.Tensor
Returns
-------
torch.Tensor
"""
x_reshape, y_reshape = x.reshape(-1), y.reshape(-1)
x_sub, y_sub = x_reshape - x_reshape.mean(), y_reshape - y_reshape.mean()
x_normed, y_normed = x_sub/torch.norm(x), y_sub/torch.norm(y)
return x_normed.dot(y_normed)
def inner_prod_ncc(x, y):
    """Inner product of mean-centred, norm-scaled tensors, clamped to [0, 1].

    Like ``_ncc`` but negative correlations are clipped to zero.
    """
    a = ((x - x.mean()) / torch.norm(x)).reshape(-1)
    b = ((y - y.mean()) / torch.norm(y)).reshape(-1)
    return torch.dot(a, b).clamp(0.0, 1.0)
def th_pearsonr(x, y):
    """Pearson correlation coefficient of two tensors.

    Mimics ``scipy.stats.pearsonr`` (value only, no p-value).
    """
    xm = torch.flatten(x)
    ym = torch.flatten(y)
    xm = xm - torch.mean(xm)
    ym = ym - torch.mean(ym)
    numerator = xm.dot(ym)
    denominator = torch.norm(xm, 2) * torch.norm(ym, 2)
    return numerator / denominator
def normalize_cc(x, y, zero_mean=True):
    """Global normalized cross correlation of two tensors.

    Similar to ``th_pearsonr``, but the covariance is normalized by the
    product of (unbiased) standard deviations, and mean removal is
    optional.
    """
    xf = torch.flatten(x)
    yf = torch.flatten(y)
    if zero_mean:
        xf = xf - xf.mean()
        yf = yf - yf.mean()
    covariance = xf.dot(yf) / xf.shape[0]
    return covariance / (torch.std(xf) * torch.std(yf))
class NCC(MapMetricWrapper):
    """Metric wrapper exposing global normalized cross correlation.

    Thin subclass of ``MapMetricWrapper`` that plugs ``normalize_cc`` in as
    the metric function under the name "NCC"; all other arguments are
    forwarded unchanged to the base class.
    """
    def __init__(self, metric_name: str = "NCC", metric_func: Callable = normalize_cc, select_key: Union[List, str] = None, scale_metric: float = 1,
                 average_method: str = None, save_in_subject_keys: bool = False, metric_kwargs: dict = None,
                 **kwargs):
        super(NCC, self).__init__(metric_name=metric_name, metric_func=metric_func, select_key=select_key,
                                  scale_metric=scale_metric, average_method=average_method,
                                  save_in_subject_keys=save_in_subject_keys, metric_kwargs=metric_kwargs, **kwargs)
def _ncc_conv(y_true, y_pred, win=None):
#je comprend pas cette implementation, car les regions avec des valeurs nulle, vont donner un cc de zero
#long a calculer en cpu ... bof bof bof
I = y_true.unsqueeze(0)
J = y_pred.unsqueeze(0)
# get dimension of volume
# assumes I, J are sized [batch_size, *vol_shape #rrr , nb_feats]
ndims = len(list(I.size())) - 2
assert ndims in [1, 2, 3], "volumes should be 1 to 3 dimensions. found: %d" % ndims
# set window size
win = [9] * ndims if win is None else win
# compute filters
sum_filt = torch.ones([1, 1, *win]) #.to("cuda") ##rr modif only dim #rrr make conv complain, ... may be the data are not in cuda TODO
padding = math.floor(win[0]/2) #I guess to have the same ouput size for cc, but since the mean
stride = 1
# get convolution function
conv_fn = getattr(F, 'conv%dd' % ndims)
# compute CC squares
#for test def CC(I,J, conv_fn=F.conv3d,stride=1, padding=4, sum_filt = torch.ones([1, 1, 9,9,9]), win=[9,9,9]):
I2 = I * I
J2 = J * J
IJ = I * J
I_sum = conv_fn(I, sum_filt, stride=stride, padding=padding)
J_sum = conv_fn(J, sum_filt, stride=stride, padding=padding)
I2_sum = conv_fn(I2, sum_filt, stride=stride, padding=padding)
J2_sum = conv_fn(J2, sum_filt, stride=stride, padding=padding)
IJ_sum = conv_fn(IJ, sum_filt, stride=stride, padding=padding)
win_size = np.prod(win)
u_I = I_sum / win_size
u_J = J_sum / win_size
cross = IJ_sum - u_J * I_sum - u_I * J_sum + u_I * u_J * win_size
I_var = I2_sum - 2 * u_I * I_sum + u_I * u_I * win_size
J_var = J2_sum - 2 * u_J * J_sum + u_J * u_J * win_size
cc = cross * cross / (I_var * J_var + 1e-5)
return cc
#return torch.mean(cc)
class NCC_conv(MapMetricWrapper):
    """
    Local (over window) normalized cross correlation loss.

    Thin ``MapMetricWrapper`` subclass that plugs ``_ncc_conv`` in as the
    metric function under the name "NCC_conv".
    """
    def __init__(self, metric_name: str = "NCC_conv", metric_func: Callable = _ncc_conv, select_key: Union[List, str] = None,
                 scale_metric: float = 1,
                 average_method: str = None, save_in_subject_keys: bool = False, metric_kwargs: dict =
                 {"win": None}, **kwargs):
        # NOTE(review): ``{"win": None}`` is a mutable default argument; if
        # MapMetricWrapper ever mutates metric_kwargs, the default would be
        # shared across instances — verify before relying on it.
        super(NCC_conv, self).__init__(metric_name=metric_name, metric_func=metric_func, select_key=select_key,
                                       scale_metric=scale_metric, average_method=average_method,
                                       save_in_subject_keys=save_in_subject_keys, metric_kwargs=metric_kwargs, **kwargs)
| [
"torch.flatten",
"torch.ones",
"torch.mean",
"torch.dot",
"torch.norm",
"math.floor",
"torch.std",
"numpy.prod"
] | [((946, 962), 'torch.flatten', 'torch.flatten', (['x'], {}), '(x)\n', (959, 962), False, 'import torch\n'), ((971, 987), 'torch.flatten', 'torch.flatten', (['y'], {}), '(y)\n', (984, 987), False, 'import torch\n'), ((1002, 1015), 'torch.mean', 'torch.mean', (['x'], {}), '(x)\n', (1012, 1015), False, 'import torch\n'), ((1029, 1042), 'torch.mean', 'torch.mean', (['y'], {}), '(y)\n', (1039, 1042), False, 'import torch\n'), ((1325, 1341), 'torch.flatten', 'torch.flatten', (['x'], {}), '(x)\n', (1338, 1341), False, 'import torch\n'), ((1350, 1366), 'torch.flatten', 'torch.flatten', (['y'], {}), '(y)\n', (1363, 1366), False, 'import torch\n'), ((2781, 2805), 'torch.ones', 'torch.ones', (['[1, 1, *win]'], {}), '([1, 1, *win])\n', (2791, 2805), False, 'import torch\n'), ((2920, 2942), 'math.floor', 'math.floor', (['(win[0] / 2)'], {}), '(win[0] / 2)\n', (2930, 2942), False, 'import math\n'), ((3628, 3640), 'numpy.prod', 'np.prod', (['win'], {}), '(win)\n', (3635, 3640), True, 'import numpy as np\n'), ((1124, 1141), 'torch.norm', 'torch.norm', (['xm', '(2)'], {}), '(xm, 2)\n', (1134, 1141), False, 'import torch\n'), ((1144, 1161), 'torch.norm', 'torch.norm', (['ym', '(2)'], {}), '(ym, 2)\n', (1154, 1161), False, 'import torch\n'), ((1489, 1501), 'torch.std', 'torch.std', (['x'], {}), '(x)\n', (1498, 1501), False, 'import torch\n'), ((1504, 1516), 'torch.std', 'torch.std', (['y'], {}), '(y)\n', (1513, 1516), False, 'import torch\n'), ((559, 572), 'torch.norm', 'torch.norm', (['x'], {}), '(x)\n', (569, 572), False, 'import torch\n'), ((580, 593), 'torch.norm', 'torch.norm', (['y'], {}), '(y)\n', (590, 593), False, 'import torch\n'), ((696, 709), 'torch.norm', 'torch.norm', (['x'], {}), '(x)\n', (706, 709), False, 'import torch\n'), ((726, 739), 'torch.norm', 'torch.norm', (['y'], {}), '(y)\n', (736, 739), False, 'import torch\n'), ((819, 848), 'torch.dot', 'torch.dot', (['x_normed', 'y_normed'], {}), '(x_normed, y_normed)\n', (828, 848), False, 'import torch\n')] |
import numpy as np
import scipy.linalg as linalg
class RadialLevelSetTopology(object):
    """Level-set description of a 2-D structural topology built on radial
    basis functions (RBFs).

    The level-set function is interpolated from values at a uniform grid of
    knots using either multiquadric-spline or Gaussian basis functions,
    augmented with a linear polynomial term.  Thresholding the interpolated
    value at each element centre yields the discretized topology.

    Fix over the original: the Gaussian basis evaluated ``exp(+r^2/d^2)``,
    which grows without bound with distance; a Gaussian RBF must decay,
    i.e. ``exp(-r^2/d^2)``.

    References:
        Radial basis functions and level set method for structural topology
        optimization, in Numerical Methods in Engineering Vol. 65(12) 2005.
        Level-set methods for structural topology optimization: a review,
        in Structural and Multidisciplinary Optimization, 2013, 48, 437-472.
    """

    def __init__(self, nknx, nkny, nelx, nely, a, b, mq_basis=True):
        """Build the interpolation (H) and mapping (G) matrices.

        Knots are the locations at which the level-set interpolation is
        exact; they form a uniformly spaced rectangular grid.

        :param nknx: The number of knots in the x-direction.
        :param nkny: The number of knots in the y-direction.
        :param nelx: The number of elements in the x-direction.
        :param nely: The number of elements in the y-direction.
        :param a: Half the width of an element in metres.
        :param b: Half the length of an element in metres.
        :param mq_basis: If True, multiquadric-spline basis functions are
            used; otherwise Gaussian basis functions.
        """
        self._dim_elems = (nelx, nely)
        self._dim_knots = (nknx, nkny)
        # Geometry is kept internally in micrometres.
        self._a = a * 1e6
        self._b = b * 1e6
        self._topology = None
        # Knot/element coordinates; also sets the basis shape parameters
        # _cparam (MQ) and _dparam (Gaussian) used by the basis methods.
        self._init_knot_coords()
        self._init_elem_coords()
        basis = self._mq_spline if mq_basis is True else self._gaussian
        # H: symmetric interpolation system [[A, P], [P^T, 0]] at the knots,
        # where A holds basis evaluations and P the linear polynomial term.
        pmat = self._aux(self._xcoords, self._ycoords)
        amat = basis(self._xcoords, self._ycoords)
        row1 = np.hstack((amat, pmat))
        row2 = np.hstack((pmat.T, np.zeros((3, 3))))
        self._hmat = np.vstack((row1, row2))
        # G: the same basis evaluated at element centres; maps the RBF
        # coefficients to one level-set value per element.
        pmat = self._aux(self._xelems, self._yelems)
        amat = basis(self._xelems, self._yelems)
        self._gmat = np.hstack((amat, pmat))

    @property
    def topology(self):
        """Boolean (nelx, nely) array from the last ``update_topology``
        call; True where the interpolated level-set value is negative."""
        return self._topology

    @property
    def ind_size(self):
        """Number of design variables: one per knot."""
        return self._dim_knots[0] * self._dim_knots[1]

    def update_topology(self, xs):
        """Recompute the discretized topology used for FE modeling.

        :param xs: 1d ndarray containing the interpolation data values at
            the knot locations.
        """
        xs_col = np.atleast_2d(xs).T
        # Right-hand side augmented with zeros for the polynomial part.
        f = np.vstack((xs_col, np.zeros((3, 1))))
        alpha = linalg.solve(self._hmat, f)
        self._topology = self._direct_mapping(alpha)

    def _pairwise_sq_dist(self, x, y):
        """Squared distances between points (x, y) and every knot.

        :returns: An (n_points, n_knots) matrix of squared distances.
        """
        n_vals = x.shape[0]
        n_basis = self._xcoords.shape[0]
        xmat = np.tile(np.atleast_2d(x).T, (1, n_basis))
        ximat = np.tile(self._xcoords, (n_vals, 1))
        ymat = np.tile(np.atleast_2d(y).T, (1, n_basis))
        yimat = np.tile(self._ycoords, (n_vals, 1))
        return (xmat - ximat) ** 2 + (ymat - yimat) ** 2

    def _mq_spline(self, x, y):
        """Multiquadric-spline basis evaluated at points (x, y).

        phi(r) = sqrt(r^2 + c^2), one column per knot.
        """
        return np.sqrt(self._pairwise_sq_dist(x, y) + self._cparam ** 2)

    def _gaussian(self, x, y):
        """Gaussian basis evaluated at points (x, y).

        phi(r) = exp(-r^2 / d^2), one column per knot.  The negative sign
        was missing in the original implementation.
        """
        return np.exp(-self._pairwise_sq_dist(x, y) / (self._dparam ** 2))

    def _aux(self, x, y):
        """Linear polynomial term [1, x, y] for each point, (n_points, 3)."""
        ones = np.ones((x.shape[0], 1))
        xs = np.atleast_2d(x).T
        ys = np.atleast_2d(y).T
        return np.hstack((ones, xs, ys))

    def _direct_mapping(self, alpha):
        """Map RBF coefficients to a boolean element grid.

        The simplest level-set-to-topology mapping: evaluate the level-set
        function at each element centre and threshold it at zero.
        """
        lsf = self._gmat @ alpha
        lsf_2d = lsf.reshape(self._dim_elems, order='F')
        return lsf_2d < 0

    def _init_knot_coords(self):
        """Lay the knots on a uniform interior grid; set basis parameters."""
        nelx, nely = self._dim_elems
        nknx, nkny = self._dim_knots
        dx = 2 * self._a * nelx / (nknx + 1)
        dy = 2 * self._b * nely / (nkny + 1)
        xc = np.linspace(dx, nknx * dx, nknx)
        yc = np.linspace(dy, nkny * dy, nkny)
        xv, yv = np.meshgrid(xc, yc)
        self._xcoords = xv.ravel()
        self._ycoords = yv.ravel()
        # Shape parameters: Gaussian width (_dparam) and MQ offset (_cparam).
        self._dparam = min((dx, dy))
        self._cparam = 1 / min((dx, dy))

    def _init_elem_coords(self):
        """Compute the centre coordinate of every element in the FE mesh."""
        nelx, nely = self._dim_elems
        dx = 2 * self._a
        dy = 2 * self._b
        xc = np.arange(self._a, nelx * dx, dx)
        yc = np.arange(self._b, nely * dy, dy)
        xv, yv = np.meshgrid(xc, yc)
        self._xelems = xv.ravel()
        self._yelems = yv.ravel()
| [
"scipy.linalg.solve",
"numpy.atleast_2d",
"numpy.meshgrid",
"numpy.zeros",
"numpy.ones",
"numpy.hstack",
"numpy.arange",
"numpy.tile",
"numpy.exp",
"numpy.linspace",
"numpy.vstack",
"numpy.sqrt"
] | [((1961, 1984), 'numpy.hstack', 'np.hstack', (['(amat, pmat)'], {}), '((amat, pmat))\n', (1970, 1984), True, 'import numpy as np\n'), ((2059, 2082), 'numpy.vstack', 'np.vstack', (['(row1, row2)'], {}), '((row1, row2))\n', (2068, 2082), True, 'import numpy as np\n'), ((2246, 2269), 'numpy.hstack', 'np.hstack', (['(amat, pmat)'], {}), '((amat, pmat))\n', (2255, 2269), True, 'import numpy as np\n'), ((2906, 2933), 'scipy.linalg.solve', 'linalg.solve', (['self._hmat', 'f'], {}), '(self._hmat, f)\n', (2918, 2933), True, 'import scipy.linalg as linalg\n'), ((3863, 3898), 'numpy.tile', 'np.tile', (['self._xcoords', '(n_vals, 1)'], {}), '(self._xcoords, (n_vals, 1))\n', (3870, 3898), True, 'import numpy as np\n'), ((3972, 4007), 'numpy.tile', 'np.tile', (['self._ycoords', '(n_vals, 1)'], {}), '(self._ycoords, (n_vals, 1))\n', (3979, 4007), True, 'import numpy as np\n'), ((4137, 4178), 'numpy.sqrt', 'np.sqrt', (['(norm_squared + self._cparam ** 2)'], {}), '(norm_squared + self._cparam ** 2)\n', (4144, 4178), True, 'import numpy as np\n'), ((5022, 5057), 'numpy.tile', 'np.tile', (['self._xcoords', '(n_vals, 1)'], {}), '(self._xcoords, (n_vals, 1))\n', (5029, 5057), True, 'import numpy as np\n'), ((5131, 5166), 'numpy.tile', 'np.tile', (['self._ycoords', '(n_vals, 1)'], {}), '(self._ycoords, (n_vals, 1))\n', (5138, 5166), True, 'import numpy as np\n'), ((5296, 5336), 'numpy.exp', 'np.exp', (['(norm_squared / self._dparam ** 2)'], {}), '(norm_squared / self._dparam ** 2)\n', (5302, 5336), True, 'import numpy as np\n'), ((5396, 5420), 'numpy.ones', 'np.ones', (['(x.shape[0], 1)'], {}), '((x.shape[0], 1))\n', (5403, 5420), True, 'import numpy as np\n'), ((5498, 5518), 'numpy.hstack', 'np.hstack', (['(a, b, c)'], {}), '((a, b, c))\n', (5507, 5518), True, 'import numpy as np\n'), ((6322, 6354), 'numpy.linspace', 'np.linspace', (['dx', '(nknx * dx)', 'nknx'], {}), '(dx, nknx * dx, nknx)\n', (6333, 6354), True, 'import numpy as np\n'), ((6366, 6398), 'numpy.linspace', 
'np.linspace', (['dy', '(nkny * dy)', 'nkny'], {}), '(dy, nkny * dy, nkny)\n', (6377, 6398), True, 'import numpy as np\n'), ((6414, 6433), 'numpy.meshgrid', 'np.meshgrid', (['xc', 'yc'], {}), '(xc, yc)\n', (6425, 6433), True, 'import numpy as np\n'), ((6851, 6884), 'numpy.arange', 'np.arange', (['self._a', '(nelx * dx)', 'dx'], {}), '(self._a, nelx * dx, dx)\n', (6860, 6884), True, 'import numpy as np\n'), ((6896, 6929), 'numpy.arange', 'np.arange', (['self._b', '(nely * dy)', 'dy'], {}), '(self._b, nely * dy, dy)\n', (6905, 6929), True, 'import numpy as np\n'), ((6945, 6964), 'numpy.meshgrid', 'np.meshgrid', (['xc', 'yc'], {}), '(xc, yc)\n', (6956, 6964), True, 'import numpy as np\n'), ((2820, 2837), 'numpy.atleast_2d', 'np.atleast_2d', (['xs'], {}), '(xs)\n', (2833, 2837), True, 'import numpy as np\n'), ((5433, 5449), 'numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (5446, 5449), True, 'import numpy as np\n'), ((5464, 5480), 'numpy.atleast_2d', 'np.atleast_2d', (['y'], {}), '(y)\n', (5477, 5480), True, 'import numpy as np\n'), ((2019, 2035), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (2027, 2035), True, 'import numpy as np\n'), ((2871, 2887), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (2879, 2887), True, 'import numpy as np\n'), ((3813, 3829), 'numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (3826, 3829), True, 'import numpy as np\n'), ((3922, 3938), 'numpy.atleast_2d', 'np.atleast_2d', (['y'], {}), '(y)\n', (3935, 3938), True, 'import numpy as np\n'), ((4972, 4988), 'numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (4985, 4988), True, 'import numpy as np\n'), ((5081, 5097), 'numpy.atleast_2d', 'np.atleast_2d', (['y'], {}), '(y)\n', (5094, 5097), True, 'import numpy as np\n')] |
from ipycanvas import MultiCanvas, hold_canvas
from IPython import display as ipydisp
import numpy as np
from time import sleep
class _State():
    '''Data structure to store Turtle's state'''
    def __init__(self):
        # Square drawing area, in pixels.
        canvas_size = 300
        # Distance covered per animation frame in forward(); larger = faster.
        self.speed = 4
        # Turning advances this many times faster than moving (see right()).
        self.angular_speed_multiplier = 2
        # Pause between animation frames, in seconds.
        self.animation_freq = 1/50
        # Two stacked canvases: the background keeps the drawn lines, the
        # foreground holds only the turtle sprite so it can be redrawn
        # without erasing the drawing.
        self.canvas = MultiCanvas(2, width=canvas_size, height=canvas_size)
        self.bg = self.canvas[0]
        self.fg = self.canvas[1]
        self.bg.fill_style = 'lightgray'
        self.fg.fill_style = 'red'
_s = _State()  # module-level singleton: every drawing function mutates this shared state
def reset():
    '''Clear the drawing and return Turtle to the center'''
    # Center the turtle and point it along the +x axis (heading in degrees).
    _s.x = _s.canvas.width / 2
    _s.y = _s.x
    _s.a = 0
    # Repaint the background over the full canvas, erasing previous lines.
    # NOTE(review): fill_rect is called without a height; presumably
    # ipycanvas defaults the height to the width — confirm against its docs.
    _s.bg.fill_rect(0, 0, _s.canvas.width)
    _draw_turtle()
def speed(s):
    '''Set Turtle's speed to s (must be strictly positive)'''
    # Used as the per-frame step size in forward() and right().
    _s.speed = s
def display_turtle():
    '''Show a canvas for drawing'''
    # Reset pose and drawing first so the widget appears in a clean state.
    reset()
    ipydisp.display(_s.canvas)
def right(n):
    '''Turn Turtle n degrees to right'''
    # Split the turn into per-frame increments so the rotation animates;
    # turning covers speed * angular_speed_multiplier degrees per frame.
    num_steps = int(np.ceil(np.abs(n) / (_s.speed * _s.angular_speed_multiplier)))
    ns = np.linspace(0, n, num_steps + 1)
    dns = ns[1:] - ns[:-1]
    for dn in dns:
        # hold_canvas batches the updates of one frame into a single draw.
        with hold_canvas(_s.canvas):
            _s.a += dn
            _s.a %= 360  # keep the heading within [0, 360)
            _draw_turtle()
        sleep(_s.animation_freq)
def left(n):
    '''Turn Turtle n degrees to left'''
    # A left turn is a negative right turn.
    right(-n)
def forward(n):
    '''Move Turtle forward by n units'''
    # Current heading in radians; (dx, dy) is the unit step along it.
    angle = np.deg2rad(_s.a)
    # One animation frame per `speed` units of distance.
    num_steps = int(np.ceil(np.abs(n) / _s.speed))
    ns = np.linspace(0, n, num_steps + 1)
    dns = ns[1:] - ns[:-1]
    dx = np.cos(angle)
    dy = np.sin(angle)
    for dn in dns:
        x, y = dn*dx + _s.x, dn*dy + _s.y
        # hold_canvas batches the line segment + turtle redraw into one frame.
        with hold_canvas(_s.canvas):
            _s.bg.stroke_line(_s.x, _s.y, x, y)
            _s.x, _s.y = x, y
            _draw_turtle()
        sleep(_s.animation_freq)
def backward(n):
    '''Move Turtle backward by n units'''
    # Backward is forward with a negated distance.
    forward(-n)
## Drawing the Turtle
def _rot_mat(theta):
'''Rotation matrix'''
return np.array([
[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)],
])
# Vertices forming the triangle depicting the Turtle
# (template coordinates, nose pointing along +x; scaled, rotated and
# translated to the current pose in _draw_turtle).
_vertices = np.array([
    [-1, 1],
    [-1, -1],
    [ 2, 0],
])
def _draw_turtle():
    # Redraw the turtle sprite at its current position and heading.
    # Size multiplier for the template triangle.
    scale = 4
    p = np.array([_s.x, _s.y])
    angle = np.deg2rad(_s.a)
    R = _rot_mat(angle)
    ws = []
    # Rotate, scale and translate each template vertex to the current pose.
    for v in _vertices:
        ws.append(scale * R @ v + p)
    # The foreground layer holds only the sprite, so clear-and-redraw is
    # cheap and never erases the drawing on the background layer.
    _s.fg.clear()
    _s.fg.fill_polygon(ws)
| [
"ipycanvas.MultiCanvas",
"numpy.abs",
"ipycanvas.hold_canvas",
"numpy.deg2rad",
"IPython.display.display",
"time.sleep",
"numpy.sin",
"numpy.array",
"numpy.linspace",
"numpy.cos"
] | [((2217, 2254), 'numpy.array', 'np.array', (['[[-1, 1], [-1, -1], [2, 0]]'], {}), '([[-1, 1], [-1, -1], [2, 0]])\n', (2225, 2254), True, 'import numpy as np\n'), ((950, 976), 'IPython.display.display', 'ipydisp.display', (['_s.canvas'], {}), '(_s.canvas)\n', (965, 976), True, 'from IPython import display as ipydisp\n'), ((1125, 1157), 'numpy.linspace', 'np.linspace', (['(0)', 'n', '(num_steps + 1)'], {}), '(0, n, num_steps + 1)\n', (1136, 1157), True, 'import numpy as np\n'), ((1490, 1506), 'numpy.deg2rad', 'np.deg2rad', (['_s.a'], {}), '(_s.a)\n', (1500, 1506), True, 'import numpy as np\n'), ((1567, 1599), 'numpy.linspace', 'np.linspace', (['(0)', 'n', '(num_steps + 1)'], {}), '(0, n, num_steps + 1)\n', (1578, 1599), True, 'import numpy as np\n'), ((1636, 1649), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1642, 1649), True, 'import numpy as np\n'), ((1659, 1672), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1665, 1672), True, 'import numpy as np\n'), ((2316, 2338), 'numpy.array', 'np.array', (['[_s.x, _s.y]'], {}), '([_s.x, _s.y])\n', (2324, 2338), True, 'import numpy as np\n'), ((2351, 2367), 'numpy.deg2rad', 'np.deg2rad', (['_s.a'], {}), '(_s.a)\n', (2361, 2367), True, 'import numpy as np\n'), ((366, 419), 'ipycanvas.MultiCanvas', 'MultiCanvas', (['(2)'], {'width': 'canvas_size', 'height': 'canvas_size'}), '(2, width=canvas_size, height=canvas_size)\n', (377, 419), False, 'from ipycanvas import MultiCanvas, hold_canvas\n'), ((1323, 1347), 'time.sleep', 'sleep', (['_s.animation_freq'], {}), '(_s.animation_freq)\n', (1328, 1347), False, 'from time import sleep\n'), ((1884, 1908), 'time.sleep', 'sleep', (['_s.animation_freq'], {}), '(_s.animation_freq)\n', (1889, 1908), False, 'from time import sleep\n'), ((1217, 1239), 'ipycanvas.hold_canvas', 'hold_canvas', (['_s.canvas'], {}), '(_s.canvas)\n', (1228, 1239), False, 'from ipycanvas import MultiCanvas, hold_canvas\n'), ((1747, 1769), 'ipycanvas.hold_canvas', 'hold_canvas', (['_s.canvas'], 
{}), '(_s.canvas)\n', (1758, 1769), False, 'from ipycanvas import MultiCanvas, hold_canvas\n'), ((1061, 1070), 'numpy.abs', 'np.abs', (['n'], {}), '(n)\n', (1067, 1070), True, 'import numpy as np\n'), ((1535, 1544), 'numpy.abs', 'np.abs', (['n'], {}), '(n)\n', (1541, 1544), True, 'import numpy as np\n'), ((2079, 2092), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2085, 2092), True, 'import numpy as np\n'), ((2112, 2125), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2118, 2125), True, 'import numpy as np\n'), ((2128, 2141), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2134, 2141), True, 'import numpy as np\n'), ((2095, 2108), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2101, 2108), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os
import numpy as np
import cv2
import imgproc
from bisect import bisect_right as upper_bound
from PIL import Image
import pytesseract
import statistics
def ocr(image):
    """Run Tesseract OCR on a BGR image and return the recognized text.

    The image is converted to grayscale and written to a temporary PNG
    (pytesseract works on image files), which is always removed afterwards.
    Returns "" when OCR fails for any reason; callers treat an empty
    string as "no text found".
    """
    text = ""
    filename = "{}.png".format(os.getpid())
    try:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cv2.imwrite(filename, gray)
        text = pytesseract.image_to_string(Image.open(filename))
    except Exception:  # was a bare except: keep best-effort behaviour, but
        text = ""      # no longer swallow SystemExit/KeyboardInterrupt
    finally:
        # fix: the temp file used to leak when OCR raised after imwrite
        if os.path.exists(filename):
            os.remove(filename)
    return text
# borrowed from https://github.com/lengstrom/fast-style-transfer/blob/master/src/utils.py
def get_files(img_dir):
    """Collect image, mask and ground-truth file paths below *img_dir*."""
    return list_files(img_dir)
def list_files(in_path):
    """Recursively collect files below *in_path*, split by type.

    Parameters
    ----------
    in_path : str
        root directory to walk

    Returns
    -------
    (img_files, mask_files, gt_files) : tuple of lists
        images (.jpg/.jpeg/.gif/.png/.pgm), masks (.bmp) and
        ground-truth files (.xml/.gt/.txt); archives (.zip) and any
        other extensions are ignored.
    """
    # membership in a set is clearer (and O(1)) compared to chained == tests
    img_exts = {'.jpg', '.jpeg', '.gif', '.png', '.pgm'}
    gt_exts = {'.xml', '.gt', '.txt'}
    img_files = []
    mask_files = []
    gt_files = []
    for dirpath, _dirnames, filenames in os.walk(in_path):
        for file in filenames:
            ext = os.path.splitext(file)[1].lower()
            full = os.path.join(dirpath, file)
            if ext in img_exts:
                img_files.append(full)
            elif ext == '.bmp':
                mask_files.append(full)
            elif ext in gt_exts:
                gt_files.append(full)
    return img_files, mask_files, gt_files
def binaryMedian(m, r, d):
    """Return the median of the r x d matrix *m*.

    Every row of *m* is sorted in place first; the median is then found
    by binary-searching the value range and counting, per row, how many
    entries are <= the candidate (upper_bound is bisect_right).
    """
    for i in range(r):
        m[i].sort()
    lo = min(m[i][0] for i in range(r))
    # mirror the original's 0-initialised maximum
    hi = max(0, max(m[i][d - 1] for i in range(r)))
    desired = (r * d + 1) // 2
    while lo < hi:
        mid = lo + (hi - lo) // 2
        # number of elements <= mid across all rows
        count = sum(upper_bound(m[i], mid) for i in range(r))
        if count < desired:
            lo = mid + 1
        else:
            hi = mid
    return lo
def drawboxes(frame, img, boxes, texts, depth_img, verticals=None, dirname='./result/'):
    """Crop every detected text region, OCR it and estimate its depth.

    For each box the polygon region is cut out of *img* and *depth_img*;
    the OCR text is appended to *texts*, the crop is saved as
    ``<frame>_textno<k>.jpg`` and a robust mean depth (samples within 50
    of the median) as ``<frame>_textno<k>.txt`` under *dirname*.

    Args:
        frame: frame identifier used in output file names
        img (array): raw image
        boxes (array): detected box polygons, [num_detections, 4]
        texts (list): OCR results are appended here (in/out)
        depth_img (array): depth map aligned with *img*
        verticals: unused, kept for interface compatibility
        dirname (str): output directory (assumed to exist)
    Return:
        array: *img* as a numpy array
    """
    img = np.array(img)
    for f, box in enumerate(boxes, start=1):
        poly = np.array(box).astype(np.int32).reshape(-1, 2)
        # cut the polygon region out of the colour image
        mask = np.zeros(img.shape[0:2], dtype=np.uint8)
        cv2.drawContours(mask, [poly], -1, (255, 255, 255), -1, cv2.LINE_AA)
        res = cv2.bitwise_and(img, img, mask=mask)
        rect = cv2.boundingRect(poly)  # returns (x, y, w, h) of the rect
        cropped = res[rect[1]: rect[1] + rect[3], rect[0]: rect[0] + rect[2]]
        # same cut-out on the depth map
        depth_mask = np.zeros(depth_img.shape[0:2], dtype=np.uint8)
        cv2.drawContours(depth_mask, [poly], -1, (255, 255, 255), -1, cv2.LINE_AA)
        depth_res = cv2.bitwise_and(depth_img, depth_img, mask=depth_mask)
        depth_cropped = depth_res[rect[1]: rect[1] + rect[3], rect[0]: rect[0] + rect[2]]
        try:
            texts.append(ocr(cropped))
        except Exception:  # was a bare except; keep the skip-on-failure behaviour
            continue
        cv2.imwrite(dirname + str(frame) + "_" + "textno" + str(f) + '.jpg', cropped)
        res_file = dirname + str(frame) + "_" + "textno" + str(f) + '.txt'
        grid = np.asarray(depth_cropped)
        r, c = grid.shape[0], grid.shape[1]
        median = binaryMedian(grid, r, c)  # NOTE: sorts grid rows in place
        print(median)
        # robust mean: average only depth samples within 50 of the median
        depth_sum = 0
        cnt = 0
        for i in range(r):
            for j in range(c):
                if abs(median - grid[i][j]) <= 50:
                    depth_sum += grid[i][j]
                    cnt += 1
        if cnt:  # fix: previously a ZeroDivisionError when nothing matched
            depth_sum /= cnt
        with open(res_file, "w") as out:  # fix: file handle was never closed
            out.write(str(depth_sum))
        print(depth_sum)
    return img
def saveResult(char_boxes, img_file, img, boxes, dirname='./result/', verticals=None, texts=None):
    """ save text detection result one by one
    Args:
        char_boxes (bool): True when *boxes* are character boxes (affects
            the output image name prefix), False for word boxes
        img_file (str): image file name
        img (array): raw image context
        boxes (array): array of result file
            Shape: [num_detections, 4] for BB output / [num_detections, 4] for QUAD output
        dirname (str): output directory, created if missing
        verticals (list or None): per-box vertical-orientation flags
        texts (list or None): per-box text drawn next to each box
    Return:
        None
    """
    img = np.array(img)
    # result file names derive from the input image name
    filename, file_ext = os.path.splitext(os.path.basename(img_file))
    res_file = dirname + "res_" + filename + '.txt'
    if char_boxes:  # fix: was `if(char_boxes==True)` — non-idiomatic comparison
        res_img_file = dirname + "cboxes_" + filename + '.jpg'
    else:
        res_img_file = dirname + "wboxes_" + filename + '.jpg'
    if not os.path.isdir(dirname):
        os.mkdir(dirname)
    with open(res_file, 'w') as f:
        for i, box in enumerate(boxes):
            poly = np.array(box).astype(np.int32).reshape(-1)
            f.write(','.join(str(p) for p in poly) + '\r\n')
            poly = poly.reshape(-1, 2)
            cv2.polylines(img, [poly.reshape((-1, 1, 2))], True, color=(0, 0, 255), thickness=2)
            # NOTE(review): ptColor is computed but never passed to cv2 —
            # confirm whether it was meant to colour the label text
            ptColor = (0, 255, 255)
            if verticals is not None and verticals[i]:
                ptColor = (255, 0, 0)
            if texts is not None:
                font = cv2.FONT_HERSHEY_SIMPLEX
                font_scale = 0.5
                # draw a dark shadow first, then the foreground text
                cv2.putText(img, "{}".format(texts[i]), (poly[0][0] + 1, poly[0][1] + 1), font, font_scale, (0, 0, 0), thickness=1)
                cv2.putText(img, "{}".format(texts[i]), tuple(poly[0]), font, font_scale, (0, 255, 255), thickness=1)
    # Save result image
    cv2.imwrite(res_img_file, img)
| [
"os.mkdir",
"os.remove",
"os.getpid",
"cv2.bitwise_and",
"os.path.basename",
"cv2.cvtColor",
"cv2.imwrite",
"numpy.asarray",
"os.walk",
"numpy.zeros",
"os.path.isdir",
"bisect.bisect_right",
"PIL.Image.open",
"numpy.array",
"os.path.splitext",
"cv2.drawContours",
"cv2.boundingRect",
... | [((1019, 1035), 'os.walk', 'os.walk', (['in_path'], {}), '(in_path)\n', (1026, 1035), False, 'import os\n'), ((2764, 2777), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2772, 2777), True, 'import numpy as np\n'), ((5074, 5087), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (5082, 5087), True, 'import numpy as np\n'), ((6522, 6552), 'cv2.imwrite', 'cv2.imwrite', (['res_img_file', 'img'], {}), '(res_img_file, img)\n', (6533, 6552), False, 'import cv2\n'), ((226, 265), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (238, 265), False, 'import cv2\n'), ((433, 460), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'gray'], {}), '(filename, gray)\n', (444, 460), False, 'import cv2\n'), ((639, 658), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (648, 658), False, 'import os\n'), ((2976, 3016), 'numpy.zeros', 'np.zeros', (['img.shape[0:2]'], {'dtype': 'np.uint8'}), '(img.shape[0:2], dtype=np.uint8)\n', (2984, 3016), True, 'import numpy as np\n'), ((3030, 3098), 'cv2.drawContours', 'cv2.drawContours', (['mask', '[poly]', '(-1)', '(255, 255, 255)', '(-1)', 'cv2.LINE_AA'], {}), '(mask, [poly], -1, (255, 255, 255), -1, cv2.LINE_AA)\n', (3046, 3098), False, 'import cv2\n'), ((3117, 3153), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (3132, 3153), False, 'import cv2\n'), ((3173, 3195), 'cv2.boundingRect', 'cv2.boundingRect', (['poly'], {}), '(poly)\n', (3189, 3195), False, 'import cv2\n'), ((3336, 3382), 'numpy.zeros', 'np.zeros', (['depth_img.shape[0:2]'], {'dtype': 'np.uint8'}), '(depth_img.shape[0:2], dtype=np.uint8)\n', (3344, 3382), True, 'import numpy as np\n'), ((3396, 3470), 'cv2.drawContours', 'cv2.drawContours', (['depth_mask', '[poly]', '(-1)', '(255, 255, 255)', '(-1)', 'cv2.LINE_AA'], {}), '(depth_mask, [poly], -1, (255, 255, 255), -1, cv2.LINE_AA)\n', (3412, 3470), False, 'import cv2\n'), ((3495, 3549), 'cv2.bitwise_and', 
'cv2.bitwise_and', (['depth_img', 'depth_img'], {'mask': 'depth_mask'}), '(depth_img, depth_img, mask=depth_mask)\n', (3510, 3549), False, 'import cv2\n'), ((3954, 3984), 'cv2.imwrite', 'cv2.imwrite', (['res_file', 'cropped'], {}), '(res_file, cropped)\n', (3965, 3984), False, 'import cv2\n'), ((4083, 4108), 'numpy.asarray', 'np.asarray', (['depth_cropped'], {}), '(depth_cropped)\n', (4093, 4108), True, 'import numpy as np\n'), ((5167, 5193), 'os.path.basename', 'os.path.basename', (['img_file'], {}), '(img_file)\n', (5183, 5193), False, 'import os\n'), ((5482, 5504), 'os.path.isdir', 'os.path.isdir', (['dirname'], {}), '(dirname)\n', (5495, 5504), False, 'import os\n'), ((5518, 5535), 'os.mkdir', 'os.mkdir', (['dirname'], {}), '(dirname)\n', (5526, 5535), False, 'import os\n'), ((412, 423), 'os.getpid', 'os.getpid', ([], {}), '()\n', (421, 423), False, 'import os\n'), ((609, 629), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (619, 629), False, 'from PIL import Image\n'), ((1096, 1118), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (1112, 1118), False, 'import os\n'), ((2156, 2178), 'bisect.bisect_right', 'upper_bound', (['m[i]', 'mid'], {}), '(m[i], mid)\n', (2167, 2178), True, 'from bisect import bisect_right as upper_bound\n'), ((1284, 1311), 'os.path.join', 'os.path.join', (['dirpath', 'file'], {}), '(dirpath, file)\n', (1296, 1311), False, 'import os\n'), ((1379, 1406), 'os.path.join', 'os.path.join', (['dirpath', 'file'], {}), '(dirpath, file)\n', (1391, 1406), False, 'import os\n'), ((2873, 2886), 'numpy.array', 'np.array', (['box'], {}), '(box)\n', (2881, 2886), True, 'import numpy as np\n'), ((1505, 1532), 'os.path.join', 'os.path.join', (['dirpath', 'file'], {}), '(dirpath, file)\n', (1517, 1532), False, 'import os\n'), ((5643, 5656), 'numpy.array', 'np.array', (['box'], {}), '(box)\n', (5651, 5656), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from ..utils.convert import wh_to_xy
from ..utils.summary_stats import accuracy, accuracy_3d, summarize_accuracy
def evaluate_accuracy(df, df_gt, dist_thr, return_full=False):
    """Per-image 2D box accuracy of predictions *df* against *df_gt*.

    Ground-truth boxes are first converted from width/height to corner
    form.  Returns the summary statistics; with *return_full* also the
    concatenated per-image accuracy frame.
    """
    df_gt = wh_to_xy(df_gt)
    cols = ['x1', 'y1', 'x2', 'y2']
    per_image = []
    for image_id in df_gt['image_id'].unique():
        pred = df[df['image_id'] == image_id][cols].values
        truth = np.array(df_gt[df_gt['image_id'] == image_id][cols].values)
        per_image.append(accuracy(pred, truth, image_id=image_id, dist_thr=dist_thr))
    stats = summarize_accuracy(per_image)
    if not return_full:
        return stats
    return stats, pd.concat(per_image, ignore_index=True)
def evaluate_accuracy_3d(df, df_gt, dist_thr, return_full=False):
    """Per-image 3D centroid accuracy of predictions *df* against *df_gt*.

    Returns the summary statistics; with *return_full* also the
    concatenated per-image accuracy frame.
    """
    cols = ['z', 'y', 'x']
    per_image = []
    for image_id in df_gt['image_id'].unique():
        pred = df[df['image_id'] == image_id][cols].values
        truth = np.array(df_gt[df_gt['image_id'] == image_id][cols].values)
        per_image.append(accuracy_3d(pred, truth, image_id=image_id, dist_thr=dist_thr))
    stats = summarize_accuracy(per_image)
    if not return_full:
        return stats
    return stats, pd.concat(per_image, ignore_index=True)
"numpy.array",
"pandas.concat"
] | [((432, 491), 'numpy.array', 'np.array', (["df_gt[df_gt['image_id'] == image_id][cols].values"], {}), "(df_gt[df_gt['image_id'] == image_id][cols].values)\n", (440, 491), True, 'import numpy as np\n'), ((988, 1047), 'numpy.array', 'np.array', (["df_gt[df_gt['image_id'] == image_id][cols].values"], {}), "(df_gt[df_gt['image_id'] == image_id][cols].values)\n", (996, 1047), True, 'import numpy as np\n'), ((671, 712), 'pandas.concat', 'pd.concat', (['accuracy_df'], {'ignore_index': '(True)'}), '(accuracy_df, ignore_index=True)\n', (680, 712), True, 'import pandas as pd\n'), ((1229, 1270), 'pandas.concat', 'pd.concat', (['accuracy_df'], {'ignore_index': '(True)'}), '(accuracy_df, ignore_index=True)\n', (1238, 1270), True, 'import pandas as pd\n')] |
import os
import argparse
from tqdm import tqdm
from lib import vasp
from lib.preprocessing import interpolate, interpolate_normalize
from lib import fake
import numpy as np
from annoy import AnnoyIndex
import matplotlib.pyplot as plt
# Build an approximate-nearest-neighbour index over sliding windows of a
# (here: fake/synthetic) electronic band structure.
parser = argparse.ArgumentParser()
parser.add_argument('--width', type=float, default=.4, help='sliding window width')
parser.add_argument('--dimensions', type=int, default=16, help='number of data points for each band')
parser.add_argument('--stride', type=int, default=1, help='number of data points to slide each window (1 means zero datapoints are skipped)')
parser.add_argument('--trees', type=int, default=10, help='number of trees in annoy index')
opt = parser.parse_args()
print(opt)
# Annoy index stores the electronic band structure with each vector corresponding to a sliding window.
# Each item concatenates the lower and upper band, hence 2 * dimensions.
annoyindex = AnnoyIndex(int(2*opt.dimensions), metric='angular')
# A lookuptable is necessary to link annoy vectors to their correct material and k-space position.
lookuptable = []
# candidate left edges of the window, thinned by --stride
k_strided = [fake.k[i] for i in range(0, len(fake.k), opt.stride)]
for window_left in k_strided:
    window_right = window_left + opt.width
    # stop once the window would extend past the available k range
    if window_right > np.max(fake.k):
        break
    selection = ((fake.k >= window_left) & (fake.k <= window_right))
    window_k = fake.k[selection]
    window_band_l = fake.E_lower[selection]
    window_band_u = fake.E_upper[selection]
    # NOTE(review): window_size is computed but never used — confirm
    window_size = np.max(window_k) - np.min(window_k)
    # band gap within the window: smallest upper minus largest lower energy
    gap = np.min(window_band_u) - np.max(window_band_l)
    # resample both bands to a fixed number of points and normalize
    window_band_l = interpolate_normalize(window_k, window_band_l, opt.dimensions)
    window_band_u = interpolate_normalize(window_k, window_band_u, opt.dimensions)
    #plt.plot(window_band_l)
    #plt.plot(window_band_u)
    #plt.title('k =' + str(window_left) + ' gap = '+ str( gap))
    #plt.show()
    annoyindex.add_item(len(lookuptable), np.concatenate([window_band_l, window_band_u]))
    lookuptable.append([window_left, gap])
annoyindex.build(opt.trees)
annoyindex.save('index_test.ann')
np.save('lookuptable_test', lookuptable)
| [
"numpy.save",
"argparse.ArgumentParser",
"lib.preprocessing.interpolate_normalize",
"numpy.max",
"numpy.min",
"numpy.concatenate"
] | [((246, 271), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (269, 271), False, 'import argparse\n'), ((2013, 2053), 'numpy.save', 'np.save', (['"""lookuptable_test"""', 'lookuptable'], {}), "('lookuptable_test', lookuptable)\n", (2020, 2053), True, 'import numpy as np\n'), ((1531, 1593), 'lib.preprocessing.interpolate_normalize', 'interpolate_normalize', (['window_k', 'window_band_l', 'opt.dimensions'], {}), '(window_k, window_band_l, opt.dimensions)\n', (1552, 1593), False, 'from lib.preprocessing import interpolate, interpolate_normalize\n'), ((1614, 1676), 'lib.preprocessing.interpolate_normalize', 'interpolate_normalize', (['window_k', 'window_band_u', 'opt.dimensions'], {}), '(window_k, window_band_u, opt.dimensions)\n', (1635, 1676), False, 'from lib.preprocessing import interpolate, interpolate_normalize\n'), ((1180, 1194), 'numpy.max', 'np.max', (['fake.k'], {}), '(fake.k)\n', (1186, 1194), True, 'import numpy as np\n'), ((1418, 1434), 'numpy.max', 'np.max', (['window_k'], {}), '(window_k)\n', (1424, 1434), True, 'import numpy as np\n'), ((1437, 1453), 'numpy.min', 'np.min', (['window_k'], {}), '(window_k)\n', (1443, 1453), True, 'import numpy as np\n'), ((1465, 1486), 'numpy.min', 'np.min', (['window_band_u'], {}), '(window_band_u)\n', (1471, 1486), True, 'import numpy as np\n'), ((1489, 1510), 'numpy.max', 'np.max', (['window_band_l'], {}), '(window_band_l)\n', (1495, 1510), True, 'import numpy as np\n'), ((1859, 1905), 'numpy.concatenate', 'np.concatenate', (['[window_band_l, window_band_u]'], {}), '([window_band_l, window_band_u])\n', (1873, 1905), True, 'import numpy as np\n')] |
from dataclasses import dataclass
from astropy import units as un
from astropy.coordinates import SkyCoord, EarthLocation, AltAz, Angle
import numpy as np
from scipy.special import j1
import scipy.constants as const
import scipy.signal as sig
from astroplan import Observer
from vipy.simulation.utils import single_occurance, get_pairs
from vipy.layouts import layouts
import torch
import itertools
import time as t
import numexpr as ne # fast exponential
from einsumt import einsumt as einsum
@dataclass
class Baselines:
    """Struct-of-arrays container for all baselines over all time steps."""
    name: [str]
    st1: [object]
    st2: [object]
    u: [float]
    v: [float]
    w: [float]
    valid: [bool]

    def __getitem__(self, i):
        """Return the i-th entry as a single Baseline record."""
        return Baseline(
            self.name[i],
            self.st1[i],
            self.st2[i],
            self.u[i],
            self.v[i],
            self.w[i],
            self.valid[i],
        )

    def add(self, baselines):
        """Concatenate another Baselines collection onto this one, field by field."""
        for attr in ("name", "st1", "st2", "u", "v", "w", "valid"):
            merged = np.concatenate([getattr(self, attr), getattr(baselines, attr)])
            setattr(self, attr, merged)
@dataclass
class Baseline:
    """A single telescope pair at one time step."""
    name: str
    st1: object
    st2: object
    u: float
    v: float
    w: float
    valid: bool

    def baselineNum(self):
        """AIPS-style baseline number: 256 * (st1 + 1) + (st2 + 1)."""
        return 256 * (self.st1.st_num + 1) + (self.st2.st_num + 1)
def get_baselines(src_crd, time, array_layout):
    """Calculates baselines from source coordinates and time of observation for
    every antenna station in array_layout.

    Parameters
    ----------
    src_crd : astropy SkyCoord object
        ra and dec of source location / pointing center
    time : astropy Time object
        time steps of observation
    array_layout : dataclass object
        station information

    Returns
    -------
    dataclass object
        baselines between telescopes with visibility flags
    """
    # Calculate for all times
    # calculate GHA, Greenwich as reference for EHT
    ha_all = Angle(
        [t.sidereal_time("apparent", "greenwich") - src_crd.ra for t in time]
    )
    # source elevation as seen from every station at every time step
    el_st_all = src_crd.transform_to(
        AltAz(
            obstime=time.reshape(len(time), -1),
            location=EarthLocation.from_geocentric(
                np.repeat([array_layout.x], len(time), axis=0),
                np.repeat([array_layout.y], len(time), axis=0),
                np.repeat([array_layout.z], len(time), axis=0),
                unit=un.m,
            ),
        )
    ).alt.degree
    # fails for 1 timestep
    assert len(ha_all.value) == len(el_st_all)
    # station-pair coordinate differences are time independent
    delta_x, delta_y, delta_z = get_pairs(array_layout)
    indices = single_occurance(delta_x)  # keep each pair only once
    delta_x = delta_x[indices]
    delta_y = delta_y[indices]
    delta_z = delta_z[indices]
    # indices of self-pairings (station with itself) in the flattened N x N grid
    mask = [i * len(array_layout.x) + i for i in range(len(array_layout.x))]
    pairs = np.delete(
        np.array(np.meshgrid(array_layout.name, array_layout.name)).T.reshape(-1, 2),
        mask,
        axis=0,
    )[indices]
    st_nums = np.delete(
        np.array(np.meshgrid(array_layout.st_num, array_layout.st_num)).T.reshape(
            -1, 2
        ),
        mask,
        axis=0,
    )[indices]
    els_low = np.delete(
        np.array(np.meshgrid(array_layout.el_low, array_layout.el_low)).T.reshape(
            -1, 2
        ),
        mask,
        axis=0,
    )[indices]
    els_high = np.delete(
        np.array(np.meshgrid(array_layout.el_high, array_layout.el_high)).T.reshape(
            -1, 2
        ),
        mask,
        axis=0,
    )[indices]
    # Loop over ha and el_st
    baselines = Baselines([], [], [], [], [], [], [])
    for ha, el_st in zip(ha_all, el_st_all):
        # rotate the station difference vectors into the (u, v, w) frame
        # NOTE(review): these rotations use src_crd.ra where the standard
        # u,v,w transform uses the source declination — confirm intent
        u = np.sin(ha) * delta_x + np.cos(ha) * delta_y
        v = (
            -np.sin(src_crd.ra) * np.cos(ha) * delta_x
            + np.sin(src_crd.ra) * np.sin(ha) * delta_y
            + np.cos(src_crd.ra) * delta_z
        )
        w = (
            np.cos(src_crd.ra) * np.cos(ha) * delta_x
            - np.cos(src_crd.ra) * np.sin(ha) * delta_y
            + np.sin(src_crd.ra) * delta_z
        )
        assert u.shape == v.shape == w.shape
        # per-pair elevations, with self-pairings removed like above
        els_st = np.delete(
            np.array(np.meshgrid(el_st, el_st)).T.reshape(-1, 2),
            mask,
            axis=0,
        )[indices]
        # a baseline is valid only if both stations see the source within
        # their elevation limits
        valid = np.ones(u.shape).astype(bool)
        m1 = (els_st < els_low).any(axis=1)
        m2 = (els_st > els_high).any(axis=1)
        valid_mask = np.ma.mask_or(m1, m2)
        valid[valid_mask] = False
        names = pairs[:, 0] + "-" + pairs[:, 1]
        u = u.reshape(-1)
        v = v.reshape(-1)
        w = w.reshape(-1)
        valid = valid.reshape(-1)
        # collect baselines
        base = Baselines(
            names,
            array_layout[st_nums[:, 0]],
            array_layout[st_nums[:, 1]],
            u,
            v,
            w,
            valid,
        )
        baselines.add(base)
    return baselines
def rd_grid(fov, samples, src_crd):
    """Calculates RA and Dec values for a given fov around a source position

    Parameters
    ----------
    fov : float
        FOV size (radians)
    samples : int
        number of pixels per axis
    src_crd : astropy SkyCoord
        position of source

    Returns
    -------
    3d array
        shape (samples, samples, 2); [..., 0] holds RA (varies along the
        column axis) and [..., 1] holds Dec (varies, negated, along the
        row axis)
    """
    res = fov / samples
    # pixel offsets from the grid centre, in radians; replaces the
    # original O(n^2) per-row list comprehensions (whose loop variable
    # shadowed the outer index and recomputed the same row n times)
    offset = (np.arange(samples) - samples / 2) * res
    grid = np.zeros((samples, samples, 2))
    grid[:, :, 0] = src_crd.ra.rad + offset[np.newaxis, :]   # RA varies with column
    grid[:, :, 1] = src_crd.dec.rad - offset[:, np.newaxis]  # Dec varies with row
    return grid
def lm_grid(rd_grid, src_crd):
    """Sine (orthographic) projection of an RA/Dec grid onto the l,m plane.

    Uses the standard direction-cosine relations
        l = cos(dec) * sin(ra - ra0)
        m = sin(dec) * cos(dec0) - cos(dec) * sin(dec0) * cos(ra - ra0)

    Parameters
    ----------
    rd_grid : 3d array
        array containing a RA and Dec value in every pixel
    src_crd : astropy SkyCoord
        source position (ra0, dec0)

    Returns
    -------
    3d array
        Returns a 3d array with every pixel containing a l and m value
    """
    lm_grid = np.zeros(rd_grid.shape)
    ra = rd_grid[:, :, 0]
    dec = rd_grid[:, :, 1]
    lm_grid[:, :, 0] = np.cos(dec) * np.sin(ra - src_crd.ra.rad)
    # BUGFIX: the second term of m previously used cos(dec0) where the
    # standard formula requires cos(dec) of the pixel
    lm_grid[:, :, 1] = (
        np.sin(dec) * np.cos(src_crd.dec.rad)
        - np.cos(dec) * np.sin(src_crd.dec.rad) * np.cos(ra - src_crd.ra.rad)
    )
    return lm_grid
def uncorrupted(lm, baselines, wave, time, src_crd, array_layout, I):
    """Calculates uncorrupted visibility
    Parameters
    ----------
    lm : 3d array
        every pixel containing a l and m value
    baselines : dataclass
        baseline information
    wave : float
        wavelength of observation
    time : astropy Time
        Time steps of observation
    src_crd : astropy SkyCoord
        source position
    array_layout : dataclass
        station information
    I : 2d array
        source brightness distribution / input img
    Returns
    -------
    4d array
        Returns visibility for every lm and baseline
    """
    stat_num = array_layout.st_num.shape[0]
    base_num = int(stat_num * (stat_num - 1) / 2)  # N*(N-1)/2 pairs
    vectorized_num = np.vectorize(lambda st: st.st_num, otypes=[int])
    st1, st2 = get_valid_baselines(baselines, base_num)
    st1_num = vectorized_num(st1)
    # no valid baseline at all -> nothing to compute
    if st1_num.shape[0] == 0:
        return torch.zeros(1)
    # Fourier kernel per pixel and valid baseline
    K = getK(baselines, lm, wave, base_num)
    # brightness/coherency matrix per pixel; assumes the last axis of I
    # holds the four Stokes channels (I, Q, U, V) — TODO confirm
    B = np.zeros((lm.shape[0], lm.shape[1], 2, 2), dtype=complex)
    B[:,:,0,0] = I[:,:,0]+I[:,:,1]
    B[:,:,0,1] = I[:,:,2]+1j*I[:,:,3]
    B[:,:,1,0] = I[:,:,2]-1j*I[:,:,3]
    B[:,:,1,1] = I[:,:,0]-I[:,:,1]
    # multiply each pixel's coherency matrix with its Fourier phase
    X = torch.einsum('lmij,lmb->lmbij', torch.tensor(B), K)
    return X
def corrupted(lm, baselines, wave, time, src_crd, array_layout, I, rd):
    """Calculates corrupted visibility
    Parameters
    ----------
    lm : 3d array
        every pixel containing a l and m value
    baselines : dataclass
        baseline information
    wave : float
        wavelength of observation
    time : astropy Time
        Time steps of observation
    src_crd : astropy SkyCoord
        source position
    array_layout : dataclass
        station information
    I : 2d array
        source brightness distribution / input img
    rd : 3d array
        RA and dec values for every pixel
    Returns
    -------
    4d array
        Returns visibility for every lm and baseline
    """
    stat_num = array_layout.st_num.shape[0]
    base_num = int(stat_num * (stat_num - 1) / 2)  # N*(N-1)/2 pairs
    vectorized_num = np.vectorize(lambda st: st.st_num, otypes=[int])
    st1, st2 = get_valid_baselines(baselines, base_num)
    st1_num = vectorized_num(st1)
    st2_num = vectorized_num(st2)
    # no valid baseline at all -> nothing to compute
    if st1_num.shape[0] == 0:
        return torch.zeros(1)
    K = getK(baselines, lm, wave, base_num)
    # brightness/coherency matrix per pixel; assumes the last axis of I
    # holds the four Stokes channels (I, Q, U, V) — TODO confirm
    B = np.zeros((lm.shape[0], lm.shape[1], 2, 2), dtype=complex)
    B[:,:,0,0] = I[:,:,0]+I[:,:,1]
    B[:,:,0,1] = I[:,:,2]+1j*I[:,:,3]
    B[:,:,1,0] = I[:,:,2]-1j*I[:,:,3]
    B[:,:,1,1] = I[:,:,0]-I[:,:,1]
    # coherency
    X = torch.einsum('lmij,lmb->lmbij', torch.tensor(B), K)
    # X = np.einsum('lmij,lmb->lmbij', B, K, optimize=True)
    # X = torch.tensor(B)[:,:,None,:,:] * K[:,:,:,None,None]
    del K  # free memory as soon as possible; these tensors are large
    # telescope response (scalar primary-beam term per pixel and station)
    E_st = getE(rd, array_layout, wave, src_crd)
    # E1 = torch.tensor(E_st[:, :, st1_num, :, :], dtype=torch.cdouble)
    # E2 = torch.tensor(E_st[:, :, st2_num, :, :], dtype=torch.cdouble)
    E1 = torch.tensor(E_st[:, :, st1_num], dtype=torch.cdouble)
    E2 = torch.tensor(E_st[:, :, st2_num], dtype=torch.cdouble)
    # apply E of the first station, then of the second (E X E^H with
    # scalar E terms)
    EX = torch.einsum('lmb,lmbij->lmbij',E1,X)
    del E1, X
    # EXE = torch.einsum('lmbij,lmbjk->lmbik',EX,torch.transpose(torch.conj(E2),3,4))
    EXE = torch.einsum('lmbij,lmb->lmbij',EX,E2)
    del EX, E2
    # P matrix
    # parallactic angle of the source at every station and time step
    beta = np.array(
        [
            Observer(
                EarthLocation(st.x * un.m, st.y * un.m, st.z * un.m)
            ).parallactic_angle(time, src_crd)
            for st in array_layout
        ]
    )
    tsob = time_step_of_baseline(baselines, base_num)
    # look up each valid baseline's two station angles at its time step
    b1 = np.array([beta[st1_num[i], tsob[i]] for i in range(st1_num.shape[0])])
    b2 = np.array([beta[st2_num[i], tsob[i]] for i in range(st2_num.shape[0])])
    P1 = torch.tensor(getP(b1),dtype=torch.cdouble)
    P2 = torch.tensor(getP(b2),dtype=torch.cdouble)
    # sandwich the coherency: P1 (E X E) P2^H
    PEXE = torch.einsum('bij,lmbjk->lmbik',P1,EXE)
    del EXE
    PEXEP = torch.einsum('lmbij,bjk->lmbik',PEXE,torch.transpose(torch.conj(P2),1,2))
    del PEXE
    return PEXEP
def direction_independent(lm, baselines, wave, time, src_crd, array_layout, I, rd):
    """Calculates direction independent visibility
    Parameters
    ----------
    lm : 3d array
        every pixel containing a l and m value
    baselines : dataclass
        baseline information
    wave : float
        wavelength of observation
    time : astropy Time
        Time steps of observation
    src_crd : astropy SkyCoord
        source position
    array_layout : dataclass
        station information
    I : 2d array
        source brightness distribution / input img
    rd : 3d array
        RA and dec values for every pixel
    Returns
    -------
    4d array
        Returns visibility for every lm and baseline
    """
    stat_num = array_layout.st_num.shape[0]
    base_num = int(stat_num * (stat_num - 1) / 2)  # N*(N-1)/2 pairs
    vectorized_num = np.vectorize(lambda st: st.st_num, otypes=[int])
    st1, st2 = get_valid_baselines(baselines, base_num)
    st1_num = vectorized_num(st1)
    st2_num = vectorized_num(st2)
    # no valid baseline at all -> nothing to compute
    if st1_num.shape[0] == 0:
        return torch.zeros(1)
    K = getK(baselines, lm, wave, base_num)
    # brightness/coherency matrix per pixel; assumes the last axis of I
    # holds the four Stokes channels (I, Q, U, V) — TODO confirm
    B = np.zeros((lm.shape[0], lm.shape[1], 2, 2), dtype=complex)
    B[:,:,0,0] = I[:,:,0]+I[:,:,1]
    B[:,:,0,1] = I[:,:,2]+1j*I[:,:,3]
    B[:,:,1,0] = I[:,:,2]-1j*I[:,:,3]
    B[:,:,1,1] = I[:,:,0]-I[:,:,1]
    # coherency
    X = torch.einsum('lmij,lmb->lmbij', torch.tensor(B), K)
    del K  # free memory as soon as possible
    # telescope response; unlike corrupted(), no parallactic-angle P term
    E_st = getE(rd, array_layout, wave, src_crd)
    E1 = torch.tensor(E_st[:, :, st1_num], dtype=torch.cdouble)
    E2 = torch.tensor(E_st[:, :, st2_num], dtype=torch.cdouble)
    EX = torch.einsum('lmb,lmbij->lmbij',E1,X)
    del E1, X
    EXE = torch.einsum('lmbij,lmb->lmbij',EX,E2)
    del EX, E2
    return EXE
def integrate(X1, X2):
    """Sum over the l,m axes and average over frequency and time.

    Parameters
    ----------
    X1 : tensor, shape (l, m, baseline, ...)
        visibility for every l,m and baseline at the first frequency
    X2 : tensor, shape (l, m, baseline, ...)
        visibility for every l,m and baseline at the second frequency

    Returns
    -------
    tensor
        visibility per baseline, averaged over the two frequencies and
        over pairs of consecutive time steps
    """
    # stack the two frequencies, sum out freq + both image axes in one go,
    # and average the frequencies (factor 0.5)
    freq_avg = 0.5 * torch.sum(torch.stack((X1, X2)), dim=(0, 1, 2))
    # the leading axis now holds two consecutive time steps back to back;
    # average the two halves
    half = freq_avg.shape[0] // 2
    return 0.5 * (freq_avg[:half] + freq_avg[half:])
def getE(rd, array_layout, wave, src_crd):
    """Primary-beam attenuation (E term) per pixel and station.

    Models each station's beam as a jinc pattern of its dish diameter,
    evaluated at the pixel's angular distance from the pointing centre.

    Parameters
    ----------
    rd : 3d array
        RA/Dec value per pixel
    array_layout : dataclass object
        station information (dish diameters)
    wave : float
        wavelength
    src_crd : astropy SkyCoord
        pointing centre

    Returns
    -------
    3d array
        beam value per pixel and station, shape (rows, cols, stations)
    """
    theta = angularDistance(rd, src_crd)
    diam = np.asarray(array_layout.diam)
    # outer product diam[s] * theta[r, c] via broadcasting, scaled by 2*pi/lambda
    arg = (2 * np.pi / wave) * theta[:, :, np.newaxis] * diam[np.newaxis, np.newaxis, :]
    return jinc(arg)
def angularDistance(rd, src_crd):
    """Angular separation of every pixel from the source position.

    Approximates the separation as arcsin(sqrt(dra^2 + ddec^2)) using the
    raw coordinate offsets in radians.

    Parameters
    ----------
    rd : 3d array
        RA/Dec value per pixel
    src_crd : astropy SkyCoord
        source position

    Returns
    -------
    2d array
        separation angle per pixel
    """
    dra = rd[:, :, 0] - src_crd.ra.rad
    ddec = rd[:, :, 1] - src_crd.dec.rad
    return np.arcsin(np.hypot(dra, ddec))
def getP(beta):
    """Parallactic-angle rotation matrix (Jones P) for each angle.

    Parameters
    ----------
    beta : float array
        parallactic angles

    Returns
    -------
    3d array, shape (len(beta), 2, 2)
        [[cos b, -sin b], [sin b, cos b]] for every angle b
    """
    c = np.cos(beta)
    s = np.sin(beta)
    top = np.stack([c, -s], axis=-1)     # first matrix row per angle
    bottom = np.stack([s, c], axis=-1)  # second matrix row per angle
    return np.stack([top, bottom], axis=-2)
def getK(baselines, lm, wave, base_num):
    """Calculates Fouriertransformation Kernel for every baseline and pixel in lm grid.
    Parameters
    ----------
    baselines : dataclass object
        basline information
    lm : 2d array
        lm grid for FOV
    wave : float
        wavelength
    base_num : int
        number of baselines per time step, N*(N-1)/2
    Returns
    -------
    3d array
        Return Fourier Kernel for every pixel in lm grid and given baselines.
        Shape is given by lm axes and baseline axis
    """
    # new valid baseline calculus. for details see function get_valid_baselines()
    valid = baselines.valid.reshape(-1, base_num)
    # keep only baselines valid at both consecutive time steps
    mask = np.array(valid[:-1]).astype(bool) & np.array(valid[1:]).astype(bool)
    # uvw coordinates in units of the wavelength
    u = baselines.u.reshape(-1, base_num) / wave
    v = baselines.v.reshape(-1, base_num) / wave
    w = baselines.w.reshape(-1, base_num) / wave
    # interval start/stop values of every valid baseline, concatenated
    u_start = u[:-1][mask]
    u_stop = u[1:][mask]
    v_start = v[:-1][mask]
    v_stop = v[1:][mask]
    w_start = w[:-1][mask]
    w_stop = w[1:][mask]
    u_cmplt = np.append(u_start, u_stop)
    v_cmplt = np.append(v_start, v_stop)
    w_cmplt = np.append(w_start, w_stop)
    l = torch.tensor(lm[:, :, 0])
    m = torch.tensor(lm[:, :, 1])
    # third direction cosine n = sqrt(1 - l^2 - m^2)
    n = torch.sqrt(1-l**2-m**2)
    # outer products: phase contribution per pixel (ij) and baseline (b)
    ul = torch.einsum("b,ij->ijb", torch.tensor(u_cmplt), l)
    vm = torch.einsum("b,ij->ijb", torch.tensor(v_cmplt), m)
    wn = torch.einsum('b,ij->ijb', torch.tensor(w_cmplt), (n-1))
    # numexpr resolves ul, vm, wn and pi by name from the local frame,
    # so these bindings must exist even though they look unused
    pi = np.pi
    # NOTE(review): `test` is never used afterwards — confirm it can go
    test = ul + vm + wn
    K = ne.evaluate('exp(-2 * pi * 1j * (ul + vm + wn))') #-0.4 secs for vlba
    return torch.tensor(K)
def jinc(x):
    """Evaluate the jinc function 2*J1(x)/x elementwise, with jinc(0) = 1.

    Parameters
    ----------
    x : array
        evaluation points

    Returns
    -------
    array
        jinc values, same shape as *x*
    """
    out = np.ones(x.shape)
    nonzero = x != 0
    out[nonzero] = 2 * j1(x[nonzero]) / x[nonzero]
    return out
def get_valid_baselines(baselines, base_num):
    """Stations of every baseline valid at both ends of a time interval.

    A baseline counts only when it is flagged valid at two consecutive
    time steps t_k and t_{k+1}; the start-step and stop-step stations are
    returned concatenated.

    Parameters
    ----------
    baselines : dataclass object
        baseline spec
    base_num : int
        number of baselines per time step, N*(N-1)/2

    Returns
    -------
    (st1, st2) : two 1d arrays
        the two stations of every valid baseline
    """
    # one row per time step, one column per baseline
    valid = baselines.valid.reshape(-1, base_num)
    # valid at t_k AND at t_{k+1}
    both = np.array(valid[:-1]).astype(bool) & np.array(valid[1:]).astype(bool)
    st1 = baselines.st1.reshape(-1, base_num)
    st2 = baselines.st2.reshape(-1, base_num)
    return (
        np.append(st1[:-1][both], st1[1:][both]),
        np.append(st2[:-1][both], st2[1:][both]),
    )
def time_step_of_baseline(baselines, base_num):
    """Time-step index of every valid baseline.

    Validity follows the same start/stop rule as get_valid_baselines():
    a baseline counts only when it is valid at two consecutive time
    steps.  Each start-step index is repeated once per valid baseline,
    then the matching stop-step indices (start + 1) are appended.

    Parameters
    ----------
    baselines : dataclass object
        baseline specs
    base_num : int
        number of baselines per time step, N*(N-1)/2

    Returns
    -------
    1d array
        time-step index per valid baseline (start steps, then stop steps)
    """
    valid = baselines.valid.reshape(-1, base_num)
    both = np.array(valid[:-1]).astype(bool) & np.array(valid[1:]).astype(bool)
    # number of valid baselines in every time interval
    counts = np.sum(both, axis=1)
    steps = np.array([t for t, c in enumerate(counts) for _ in range(c)])
    return np.append(steps, steps + 1)
| [
"numpy.sum",
"torch.sqrt",
"numpy.einsum",
"numpy.ones",
"numpy.sin",
"numpy.meshgrid",
"numexpr.evaluate",
"numpy.append",
"torch.zeros",
"itertools.chain",
"vipy.simulation.utils.single_occurance",
"numpy.vectorize",
"time.sidereal_time",
"torch.einsum",
"astropy.coordinates.EarthLocat... | [((2863, 2886), 'vipy.simulation.utils.get_pairs', 'get_pairs', (['array_layout'], {}), '(array_layout)\n', (2872, 2886), False, 'from vipy.simulation.utils import single_occurance, get_pairs\n'), ((2901, 2926), 'vipy.simulation.utils.single_occurance', 'single_occurance', (['delta_x'], {}), '(delta_x)\n', (2917, 2926), False, 'from vipy.simulation.utils import single_occurance, get_pairs\n'), ((5632, 5663), 'numpy.zeros', 'np.zeros', (['(samples, samples, 2)'], {}), '((samples, samples, 2))\n', (5640, 5663), True, 'import numpy as np\n'), ((6279, 6302), 'numpy.zeros', 'np.zeros', (['rd_grid.shape'], {}), '(rd_grid.shape)\n', (6287, 6302), True, 'import numpy as np\n'), ((7347, 7395), 'numpy.vectorize', 'np.vectorize', (['(lambda st: st.st_num)'], {'otypes': '[int]'}), '(lambda st: st.st_num, otypes=[int])\n', (7359, 7395), True, 'import numpy as np\n'), ((7602, 7659), 'numpy.zeros', 'np.zeros', (['(lm.shape[0], lm.shape[1], 2, 2)'], {'dtype': 'complex'}), '((lm.shape[0], lm.shape[1], 2, 2), dtype=complex)\n', (7610, 7659), True, 'import numpy as np\n'), ((8717, 8765), 'numpy.vectorize', 'np.vectorize', (['(lambda st: st.st_num)'], {'otypes': '[int]'}), '(lambda st: st.st_num, otypes=[int])\n', (8729, 8765), True, 'import numpy as np\n'), ((9007, 9064), 'numpy.zeros', 'np.zeros', (['(lm.shape[0], lm.shape[1], 2, 2)'], {'dtype': 'complex'}), '((lm.shape[0], lm.shape[1], 2, 2), dtype=complex)\n', (9015, 9064), True, 'import numpy as np\n'), ((9654, 9708), 'torch.tensor', 'torch.tensor', (['E_st[:, :, st1_num]'], {'dtype': 'torch.cdouble'}), '(E_st[:, :, st1_num], dtype=torch.cdouble)\n', (9666, 9708), False, 'import torch\n'), ((9718, 9772), 'torch.tensor', 'torch.tensor', (['E_st[:, :, st2_num]'], {'dtype': 'torch.cdouble'}), '(E_st[:, :, st2_num], dtype=torch.cdouble)\n', (9730, 9772), False, 'import torch\n'), ((9784, 9823), 'torch.einsum', 'torch.einsum', (['"""lmb,lmbij->lmbij"""', 'E1', 'X'], {}), "('lmb,lmbij->lmbij', E1, 
X)\n", (9796, 9823), False, 'import torch\n'), ((9933, 9973), 'torch.einsum', 'torch.einsum', (['"""lmbij,lmb->lmbij"""', 'EX', 'E2'], {}), "('lmbij,lmb->lmbij', EX, E2)\n", (9945, 9973), False, 'import torch\n'), ((10580, 10621), 'torch.einsum', 'torch.einsum', (['"""bij,lmbjk->lmbik"""', 'P1', 'EXE'], {}), "('bij,lmbjk->lmbik', P1, EXE)\n", (10592, 10621), False, 'import torch\n'), ((11606, 11654), 'numpy.vectorize', 'np.vectorize', (['(lambda st: st.st_num)'], {'otypes': '[int]'}), '(lambda st: st.st_num, otypes=[int])\n', (11618, 11654), True, 'import numpy as np\n'), ((11896, 11953), 'numpy.zeros', 'np.zeros', (['(lm.shape[0], lm.shape[1], 2, 2)'], {'dtype': 'complex'}), '((lm.shape[0], lm.shape[1], 2, 2), dtype=complex)\n', (11904, 11953), True, 'import numpy as np\n'), ((12280, 12334), 'torch.tensor', 'torch.tensor', (['E_st[:, :, st1_num]'], {'dtype': 'torch.cdouble'}), '(E_st[:, :, st1_num], dtype=torch.cdouble)\n', (12292, 12334), False, 'import torch\n'), ((12344, 12398), 'torch.tensor', 'torch.tensor', (['E_st[:, :, st2_num]'], {'dtype': 'torch.cdouble'}), '(E_st[:, :, st2_num], dtype=torch.cdouble)\n', (12356, 12398), False, 'import torch\n'), ((12410, 12449), 'torch.einsum', 'torch.einsum', (['"""lmb,lmbij->lmbij"""', 'E1', 'X'], {}), "('lmb,lmbij->lmbij', E1, X)\n", (12422, 12449), False, 'import torch\n'), ((12474, 12514), 'torch.einsum', 'torch.einsum', (['"""lmbij,lmb->lmbij"""', 'EX', 'E2'], {}), "('lmbij,lmb->lmbij', EX, E2)\n", (12486, 12514), False, 'import torch\n'), ((12910, 12931), 'torch.stack', 'torch.stack', (['(X1, X2)'], {}), '((X1, X2))\n', (12921, 12931), False, 'import torch\n'), ((12945, 12966), 'torch.sum', 'torch.sum', (['X_f'], {'dim': '(2)'}), '(X_f, dim=2)\n', (12954, 12966), False, 'import torch\n'), ((12990, 13013), 'torch.sum', 'torch.sum', (['int_m'], {'dim': '(1)'}), '(int_m, dim=1)\n', (12999, 13013), False, 'import torch\n'), ((13885, 13951), 'numpy.zeros', 'np.zeros', (['(rd.shape[0], rd.shape[1], 
array_layout.st_num.shape[0])'], {}), '((rd.shape[0], rd.shape[1], array_layout.st_num.shape[0]))\n', (13893, 13951), True, 'import numpy as np\n'), ((15143, 15174), 'numpy.zeros', 'np.zeros', (['(beta.shape[0], 2, 2)'], {}), '((beta.shape[0], 2, 2))\n', (15151, 15174), True, 'import numpy as np\n'), ((15193, 15205), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (15199, 15205), True, 'import numpy as np\n'), ((15254, 15266), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (15260, 15266), True, 'import numpy as np\n'), ((15284, 15296), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (15290, 15296), True, 'import numpy as np\n'), ((16324, 16350), 'numpy.append', 'np.append', (['u_start', 'u_stop'], {}), '(u_start, u_stop)\n', (16333, 16350), True, 'import numpy as np\n'), ((16365, 16391), 'numpy.append', 'np.append', (['v_start', 'v_stop'], {}), '(v_start, v_stop)\n', (16374, 16391), True, 'import numpy as np\n'), ((16406, 16432), 'numpy.append', 'np.append', (['w_start', 'w_stop'], {}), '(w_start, w_stop)\n', (16415, 16432), True, 'import numpy as np\n'), ((16442, 16467), 'torch.tensor', 'torch.tensor', (['lm[:, :, 0]'], {}), '(lm[:, :, 0])\n', (16454, 16467), False, 'import torch\n'), ((16476, 16501), 'torch.tensor', 'torch.tensor', (['lm[:, :, 1]'], {}), '(lm[:, :, 1])\n', (16488, 16501), False, 'import torch\n'), ((16510, 16541), 'torch.sqrt', 'torch.sqrt', (['(1 - l ** 2 - m ** 2)'], {}), '(1 - l ** 2 - m ** 2)\n', (16520, 16541), False, 'import torch\n'), ((16783, 16832), 'numexpr.evaluate', 'ne.evaluate', (['"""exp(-2 * pi * 1j * (ul + vm + wn))"""'], {}), "('exp(-2 * pi * 1j * (ul + vm + wn))')\n", (16794, 16832), True, 'import numexpr as ne\n'), ((16864, 16879), 'torch.tensor', 'torch.tensor', (['K'], {}), '(K)\n', (16876, 16879), False, 'import torch\n'), ((17166, 17182), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (17173, 17182), True, 'import numpy as np\n'), ((18543, 18573), 'numpy.append', 'np.append', (['st1_start', 
'st1_stop'], {}), '(st1_start, st1_stop)\n', (18552, 18573), True, 'import numpy as np\n'), ((18590, 18620), 'numpy.append', 'np.append', (['st2_start', 'st2_stop'], {}), '(st2_start, st2_stop)\n', (18599, 18620), True, 'import numpy as np\n'), ((19724, 19744), 'numpy.sum', 'np.sum', (['mask'], {'axis': '(1)'}), '(mask, axis=1)\n', (19730, 19744), True, 'import numpy as np\n'), ((19961, 19996), 'numpy.append', 'np.append', (['time_step', '(time_step + 1)'], {}), '(time_step, time_step + 1)\n', (19970, 19996), True, 'import numpy as np\n'), ((955, 998), 'numpy.concatenate', 'np.concatenate', (['[self.name, baselines.name]'], {}), '([self.name, baselines.name])\n', (969, 998), True, 'import numpy as np\n'), ((1018, 1059), 'numpy.concatenate', 'np.concatenate', (['[self.st1, baselines.st1]'], {}), '([self.st1, baselines.st1])\n', (1032, 1059), True, 'import numpy as np\n'), ((1079, 1120), 'numpy.concatenate', 'np.concatenate', (['[self.st2, baselines.st2]'], {}), '([self.st2, baselines.st2])\n', (1093, 1120), True, 'import numpy as np\n'), ((1138, 1175), 'numpy.concatenate', 'np.concatenate', (['[self.u, baselines.u]'], {}), '([self.u, baselines.u])\n', (1152, 1175), True, 'import numpy as np\n'), ((1193, 1230), 'numpy.concatenate', 'np.concatenate', (['[self.v, baselines.v]'], {}), '([self.v, baselines.v])\n', (1207, 1230), True, 'import numpy as np\n'), ((1248, 1285), 'numpy.concatenate', 'np.concatenate', (['[self.w, baselines.w]'], {}), '([self.w, baselines.w])\n', (1262, 1285), True, 'import numpy as np\n'), ((1307, 1352), 'numpy.concatenate', 'np.concatenate', (['[self.valid, baselines.valid]'], {}), '([self.valid, baselines.valid])\n', (1321, 1352), True, 'import numpy as np\n'), ((4698, 4719), 'numpy.ma.mask_or', 'np.ma.mask_or', (['m1', 'm2'], {}), '(m1, m2)\n', (4711, 4719), True, 'import numpy as np\n'), ((6324, 6348), 'numpy.cos', 'np.cos', (['rd_grid[:, :, 1]'], {}), '(rd_grid[:, :, 1])\n', (6330, 6348), True, 'import numpy as np\n'), ((6349, 6390), 
'numpy.sin', 'np.sin', (['(rd_grid[:, :, 0] - src_crd.ra.rad)'], {}), '(rd_grid[:, :, 0] - src_crd.ra.rad)\n', (6355, 6390), True, 'import numpy as np\n'), ((7533, 7547), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (7544, 7547), False, 'import torch\n'), ((7848, 7863), 'torch.tensor', 'torch.tensor', (['B'], {}), '(B)\n', (7860, 7863), False, 'import torch\n'), ((8935, 8949), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (8946, 8949), False, 'import torch\n'), ((9269, 9284), 'torch.tensor', 'torch.tensor', (['B'], {}), '(B)\n', (9281, 9284), False, 'import torch\n'), ((11824, 11838), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (11835, 11838), False, 'import torch\n'), ((12158, 12173), 'torch.tensor', 'torch.tensor', (['B'], {}), '(B)\n', (12170, 12173), False, 'import torch\n'), ((13043, 13066), 'torch.sum', 'torch.sum', (['int_l'], {'dim': '(0)'}), '(int_l, dim=0)\n', (13052, 13066), False, 'import torch\n'), ((13186, 13207), 'torch.sum', 'torch.sum', (['X_t'], {'dim': '(0)'}), '(X_t, dim=0)\n', (13195, 13207), False, 'import torch\n'), ((14113, 14153), 'numpy.einsum', 'np.einsum', (['"""s,rd->rds"""', 'diameters', 'theta'], {}), "('s,rd->rds', diameters, theta)\n", (14122, 14153), True, 'import numpy as np\n'), ((14723, 14747), 'numpy.sqrt', 'np.sqrt', (['(r ** 2 + d ** 2)'], {}), '(r ** 2 + d ** 2)\n', (14730, 14747), True, 'import numpy as np\n'), ((15224, 15236), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (15230, 15236), True, 'import numpy as np\n'), ((16574, 16595), 'torch.tensor', 'torch.tensor', (['u_cmplt'], {}), '(u_cmplt)\n', (16586, 16595), False, 'import torch\n'), ((16635, 16656), 'torch.tensor', 'torch.tensor', (['v_cmplt'], {}), '(v_cmplt)\n', (16647, 16656), False, 'import torch\n'), ((16696, 16717), 'torch.tensor', 'torch.tensor', (['w_cmplt'], {}), '(w_cmplt)\n', (16708, 16717), False, 'import torch\n'), ((6410, 6434), 'numpy.sin', 'np.sin', (['rd_grid[:, :, 1]'], {}), '(rd_grid[:, :, 1])\n', (6416, 6434), 
True, 'import numpy as np\n'), ((6435, 6458), 'numpy.cos', 'np.cos', (['src_crd.dec.rad'], {}), '(src_crd.dec.rad)\n', (6441, 6458), True, 'import numpy as np\n'), ((6513, 6554), 'numpy.cos', 'np.cos', (['(rd_grid[:, :, 0] - src_crd.ra.rad)'], {}), '(rd_grid[:, :, 0] - src_crd.ra.rad)\n', (6519, 6554), True, 'import numpy as np\n'), ((10697, 10711), 'torch.conj', 'torch.conj', (['P2'], {}), '(P2)\n', (10707, 10711), False, 'import torch\n'), ((17206, 17219), 'scipy.special.j1', 'j1', (['x[x != 0]'], {}), '(x[x != 0])\n', (17208, 17219), False, 'from scipy.special import j1\n'), ((19915, 19942), 'itertools.chain', 'itertools.chain', (['*time_step'], {}), '(*time_step)\n', (19930, 19942), False, 'import itertools\n'), ((2215, 2255), 'time.sidereal_time', 't.sidereal_time', (['"""apparent"""', '"""greenwich"""'], {}), "('apparent', 'greenwich')\n", (2230, 2255), True, 'import time as t\n'), ((3944, 3954), 'numpy.sin', 'np.sin', (['ha'], {}), '(ha)\n', (3950, 3954), True, 'import numpy as np\n'), ((3967, 3977), 'numpy.cos', 'np.cos', (['ha'], {}), '(ha)\n', (3973, 3977), True, 'import numpy as np\n'), ((4127, 4145), 'numpy.cos', 'np.cos', (['src_crd.ra'], {}), '(src_crd.ra)\n', (4133, 4145), True, 'import numpy as np\n'), ((4304, 4322), 'numpy.sin', 'np.sin', (['src_crd.ra'], {}), '(src_crd.ra)\n', (4310, 4322), True, 'import numpy as np\n'), ((4557, 4573), 'numpy.ones', 'np.ones', (['u.shape'], {}), '(u.shape)\n', (4564, 4573), True, 'import numpy as np\n'), ((6461, 6484), 'numpy.cos', 'np.cos', (['src_crd.dec.rad'], {}), '(src_crd.dec.rad)\n', (6467, 6484), True, 'import numpy as np\n'), ((6487, 6510), 'numpy.sin', 'np.sin', (['src_crd.dec.rad'], {}), '(src_crd.dec.rad)\n', (6493, 6510), True, 'import numpy as np\n'), ((15935, 15955), 'numpy.array', 'np.array', (['valid[:-1]'], {}), '(valid[:-1])\n', (15943, 15955), True, 'import numpy as np\n'), ((15971, 15990), 'numpy.array', 'np.array', (['valid[1:]'], {}), '(valid[1:])\n', (15979, 15990), True, 'import numpy as 
np\n'), ((18101, 18121), 'numpy.array', 'np.array', (['valid[:-1]'], {}), '(valid[:-1])\n', (18109, 18121), True, 'import numpy as np\n'), ((18137, 18156), 'numpy.array', 'np.array', (['valid[1:]'], {}), '(valid[1:])\n', (18145, 18156), True, 'import numpy as np\n'), ((19510, 19530), 'numpy.array', 'np.array', (['valid[:-1]'], {}), '(valid[:-1])\n', (19518, 19530), True, 'import numpy as np\n'), ((19546, 19565), 'numpy.array', 'np.array', (['valid[1:]'], {}), '(valid[1:])\n', (19554, 19565), True, 'import numpy as np\n'), ((4036, 4046), 'numpy.cos', 'np.cos', (['ha'], {}), '(ha)\n', (4042, 4046), True, 'import numpy as np\n'), ((4071, 4089), 'numpy.sin', 'np.sin', (['src_crd.ra'], {}), '(src_crd.ra)\n', (4077, 4089), True, 'import numpy as np\n'), ((4092, 4102), 'numpy.sin', 'np.sin', (['ha'], {}), '(ha)\n', (4098, 4102), True, 'import numpy as np\n'), ((4192, 4210), 'numpy.cos', 'np.cos', (['src_crd.ra'], {}), '(src_crd.ra)\n', (4198, 4210), True, 'import numpy as np\n'), ((4213, 4223), 'numpy.cos', 'np.cos', (['ha'], {}), '(ha)\n', (4219, 4223), True, 'import numpy as np\n'), ((4248, 4266), 'numpy.cos', 'np.cos', (['src_crd.ra'], {}), '(src_crd.ra)\n', (4254, 4266), True, 'import numpy as np\n'), ((4269, 4279), 'numpy.sin', 'np.sin', (['ha'], {}), '(ha)\n', (4275, 4279), True, 'import numpy as np\n'), ((10097, 10149), 'astropy.coordinates.EarthLocation', 'EarthLocation', (['(st.x * un.m)', '(st.y * un.m)', '(st.z * un.m)'], {}), '(st.x * un.m, st.y * un.m, st.z * un.m)\n', (10110, 10149), False, 'from astropy.coordinates import SkyCoord, EarthLocation, AltAz, Angle\n'), ((3137, 3186), 'numpy.meshgrid', 'np.meshgrid', (['array_layout.name', 'array_layout.name'], {}), '(array_layout.name, array_layout.name)\n', (3148, 3186), True, 'import numpy as np\n'), ((3294, 3347), 'numpy.meshgrid', 'np.meshgrid', (['array_layout.st_num', 'array_layout.st_num'], {}), '(array_layout.st_num, array_layout.st_num)\n', (3305, 3347), True, 'import numpy as np\n'), ((3477, 3530), 
'numpy.meshgrid', 'np.meshgrid', (['array_layout.el_low', 'array_layout.el_low'], {}), '(array_layout.el_low, array_layout.el_low)\n', (3488, 3530), True, 'import numpy as np\n'), ((3661, 3716), 'numpy.meshgrid', 'np.meshgrid', (['array_layout.el_high', 'array_layout.el_high'], {}), '(array_layout.el_high, array_layout.el_high)\n', (3672, 3716), True, 'import numpy as np\n'), ((4015, 4033), 'numpy.sin', 'np.sin', (['src_crd.ra'], {}), '(src_crd.ra)\n', (4021, 4033), True, 'import numpy as np\n'), ((4438, 4463), 'numpy.meshgrid', 'np.meshgrid', (['el_st', 'el_st'], {}), '(el_st, el_st)\n', (4449, 4463), True, 'import numpy as np\n')] |
"""
"""
#| - Import Modules
import os
import sys
# print("import pickle")
import pickle
import copy
# import copy
import numpy as np
import pandas as pd
# import plotly.graph_objects as go
import plotly.graph_objs as go
import plotly.express as px
import scipy.integrate as integrate
from pymatgen_diffusion.aimd.van_hove import RadialDistributionFunction
from pymatgen.io.ase import AseAtomsAdaptor
# #########################################################
from plotting.my_plotly import my_plotly_plot
# from plotting.my_plotly import my_plotly_plot
# #########################################################
from methods import (
get_structure_coord_df,
get_df_coord,
# get_df_dft,
# symmetrize_atoms,
# remove_atoms,
)
#__|
def mean_O_metal_coord(df_coord=None, metal_elem="Ir"):
    """Return the mean number of metal neighbors per oxygen site.

    Args:
        df_coord: Coordination DataFrame with an `element` column and a
            `neighbor_count` column (dict mapping element symbol -> count).
        metal_elem: Metal symbol to count (default "Ir", the previously
            hard-coded value; parameterized for other metals).

    Returns:
        Mean metal-neighbor count over all O rows. Sites with no
        `metal_elem` neighbor yield None -> NaN, which `.mean()` skips,
        matching the original behavior.
    """
    #| - mean_O_metal_coord
    # Select O sites without mutating the caller's DataFrame (the old
    # implementation wrote a `num_metal` column into a filtered slice,
    # triggering SettingWithCopyWarning, and also shadowed the function
    # name with a local variable).
    df_oxy = df_coord[df_coord.element == "O"]
    num_metal = df_oxy.neighbor_count.apply(
        lambda counts: counts.get(metal_elem, None))
    return(num_metal.mean())
    #__|
def get_indices_of_neigh(
    active_oxy_ind=None,
    df_coord_slab_i=None,
    metal_atom_symbol=None,
    ):
    """
    Given an index of an active oxygen, use the neighbor list to find the
    indices of the active metal atom(s), the oxygens bound to them, and the
    second shell of metal atoms.

    Args:
        active_oxy_ind: structure index of the active oxygen atom.
        df_coord_slab_i: coordination DataFrame with `structure_index` and
            `nn_info` columns (nn_info entries carry "site" and "site_index").
        metal_atom_symbol: element symbol of the metal (e.g. "Ir").

    Returns:
        dict with keys "neighbor_oxy_indices", "neighbor_metal_indices",
        and "shell_2_metal_atoms" (second-shell metals, first shell excluded).
    """
    #| - get_indices_of_neigh
    def _row_for(index):
        """Coordination row for a given structure index."""
        return df_coord_slab_i[df_coord_slab_i.structure_index == index].iloc[0]

    neighbor_metal_indices = []
    neighbor_oxy_indices = []
    # Fix: this accumulator was previously (re)initialized inside the
    # metal-neighbor loop, wiping earlier results for multi-metal sites and
    # raising NameError when the oxygen had no metal neighbor at all.
    shell_2_metal_atoms = []

    row_oxy = _row_for(active_oxy_ind)
    for nn_i in row_oxy.nn_info:
        if nn_i["site"].specie.name != metal_atom_symbol:
            continue
        active_metal_index = nn_i["site_index"]
        neighbor_metal_indices.append(active_metal_index)

        row_metal = _row_for(active_metal_index)
        # First shell: oxygens bound to the active metal
        for nn_j in row_metal.nn_info:
            if nn_j["site"].specie.name == "O":
                neighbor_oxy_indices.append(nn_j["site_index"])

        # Second shell: metals reachable through the metal's neighbors.
        # Fix: compare against metal_atom_symbol (was hard-coded "Ir").
        for nn_j in row_metal.nn_info:
            row_k = _row_for(nn_j["site_index"])
            for nn_k in row_k.nn_info:
                if nn_k["site"].specie.name == metal_atom_symbol:
                    shell_2_metal_atoms.append(nn_k["site_index"])

    # Dedupe + sort, then drop first-shell metals from the second shell
    shell_2_sorted = np.sort(list(set(shell_2_metal_atoms)))
    shell_2_filtered = [
        i for i in shell_2_sorted if i not in neighbor_metal_indices]

    out_dict = dict()
    out_dict["neighbor_oxy_indices"] = neighbor_oxy_indices
    out_dict["neighbor_metal_indices"] = neighbor_metal_indices
    out_dict["shell_2_metal_atoms"] = shell_2_filtered
    return(out_dict)
    #__|
def process_rdf(
    atoms=None,
    active_site_i=None,
    df_coord_slab_i=None,
    metal_atom_symbol=None,
    custom_name=None,
    TEST_MODE=False,
    ):
    """Compute the radial distribution function (RDF) around one active site.

    The RDF is evaluated between `active_site_i` and its neighborhood
    (bound oxygens, first-shell metals, second-shell metals), exported to a
    csv under out_data/rdf_data, plotted to html, and returned.

    Args:
        atoms: ASE atoms object of the slab.
        active_site_i: structure index of the active oxygen.
        df_coord_slab_i: coordination DataFrame for the slab.
        metal_atom_symbol: metal element symbol (e.g. "Ir").
        custom_name: filename prefix (defaults to "").
        TEST_MODE: write the plot under __temp__ instead of rdf_figures.

    Returns:
        DataFrame with columns `r` and `g` (the RDF).
    """
    #| - process_rdf
    if custom_name is None:
        custom_name = ""

    #| - Create out folders
    directory = os.path.join(
        os.environ["PROJ_irox_oer"],
        "workflow/enumerate_adsorption",
        "out_data")
    if not os.path.exists(directory):
        os.makedirs(directory)

    for directory in ("out_plot/rdf_figures", "out_data/rdf_data"):
        if not os.path.exists(directory):
            os.makedirs(directory)
    #__|

    slab_structure = AseAtomsAdaptor().get_structure(atoms)

    # NOTE(review): a leftover debug block that pickled the inputs to
    # ~/__temp__/temp.pickle on every call (and was never read back) was
    # removed here.

    neigh_dict = get_indices_of_neigh(
        active_oxy_ind=active_site_i,
        df_coord_slab_i=df_coord_slab_i,
        metal_atom_symbol=metal_atom_symbol)

    # RDF is computed against the full first+second neighbor shells
    neighbor_indices = (
        neigh_dict["neighbor_oxy_indices"]
        + neigh_dict["neighbor_metal_indices"]
        + neigh_dict["shell_2_metal_atoms"])

    #| - Get RDF
    RDF = RadialDistributionFunction(
        [slab_structure, ],
        [active_site_i, ],
        neighbor_indices,
        ngrid=4801,
        rmax=8.0,
        cell_range=2,
        # Narrow gaussian smearing -> sharp, well-separated peaks
        sigma=0.015,
        )

    data_file = os.path.join(
        "out_data/rdf_data",
        custom_name + "_" + str(active_site_i).zfill(4) + "_" + "rdf_out.csv")
    RDF.export_rdf(data_file)

    df_rdf = pd.read_csv(data_file)
    # pymatgen writes the column header as " g(r)" (leading space)
    df_rdf = df_rdf.rename(columns={" g(r)": "g"})
    #__|

    #| - Plotting
    trace = go.Scatter(
        x=df_rdf.r,
        y=df_rdf.g,
        )
    fig = go.Figure(data=[trace])

    plot_dir = "__temp__" if TEST_MODE else "rdf_figures"
    out_plot_file = os.path.join(
        plot_dir,
        custom_name + "_" + str(active_site_i).zfill(4) + "_rdf")

    my_plotly_plot(
        figure=fig,
        plot_name=out_plot_file,
        write_html=True,
        write_png=False,
        png_scale=6.0,
        write_pdf=False,
        write_svg=False,
        try_orca_write=False,
        )
    #__|

    return(df_rdf)
    #__|
def compare_rdf_ij(
    df_rdf_i=None,
    df_rdf_j=None,
    ):
    """Integrated absolute difference between two normalized RDF curves.

    Each input DataFrame has `r` as its first column and the RDF values as
    its second column (any name). Both curves are normalized to unit area,
    their pointwise absolute difference is taken, and that difference is
    integrated over r < 10.

    Returns:
        float: integral of |g1_norm - g0_norm| for r < 10 (0 for
        identical curves; larger values mean more dissimilar RDFs).
    """
    #| - compare_rdf_ij
    # rename() returns new frames, so the callers' DataFrames are untouched
    df_0 = df_rdf_i.rename(columns={df_rdf_i.columns[1]: "g0"}).set_index("r")
    df_1 = df_rdf_j.rename(columns={df_rdf_j.columns[1]: "g1"}).set_index("r")

    # Normalize each curve to unit area.
    # Fix: integrate.trapz was removed in SciPy 1.14; trapezoid is the
    # drop-in replacement (available since SciPy 1.6).
    df_0["g0_norm"] = df_0.g0 / integrate.trapezoid(df_0.g0, x=df_0.index)
    df_1["g1_norm"] = df_1.g1 / integrate.trapezoid(df_1.g1, x=df_1.index)

    df_comb = pd.concat([df_0, df_1], axis=1)
    df_comb["g_diff_abs"] = np.abs(df_comb.g1_norm - df_comb.g0_norm)

    # NOTE(review): an unused plotly figure built here (dead code) was removed
    df_win = df_comb[df_comb.index < 10]
    integrated_diff = integrate.trapezoid(df_win.g_diff_abs, x=df_win.index)
    return(integrated_diff)
    #__|
def get_all_active_sites(
    slab=None,
    slab_id=None,
    bulk_id=None,
    df_coord_slab_i=None,
    ):
    """Return structure indices of surface oxygens bound to exactly one Ir.

    An O atom counts as an active site when it lies within dz (4, slab
    z-units) of the slab's topmost atom and has exactly one Ir neighbor.

    Args:
        slab: ASE atoms object of the slab.
        slab_id: id used to look up df_coord when df_coord_slab_i is None.
        bulk_id: kept for backward compatibility (no longer used).
        df_coord_slab_i: precomputed slab coordination DataFrame (optional).

    Returns:
        list of atom indices of the active oxygen sites.
    """
    #| - get_all_active_sites
    # Fix: df_coord_bulk_i was previously fetched only inside the
    # `df_coord_slab_i is None` branch but used unconditionally afterwards,
    # raising NameError whenever a DataFrame was passed in. The bulk data
    # was never actually used for the result, so it is dropped entirely.
    if df_coord_slab_i is None:
        df_coord_slab_i = get_df_coord(slab_id=slab_id, mode="slab")

    def _num_metal(row_i, metal_elem=None):
        """Number of `metal_elem` neighbors of this site (None if absent)."""
        return row_i.neighbor_count.get(metal_elem, None)

    # Work on a copy so the caller's DataFrame is not mutated
    df_coord_slab_i = df_coord_slab_i.copy()
    df_coord_slab_i["num_metal"] = df_coord_slab_i.apply(
        _num_metal, axis=1,
        metal_elem="Ir")

    # Only O atoms within dz of the top of the slab are candidates
    dz = 4
    z_max = np.max(slab.positions[:, 2])

    active_sites = []
    for atom in slab:
        if atom.symbol != "O":
            continue
        if atom.position[2] <= z_max - dz:
            continue
        row_i = df_coord_slab_i[
            df_coord_slab_i.structure_index == atom.index].iloc[0]
        if row_i.num_metal == 1:
            active_sites.append(atom.index)

    return(active_sites)
    #__|
def get_unique_active_sites(
    slab=None,
    active_sites=None,
    bulk_id=None,
    facet=None,
    slab_id=None,
    metal_atom_symbol=None,
    ):
    """Deduplicate `active_sites` by pairwise RDF comparison.

    An RDF is computed for every candidate site (process_rdf), all pairs
    are compared (compare_rdf_ij), and any site whose RDF difference to an
    already-kept site is below `diff_threshold` is dropped as a symmetry
    duplicate. A heat map of the pairwise difference matrix is written.

    Args:
        slab: ASE atoms object of the slab.
        active_sites: iterable of candidate O-site indices.
        bulk_id, facet, slab_id: identifiers used for file naming/lookup.
        metal_atom_symbol: metal element symbol (e.g. "Ir").

    Returns:
        list of site indices with mutually distinct RDFs.
    """
    #| - get_unique_active_sites
    df_coord_slab_i = get_df_coord(slab_id=slab_id, mode="slab")
    # NOTE(review): an unused fetch of the bulk df_coord was removed here.

    custom_name_pre = bulk_id + "__" + facet + "__" + slab_id

    # RDF for every candidate site
    df_rdf_dict = dict()
    for site_i in active_sites:
        df_rdf_dict[site_i] = process_rdf(
            atoms=slab,
            active_site_i=site_i,
            df_coord_slab_i=df_coord_slab_i,
            metal_atom_symbol=metal_atom_symbol,
            custom_name=custom_name_pre,
            )

    # Pairwise RDF difference matrix (symmetric, ~0 on the diagonal)
    diff_rdf_matrix = np.empty((len(active_sites), len(active_sites), ))
    diff_rdf_matrix[:] = np.nan
    for i_cnt, active_site_i in enumerate(active_sites):
        for j_cnt, active_site_j in enumerate(active_sites):
            diff_rdf_matrix[i_cnt, j_cnt] = compare_rdf_ij(
                df_rdf_i=df_rdf_dict[active_site_i],
                df_rdf_j=df_rdf_dict[active_site_j],
                )

    df_rdf_ij = pd.DataFrame(diff_rdf_matrix, columns=active_sites)
    df_rdf_ij.index = active_sites

    # Greedy dedup: keep a site unless it matches an already-kept one
    active_sites_cpy = copy.deepcopy(active_sites)
    diff_threshold = 0.3
    duplicate_active_sites = []
    for active_site_i in active_sites:
        if active_site_i in duplicate_active_sites:
            continue
        for active_site_j in active_sites:
            if active_site_i == active_site_j:
                continue
            if df_rdf_ij.loc[active_site_i, active_site_j] < diff_threshold:
                try:
                    active_sites_cpy.remove(active_site_j)
                    duplicate_active_sites.append(active_site_j)
                except (ValueError, AttributeError):
                    # ValueError: already removed via an earlier pair.
                    # AttributeError: preserves the old bare-except behavior
                    # when active_sites is array-like without .remove().
                    pass
    active_sites_unique = active_sites_cpy

    #| - Plotting heat map
    active_sites_str = [str(i) for i in active_sites]
    fig = go.Figure(data=go.Heatmap(
        z=df_rdf_ij.to_numpy(),
        x=active_sites_str,
        y=active_sites_str,
        ))
    fig["layout"]["xaxis"]["type"] = "category"
    fig["layout"]["yaxis"]["type"] = "category"

    # Fix: removed a leftover `assert False, "Fix os.makedirs"` that made
    # this section crash unconditionally.
    directory = "out_plot/rdf_heat_maps_1"
    if not os.path.exists(directory):
        os.makedirs(directory)

    # NOTE(review): the plot path uses "rdf_heat_maps/" while the directory
    # created above is "rdf_heat_maps_1" -- preserved as-is; confirm which
    # is intended.
    file_name = "rdf_heat_maps/" + custom_name_pre + "_rdf_diff_heat_map"
    my_plotly_plot(
        figure=fig,
        plot_name=file_name,
        write_html=True,
        write_png=False,
        png_scale=6.0,
        write_pdf=False,
        write_svg=False,
        try_orca_write=False,
        )
    #__|

    return(active_sites_unique)
    #__|
def return_modified_rdf(
    df_rdf=None,
    chunks_to_edit=None,
    dx=None,
    ):
    """Shift selected peak "chunks" of an RDF curve along r.

    The curve is split into contiguous chunks of non-negligible intensity
    (g > 1e-5) separated by gaps wider than 3 grid spacings. Chunks whose
    index appears in `chunks_to_edit` are shifted by `dx` along r; the
    rest are kept in place. Rows lying outside every chunk are dropped.

    Args:
        df_rdf: DataFrame with columns `r` (uniform grid) and `g`.
        chunks_to_edit: chunk index or list of chunk indices to shift.
        dx: shift amount; a scalar, or a list indexed by chunk index.

    Returns:
        DataFrame (concatenated chunks) with shifted `r` values.
    """
    #| - return_modified_rdf
    if not isinstance(chunks_to_edit, list):
        chunks_to_edit = [chunks_to_edit]

    # x-axis spacing of the (assumed uniform) r grid
    r_vals = df_rdf.r.tolist()
    dr = r_vals[1] - r_vals[0]

    # Rows with non-negligible intensity define the chunks.
    # NOTE(review): a debug plot written to "temp_rds_distr" and a
    # redundant function-local `import pandas` were removed here.
    df_nz = df_rdf[df_rdf.g > 1e-5]

    # Chunk boundaries: a gap wider than 3*dr separates chunks
    chunk_start_coords = [df_nz.iloc[0].r]
    chunk_end_coords = []
    for i in range(1, df_nz.shape[0] - 1):
        r_im1 = df_nz.iloc[i - 1].r
        r_i = df_nz.iloc[i].r
        r_ip1 = df_nz.iloc[i + 1].r
        if r_i - r_im1 > 3 * dr:
            chunk_start_coords.append(r_i)
        if r_ip1 - r_i > 3 * dr:
            chunk_end_coords.append(r_i)
    chunk_end_coords.append(df_nz.iloc[-1].r)

    chunk_coord_list = list(zip(chunk_start_coords, chunk_end_coords))

    df_chunks_list = []
    for i_cnt, (start_i, end_i) in enumerate(chunk_coord_list):
        if i_cnt in chunks_to_edit:
            dx_tmp = dx[i_cnt] if isinstance(dx, list) else dx
        else:
            dx_tmp = 0
        # .copy() avoids SettingWithCopyWarning and guarantees the
        # caller's DataFrame is never shifted in place
        df_j = df_rdf[(df_rdf.r >= start_i) & (df_rdf.r <= end_i)].copy()
        df_j.r += dx_tmp
        df_chunks_list.append(df_j)

    return(pd.concat(df_chunks_list))
    #__|
def create_interp_df(df_i, x_combined):
    """Resample an RDF curve onto a given r grid.

    For each point in `x_combined`: if it coincides with an existing grid
    point of `df_i`, the stored value is reused; if it lies outside the
    data range, g is 0; otherwise g is linearly interpolated.

    Args:
        df_i: DataFrame with columns `r` and `g`.
        x_combined: iterable of r values to sample at.

    Returns:
        DataFrame with columns `r` and `g` on the requested grid.
    """
    #| - create_interp_df
    from scipy.interpolate import interp1d  # hoisted out of the loop

    # Loop invariants computed once (previously recomputed per point,
    # including an O(n) list-membership test and the interp1d import)
    min_r = df_i.r.min()
    max_r = df_i.r.max()
    known_r = set(df_i.r.tolist())

    inter_fun = None  # built lazily, only if interpolation is needed
    data_dict_list = []
    for r_i in x_combined:
        if r_i in known_r:
            # Exact grid point: reuse the stored value
            g_new = df_i[df_i.r == r_i].iloc[0].g
        elif r_i < min_r or r_i > max_r:
            # Outside the data range: RDF vanishes
            g_new = 0.
        else:
            if inter_fun is None:
                inter_fun = interp1d(
                    df_i.r, df_i.g,
                    kind='linear',
                    axis=-1,
                    copy=True,
                    bounds_error=None,
                    assume_sorted=False,
                    )
            g_new = inter_fun(r_i)
        data_dict_list.append({"r": r_i, "g": g_new})

    return(pd.DataFrame(data_dict_list))
    #__|
def get_unique_active_sites_temp(
    slab=None,
    active_sites=None,
    bulk_id=None,
    facet=None,
    slab_id=None,
    metal_atom_symbol=None,
    df_coord_slab_i=None,
    create_heatmap_plot=False,
    ):
    """Deduplicate `active_sites` by pairwise RDF comparison (newer variant).

    Like get_unique_active_sites, but accepts a precomputed df_coord,
    pickles the per-site RDFs and the pairwise difference matrix under
    out_data/, uses a tighter threshold (0.2), and makes the heat map plot
    optional.

    Args:
        slab: ASE atoms object of the slab.
        active_sites: iterable of candidate O-site indices.
        bulk_id, facet, slab_id: identifiers used for file naming/lookup.
        metal_atom_symbol: metal element symbol (e.g. "Ir").
        df_coord_slab_i: precomputed slab coordination DataFrame (optional).
        create_heatmap_plot: also write the pairwise-difference heat map.

    Returns:
        list of site indices with mutually distinct RDFs.
    """
    #| - get_unique_active_sites_temp
    # NOTE(review): redundant function-local imports (os, pickle, plotly)
    # and an unused fetch of the bulk df_coord were removed.
    if df_coord_slab_i is None:
        df_coord_slab_i = get_df_coord(slab_id=slab_id, mode="slab")

    custom_name_pre = bulk_id + "__" + facet + "__" + slab_id

    # RDF for every candidate site
    df_rdf_dict = dict()
    for site_i in active_sites:
        df_rdf_dict[site_i] = process_rdf(
            atoms=slab,
            active_site_i=site_i,
            df_coord_slab_i=df_coord_slab_i,
            metal_atom_symbol=metal_atom_symbol,
            custom_name=custom_name_pre,
            )

    # Pickling data ###########################################
    directory = os.path.join(
        os.environ["PROJ_irox_oer"],
        "workflow/enumerate_adsorption",
        "out_data/df_rdf_dict",
        )
    if not os.path.exists(directory):
        os.makedirs(directory)
    with open(os.path.join(directory, custom_name_pre + ".pickle"), "wb") as fle:
        pickle.dump(df_rdf_dict, fle)
    # #########################################################

    # Pairwise RDF difference matrix (symmetric, ~0 on the diagonal)
    diff_rdf_matrix = np.empty((len(active_sites), len(active_sites), ))
    diff_rdf_matrix[:] = np.nan
    for i_cnt, active_site_i in enumerate(active_sites):
        for j_cnt, active_site_j in enumerate(active_sites):
            diff_rdf_matrix[i_cnt, j_cnt] = compare_rdf_ij(
                df_rdf_i=df_rdf_dict[active_site_i],
                df_rdf_j=df_rdf_dict[active_site_j],
                )

    df_rdf_ij = pd.DataFrame(diff_rdf_matrix, columns=active_sites)
    df_rdf_ij.index = active_sites

    # Pickling data ###########################################
    directory = os.path.join(
        os.environ["PROJ_irox_oer"],
        "workflow/enumerate_adsorption",
        "out_data/df_rdf_ij",
        )
    if not os.path.exists(directory):
        os.makedirs(directory)
    with open(os.path.join(directory, custom_name_pre + ".pickle"), "wb") as fle:
        pickle.dump(df_rdf_ij, fle)
    # #########################################################

    # Greedy dedup: keep a site unless it matches an already-kept one
    active_sites_cpy = copy.deepcopy(active_sites)
    diff_threshold = 0.2
    duplicate_active_sites = []
    for active_site_i in active_sites:
        if active_site_i in duplicate_active_sites:
            continue
        for active_site_j in active_sites:
            if active_site_i == active_site_j:
                continue
            if df_rdf_ij.loc[active_site_i, active_site_j] < diff_threshold:
                try:
                    active_sites_cpy.remove(active_site_j)
                    duplicate_active_sites.append(active_site_j)
                except (ValueError, AttributeError):
                    # ValueError: already removed via an earlier pair.
                    # AttributeError: preserves the old bare-except behavior
                    # when active_sites is array-like without .remove().
                    pass
    active_sites_unique = active_sites_cpy

    #| - Creating Figure
    if create_heatmap_plot:
        active_sites_str = [str(i) for i in active_sites]
        fig = go.Figure(data=go.Heatmap(
            z=df_rdf_ij.to_numpy(),
            x=active_sites_str,
            y=active_sites_str,
            xgap=3,
            ygap=3,
            ))
        fig["layout"]["xaxis"]["type"] = "category"
        fig["layout"]["yaxis"]["type"] = "category"

        directory = os.path.join(
            os.environ["PROJ_irox_oer"],
            "workflow/enumerate_adsorption",
            "out_data/rdf_heat_maps_1",
            )
        if not os.path.exists(directory):
            os.makedirs(directory)

        # Save path relative to the project root (drop the first segment)
        save_dir = os.path.join(
            "/".join(directory.split("/")[1:]),
            )
        file_name = custom_name_pre + "_rdf_diff_heat_map"
        print(file_name)
        my_plotly_plot(
            figure=fig,
            save_dir=save_dir,
            place_in_out_plot=False,
            plot_name=file_name,
            write_html=True,
            write_png=False,
            png_scale=6.0,
            write_pdf=False,
            write_svg=False,
            try_orca_write=False,
            )
    #__|

    return(active_sites_unique)
    #__|
| [
"pickle.dump",
"numpy.abs",
"pandas.read_csv",
"methods.get_df_coord",
"pymatgen.io.ase.AseAtomsAdaptor",
"scipy.interpolate.interp1d",
"os.path.join",
"pandas.DataFrame",
"os.path.exists",
"numpy.max",
"plotting.my_plotly.my_plotly_plot",
"pandas.concat",
"plotly.graph_objects.Scatter",
"... | [((3586, 3676), 'os.path.join', 'os.path.join', (["os.environ['PROJ_irox_oer']", '"""workflow/enumerate_adsorption"""', '"""out_data"""'], {}), "(os.environ['PROJ_irox_oer'], 'workflow/enumerate_adsorption',\n 'out_data')\n", (3598, 3676), False, 'import os\n'), ((4042, 4059), 'pymatgen.io.ase.AseAtomsAdaptor', 'AseAtomsAdaptor', ([], {}), '()\n', (4057, 4059), False, 'from pymatgen.io.ase import AseAtomsAdaptor\n'), ((4387, 4446), 'os.path.join', 'os.path.join', (["os.environ['HOME']", '"""__temp__"""', '"""temp.pickle"""'], {}), "(os.environ['HOME'], '__temp__', 'temp.pickle')\n", (4399, 4446), False, 'import os\n'), ((5178, 5310), 'pymatgen_diffusion.aimd.van_hove.RadialDistributionFunction', 'RadialDistributionFunction', (['[slab_structure]', '[active_site_i]', 'neighbor_indices'], {'ngrid': '(4801)', 'rmax': '(8.0)', 'cell_range': '(2)', 'sigma': '(0.015)'}), '([slab_structure], [active_site_i],\n neighbor_indices, ngrid=4801, rmax=8.0, cell_range=2, sigma=0.015)\n', (5204, 5310), False, 'from pymatgen_diffusion.aimd.van_hove import RadialDistributionFunction\n'), ((5724, 5746), 'pandas.read_csv', 'pd.read_csv', (['data_file'], {}), '(data_file)\n', (5735, 5746), True, 'import pandas as pd\n'), ((5957, 5989), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x_array', 'y': 'y_array'}), '(x=x_array, y=y_array)\n', (5967, 5989), True, 'import plotly.graph_objects as go\n'), ((6047, 6067), 'plotly.graph_objects.Figure', 'go.Figure', ([], {'data': 'data'}), '(data=data)\n', (6056, 6067), True, 'import plotly.graph_objects as go\n'), ((6351, 6515), 'plotting.my_plotly.my_plotly_plot', 'my_plotly_plot', ([], {'figure': 'fig', 'plot_name': 'out_plot_file', 'write_html': '(True)', 'write_png': '(False)', 'png_scale': '(6.0)', 'write_pdf': '(False)', 'write_svg': '(False)', 'try_orca_write': '(False)'}), '(figure=fig, plot_name=out_plot_file, write_html=True,\n write_png=False, png_scale=6.0, write_pdf=False, write_svg=False,\n try_orca_write=False)\n', 
(6365, 6515), False, 'from plotting.my_plotly import my_plotly_plot\n'), ((7339, 7377), 'scipy.integrate.trapz', 'integrate.trapz', (['df_0.g0'], {'x': 'df_0.index'}), '(df_0.g0, x=df_0.index)\n', (7354, 7377), True, 'import scipy.integrate as integrate\n'), ((7431, 7469), 'scipy.integrate.trapz', 'integrate.trapz', (['df_1.g1'], {'x': 'df_1.index'}), '(df_1.g1, x=df_1.index)\n', (7446, 7469), True, 'import scipy.integrate as integrate\n'), ((7524, 7555), 'pandas.concat', 'pd.concat', (['[df_0, df_1]'], {'axis': '(1)'}), '([df_0, df_1], axis=1)\n', (7533, 7555), True, 'import pandas as pd\n'), ((7642, 7667), 'numpy.abs', 'np.abs', (["df_comb['g_diff']"], {}), "(df_comb['g_diff'])\n", (7648, 7667), True, 'import numpy as np\n'), ((7783, 7815), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x_array', 'y': 'y_array'}), '(x=x_array, y=y_array)\n', (7793, 7815), True, 'import plotly.graph_objects as go\n'), ((7873, 7893), 'plotly.graph_objects.Figure', 'go.Figure', ([], {'data': 'data'}), '(data=data)\n', (7882, 7893), True, 'import plotly.graph_objects as go\n'), ((7987, 8043), 'scipy.integrate.trapz', 'integrate.trapz', (['df_comb_i.g_diff_abs'], {'x': 'df_comb_i.index'}), '(df_comb_i.g_diff_abs, x=df_comb_i.index)\n', (8002, 8043), True, 'import scipy.integrate as integrate\n'), ((8491, 8533), 'methods.get_df_coord', 'get_df_coord', ([], {'bulk_id': 'bulk_id', 'mode': '"""bulk"""'}), "(bulk_id=bulk_id, mode='bulk')\n", (8503, 8533), False, 'from methods import get_structure_coord_df, get_df_coord\n'), ((9194, 9217), 'numpy.min', 'np.min', (['positions[:, 2]'], {}), '(positions[:, 2])\n', (9200, 9217), True, 'import numpy as np\n'), ((9229, 9252), 'numpy.max', 'np.max', (['positions[:, 2]'], {}), '(positions[:, 2])\n', (9235, 9252), True, 'import numpy as np\n'), ((10096, 10138), 'methods.get_df_coord', 'get_df_coord', ([], {'slab_id': 'slab_id', 'mode': '"""slab"""'}), "(slab_id=slab_id, mode='slab')\n", (10108, 10138), False, 'from methods import 
get_structure_coord_df, get_df_coord\n'), ((10161, 10203), 'methods.get_df_coord', 'get_df_coord', ([], {'bulk_id': 'bulk_id', 'mode': '"""bulk"""'}), "(bulk_id=bulk_id, mode='bulk')\n", (10173, 10203), False, 'from methods import get_structure_coord_df, get_df_coord\n'), ((11559, 11610), 'pandas.DataFrame', 'pd.DataFrame', (['diff_rdf_matrix'], {'columns': 'active_sites'}), '(diff_rdf_matrix, columns=active_sites)\n', (11571, 11610), True, 'import pandas as pd\n'), ((11736, 11763), 'copy.deepcopy', 'copy.deepcopy', (['active_sites'], {}), '(active_sites)\n', (11749, 11763), False, 'import copy\n'), ((13032, 13194), 'plotting.my_plotly.my_plotly_plot', 'my_plotly_plot', ([], {'figure': 'fig', 'plot_name': 'file_name', 'write_html': '(True)', 'write_png': '(False)', 'png_scale': '(6.0)', 'write_pdf': '(False)', 'write_svg': '(False)', 'try_orca_write': '(False)'}), '(figure=fig, plot_name=file_name, write_html=True, write_png=\n False, png_scale=6.0, write_pdf=False, write_svg=False, try_orca_write=\n False)\n', (13046, 13194), False, 'from plotting.my_plotly import my_plotly_plot\n'), ((13732, 13778), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'df_i.r', 'y': 'df_i.g', 'mode': '"""markers"""'}), "(x=df_i.r, y=df_i.g, mode='markers')\n", (13742, 13778), True, 'import plotly.graph_objects as go\n'), ((13826, 13846), 'plotly.graph_objects.Figure', 'go.Figure', ([], {'data': 'data'}), '(data=data)\n', (13835, 13846), True, 'import plotly.graph_objects as go\n'), ((13851, 13922), 'plotting.my_plotly.my_plotly_plot', 'my_plotly_plot', ([], {'figure': 'fig', 'plot_name': '"""temp_rds_distr"""', 'write_html': '(True)'}), "(figure=fig, plot_name='temp_rds_distr', write_html=True)\n", (13865, 13922), False, 'from plotting.my_plotly import my_plotly_plot\n'), ((15736, 15761), 'pandas.concat', 'pd.concat', (['df_chunks_list'], {}), '(df_chunks_list)\n', (15745, 15761), True, 'import pandas as pd\n'), ((17336, 17364), 'pandas.DataFrame', 'pd.DataFrame', 
(['data_dict_list'], {}), '(data_dict_list)\n', (17348, 17364), True, 'import pandas as pd\n'), ((17871, 17913), 'methods.get_df_coord', 'get_df_coord', ([], {'bulk_id': 'bulk_id', 'mode': '"""bulk"""'}), "(bulk_id=bulk_id, mode='bulk')\n", (17883, 17913), False, 'from methods import get_structure_coord_df, get_df_coord\n'), ((18773, 18875), 'os.path.join', 'os.path.join', (["os.environ['PROJ_irox_oer']", '"""workflow/enumerate_adsorption"""', '"""out_data/df_rdf_dict"""'], {}), "(os.environ['PROJ_irox_oer'], 'workflow/enumerate_adsorption',\n 'out_data/df_rdf_dict')\n", (18785, 18875), False, 'import os\n'), ((19836, 19887), 'pandas.DataFrame', 'pd.DataFrame', (['diff_rdf_matrix'], {'columns': 'active_sites'}), '(diff_rdf_matrix, columns=active_sites)\n', (19848, 19887), True, 'import pandas as pd\n'), ((20044, 20144), 'os.path.join', 'os.path.join', (["os.environ['PROJ_irox_oer']", '"""workflow/enumerate_adsorption"""', '"""out_data/df_rdf_ij"""'], {}), "(os.environ['PROJ_irox_oer'], 'workflow/enumerate_adsorption',\n 'out_data/df_rdf_ij')\n", (20056, 20144), False, 'import os\n'), ((20546, 20573), 'copy.deepcopy', 'copy.deepcopy', (['active_sites'], {}), '(active_sites)\n', (20559, 20573), False, 'import copy\n'), ((3710, 3735), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (3724, 3735), False, 'import os\n'), ((3745, 3767), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (3756, 3767), False, 'import os\n'), ((3858, 3883), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (3872, 3883), False, 'import os\n'), ((3893, 3915), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (3904, 3915), False, 'import os\n'), ((3964, 3989), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (3978, 3989), False, 'import os\n'), ((3999, 4021), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (4010, 4021), False, 'import os\n'), ((4516, 4542), 
'pickle.dump', 'pickle.dump', (['out_dict', 'fle'], {}), '(out_dict, fle)\n', (4527, 4542), False, 'import pickle\n'), ((8425, 8467), 'methods.get_df_coord', 'get_df_coord', ([], {'slab_id': 'slab_id', 'mode': '"""slab"""'}), "(slab_id=slab_id, mode='slab')\n", (8437, 8467), False, 'from methods import get_structure_coord_df, get_df_coord\n'), ((12895, 12920), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (12909, 12920), False, 'import os\n'), ((12930, 12952), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (12941, 12952), False, 'import os\n'), ((17805, 17847), 'methods.get_df_coord', 'get_df_coord', ([], {'slab_id': 'slab_id', 'mode': '"""slab"""'}), "(slab_id=slab_id, mode='slab')\n", (17817, 17847), False, 'from methods import get_structure_coord_df, get_df_coord\n'), ((18957, 18982), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (18971, 18982), False, 'import os\n'), ((18984, 19006), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (18995, 19006), False, 'import os\n'), ((19097, 19126), 'pickle.dump', 'pickle.dump', (['df_rdf_dict', 'fle'], {}), '(df_rdf_dict, fle)\n', (19108, 19126), False, 'import pickle\n'), ((20226, 20251), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (20240, 20251), False, 'import os\n'), ((20253, 20275), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (20264, 20275), False, 'import os\n'), ((20366, 20393), 'pickle.dump', 'pickle.dump', (['df_rdf_ij', 'fle'], {}), '(df_rdf_ij, fle)\n', (20377, 20393), False, 'import pickle\n'), ((22017, 22123), 'os.path.join', 'os.path.join', (["os.environ['PROJ_irox_oer']", '"""workflow/enumerate_adsorption"""', '"""out_data/rdf_heat_maps_1"""'], {}), "(os.environ['PROJ_irox_oer'], 'workflow/enumerate_adsorption',\n 'out_data/rdf_heat_maps_1')\n", (22029, 22123), False, 'import os\n'), ((22833, 23037), 'plotting.my_plotly.my_plotly_plot', 'my_plotly_plot', ([], 
{'figure': 'fig', 'save_dir': 'save_dir', 'place_in_out_plot': '(False)', 'plot_name': 'file_name', 'write_html': '(True)', 'write_png': '(False)', 'png_scale': '(6.0)', 'write_pdf': '(False)', 'write_svg': '(False)', 'try_orca_write': '(False)'}), '(figure=fig, save_dir=save_dir, place_in_out_plot=False,\n plot_name=file_name, write_html=True, write_png=False, png_scale=6.0,\n write_pdf=False, write_svg=False, try_orca_write=False)\n', (22847, 23037), False, 'from plotting.my_plotly import my_plotly_plot\n'), ((19021, 19073), 'os.path.join', 'os.path.join', (['directory', "(custom_name_pre + '.pickle')"], {}), "(directory, custom_name_pre + '.pickle')\n", (19033, 19073), False, 'import os\n'), ((20290, 20342), 'os.path.join', 'os.path.join', (['directory', "(custom_name_pre + '.pickle')"], {}), "(directory, custom_name_pre + '.pickle')\n", (20302, 20342), False, 'import os\n'), ((22229, 22254), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (22243, 22254), False, 'import os\n'), ((22268, 22290), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (22279, 22290), False, 'import os\n'), ((16891, 16995), 'scipy.interpolate.interp1d', 'interp1d', (['df_i.r', 'df_i.g'], {'kind': '"""linear"""', 'axis': '(-1)', 'copy': '(True)', 'bounds_error': 'None', 'assume_sorted': '(False)'}), "(df_i.r, df_i.g, kind='linear', axis=-1, copy=True, bounds_error=\n None, assume_sorted=False)\n", (16899, 16995), False, 'from scipy.interpolate import interp1d\n')] |
# Using Word2Vec for prediction: load previously CBOW-trained word embeddings
# and fit a logistic-regression movie-review classifier on top of the
# averaged per-document embedding (TensorFlow 1.x graph-mode API throughout).
# -----------------------------------------
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import random
import os
import pickle
import string
import requests
import collections
import io
import tarfile
#import urllib.request
import preprocessor
from nltk.corpus import stopwords
from tensorflow.python.framework import ops
import warnings
import random
import os
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TF C++ info/warning logs
ops.reset_default_graph()
# Run relative to this script's directory so 'temp/...' paths resolve.
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Start a graph session
sess = tf.Session()
# Declare model parameters
# NOTE(review): generations and model_learning_rate are declared but never
# used below — the training loop runs 3000 iterations and the optimizer
# hard-codes 0.001. Presumably they were meant to be wired in; confirm.
batch_size = 500
embedding_size = 200
vocabulary_size = 15000
generations = 100000
model_learning_rate = 0.001
max_words = 100
# Declare stop words
stops = stopwords.words('english')
# Load Data
print('Loading Data... ')
data_folder_name = 'temp'
texts, target = preprocessor.load_movie_data()
# Normalize text
print('Normalizing Text Data... ')
texts = preprocessor.normalize_text(texts, stops)
# Texts must contain at least 4 words (filter targets first, then texts,
# so both lists stay aligned).
target = [target[ix] for ix, x in enumerate(texts) if len(x.split()) > 3]
texts = [x for x in texts if len(x.split()) > 3]
# Split up data set into train/test (75/25 random split)
train_indices = np.random.choice(len(target), round(0.75*len(target)), replace=False)
test_indices = np.array(list(set(range(len(target))) - set(train_indices)))
texts_train = [x for ix, x in enumerate(texts) if ix in train_indices]
texts_test = [x for ix, x in enumerate(texts) if ix in test_indices]
target_train = np.array([x for ix, x in enumerate(target) if ix in train_indices])
target_test = np.array([x for ix, x in enumerate(target) if ix in test_indices])
# Load dictionary and embedding matrix
# NOTE(review): the file handle from open() is never closed; prefer a
# with-block here.
dict_file = 'temp/movie_vocab.pkl'
word_dictionary = pickle.load(open(dict_file, 'rb'))
# Convert texts to lists of indices
text_data_train = np.array(preprocessor.text_to_numbers(texts_train, word_dictionary))
text_data_test = np.array(preprocessor.text_to_numbers(texts_test, word_dictionary))
# Pad/crop movie reviews to specific length: append max_words zeros to each
# review, then truncate to exactly max_words indices.
text_data_train = np.array([x[0:max_words] for x in [y+[0]*max_words for y in text_data_train]])
text_data_test = np.array([x[0:max_words] for x in [y+[0]*max_words for y in text_data_test]])
print('Creating Model... ')
# Define Embeddings: (initial values are overwritten by the checkpoint below)
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
# Define model:
# Create variables for logistic regression
A = tf.Variable(tf.random_normal(shape=[embedding_size,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
# Initialize placeholders
x_data = tf.placeholder(shape=[None, max_words], dtype=tf.int32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# Lookup embeddings vectors
embed = tf.nn.embedding_lookup(embeddings, x_data)
# Take average of all word embeddings in documents (mean over word axis)
embed_avg = tf.reduce_mean(embed, 1)
# Declare logistic model (sigmoid in loss function)
model_output = tf.add(tf.matmul(embed_avg, A), b)
# Declare loss function (Cross Entropy loss)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=y_target))
# Actual Prediction
prediction = tf.round(tf.sigmoid(model_output))
predictions_correct = tf.cast(tf.equal(prediction, y_target), tf.float32)
accuracy = tf.reduce_mean(predictions_correct)
# Declare optimizer
training_op = tf.train.GradientDescentOptimizer(0.001)
train_step = training_op.minimize(loss)
# Initialize Variables
init_op = tf.global_variables_initializer()
sess.run(init_op)
# Load model embeddings (restores only the embedding matrix; A and b keep
# their random initialization and are trained below)
model_checkpoint_path = 'temp/cbow_movie_embeddings.ckpt'
saver = tf.train.Saver({"embeddings": embeddings})
saver.restore(sess, model_checkpoint_path)
# Start Logistic Regression
print('Starting Model Training... ')
train_loss = []
test_loss = []
train_acc = []
test_acc = []
i_data = []
for i in range(3000):
    rand_index = np.random.choice(text_data_train.shape[0], size=batch_size)
    rand_x = text_data_train[rand_index]
    rand_y = np.transpose([target_train[rand_index]])
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    # Only record loss and accuracy every 100 generations
    if (i+1)%100==0:
        i_data.append(i+1)
        train_loss_temp = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
        train_loss.append(train_loss_temp)
        test_loss_temp = sess.run(loss, feed_dict={x_data: text_data_test, y_target: np.transpose([target_test])})
        test_loss.append(test_loss_temp)
        train_acc_temp = sess.run(accuracy, feed_dict={x_data: rand_x, y_target: rand_y})
        train_acc.append(train_acc_temp)
        test_acc_temp = sess.run(accuracy, feed_dict={x_data: text_data_test, y_target: np.transpose([target_test])})
        test_acc.append(test_acc_temp)
    # 500 is a multiple of 100, so the *_temp values below are always fresh.
    if (i+1)%500==0:
        acc_and_loss = [i+1, train_loss_temp, test_loss_temp, train_acc_temp, test_acc_temp]
        acc_and_loss = [np.round(x,2) for x in acc_and_loss]
        print('Iteration # {}. Train Loss (Test Loss): {:.2f} ({:.2f}). Train Acc (Test Acc): {:.2f} ({:.2f})'.format(*acc_and_loss))
# NOTE(review): this averages the periodic accuracy snapshots, not a final
# full-test-set evaluation.
print('\nOverall accuracy on test set (%): {}'.format(np.mean(test_acc)*100.0))
# Plot loss over time
plt.plot(i_data, train_loss, 'k-', label='Training loss')
plt.plot(i_data, test_loss, 'r--', label='Test loss', linewidth=4)
plt.title('Cross entropy loss per iteration')
plt.xlabel('Iteration')
plt.ylabel('Cross entropy loss')
plt.legend(loc='upper right')
plt.show()
# Plot train and test accuracy
plt.plot(i_data, train_acc, 'k-', label='Accuracy on the training set')
plt.plot(i_data, test_acc, 'r--', label='Accuracy on the test set', linewidth=4)
plt.title('Accuracy on the train and test set')
plt.xlabel('Generation')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
| [
"matplotlib.pyplot.title",
"preprocessor.normalize_text",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.matmul",
"numpy.mean",
"tensorflow.python.framework.ops.reset_default_graph",
"preprocessor.load_movie_data",
"numpy.round",
"numpy.transpose",
"tensorflow.placeholder",
"nump... | [((618, 651), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (641, 651), False, 'import warnings\n'), ((693, 718), 'tensorflow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', ([], {}), '()\n', (716, 718), False, 'from tensorflow.python.framework import ops\n'), ((806, 818), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (816, 818), True, 'import tensorflow as tf\n'), ((1004, 1030), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1019, 1030), False, 'from nltk.corpus import stopwords\n'), ((1112, 1142), 'preprocessor.load_movie_data', 'preprocessor.load_movie_data', ([], {}), '()\n', (1140, 1142), False, 'import preprocessor\n'), ((1204, 1245), 'preprocessor.normalize_text', 'preprocessor.normalize_text', (['texts', 'stops'], {}), '(texts, stops)\n', (1231, 1245), False, 'import preprocessor\n'), ((2311, 2399), 'numpy.array', 'np.array', (['[x[0:max_words] for x in [(y + [0] * max_words) for y in text_data_train]]'], {}), '([x[0:max_words] for x in [(y + [0] * max_words) for y in\n text_data_train]])\n', (2319, 2399), True, 'import numpy as np\n'), ((2407, 2494), 'numpy.array', 'np.array', (['[x[0:max_words] for x in [(y + [0] * max_words) for y in text_data_test]]'], {}), '([x[0:max_words] for x in [(y + [0] * max_words) for y in\n text_data_test]])\n', (2415, 2494), True, 'import numpy as np\n'), ((2828, 2883), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, max_words]', 'dtype': 'tf.int32'}), '(shape=[None, max_words], dtype=tf.int32)\n', (2842, 2883), True, 'import tensorflow as tf\n'), ((2895, 2944), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, 1]', 'dtype': 'tf.float32'}), '(shape=[None, 1], dtype=tf.float32)\n', (2909, 2944), True, 'import tensorflow as tf\n'), ((2982, 3024), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings', 'x_data'], {}), '(embeddings, 
x_data)\n', (3004, 3024), True, 'import tensorflow as tf\n'), ((3088, 3112), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['embed', '(1)'], {}), '(embed, 1)\n', (3102, 3112), True, 'import tensorflow as tf\n'), ((3517, 3552), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['predictions_correct'], {}), '(predictions_correct)\n', (3531, 3552), True, 'import tensorflow as tf\n'), ((3588, 3628), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.001)'], {}), '(0.001)\n', (3621, 3628), True, 'import tensorflow as tf\n'), ((3704, 3737), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3735, 3737), True, 'import tensorflow as tf\n'), ((3847, 3889), 'tensorflow.train.Saver', 'tf.train.Saver', (["{'embeddings': embeddings}"], {}), "({'embeddings': embeddings})\n", (3861, 3889), True, 'import tensorflow as tf\n'), ((5459, 5516), 'matplotlib.pyplot.plot', 'plt.plot', (['i_data', 'train_loss', '"""k-"""'], {'label': '"""Training loss"""'}), "(i_data, train_loss, 'k-', label='Training loss')\n", (5467, 5516), True, 'import matplotlib.pyplot as plt\n'), ((5517, 5583), 'matplotlib.pyplot.plot', 'plt.plot', (['i_data', 'test_loss', '"""r--"""'], {'label': '"""Test loss"""', 'linewidth': '(4)'}), "(i_data, test_loss, 'r--', label='Test loss', linewidth=4)\n", (5525, 5583), True, 'import matplotlib.pyplot as plt\n'), ((5584, 5629), 'matplotlib.pyplot.title', 'plt.title', (['"""Cross entropy loss per iteration"""'], {}), "('Cross entropy loss per iteration')\n", (5593, 5629), True, 'import matplotlib.pyplot as plt\n'), ((5630, 5653), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (5640, 5653), True, 'import matplotlib.pyplot as plt\n'), ((5654, 5686), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cross entropy loss"""'], {}), "('Cross entropy loss')\n", (5664, 5686), True, 'import matplotlib.pyplot as plt\n'), ((5687, 5716), 'matplotlib.pyplot.legend', 
'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (5697, 5716), True, 'import matplotlib.pyplot as plt\n'), ((5717, 5727), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5725, 5727), True, 'import matplotlib.pyplot as plt\n'), ((5760, 5831), 'matplotlib.pyplot.plot', 'plt.plot', (['i_data', 'train_acc', '"""k-"""'], {'label': '"""Accuracy on the training set"""'}), "(i_data, train_acc, 'k-', label='Accuracy on the training set')\n", (5768, 5831), True, 'import matplotlib.pyplot as plt\n'), ((5832, 5917), 'matplotlib.pyplot.plot', 'plt.plot', (['i_data', 'test_acc', '"""r--"""'], {'label': '"""Accuracy on the test set"""', 'linewidth': '(4)'}), "(i_data, test_acc, 'r--', label='Accuracy on the test set', linewidth=4\n )\n", (5840, 5917), True, 'import matplotlib.pyplot as plt\n'), ((5913, 5960), 'matplotlib.pyplot.title', 'plt.title', (['"""Accuracy on the train and test set"""'], {}), "('Accuracy on the train and test set')\n", (5922, 5960), True, 'import matplotlib.pyplot as plt\n'), ((5961, 5985), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Generation"""'], {}), "('Generation')\n", (5971, 5985), True, 'import matplotlib.pyplot as plt\n'), ((5986, 6008), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (5996, 6008), True, 'import matplotlib.pyplot as plt\n'), ((6009, 6038), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (6019, 6038), True, 'import matplotlib.pyplot as plt\n'), ((6039, 6049), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6047, 6049), True, 'import matplotlib.pyplot as plt\n'), ((2103, 2161), 'preprocessor.text_to_numbers', 'preprocessor.text_to_numbers', (['texts_train', 'word_dictionary'], {}), '(texts_train, word_dictionary)\n', (2131, 2161), False, 'import preprocessor\n'), ((2189, 2246), 'preprocessor.text_to_numbers', 'preprocessor.text_to_numbers', (['texts_test', 'word_dictionary'], {}), '(texts_test, 
word_dictionary)\n', (2217, 2246), False, 'import preprocessor\n'), ((2560, 2623), 'tensorflow.random_uniform', 'tf.random_uniform', (['[vocabulary_size, embedding_size]', '(-1.0)', '(1.0)'], {}), '([vocabulary_size, embedding_size], -1.0, 1.0)\n', (2577, 2623), True, 'import tensorflow as tf\n'), ((2701, 2744), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[embedding_size, 1]'}), '(shape=[embedding_size, 1])\n', (2717, 2744), True, 'import tensorflow as tf\n'), ((2761, 2791), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[1, 1]'}), '(shape=[1, 1])\n', (2777, 2791), True, 'import tensorflow as tf\n'), ((3188, 3211), 'tensorflow.matmul', 'tf.matmul', (['embed_avg', 'A'], {}), '(embed_avg, A)\n', (3197, 3211), True, 'import tensorflow as tf\n'), ((3284, 3361), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'logits': 'model_output', 'labels': 'y_target'}), '(logits=model_output, labels=y_target)\n', (3323, 3361), True, 'import tensorflow as tf\n'), ((3406, 3430), 'tensorflow.sigmoid', 'tf.sigmoid', (['model_output'], {}), '(model_output)\n', (3416, 3430), True, 'import tensorflow as tf\n'), ((3462, 3492), 'tensorflow.equal', 'tf.equal', (['prediction', 'y_target'], {}), '(prediction, y_target)\n', (3470, 3492), True, 'import tensorflow as tf\n'), ((4112, 4171), 'numpy.random.choice', 'np.random.choice', (['text_data_train.shape[0]'], {'size': 'batch_size'}), '(text_data_train.shape[0], size=batch_size)\n', (4128, 4171), True, 'import numpy as np\n'), ((4226, 4266), 'numpy.transpose', 'np.transpose', (['[target_train[rand_index]]'], {}), '([target_train[rand_index]])\n', (4238, 4266), True, 'import numpy as np\n'), ((745, 771), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (761, 771), False, 'import os\n'), ((5184, 5198), 'numpy.round', 'np.round', (['x', '(2)'], {}), '(x, 2)\n', (5192, 5198), True, 'import numpy as np\n'), ((5410, 5427), 'numpy.mean', 
'np.mean', (['test_acc'], {}), '(test_acc)\n', (5417, 5427), True, 'import numpy as np\n'), ((4673, 4700), 'numpy.transpose', 'np.transpose', (['[target_test]'], {}), '([target_test])\n', (4685, 4700), True, 'import numpy as np\n'), ((4977, 5004), 'numpy.transpose', 'np.transpose', (['[target_test]'], {}), '([target_test])\n', (4989, 5004), True, 'import numpy as np\n')] |
# Copyright 2019 DIVERSIS Software. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import pickle
trainDataSize = 50000  # total CIFAR-10 training images (5 batches x 10000)
testDataSize = 10000   # images per batch, and size of the test set
classNum = 10          # number of CIFAR-10 label classes
inputSize = 3072       # flattened image length: 32 * 32 * 3 channels
def unpickle(file):
    """Load a pickled CIFAR-10 batch file and return its contents.

    CIFAR-10 batch files were pickled under Python 2, so they must be
    loaded with ``encoding='bytes'`` on Python 3; the keys then come back
    as bytes (``b'data'``, ``b'labels'``, ``b'filenames'``), which is
    exactly how the callers below index the result. The parameter is
    ignored for ordinary Python-3 pickles, so this stays backward
    compatible.

    Args:
        file: Path to the pickled batch file.

    Returns:
        The unpickled object (a dict with bytes keys for CIFAR batches).
    """
    with open(file, 'rb') as fo:
        # Local renamed from `dict` to avoid shadowing the builtin.
        contents = pickle.load(fo, encoding='bytes')
    return contents
def get_data(dataDirectory):
    """Assemble the five CIFAR-10 training batches into full arrays.

    Args:
        dataDirectory: directory prefix containing data_batch_1..5.

    Returns:
        Tuple (images, one_hot_labels, filenames): images has shape
        (32, 32, 3, trainDataSize) with float32 pixels scaled to [0, 1],
        one_hot_labels is (classNum, trainDataSize), filenames is a
        length-trainDataSize array of batch file names.
    """
    images = np.zeros((inputSize, trainDataSize))
    one_hot = np.zeros((classNum, trainDataSize))
    # NOTE(review): the "string" dtype alias is Python-2-era NumPy and
    # raises on modern NumPy — confirm the NumPy version in use.
    file_names = np.zeros((trainDataSize)).astype("string")
    for batch_idx in range(5):
        batch_path = os.path.abspath(
            dataDirectory + 'data_batch_%d' % (batch_idx + 1))
        batch = unpickle(batch_path)
        # uint8 pixels -> float32 in [0, 1]; transpose puts samples on axis 1.
        pixels = (np.asarray(batch[b'data'].T).astype("uint8") / 255.0).astype("float32")
        labels = np.asarray(batch[b'labels'])
        lo = batch_idx * testDataSize
        hi = (batch_idx + 1) * testDataSize
        for offset in range(testDataSize):
            one_hot[labels[offset], lo + offset] = 1
        images[:, lo:hi] = pixels
        file_names[lo:hi] = np.asarray(batch[b'filenames'])
    # Reshape for RGB: channel-first flat layout -> (height, width, channel, sample)
    images = images.reshape(3, 32, 32, trainDataSize).transpose([1, 2, 0, 3])
    return images, one_hot, file_names
def get_data_test(dataDirectory):
    """Load the CIFAR-10 test batch.

    Args:
        dataDirectory: directory prefix containing the 'test_batch' file.

    Returns:
        Tuple (images, one_hot_labels, filenames): images has shape
        (32, 32, 3, testDataSize) with float32 pixels scaled to [0, 1].
    """
    one_hot = np.zeros((classNum, testDataSize))
    batch = unpickle(os.path.abspath(dataDirectory + 'test_batch'))
    # uint8 pixels -> float32 in [0, 1]; transpose puts samples on axis 1.
    images = (np.asarray(batch[b'data'].T).astype("uint8") / 255.0).astype("float32")
    labels = np.asarray(batch[b'labels'])
    for idx in range(testDataSize):
        one_hot[labels[idx], idx] = 1
    file_names = np.asarray(batch[b'filenames'])
    # Reshape for RGB: channel-first flat layout -> (height, width, channel, sample)
    images = images.reshape(3, 32, 32, testDataSize).transpose([1, 2, 0, 3])
    return images, one_hot, file_names
| [
"numpy.asarray",
"os.path.abspath",
"pickle.load",
"numpy.zeros"
] | [((851, 887), 'numpy.zeros', 'np.zeros', (['(inputSize, trainDataSize)'], {}), '((inputSize, trainDataSize))\n', (859, 887), True, 'import numpy as np\n'), ((902, 937), 'numpy.zeros', 'np.zeros', (['(classNum, trainDataSize)'], {}), '((classNum, trainDataSize))\n', (910, 937), True, 'import numpy as np\n'), ((1719, 1753), 'numpy.zeros', 'np.zeros', (['(classNum, testDataSize)'], {}), '((classNum, testDataSize))\n', (1727, 1753), True, 'import numpy as np\n'), ((1801, 1822), 'os.path.abspath', 'os.path.abspath', (['file'], {}), '(file)\n', (1816, 1822), False, 'import os\n'), ((1944, 1971), 'numpy.asarray', 'np.asarray', (["dict[b'labels']"], {}), "(dict[b'labels'])\n", (1954, 1971), True, 'import numpy as np\n'), ((2048, 2078), 'numpy.asarray', 'np.asarray', (["dict[b'filenames']"], {}), "(dict[b'filenames'])\n", (2058, 2078), True, 'import numpy as np\n'), ((778, 793), 'pickle.load', 'pickle.load', (['fo'], {}), '(fo)\n', (789, 793), False, 'import pickle\n'), ((1088, 1109), 'os.path.abspath', 'os.path.abspath', (['file'], {}), '(file)\n', (1103, 1109), False, 'import os\n'), ((1228, 1255), 'numpy.asarray', 'np.asarray', (["dict[b'labels']"], {}), "(dict[b'labels'])\n", (1238, 1255), True, 'import numpy as np\n'), ((1427, 1457), 'numpy.asarray', 'np.asarray', (["dict[b'filenames']"], {}), "(dict[b'filenames'])\n", (1437, 1457), True, 'import numpy as np\n'), ((950, 973), 'numpy.zeros', 'np.zeros', (['trainDataSize'], {}), '(trainDataSize)\n', (958, 973), True, 'import numpy as np\n'), ((1864, 1891), 'numpy.asarray', 'np.asarray', (["dict[b'data'].T"], {}), "(dict[b'data'].T)\n", (1874, 1891), True, 'import numpy as np\n'), ((1147, 1174), 'numpy.asarray', 'np.asarray', (["dict[b'data'].T"], {}), "(dict[b'data'].T)\n", (1157, 1174), True, 'import numpy as np\n')] |
# pylint: disable=missing-docstring, invalid-name
import unittest
import scipy.ndimage
import numpy as np
class TestMapCoordinates(unittest.TestCase):
    """Exercise scipy.ndimage.map_coordinates on a small 2x3 value grid.

    FIX: the original used ``np.float``, an alias for the builtin
    ``float`` that was deprecated in NumPy 1.20 and removed in NumPy
    1.24; every ``dtype=np.float`` is replaced by the equivalent
    ``dtype=float`` so the tests run on current NumPy with identical
    behavior.
    """
    def setUp(self):
        # 2x3 source grid: row i, column j holds value 3*i + j.
        self.values = np.array([
            [0, 1, 2],
            [3, 4, 5]
        ], dtype=float)

    def test_scipy_ndimage_map_coordinates(self):
        # Sampling exactly at an integer coordinate returns the grid value.
        i = [1]
        j = [2]
        result = scipy.ndimage.map_coordinates(self.values, (i, j))
        expect = 5
        np.testing.assert_array_almost_equal(expect, result)

    def test_scipy_ndimage_map_coordinates_given_all_indices(self):
        # Passing every (i, j) pair reconstructs the full input array.
        i = [[0, 0, 0],
             [1, 1, 1]]
        j = [[0, 1, 2],
             [0, 1, 2]]
        result = scipy.ndimage.map_coordinates(self.values, (i, j))
        expect = self.values
        np.testing.assert_array_almost_equal(expect, result)

    def test_scipy_ndimage_map_coordinates_no_spline_filter(self):
        """
        A spline filter is only used when order is greater than 1
        Calling map_coordinates with order=1 is equivalent to
        a bilinear interpolator
        """
        i = np.array([[0, 0, 0],
                      [1, 1, 1]], dtype=float)
        j = np.array([[0, 0.5, 2],
                      [0, 1, 2]], dtype=float)
        result = scipy.ndimage.map_coordinates(self.values, (i, j), order=1)
        expect = np.array([
            [0, 0.5, 2],
            [3, 4, 5]
        ], dtype=float)
        np.testing.assert_array_almost_equal(expect, result)

    def test_scipy_ndimage_map_coordinates_bilinear_example(self):
        """order=1 equivalent to bilinear interpolation
        0---1---2
        | x |   |
        3---4---5
        x = 2 under bilinear interpolation
        """
        i = np.array([[0.5]], dtype=float)
        j = np.array([[0.5]], dtype=float)
        result = scipy.ndimage.map_coordinates(self.values, (i, j), order=1)
        expect = np.array([[2.]], dtype=float)
        np.testing.assert_array_almost_equal(expect, result)
class TestMeshgrid(unittest.TestCase):
    """Check numpy.meshgrid with matrix ('ij') indexing."""

    def test_meshgrid_indexing_ij(self):
        # With indexing="ij", the first output varies along rows and the
        # second along columns (matrix convention, not Cartesian "xy").
        rows = [0, 1]
        cols = [0, 1, 2]
        grid_i, grid_j = np.meshgrid(rows, cols, indexing="ij")
        np.testing.assert_array_almost_equal(
            [[0, 0, 0],
             [1, 1, 1]], grid_i)
        np.testing.assert_array_almost_equal(
            [[0, 1, 2],
             [0, 1, 2]], grid_j)
| [
"numpy.testing.assert_array_almost_equal",
"numpy.meshgrid",
"numpy.array"
] | [((196, 244), 'numpy.array', 'np.array', (['[[0, 1, 2], [3, 4, 5]]'], {'dtype': 'np.float'}), '([[0, 1, 2], [3, 4, 5]], dtype=np.float)\n', (204, 244), True, 'import numpy as np\n'), ((457, 509), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expect', 'result'], {}), '(expect, result)\n', (493, 509), True, 'import numpy as np\n'), ((780, 832), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expect', 'result'], {}), '(expect, result)\n', (816, 832), True, 'import numpy as np\n'), ((1098, 1146), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 1, 1]]'], {'dtype': 'np.float'}), '([[0, 0, 0], [1, 1, 1]], dtype=np.float)\n', (1106, 1146), True, 'import numpy as np\n'), ((1181, 1231), 'numpy.array', 'np.array', (['[[0, 0.5, 2], [0, 1, 2]]'], {'dtype': 'np.float'}), '([[0, 0.5, 2], [0, 1, 2]], dtype=np.float)\n', (1189, 1231), True, 'import numpy as np\n'), ((1348, 1398), 'numpy.array', 'np.array', (['[[0, 0.5, 2], [3, 4, 5]]'], {'dtype': 'np.float'}), '([[0, 0.5, 2], [3, 4, 5]], dtype=np.float)\n', (1356, 1398), True, 'import numpy as np\n'), ((1441, 1493), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expect', 'result'], {}), '(expect, result)\n', (1477, 1493), True, 'import numpy as np\n'), ((1741, 1774), 'numpy.array', 'np.array', (['[[0.5]]'], {'dtype': 'np.float'}), '([[0.5]], dtype=np.float)\n', (1749, 1774), True, 'import numpy as np\n'), ((1787, 1820), 'numpy.array', 'np.array', (['[[0.5]]'], {'dtype': 'np.float'}), '([[0.5]], dtype=np.float)\n', (1795, 1820), True, 'import numpy as np\n'), ((1915, 1948), 'numpy.array', 'np.array', (['[[2.0]]'], {'dtype': 'np.float'}), '([[2.0]], dtype=np.float)\n', (1923, 1948), True, 'import numpy as np\n'), ((1956, 2008), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expect', 'result'], {}), '(expect, result)\n', (1992, 2008), True, 'import numpy as np\n'), ((2161, 2193), 
'numpy.meshgrid', 'np.meshgrid', (['i', 'j'], {'indexing': '"""ij"""'}), "(i, j, indexing='ij')\n", (2172, 2193), True, 'import numpy as np\n'), ((2356, 2412), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expect_i', 'result_i'], {}), '(expect_i, result_i)\n', (2392, 2412), True, 'import numpy as np\n'), ((2421, 2477), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expect_j', 'result_j'], {}), '(expect_j, result_j)\n', (2457, 2477), True, 'import numpy as np\n')] |
import matplotlib
import os
from termcolor import cprint
import matplotlib.pyplot as plt
import numpy as np
from itertools import chain
from utils import *
from utils_torch_filter import TORCHIEKF
import math
import copy
def normalize_rot(Rot):
    """Project a near-rotation 3x3 matrix onto SO(3).

    Replaces the singular values of *Rot* with ones (via SVD) and corrects
    a possible reflection so the result is the closest proper rotation
    matrix (determinant +1).
    """
    # numpy's svd returns (U, s, Vh) such that a = U @ diag(s) @ Vh.
    left, _, right_h = np.linalg.svd(Rot, full_matrices=False)
    correction = np.identity(3)
    # Force det(U @ correction @ Vh) = +1, flipping the last axis if needed.
    correction[2, 2] = np.linalg.det(left) * np.linalg.det(right_h)
    return left.dot(correction).dot(right_h)
def run_imu(t_imu, u_imu):
    """Dead-reckon raw IMU samples into orientation, velocity and position.

    Integrates the inputs *u_imu* sampled at times *t_imu*, starting from
    the identity orientation with zero velocity, position and biases.

    Returns (Rot_imu, ang_imu, v_imu, p_imu, b_omega_imu, b_acc_imu).
    """
    n_samples = len(u_imu)
    dt = t_imu[1:] - t_imu[:-1]
    Rot_imu = np.zeros((n_samples, 3, 3))
    v_imu = np.zeros((n_samples, 3))
    p_imu = np.zeros((n_samples, 3))
    b_omega_imu = np.zeros((n_samples, 3))
    b_acc_imu = np.zeros((n_samples, 3))
    Rot_imu[0] = np.identity(3)
    renorm_period = 200  # re-orthonormalize once every `renorm_period` steps
    for k in range(1, n_samples):
        (Rot_imu[k], v_imu[k], p_imu[k],
         b_omega_imu[k], b_acc_imu[k]) = propagate_imu(
            Rot_imu[k - 1], v_imu[k - 1], p_imu[k - 1],
            b_omega_imu[k - 1], b_acc_imu[k - 1],
            u_imu[k], dt[k - 1], u_imu[0])
        # Numerical integration slowly drifts off SO(3); project back
        # periodically to correct the accumulated error.
        if k % renorm_period == 0:
            Rot_imu[k] = normalize_rot(Rot_imu[k])
    # Convert every rotation matrix to roll/pitch/yaw columns.
    ang_imu = np.zeros((n_samples, 3))
    for k in range(n_samples):
        ang_imu[k, 0], ang_imu[k, 1], ang_imu[k, 2] = to_rpy(Rot_imu[k])
    return Rot_imu, ang_imu, v_imu, p_imu, b_omega_imu, b_acc_imu
def to_rpy(Rot):
    """Convert a rotation matrix to (roll, pitch, yaw) Euler angles.

    At the gimbal-lock singularity (pitch == +/- pi/2) yaw is fixed to
    zero and roll absorbs the remaining rotation.
    """
    pitch = np.arctan2(-Rot[2, 0], np.sqrt(Rot[0, 0] ** 2 + Rot[1, 0] ** 2))
    if np.isclose(pitch, np.pi / 2.):
        # Gimbal lock, pitched straight up: yaw and roll are coupled.
        return np.arctan2(Rot[0, 1], Rot[1, 1]), pitch, 0.
    if np.isclose(pitch, -np.pi / 2.):
        # Gimbal lock, pitched straight down.
        return -np.arctan2(Rot[0, 1], Rot[1, 1]), pitch, 0.
    inv_cos_pitch = 1. / np.cos(pitch)
    yaw = np.arctan2(Rot[1, 0] * inv_cos_pitch, Rot[0, 0] * inv_cos_pitch)
    roll = np.arctan2(Rot[2, 1] * inv_cos_pitch, Rot[2, 2] * inv_cos_pitch)
    return roll, pitch, yaw
def propagate_imu(Rot_prev, v_prev, p_prev, b_omega_prev, b_acc_prev, u, dt, u_0):
    """Advance one IMU integration step of duration *dt*.

    u[:3] are gyro rates and u[3:6] accelerometer readings; the biases are
    carried through unchanged.  *u_0* (the first IMU sample) is currently
    unused — presumably kept for an alternative bias-reference scheme.
    Returns the propagated (Rot, v, p, b_omega, b_acc).
    """
    gravity = np.array([0, 0, -9.80665])
    # Bias-corrected specific force rotated into the world frame, plus gravity.
    acc_world = Rot_prev.dot(u[3:6] + gravity - b_acc_prev)
    v = v_prev + acc_world * dt
    # Constant-acceleration kinematics: p + v*dt + a*dt^2/2.
    p = p_prev + v_prev * dt + 1 / 2 * acc_world * math.pow(dt, 2)
    # Bias-corrected angular rate integrated on SO(3) via the exponential map.
    Rot = Rot_prev.dot(so3exp((u[:3] - b_omega_prev) * dt))
    return Rot, v, p, b_omega_prev, b_acc_prev
def so3exp(phi):
    """Exponential map from so(3) to SO(3) (Rodrigues' rotation formula).

    *phi* is a 3-vector (unit axis scaled by the rotation angle); returns
    the corresponding 3x3 rotation matrix.
    """
    theta = np.linalg.norm(phi)
    if np.abs(theta) < 1e-8:
        # First-order Taylor expansion near zero avoids dividing by a
        # vanishing angle: R ~= I + [phi]_x.
        return np.identity(3) + np.array([[0, -phi[2], phi[1]],
                                          [phi[2], 0, -phi[0]],
                                          [-phi[1], phi[0], 0]])
    u = phi / theta
    u_skew = np.array([[0, -u[2], u[1]],
                       [u[2], 0, -u[0]],
                       [-u[1], u[0], 0]])
    s = np.sin(theta)
    c = np.cos(theta)
    # Rodrigues: R = cos(t) I + (1 - cos(t)) u u^T + sin(t) [u]_x
    return c * np.identity(3) + (1 - c) * np.outer(u, u) + s * u_skew
def results_filter(args, dataset):
    """Plot IMU dead-reckoned orientation against the ground truth.

    For every sequence in *dataset*: load the ground-truth trajectory and
    raw IMU inputs, integrate the IMU with run_imu(), and show the yaw of
    both together with +/- pi guide lines.  Blocks on each figure until it
    is closed.  *args* is not used by the active code; it is kept for
    interface compatibility with callers.
    """
    for i in range(len(dataset.datasets)):
        plt.close('all')
        dataset_name = dataset.dataset_name(i)
        # Ground truth and IMU inputs (torch tensors -> local numpy copies).
        t, ang_gt, p_gt, v_gt, u = dataset.get_data(dataset_name)
        t = (t - t[0]).numpy()
        u = u.cpu().numpy()
        ang_gt = ang_gt.cpu().numpy()
        v_gt = v_gt.cpu().numpy()
        p_gt = (p_gt - p_gt[0]).cpu().numpy()
        print("Total sequence time: {:.2f} s".format(t[-1]))

        # Dead-reckon the IMU from the first sample.
        Rot_imu, ang_imu, v_imu, p_imu, b_omega_imu, b_acc_imu = run_imu(t, u)

        # Orientation figure: ground-truth vs IMU yaw with +/- pi guides.
        fig2, ax2 = plt.subplots(sharex=True, figsize=(20, 10))
        # Express ground-truth angles relative to their initial value
        # (vectorized; only the local numpy copy is modified).
        ang_gt = ang_gt - ang_gt[0]
        # Guide lines at +/- pi (previously hard-coded as 3.141592).
        pi_up = np.full(len(t), np.pi)
        pi_down = np.full(len(t), -np.pi)
        ax2.plot(t, ang_gt[:, 2])
        ax2.plot(t, ang_imu[:, 2])
        ax2.plot(t, pi_up, 'k', linestyle='--')
        ax2.plot(t, pi_down, 'k', linestyle='--')
        ax2.set(xlabel='time (s)', ylabel=r'$\phi_n, \theta_n, \psi_n$ (rad)',
                title="Orientation")
        ax2.grid()
        ax2.legend([r'gt', r'imu'])
        plt.show(block=True)
| [
"copy.deepcopy",
"numpy.outer",
"numpy.arctan2",
"numpy.abs",
"matplotlib.pyplot.show",
"math.pow",
"matplotlib.pyplot.close",
"numpy.zeros",
"numpy.identity",
"numpy.isclose",
"numpy.linalg.svd",
"numpy.array",
"numpy.linalg.norm",
"numpy.sin",
"numpy.cos",
"numpy.linalg.det",
"matp... | [((367, 406), 'numpy.linalg.svd', 'np.linalg.svd', (['Rot'], {'full_matrices': '(False)'}), '(Rot, full_matrices=False)\n', (380, 406), True, 'import numpy as np\n'), ((416, 430), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (427, 430), True, 'import numpy as np\n'), ((605, 624), 'numpy.zeros', 'np.zeros', (['(N, 3, 3)'], {}), '((N, 3, 3))\n', (613, 624), True, 'import numpy as np\n'), ((637, 653), 'numpy.zeros', 'np.zeros', (['(N, 3)'], {}), '((N, 3))\n', (645, 653), True, 'import numpy as np\n'), ((666, 682), 'numpy.zeros', 'np.zeros', (['(N, 3)'], {}), '((N, 3))\n', (674, 682), True, 'import numpy as np\n'), ((701, 717), 'numpy.zeros', 'np.zeros', (['(N, 3)'], {}), '((N, 3))\n', (709, 717), True, 'import numpy as np\n'), ((734, 750), 'numpy.zeros', 'np.zeros', (['(N, 3)'], {}), '((N, 3))\n', (742, 750), True, 'import numpy as np\n'), ((769, 783), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (780, 783), True, 'import numpy as np\n'), ((1684, 1714), 'numpy.isclose', 'np.isclose', (['pitch', '(np.pi / 2.0)'], {}), '(pitch, np.pi / 2.0)\n', (1694, 1714), True, 'import numpy as np\n'), ((2255, 2281), 'numpy.array', 'np.array', (['[0, 0, -9.80665]'], {}), '([0, 0, -9.80665])\n', (2263, 2281), True, 'import numpy as np\n'), ((2664, 2683), 'numpy.linalg.norm', 'np.linalg.norm', (['phi'], {}), '(phi)\n', (2678, 2683), True, 'import numpy as np\n'), ((2983, 3070), 'numpy.array', 'np.array', (['[[0, -axis[2], axis[1]], [axis[2], 0, -axis[0]], [-axis[1], axis[0], 0]]'], {}), '([[0, -axis[2], axis[1]], [axis[2], 0, -axis[0]], [-axis[1], axis[0\n ], 0]])\n', (2991, 3070), True, 'import numpy as np\n'), ((3114, 3127), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (3120, 3127), True, 'import numpy as np\n'), ((3136, 3149), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (3142, 3149), True, 'import numpy as np\n'), ((445, 461), 'numpy.linalg.det', 'np.linalg.det', (['U'], {}), '(U)\n', (458, 461), True, 'import numpy as np\n'), 
((464, 480), 'numpy.linalg.det', 'np.linalg.det', (['V'], {}), '(V)\n', (477, 480), True, 'import numpy as np\n'), ((1638, 1678), 'numpy.sqrt', 'np.sqrt', (['(Rot[0, 0] ** 2 + Rot[1, 0] ** 2)'], {}), '(Rot[0, 0] ** 2 + Rot[1, 0] ** 2)\n', (1645, 1678), True, 'import numpy as np\n'), ((1747, 1779), 'numpy.arctan2', 'np.arctan2', (['Rot[0, 1]', 'Rot[1, 1]'], {}), '(Rot[0, 1], Rot[1, 1])\n', (1757, 1779), True, 'import numpy as np\n'), ((1789, 1820), 'numpy.isclose', 'np.isclose', (['pitch', '(-np.pi / 2.0)'], {}), '(pitch, -np.pi / 2.0)\n', (1799, 1820), True, 'import numpy as np\n'), ((2744, 2757), 'numpy.abs', 'np.abs', (['angle'], {}), '(angle)\n', (2750, 2757), True, 'import numpy as np\n'), ((2785, 2861), 'numpy.array', 'np.array', (['[[0, -phi[2], phi[1]], [phi[2], 0, -phi[0]], [-phi[1], phi[0], 0]]'], {}), '([[0, -phi[2], phi[1]], [phi[2], 0, -phi[0]], [-phi[1], phi[0], 0]])\n', (2793, 2861), True, 'import numpy as np\n'), ((3321, 3337), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3330, 3337), True, 'import matplotlib.pyplot as plt\n'), ((4860, 4903), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'sharex': '(True)', 'figsize': '(20, 10)'}), '(sharex=True, figsize=(20, 10))\n', (4872, 4903), True, 'import matplotlib.pyplot as plt\n'), ((4926, 4953), 'copy.deepcopy', 'copy.deepcopy', (['ang_gt[0, :]'], {}), '(ang_gt[0, :])\n', (4939, 4953), False, 'import copy\n'), ((14171, 14191), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (14179, 14191), True, 'import matplotlib.pyplot as plt\n'), ((1950, 2006), 'numpy.arctan2', 'np.arctan2', (['(Rot[1, 0] * sec_pitch)', '(Rot[0, 0] * sec_pitch)'], {}), '(Rot[1, 0] * sec_pitch, Rot[0, 0] * sec_pitch)\n', (1960, 2006), True, 'import numpy as np\n'), ((2050, 2106), 'numpy.arctan2', 'np.arctan2', (['(Rot[2, 1] * sec_pitch)', '(Rot[2, 2] * sec_pitch)'], {}), '(Rot[2, 1] * sec_pitch, Rot[2, 2] * sec_pitch)\n', (2060, 2106), True, 'import numpy as np\n'), 
((2455, 2470), 'math.pow', 'math.pow', (['dt', '(2)'], {}), '(dt, 2)\n', (2463, 2470), False, 'import math\n'), ((2917, 2931), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (2928, 2931), True, 'import numpy as np\n'), ((1854, 1886), 'numpy.arctan2', 'np.arctan2', (['Rot[0, 1]', 'Rot[1, 1]'], {}), '(Rot[0, 1], Rot[1, 1])\n', (1864, 1886), True, 'import numpy as np\n'), ((1922, 1935), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (1928, 1935), True, 'import numpy as np\n'), ((3166, 3180), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (3177, 3180), True, 'import numpy as np\n'), ((3193, 3213), 'numpy.outer', 'np.outer', (['axis', 'axis'], {}), '(axis, axis)\n', (3201, 3213), True, 'import numpy as np\n')] |
from numpy.core.multiarray import dtype
from numpy.core.multiarray import array
def digitize(x, bins, right=False):
    """Pure-python twin of numpy.digitize for monotonic bins.

    Returns, for each element of *x*, the index of the bin it falls into.
    *bins* may be monotonically increasing or decreasing; *right* selects
    which edge of each interval is closed.
    """
    x = array(x, dtype=dtype('float'))
    bins = array(bins, dtype=dtype('float'))
    if len(bins) == 0:
        raise ValueError("bins must have non-zero length")
    direction = check_monotonic(bins)
    if direction == 0:
        raise ValueError("bins must be monotonically increasing or decreasing")
    ascending = direction == 1
    if not ascending:
        bins = bins[::-1]  # searchsorted requires ascending order
    side = 'left' if right else 'right'
    indices = bins.searchsorted(x, side=side)
    # Map indices back to the original (descending) orientation.
    return indices if ascending else len(bins) - indices
def check_monotonic(a):
    """
    Classify the monotonicity of a non-empty sequence.

    Parameters
    ----------
    a
        input array

    Returns
    -------
    -1 -- for monotonic, non-increasing
     0 -- for non-monotonic
     1 -- for monotonic, non-decreasing

    A constant sequence counts as non-decreasing (returns 1), matching
    the doctests below.

    >>> check_monotonic([1,2,3])
    1
    >>> check_monotonic([3,2,1])
    -1
    >>> check_monotonic([3,1,2])
    0
    >>> check_monotonic([1, 1, 1, 3, 100])
    1
    >>> check_monotonic([1, 1, 1, 0, -1])
    -1
    >>> check_monotonic([1, 1, 1, 3, 2])
    0
    >>> check_monotonic([1123123])
    1
    """
    assert len(a) > 0
    # Two linear scans over adjacent pairs replace the original dual
    # while-loops (which also shadowed the builtin `next`).
    pairs = list(zip(a, a[1:]))
    if all(x <= y for x, y in pairs):
        return 1
    if all(x >= y for x, y in pairs):
        return -1
    return 0
| [
"numpy.core.multiarray.dtype"
] | [((142, 156), 'numpy.core.multiarray.dtype', 'dtype', (['"""float"""'], {}), "('float')\n", (147, 156), False, 'from numpy.core.multiarray import dtype\n'), ((187, 201), 'numpy.core.multiarray.dtype', 'dtype', (['"""float"""'], {}), "('float')\n", (192, 201), False, 'from numpy.core.multiarray import dtype\n')] |
#!/usr/local/bin/python3
from robot import robot
import sys, time, threading
import numpy as np
import PyQt5.QtGui as QtGui
import PyQt5.QtCore as QtCore
import pyqtgraph as pg
AXIS_PLOT_SIZE = 400  # presumably a plot dimension in pixels; not referenced in this file — TODO confirm
MEDIAN_LENGTH = 30  # presumably a rolling-median window length; not referenced in this file — TODO confirm
class App(QtGui.QMainWindow):
    """Main window that live-plots robot range/IMU data with drive controls.

    A background thread polls the serial-connected robot; the GUI refresh
    loop (_update) re-arms itself via a 1 ms QTimer single-shot, copies the
    shared sensor state under a lock, and redraws the pyqtgraph plots,
    stats label and FPS counter.

    Fix: closeEvent previously called self.stopLog(), a method App does not
    define (the robot owns the data log) — it now calls self.robot.stopLog().
    """

    def __init__(self, parent=None):
        """Create the robot interface, build all widgets and plots, and
        start the update loop."""
        super(App, self).__init__(parent)
        pg.setConfigOptions(useOpenGL=1)  # OpenGL rendering for speed
        # Create a robot
        self.robot = robot()

        #### Create Gui Elements ###########
        self.mainbox = QtGui.QWidget()
        self.setCentralWidget(self.mainbox)
        self.mainbox.setLayout(QtGui.QBoxLayout(QtGui.QBoxLayout.TopToBottom))

        self.hlayout = QtGui.QHBoxLayout()
        self.vlayout = QtGui.QVBoxLayout()

        # Define widgets
        self.pgcanvas = pg.GraphicsLayoutWidget()
        self.buttonLogStart = QtGui.QPushButton('Start data log')
        self.buttonLogStart.clicked.connect(self.robot.startLog)
        self.buttonLogStop = QtGui.QPushButton('Stop data log')
        self.buttonLogStop.clicked.connect(self.robot.stopLog)
        self.serialStatuslabel = QtGui.QLabel()
        self.serialStatuslabel.setText('Serial: Not connected.')
        self.serialConsole = QtGui.QLineEdit()
        self.serialConsole.returnPressed.connect(self.writeSerialConsole)
        self.buttonForward = QtGui.QPushButton('Forward')
        self.buttonForward.clicked.connect(self.robot.botCmdForwardButton)
        self.buttonStop = QtGui.QPushButton('Stop')
        self.buttonStop.clicked.connect(self.robot.botCmdStopButton)
        self.buttonReverse = QtGui.QPushButton('Reverse')
        self.buttonReverse.clicked.connect(self.robot.botCmdReverseButton)
        self.positionlabel = QtGui.QLabel()
        newfont = QtGui.QFont("courier")  # monospace keeps the stats table aligned
        self.positionlabel.setFont(newfont)
        self.fpslabel = QtGui.QLabel()
        self.fpslabel.setFixedWidth(200)

        # Add widgets/layouts to the layouts
        self.vlayout.addWidget(self.buttonLogStart)
        self.vlayout.addWidget(self.buttonLogStop)
        self.vlayout.addWidget(self.serialStatuslabel)
        self.vlayout.addWidget(self.serialConsole)
        self.vlayout.addWidget(self.buttonForward)
        self.vlayout.addWidget(self.buttonStop)
        self.vlayout.addWidget(self.buttonReverse)
        self.vlayout.addWidget(self.positionlabel)
        self.vlayout.addWidget(self.fpslabel)
        self.hlayout.addWidget(self.pgcanvas)
        self.hlayout.addLayout(self.vlayout)
        self.mainbox.layout().addLayout(self.hlayout)

        # XY plot: top-down map of the robot position (mm)
        self.plot_xy = self.pgcanvas.addPlot(0,1,labels={'bottom':'X distance (mm)','left':'Y distance(mm)'})
        self.plot_xy.showGrid(1,1,255)
        self.plot_xy.setDownsampling(ds=True, auto=True, mode='peak')
        self.plot_xy.getAxis('left').setTickSpacing(100, 50)
        self.plot_xy.getAxis('bottom').setTickSpacing(100, 50)
        self.plot_xy.setXRange(0, 1000, padding=0)
        self.plot_xy.setYRange(0, 1000, padding=0)

        # Y range plot: raw (yellow) + Kalman (red) history and histogram
        self.plot_y = self.pgcanvas.addPlot(0,0,labels={'left':'Latest sample #','bottom':'Y distance(mm)'})
        self.plot_y.showGrid(1,1,255)
        #self.plot_y.setDownsampling(ds=True, auto=True, mode='peak')
        self.plot_y.invertY()
        self.plot_y.setYRange(0, self.robot.max_hist_len, padding=0)
        self.plot_y_raw = self.plot_y.plot(pen='y')
        self.plot_y_kalman = self.plot_y.plot(pen='r')
        self.plot_y_hist = self.plot_y.plot( stepMode=True, fillLevel=0, brush=(0,0,255,150))

        # X range plot: raw (yellow) + Kalman (red) history and histogram
        self.plot_x = self.pgcanvas.addPlot(1,1,labels={'left':'Latest sample #','bottom':'X distance(mm)'})
        self.plot_x.showGrid(1,1,255)
        #self.plot_x.setDownsampling(ds=True, auto=True, mode='peak')
        self.plot_x.invertY()
        self.plot_x.setYRange(0, self.robot.max_hist_len, padding=0)
        self.plot_x_raw = self.plot_x.plot(pen='y')
        self.plot_x_kalman = self.plot_x.plot(pen='r')
        self.plot_x_hist = self.plot_x.plot( stepMode=True, fillLevel=0, brush=(0,0,255,150))

        # Position arrow marking the estimated pose on the XY plot
        self.abs_position_arrow = pg.ArrowItem(angle=0, tipAngle=45, headLen=15, tailLen=15, tailWidth=3, brush='y')
        self.abs_position_arrow.rotate(90)
        self.plot_xy.addItem(self.abs_position_arrow)

        # Control/data signals
        self.x = np.linspace(0, self.robot.max_hist_len + 1, num = self.robot.max_hist_len)
        # NOTE(review): the chained assignment also creates an unused local 'bins'.
        self.histbins = bins=np.linspace(0, 1000, 1000)
        self.fps = 0.
        self.lastupdate = time.time()
        self.arrow_angle = 0
        self.exiting = False
        self.sensor_update_thread = threading.Thread(target=self.updateSensorValueWorker)
        self.thread_lock = threading.Lock()

        #### Start #####################
        self._update()

    def _update(self):
        """One GUI refresh: reconnect serial if needed, snapshot the shared
        sensor state under the lock, redraw labels and plots, and re-arm
        the update timer."""
        # If no serial available, try to open a new one
        if (self.robot.ser_available == False):
            self.serialStatuslabel.setText('Serial: Not connected.')
            self.robot.openSerial()
            if (self.robot.ser_available):
                self.serialStatuslabel.setText('Serial: Connected.')
                self.sensor_update_thread = threading.Thread(target=self.updateSensorValueWorker)
                self.sensor_update_thread.start()

        # Acquire thread lock to get data from thread
        self.thread_lock.acquire()
        self.sensor_x_deque = self.robot.sensor_x.vals
        self.sensor_x_median = self.robot.sensor_x.winMedian()
        self.sensor_x_var = self.robot.sensor_x.winVar()
        self.sensor_y_deque = self.robot.sensor_y.vals
        self.sensor_y_median = self.robot.sensor_y.winMedian()
        self.sensor_y_var = self.robot.sensor_y.winVar()
        self.sensor_accel_x_median = self.robot.sensor_accel_x.winMedian()
        self.sensor_accel_x_var = self.robot.sensor_accel_x.winVar()
        self.sensor_accel_y_median = self.robot.sensor_accel_y.winMedian()
        self.sensor_accel_z_median = self.robot.sensor_accel_z.winMedian()
        self.sensor_gyro_x_median = self.robot.sensor_gyro_x.winMedian()
        self.sensor_gyro_y_median = self.robot.sensor_gyro_y.winMedian()
        self.sensor_gyro_z_median = self.robot.sensor_gyro_z.winMedian()
        self.sensor_mag_median = self.robot.sensor_mag.winMedian()
        self.sensor_mag_ref = self.robot.sensor_mag_ref
        # Kalman states
        self.kalman_x_deque = self.robot.kalman_x.vals
        self.kalman_y_deque = self.robot.kalman_y.vals
        self.kalman_x_median = self.robot.kalman_x.winMedian()
        self.kalman_x_var = self.robot.kalman_x.winVar()
        self.kalman_dx_median = self.robot.kalman_dx.winMedian()
        self.kalman_y_median = self.robot.kalman_y.winMedian()
        self.kalman_dy_median = self.robot.kalman_dy.winMedian()
        self.dt_mean = self.robot.dt.winMean()
        self.dt_var = self.robot.dt.winVar()
        self.thread_lock.release()

        # Calculate some stats (epsilons avoid division by zero)
        self.var_ratio = self.kalman_x_var / (self.sensor_x_var + 0.00000001)
        self.data_rate = 1000.0 / (self.dt_mean + 0.00000001)

        # Update the data label
        positionlabel_str = 'Median X: \t%d \tmm\n' % self.sensor_x_median + \
            'Var X: \t\t%0.2f \tmm^2\n' % self.sensor_x_var + \
            '\nMedian Y: \t%d \tmm\n' % self.sensor_y_median + \
            'Var y: \t\t%0.2f \tmm^2\n' % self.sensor_y_var + \
            '\nAngle: \t\t%0.1f \tdeg\n' % self.sensor_mag_median + \
            'Ref angle: \t%0.1f \tdeg\n' % self.sensor_mag_ref + \
            '\nKalman states:\n' + \
            'X: \t\t%d \tmm/s\n' % self.kalman_x_median + \
            'dX: \t\t%+0.3f \tmm/s/s\n' % self.kalman_dx_median + \
            'Var X: \t\t%0.2f \tmm^2\n' % self.kalman_x_var + \
            'Var ratio X: \t%0.2f\n' % self.var_ratio + \
            'Y: \t\t%d \tmm/s\n' % self.kalman_y_median + \
            'dY: \t\t%+0.3f \tmm/s/s\n' % self.kalman_dy_median + \
            '\nX accel: \t%+0.1f \tmm/s/s\n' % self.sensor_accel_x_median + \
            'X accel var: \t%d \tmm/s/s\n' % self.sensor_accel_x_var + \
            'Y accel: \t%+0.1f \tmm/s/s\n' % self.sensor_accel_y_median + \
            'Z accel: \t%+0.1f \tmm/s/s\n' % self.sensor_accel_z_median + \
            '\nX gyro: \t%+0.1f \tdps\n' % self.sensor_gyro_x_median + \
            'Y gyro: \t%+0.1f \tdps\n' % self.sensor_gyro_y_median + \
            'Z gyro: \t%+0.1f \tdps\n' % self.sensor_gyro_z_median + \
            '\nData rate: \t%0.1f \tHz\n' % self.data_rate + \
            'Data rate per: \t%0.3f \tms\n' % self.dt_mean + \
            'Data rate var: \t%0.4f \tms\n' % self.dt_var
        self.positionlabel.setText(positionlabel_str)

        # Convert deques to lists for easier processing
        self.sensor_x_list = list(self.sensor_x_deque)
        self.kalman_x_list = list(self.kalman_x_deque)
        self.sensor_y_list = list(self.sensor_y_deque)
        self.kalman_y_list = list(self.kalman_y_deque)

        # Update plots
        self.plot_x_raw.setData(self.sensor_x_list, self.x)
        self.plot_x_kalman.setData(self.kalman_x_list, self.x)
        self.plot_y_raw.setData(self.sensor_y_list, self.x)
        self.plot_y_kalman.setData(self.kalman_y_list, self.x)
        plot_x_y,plot_x_x = self.reducedHistogram(self.sensor_x_list, self.histbins)
        plot_y_y,plot_y_x = self.reducedHistogram(self.sensor_y_list, self.histbins)
        self.plot_x_hist.setData(plot_x_x,plot_x_y)
        self.plot_y_hist.setData(plot_y_x,plot_y_y)

        # Set xy plot arrow's position and angle
        self.abs_position_arrow.setPos(self.kalman_x_median,self.kalman_y_median)
        self.setArrowAngle(90.0 - self.sensor_mag_median)

        # Exponentially-smoothed frame-rate estimate
        now = time.time()
        dt = (now-self.lastupdate)
        if dt <= 0:
            dt = 0.000000000001
        fps2 = 1.0 / dt
        self.lastupdate = now
        self.fps = self.fps * 0.9 + fps2 * 0.1
        self.fpslabel.setText('Mean Frame Rate: {fps:0.0f} FPS'.format(fps=self.fps))
        # Re-arm the refresh loop unless the window is closing
        if (not self.exiting):
            QtCore.QTimer.singleShot(1, self._update)

    def updateSensorValueWorker(self):
        """Background thread body: poll the robot's sensors under the
        shared lock until the window starts closing."""
        if (not self.exiting):
            print("Sensor update worker started.")
        while (not self.exiting):
            self.thread_lock.acquire()
            self.robot.updateSensorValue()
            self.thread_lock.release()

    # Gets the histogram but without empty bins
    def reducedHistogram(self, data_list, bins):
        """Histogram *data_list* into *bins* and trim leading/trailing empty
        bins; returns (counts, edges) suitable for a pyqtgraph step plot."""
        y,x = np.histogram(data_list, bins)
        nonzero_indices = np.nonzero(y)
        # NOTE(review): np.nonzero returns a 1-tuple for 1-D input, so this
        # debug guard can never fire; it would need nonzero_indices[0].
        if (len(nonzero_indices) == 0):
            print("fuck")
        if (len(y) == 0):
            print("fuk")
        y_reduced = y[nonzero_indices[0][0]:nonzero_indices[0][-1] + 1]
        x_reduced = x[nonzero_indices[0][0]:nonzero_indices[0][-1] + 2]
        return y_reduced, x_reduced

    def setArrowAngle(self, angle):
        """Rotate the position arrow to absolute *angle* (degrees) by
        applying the delta from the currently tracked angle."""
        self.abs_position_arrow.rotate(angle - self.arrow_angle)
        self.arrow_angle = angle

    def writeSerialConsole(self):
        """Send the console line to the robot (CR-terminated) and clear the
        input field."""
        cmd_str = self.serialConsole.text()
        self.serialConsole.setText('')
        if (self.robot.ser_available):
            cmd_str += '\r'
            self.robot.ser.write(cmd_str.encode('utf-8'))

    def closeEvent(self, *args, **kwargs):
        """Signal the worker thread to stop and release the serial port and
        data log on window close."""
        self.exiting = True
        # NOTE(review): super(QtGui.QMainWindow, self) skips QMainWindow's
        # own closeEvent handler — confirm this is intentional.
        super(QtGui.QMainWindow, self).closeEvent(*args, **kwargs)
        if (self.robot.ser_available): self.robot.closeSerial()
        # Fix: App defines no stopLog(); the robot owns the data log.
        if (self.robot.data_log_enable): self.robot.stopLog()
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the main window,
    # and hand control to the Qt event loop until the process exits.
    app = QtGui.QApplication(sys.argv)
    thisapp = App()
    thisapp.show()
    sys.exit(app.exec_())
| [
"PyQt5.QtGui.QHBoxLayout",
"PyQt5.QtGui.QWidget",
"PyQt5.QtGui.QBoxLayout",
"numpy.histogram",
"pyqtgraph.GraphicsLayoutWidget",
"robot.robot",
"PyQt5.QtGui.QVBoxLayout",
"threading.Lock",
"numpy.linspace",
"PyQt5.QtGui.QApplication",
"PyQt5.QtGui.QLabel",
"threading.Thread",
"PyQt5.QtGui.QL... | [((12025, 12053), 'PyQt5.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (12043, 12053), True, 'import PyQt5.QtGui as QtGui\n'), ((337, 369), 'pyqtgraph.setConfigOptions', 'pg.setConfigOptions', ([], {'useOpenGL': '(1)'}), '(useOpenGL=1)\n', (356, 369), True, 'import pyqtgraph as pg\n'), ((430, 437), 'robot.robot', 'robot', ([], {}), '()\n', (435, 437), False, 'from robot import robot\n'), ((507, 522), 'PyQt5.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (520, 522), True, 'import PyQt5.QtGui as QtGui\n'), ((669, 688), 'PyQt5.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', ([], {}), '()\n', (686, 688), True, 'import PyQt5.QtGui as QtGui\n'), ((712, 731), 'PyQt5.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (729, 731), True, 'import PyQt5.QtGui as QtGui\n'), ((782, 807), 'pyqtgraph.GraphicsLayoutWidget', 'pg.GraphicsLayoutWidget', ([], {}), '()\n', (805, 807), True, 'import pyqtgraph as pg\n'), ((838, 873), 'PyQt5.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Start data log"""'], {}), "('Start data log')\n", (855, 873), True, 'import PyQt5.QtGui as QtGui\n'), ((968, 1002), 'PyQt5.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Stop data log"""'], {}), "('Stop data log')\n", (985, 1002), True, 'import PyQt5.QtGui as QtGui\n'), ((1099, 1113), 'PyQt5.QtGui.QLabel', 'QtGui.QLabel', ([], {}), '()\n', (1111, 1113), True, 'import PyQt5.QtGui as QtGui\n'), ((1208, 1225), 'PyQt5.QtGui.QLineEdit', 'QtGui.QLineEdit', ([], {}), '()\n', (1223, 1225), True, 'import PyQt5.QtGui as QtGui\n'), ((1329, 1357), 'PyQt5.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Forward"""'], {}), "('Forward')\n", (1346, 1357), True, 'import PyQt5.QtGui as QtGui\n'), ((1459, 1484), 'PyQt5.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Stop"""'], {}), "('Stop')\n", (1476, 1484), True, 'import PyQt5.QtGui as QtGui\n'), ((1583, 1611), 'PyQt5.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Reverse"""'], {}), "('Reverse')\n", (1600, 1611), True, 
'import PyQt5.QtGui as QtGui\n'), ((1716, 1730), 'PyQt5.QtGui.QLabel', 'QtGui.QLabel', ([], {}), '()\n', (1728, 1730), True, 'import PyQt5.QtGui as QtGui\n'), ((1749, 1771), 'PyQt5.QtGui.QFont', 'QtGui.QFont', (['"""courier"""'], {}), "('courier')\n", (1760, 1771), True, 'import PyQt5.QtGui as QtGui\n'), ((1840, 1854), 'PyQt5.QtGui.QLabel', 'QtGui.QLabel', ([], {}), '()\n', (1852, 1854), True, 'import PyQt5.QtGui as QtGui\n'), ((4149, 4235), 'pyqtgraph.ArrowItem', 'pg.ArrowItem', ([], {'angle': '(0)', 'tipAngle': '(45)', 'headLen': '(15)', 'tailLen': '(15)', 'tailWidth': '(3)', 'brush': '"""y"""'}), "(angle=0, tipAngle=45, headLen=15, tailLen=15, tailWidth=3,\n brush='y')\n", (4161, 4235), True, 'import pyqtgraph as pg\n'), ((4378, 4450), 'numpy.linspace', 'np.linspace', (['(0)', '(self.robot.max_hist_len + 1)'], {'num': 'self.robot.max_hist_len'}), '(0, self.robot.max_hist_len + 1, num=self.robot.max_hist_len)\n', (4389, 4450), True, 'import numpy as np\n'), ((4482, 4508), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', '(1000)'], {}), '(0, 1000, 1000)\n', (4493, 4508), True, 'import numpy as np\n'), ((4557, 4568), 'time.time', 'time.time', ([], {}), '()\n', (4566, 4568), False, 'import sys, time, threading\n'), ((4663, 4716), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.updateSensorValueWorker'}), '(target=self.updateSensorValueWorker)\n', (4679, 4716), False, 'import sys, time, threading\n'), ((4744, 4760), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (4758, 4760), False, 'import sys, time, threading\n'), ((10218, 10229), 'time.time', 'time.time', ([], {}), '()\n', (10227, 10229), False, 'import sys, time, threading\n'), ((10981, 11010), 'numpy.histogram', 'np.histogram', (['data_list', 'bins'], {}), '(data_list, bins)\n', (10993, 11010), True, 'import numpy as np\n'), ((11037, 11050), 'numpy.nonzero', 'np.nonzero', (['y'], {}), '(y)\n', (11047, 11050), True, 'import numpy as np\n'), ((598, 644), 'PyQt5.QtGui.QBoxLayout', 
'QtGui.QBoxLayout', (['QtGui.QBoxLayout.TopToBottom'], {}), '(QtGui.QBoxLayout.TopToBottom)\n', (614, 644), True, 'import PyQt5.QtGui as QtGui\n'), ((10548, 10589), 'PyQt5.QtCore.QTimer.singleShot', 'QtCore.QTimer.singleShot', (['(1)', 'self._update'], {}), '(1, self._update)\n', (10572, 10589), True, 'import PyQt5.QtCore as QtCore\n'), ((5292, 5345), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.updateSensorValueWorker'}), '(target=self.updateSensorValueWorker)\n', (5308, 5345), False, 'import sys, time, threading\n')] |
from os.path import join
import numpy as np
import pandas as pd
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
from rdkit.Chem import MACCSkeys
from mordred import Calculator, descriptors
from pymudra.mudra import MUDRAEstimator
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
def Q2(y, y_pred):
press = np.sum((y_pred-y)**2)
tss=np.sum((y-y.mean())**2)
return 1-(press/tss)
def clean_descriptors(X, idx):
X=X[:,np.all(np.isfinite(X), axis=0)]
X = X[:, ~(X.std(axis=0)<0.02)]
corrs = np.array([1])
while np.any(corrs>0.95):
corrs = np.corrcoef(X.T)**2
corrs[np.triu_indices_from(corrs)]=0
idx1, idx2 = np.unravel_index(np.argmax(corrs), corrs.shape)
X = X[:,~(np.arange(len(X.T))==idx1)]
X = StandardScaler().fit_transform(X)
print('Descriptor set %d: %d dimensions'%(idx, X.shape[1]))
return X
mordred = Calculator(descriptors)
sdf_filename = join('datasets', 'D4_mols_confident.sdf')
dragon_descs_filename = join('datasets', 'D4_mols_confident_descriptors.txt')
mols = [m for m in Chem.SDMolSupplier(sdf_filename, removeHs=False)]
y = np.array([mol.GetPropsAsDict()['pki'] for mol in mols])
print('\nCalculating descriptors...')
X = [np.array([np.array(d(mol)).astype(float) for mol in mols]) for d in [mordred]]
dragon_descs = pd.read_csv(dragon_descs_filename, sep='\t', na_values='na')
del dragon_descs['NAME']
del dragon_descs['No.']
X.append(dragon_descs.astype(float).values)
print('\nCleaning up variables...')
X = [clean_descriptors(x, i) for i, x in enumerate(X)]
quantiles = np.quantile(y, np.linspace(0,1,8))
y_classes = np.digitize(y, quantiles[1:], right=True)
# 5-fold cross-validation
y_preds, y_tests = [], []
skf = StratifiedKFold(n_splits=5)
for fold_id, (train_index, test_index) in enumerate(skf.split(X[0], y_classes)):
X_train, y_train = [x[train_index] for x in X], y[train_index]
X_test, y_test = [x[test_index] for x in X], y[test_index]
mudra_est = MUDRAEstimator('regressor', n_neighbors = 5, metric = 'euclidean')
mudra_est.fit(X_train, y_train)
y_pred, _, _ = mudra_est.predict(X_test)
y_preds.append(y_pred)
y_tests.append(y_test)
print('Q2 score: %.3f'%(Q2(np.concatenate(y_tests), np.concatenate(y_preds)))) | [
"numpy.sum",
"sklearn.preprocessing.StandardScaler",
"numpy.argmax",
"pandas.read_csv",
"numpy.corrcoef",
"numpy.triu_indices_from",
"numpy.isfinite",
"numpy.any",
"sklearn.model_selection.StratifiedKFold",
"mordred.Calculator",
"numpy.array",
"numpy.linspace",
"rdkit.Chem.SDMolSupplier",
... | [((971, 994), 'mordred.Calculator', 'Calculator', (['descriptors'], {}), '(descriptors)\n', (981, 994), False, 'from mordred import Calculator, descriptors\n'), ((1011, 1052), 'os.path.join', 'join', (['"""datasets"""', '"""D4_mols_confident.sdf"""'], {}), "('datasets', 'D4_mols_confident.sdf')\n", (1015, 1052), False, 'from os.path import join\n'), ((1077, 1130), 'os.path.join', 'join', (['"""datasets"""', '"""D4_mols_confident_descriptors.txt"""'], {}), "('datasets', 'D4_mols_confident_descriptors.txt')\n", (1081, 1130), False, 'from os.path import join\n'), ((1398, 1458), 'pandas.read_csv', 'pd.read_csv', (['dragon_descs_filename'], {'sep': '"""\t"""', 'na_values': '"""na"""'}), "(dragon_descs_filename, sep='\\t', na_values='na')\n", (1409, 1458), True, 'import pandas as pd\n'), ((1704, 1745), 'numpy.digitize', 'np.digitize', (['y', 'quantiles[1:]'], {'right': '(True)'}), '(y, quantiles[1:], right=True)\n', (1715, 1745), True, 'import numpy as np\n'), ((1805, 1832), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(5)'}), '(n_splits=5)\n', (1820, 1832), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((380, 405), 'numpy.sum', 'np.sum', (['((y_pred - y) ** 2)'], {}), '((y_pred - y) ** 2)\n', (386, 405), True, 'import numpy as np\n'), ((591, 604), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (599, 604), True, 'import numpy as np\n'), ((615, 635), 'numpy.any', 'np.any', (['(corrs > 0.95)'], {}), '(corrs > 0.95)\n', (621, 635), True, 'import numpy as np\n'), ((1672, 1692), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(8)'], {}), '(0, 1, 8)\n', (1683, 1692), True, 'import numpy as np\n'), ((2070, 2132), 'pymudra.mudra.MUDRAEstimator', 'MUDRAEstimator', (['"""regressor"""'], {'n_neighbors': '(5)', 'metric': '"""euclidean"""'}), "('regressor', n_neighbors=5, metric='euclidean')\n", (2084, 2132), False, 'from pymudra.mudra import MUDRAEstimator\n'), ((1150, 1198), 'rdkit.Chem.SDMolSupplier', 
'Chem.SDMolSupplier', (['sdf_filename'], {'removeHs': '(False)'}), '(sdf_filename, removeHs=False)\n', (1168, 1198), False, 'from rdkit import Chem\n'), ((651, 667), 'numpy.corrcoef', 'np.corrcoef', (['X.T'], {}), '(X.T)\n', (662, 667), True, 'import numpy as np\n'), ((685, 712), 'numpy.triu_indices_from', 'np.triu_indices_from', (['corrs'], {}), '(corrs)\n', (705, 712), True, 'import numpy as np\n'), ((754, 770), 'numpy.argmax', 'np.argmax', (['corrs'], {}), '(corrs)\n', (763, 770), True, 'import numpy as np\n'), ((844, 860), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (858, 860), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2308, 2331), 'numpy.concatenate', 'np.concatenate', (['y_tests'], {}), '(y_tests)\n', (2322, 2331), True, 'import numpy as np\n'), ((2333, 2356), 'numpy.concatenate', 'np.concatenate', (['y_preds'], {}), '(y_preds)\n', (2347, 2356), True, 'import numpy as np\n'), ((513, 527), 'numpy.isfinite', 'np.isfinite', (['X'], {}), '(X)\n', (524, 527), True, 'import numpy as np\n')] |
import unittest
# TODO: Write a test on gradient computation.
class TestGraphDatastructures(unittest.TestCase):
def test_construct_nodes_edges_simple_graph_np(self):
"""
Tests the construction of some basic datastructures useful for GraphNet computation
"""
n1 = Node(np.random.randn(10,10))
n2 = Node(np.random.randn(10,10))
e12 = Edge(np.random.randn(5,10),n1,n2)
g = Graph([n1,n2], [e12])
def test_node_operations(self):
r1 = np.random.randn(10,10)
r2 = np.random.randn(10,10)
n1 = Node(r1)
n2 = Node(r2)
n3 = n1 + n2
self.assertEqual(np.linalg.norm(n2.node_attr_tensor + n1.node_attr_tensor-n3.node_attr_tensor),0)
def test_node_copy(self):
"""
test that when copying the object the value is coppied but not the
reference
"""
n1 = Node(np.random.randn(10,10))
n2 = n1.copy()
self.assertTrue(n1 != n2)
self.assertTrue(np.linalg.norm((n1 - n2).node_attr_tensor)== 0.)
def test_graph_tuple_construction(self):
"""
Tests if I can properly set and then retrieve a graph tuple.
"""
batch_size = 1
node_input_size = 2
edge_input_size = 2
n1 = Node(np.random.randn(batch_size,node_input_size))
n2 = Node(np.random.randn(batch_size, node_input_size))
n3 = Node(np.random.randn(batch_size, node_input_size))
n4 = Node(np.random.randn(batch_size, node_input_size))
n5 = Node(np.random.randn(batch_size, node_input_size))
e12 = Edge(np.random.randn(batch_size, edge_input_size),node_from = n1,node_to = n2)
e21 = Edge(np.random.randn(batch_size, edge_input_size),node_from = n2,node_to = n1)
e23 = Edge(np.random.randn(batch_size, edge_input_size),node_from = n2,node_to = n3)
e34 = Edge(np.random.randn(batch_size, edge_input_size), node_from = n3, node_to = n4)
e45 = Edge(np.random.randn(batch_size, edge_input_size), node_from = n4, node_to = n5)
g1 = Graph([n1,n2],[e12])
g2 = Graph([n1,n2,n3,n4],[e12,e21,e23,e34])
g3 = Graph([n3, n4] , [e34])
from tf_gnns import GraphTuple, make_graph_tuple_from_graph_list # the current folder is the module.
old_graphs_list = [g1.copy(),g2.copy(),g3.copy()]
graph_tuple = make_graph_tuple_from_graph_list(old_graphs_list)
new_graphs_list = [graph_tuple.get_graph(k) for k in range(graph_tuple.n_graphs)]
self.assertTrue(np.all([(k.is_equal_by_value(m) and k.compare_connectivity(m) ) for k, m in zip(new_graphs_list, old_graphs_list)]))
class TestGraphNet(unittest.TestCase):
def test_construct_simple_eval_graphnet(self):
from tf_gnns import GraphNet, make_keras_simple_agg
edge_input_size = 15
node_input_size = 11
node_output_size, edge_output_size = node_input_size, edge_input_size
node_input = tf.keras.layers.Input(shape = (node_input_size,))
edge_input = tf.keras.layers.Input(shape = (edge_input_size,))
node_function = tf.keras.Model(outputs = tf.keras.layers.Dense(node_output_size)(node_input), inputs= node_input)
edge_function = tf.keras.Model(outputs = tf.keras.layers.Dense(edge_output_size)(edge_input), inputs= edge_input)
edge_aggregation_function = make_keras_simple_agg(edge_output_size,'mean')
graphnet = GraphNet(node_function = node_function, edge_function = edge_function, edge_aggregation_function = edge_aggregation_function)
batch_size = 10
n1 = Node(np.random.randn(batch_size,node_input_size))
n2 = Node(np.random.randn(batch_size, node_input_size))
e12 = Edge(np.random.randn(batch_size, edge_input_size),node_from = n1,node_to = n2)
g = Graph([n1,n2],[e12])
def test_mean_max_aggregator(self):
"""
Tests if the special compound aggregator which outputs a concatenation of mean and max works.
"""
from tf_gnns import GraphNet, _aggregation_function_factory
node_input_size = 5
edge_input_size = 5
edge_output_size = 5
message_shape = 2*edge_output_size #<- message size is larger because it is a concatenation of two aggregators.
batch_size = 6
# The naive implementation:
v1,v2,v3 = [np.ones([batch_size, node_input_size])*k for k in range(3)]
n1 , n2, n3 = [Node(v_) for v_ in [v1,v2,v3]]
e21 = Edge(np.ones([batch_size, node_input_size])*0., node_from = n2, node_to = n1)
e31 = Edge(np.ones([batch_size, node_input_size])*10, node_from = n3, node_to = n1)
#** The "None" is the actual batch dimension during computation with the Naive evaluators
# ("safe" and "batched"). Reduction happens w.r.t. 1st dimension which enumerates incoming edges.
edge_aggregation_function = _aggregation_function_factory((None, edge_output_size), agg_type = 'mean_max')
node_input = tf.keras.layers.Input(shape = (node_input_size,), name = "node_state")
agg_edge_state_input = tf.keras.layers.Input(shape = (message_shape,), name = "edge_state_agg")
edge_input = tf.keras.layers.Input(shape = (edge_input_size,), name = "edge_state")
node_function = tf.keras.Model(outputs = tf.identity(node_input),
inputs = [agg_edge_state_input, node_input],name = "node_function")
edge_function = tf.keras.Model(outputs = tf.identity(edge_input),
inputs = [edge_input])
gn = GraphNet(node_function = node_function,
edge_function = edge_function,
edge_aggregation_function = edge_aggregation_function)
g = Graph([n1,n2,n3],[e21, e31])
g_, m = gn.graph_eval(g, eval_mode= "safe", return_messages = True)
m_correct = np.hstack([np.ones([batch_size,edge_output_size])*5, np.ones([batch_size,edge_output_size])*10.])
self.assertTrue(np.all(m == m_correct))
def test_eval_modes(self):
"""
test the different evaluation modes.
There are 3 evaluation modes - one appropriate for batched graphs, and two for graphs of the same shape ("batched" or unbached ("safe")).
The "safe" mode is used as reference for the correct results; All modes should give the same output within an error margin (due to finite precission
rounding errors and the different comp. graphs.)
"""
from tf_gnns import GraphNet, make_mlp_graphnet_functions
import code
batch_size = 12
tf.keras.backend.set_floatx("float64")
node_input_size = 10
edge_input_size = node_input_size
n1 = Node(np.random.randn(batch_size,node_input_size))
n2 = Node(np.random.randn(batch_size, node_input_size))
n3 = Node(np.random.randn(batch_size, node_input_size))
node_abs_vals = [np.abs(n.node_attr_tensor) for n in [n1,n2,n3]]
e12 = Edge(np.random.randn(batch_size, edge_input_size),node_from = n1,node_to = n2)
e21 = Edge(np.random.randn(batch_size, edge_input_size),node_from = n2,node_to = n1)
e23 = Edge(np.random.randn(batch_size, edge_input_size),node_from = n2,node_to = n3)
edge_abs_vals = [np.abs(e.edge_tensor) for e in [e12,e21,e23]]
g1 = Graph([n1,n2,n3],[e12,e21,e23])
node_output_size = 17
## The non-graph independent version:
gi = False
graph_fcn = make_mlp_graphnet_functions(150,
node_input_size = node_input_size,
node_output_size = node_output_size,
graph_indep=False)
gn = GraphNet(**graph_fcn )
res1 = gn.graph_eval(g1.copy(),eval_mode = "safe")
res2 = gn.graph_eval(g1.copy(), eval_mode = "batched")
error_nodes = np.max([np.linalg.norm(n1_.node_attr_tensor - n2_.node_attr_tensor) for n1_, n2_ in zip(res1.nodes, res2.nodes)])/np.min(node_abs_vals)
error_edges = np.max([np.linalg.norm(e1_.edge_tensor - e2_.edge_tensor) for e1_,e2_ in zip(res1.edges, res2.edges)])/np.min(edge_abs_vals)
#print(error_nodes, error_edges)
self.assertTrue(error_nodes < 1e-10)
self.assertTrue(error_edges < 1e-10)
## The graph-independent version:
gi = True
graph_fcn = make_mlp_graphnet_functions(150,
node_input_size = node_input_size,
node_output_size = node_input_size,
graph_indep=gi, use_edge_state_agg_input = False)
graph_fcn.update({"graph_independent" : gi})
gn = GraphNet(**graph_fcn )
res1 = gn.graph_eval(g1.copy(),eval_mode = "safe")
res2 = gn.graph_eval(g1.copy(), eval_mode = "batched")
error_nodes = np.max([np.linalg.norm(n1.node_attr_tensor - n2.node_attr_tensor) for n1, n2 in zip(res1.nodes, res2.nodes)])/np.min(node_abs_vals)
error_edges = np.max([np.linalg.norm(e1.edge_tensor - e2.edge_tensor) for e1,e2 in zip(res1.edges, res2.edges)])/np.min(edge_abs_vals)
self.assertTrue(error_nodes < 1e-10)
self.assertTrue(error_edges < 1e-10)
def test_save_load(self):
# TODO: this needs to be updated for the global blocks (there are 3 more functions to be treated).
# Write another test and keep this one, in order to keep backwards compatibility.
from tf_gnns import make_mlp_graphnet_functions, GraphNet
graph_fcn = make_mlp_graphnet_functions(150, node_input_size = 10, node_output_size = 10, graph_indep=False)
gn = GraphNet(**graph_fcn)
gn.save("/tmp/test_gn")
gn_loaded = GraphNet.make_from_path("/tmp/test_gn")
self.assertTrue(np.all([np.sum(np.abs(w1 - w2))<1e-10 for w1,w2 in zip(gn.weights,gn_loaded.weights)]))
def test_graph_tuple_eval(self):
"""
The graph tuples are graphs of different sizes batched to a single object,
to allow for more single-instruction multiple-data computation (batched computation).
This is the only evalution mode DeepMind's graphnets implement directly.
This mode is much more computationally efficient.
This mode allows computation with unsorted segment sum aggregators.
"""
import code
## Constructing a graph tuple:
batch_size = 1
node_input_size = 10
edge_input_size = 10
n1 = Node(np.random.randn(batch_size,node_input_size))
n2 = Node(np.random.randn(batch_size, node_input_size))
n3 = Node(np.random.randn(batch_size, node_input_size))
n4 = Node(np.random.randn(batch_size, node_input_size))
n5 = Node(np.random.randn(batch_size, node_input_size))
e12 = Edge(np.random.randn(batch_size, edge_input_size),node_from = n1,node_to = n2)
e21 = Edge(np.random.randn(batch_size, edge_input_size),node_from = n2,node_to = n1)
e23 = Edge(np.random.randn(batch_size, edge_input_size),node_from = n2,node_to = n3)
e34 = Edge(np.random.randn(batch_size, edge_input_size), node_from = n3, node_to = n4)
e45 = Edge(np.random.randn(batch_size, edge_input_size), node_from = n4, node_to = n5)
g1 = Graph([n1,n2],[e12]).copy()
g2 = Graph([n1,n2,n3,n4],[e12,e21,e23,e34]).copy()
g3 = Graph([n3, n4] , [e34]).copy()
from tf_gnns import GraphTuple, make_graph_tuple_from_graph_list ,GraphNet , make_mlp_graphnet_functions# the current folder is the module.
old_graphs_list = [g1.copy(),g2.copy(),g3.copy()]
graph_tuple = make_graph_tuple_from_graph_list(old_graphs_list)
#new_graphs_list = [graph_tuple.get_graph(k) for k in range(graph_tuple.n_graphs)]
#self.assertTrue(np.all([(k.is_equal_by_value(m) and k.compare_connectivity(m) ) for k, m in zip(new_graphs_list, old_graphs_list)]))
graph_fcn = make_mlp_graphnet_functions(150, node_input_size = 10, node_output_size = 10, graph_indep=False, aggregation_function = "mean")
gn = GraphNet(**graph_fcn)
gt_copy = graph_tuple.copy()
gn.graph_tuple_eval(gt_copy)
graphs_evaluated_separately = [gn.graph_eval(g_) for g_ in old_graphs_list]
graphs_evaluated_from_graph_tuple = [gt_copy.get_graph(i) for i in range(gt_copy.n_graphs)]
flatten_nodes = lambda x : tf.stack([x_.get_state() for x_ in x.nodes])
flatten_edges = lambda x : tf.stack([x_.edge_tensor for x_ in x.edges])
for g1,g2 in zip(graphs_evaluated_from_graph_tuple, graphs_evaluated_separately):
self.assertTrue(tf.norm(flatten_nodes(g1)- flatten_nodes(g2))<1e-10)
self.assertTrue(tf.norm(flatten_edges(g1) - flatten_edges(g2)) < 1e-10)
def test_graph_tuple_eval_with_global(self):
"""
Test if the evaluation of graph tuples with global variables works.
"""
## Constructing a graph tuple:
batch_size = 1
node_input_size = 10
edge_input_size = 10
global_attr_size = 5
n1 = Node(np.random.randn(batch_size,node_input_size))
n2 = Node(np.random.randn(batch_size, node_input_size))
n3 = Node(np.random.randn(batch_size, node_input_size))
n4 = Node(np.random.randn(batch_size, node_input_size))
n5 = Node(np.random.randn(batch_size, node_input_size))
e12 = Edge(np.random.randn(batch_size, edge_input_size),node_from = n1,node_to = n2)
e21 = Edge(np.random.randn(batch_size, edge_input_size),node_from = n2,node_to = n1)
e23 = Edge(np.random.randn(batch_size, edge_input_size),node_from = n2,node_to = n3)
e34 = Edge(np.random.randn(batch_size, edge_input_size), node_from = n3, node_to = n4)
e45 = Edge(np.random.randn(batch_size, edge_input_size), node_from = n4, node_to = n5)
g1 = Graph([n1,n2],[e12]).copy()
g2 = Graph([n1,n2,n3,n4],[e12,e21,e23,e34]).copy()
g3 = Graph([n3, n4] , [e34]).copy()
from tf_gnns import GraphTuple, make_graph_tuple_from_graph_list ,GraphNet , make_mlp_graphnet_functions# the current folder is the module.
old_graphs_list = [g1.copy(),g2.copy(),g3.copy()]
graph_tuple = make_graph_tuple_from_graph_list(old_graphs_list)
global_vars = tf.Variable(np.random.randn(graph_tuple.n_graphs,global_attr_size))
global_out = 10
graph_fcn = make_mlp_graphnet_functions(150,
node_input_size = 10,
node_output_size = 10,
graph_indep=False,
use_global_to_edge = True,
use_global_to_node = True,
use_global_input = True,
global_input_size = global_attr_size,
global_output_size = 10,
create_global_function = True)
gn = GraphNet(**graph_fcn)
gt_copy = graph_tuple.copy()
## This is how a global is assigned. The "update.." creates some flat vectors useful
# for the segment sums and reshaping of the tensors (when/in they are used in the
# node and edge computations)
gt_copy.assign_global(global_vars)
gt_copy.update_reps_for_globals()
out = gn.graph_tuple_eval(gt_copy )#, global_vars)
def test_computation_graph_to_global(self):
"""
Tests the construction of a simple GraphTuple without a global attribute
and its computation with a full GN (without a global input)
"""
import tensorflow as tf
import numpy as np
from tf_gnns import GraphTuple
from tf_gnns.graphnet_utils import _aggregation_function_factory
from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list
## Testing GN that contains globals:
### Create an encode-core-decode network:
## Create a GraphTuple to compute with:
node_state_size=4;
edge_state_size = 10;
ngraphs = 16;
graphs = [];
for gr in range(ngraphs):
n1 = Node(np.random.randn(1, node_state_size))
n2 = Node(np.random.randn(1, node_state_size))
n3 = Node(np.random.randn(1, node_state_size))
e12 = Edge(np.random.randn(1, edge_state_size) , node_from=n1,node_to=n2)
e13 = Edge(np.random.randn(1, edge_state_size) , node_from=n1,node_to=n2)
e23 = Edge(np.random.randn(1, edge_state_size) , node_from=n1,node_to=n2)
graphs.append(Graph([n1,n2,n3],[e12,e13,e23]))
gt = make_graph_tuple_from_graph_list(graphs )
units = 45
gi_node_input_size = node_state_size
gi_edge_input_size = edge_state_size
gn_core_size = 15
## Creation of a graph-to-global network (without global in the input side:)
gn_input = make_mlp_graphnet_functions(45,
gi_node_input_size,
gn_core_size,
edge_input_size=gi_edge_input_size,
edge_output_size=gn_core_size,
create_global_function = True,
global_input_size=None,
use_global_input= False,
global_output_size = gn_core_size,
graph_indep = False)
gn_constr_input_named_params = ['edge_function',
'global_function',
'node_function',
'edge_aggregation_function',
'node_to_global_aggregation_function',
'graph_independent','use_global_input']
correct_keys = np.all([k in gn_constr_input_named_params for k in gn_input.keys()])
self.assertTrue(correct_keys)
gn_gi = GraphNet(**gn_input)
gt_out = gn_gi.graph_tuple_eval(gt.copy())
self.assertTrue(gt_out.global_attr.shape == (ngraphs,gn_core_size))
class TestTraced_eval(unittest.TestCase):
def test_correct_results_traced(self):
import tensorflow as tf
import numpy as np
from tf_gnns import GraphTuple
from tf_gnns.graphnet_utils import _aggregation_function_factory, make_full_graphnet_functions
from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list
import code
## Create a GraphTuple to compute with:
node_state_size=4;
edge_state_size = 10;
ngraphs = 16;
graphs = [];
for gr in range(ngraphs):
n1 = Node(np.random.randn(1, node_state_size))
n2 = Node(np.random.randn(1, node_state_size))
n3 = Node(np.random.randn(1, node_state_size))
e12 = Edge(np.random.randn(1, edge_state_size) , node_from=n1,node_to=n2)
e13 = Edge(np.random.randn(1, edge_state_size) , node_from=n1,node_to=n3)
e23 = Edge(np.random.randn(1, edge_state_size) , node_from=n2,node_to=n3)
graphs.append(Graph([n1,n2,n3],[e12,e13,e23]))
gt = make_graph_tuple_from_graph_list(graphs)
units = 45
gi_node_input_size = node_state_size
gi_edge_input_size = edge_state_size
gn_core_size = 15
## Creation of a graph-to-global network (without global in the input side:)
gn_input_args = make_mlp_graphnet_functions(45,
gi_node_input_size,
gn_core_size,
edge_input_size=gi_edge_input_size,
edge_output_size=gn_core_size,
create_global_function = True,
global_input_size=None,
use_global_input= False,
global_output_size = gn_core_size,
graph_indep = False)
gn_core_args = make_full_graphnet_functions(units, gn_core_size)
gn_constr_input_named_params = ['edge_function',
'global_function',
'node_function',
'edge_aggregation_function',
'node_to_global_aggregation_function',
'graph_independent','use_global_input']
correct_keys = np.all([k in gn_constr_input_named_params for k in gn_input_args.keys()])
self.assertTrue(correct_keys)
gn_gi = GraphNet(**gn_input_args)
gn_core = GraphNet(**gn_core_args)
gt_out_1 = gn_gi.graph_tuple_eval(gt.copy())
gt_out = gn_core.graph_tuple_eval(gt_out_1)
tensor_dict_out = gn_gi.eval_tensor_dict(gt.copy().to_tensor_dict())
tensor_dict_out = gn_core.eval_tensor_dict(tensor_dict_out)
edges_err = np.linalg.norm(gt_out.edges - tensor_dict_out['edges'])
nodes_err = np.linalg.norm(gt_out.nodes - tensor_dict_out['nodes'])
glob_err = np.linalg.norm(gt_out.global_attr - tensor_dict_out['global_attr'])
self.assertTrue(edges_err < 1e-10)
self.assertTrue(nodes_err < 1e-10)
self.assertTrue(glob_err < 1e-10)
if __name__ == "__main__":
from tf_gnns import Node, Edge, Graph
import tensorflow as tf
import numpy as np
unittest.main(verbosity = 2)
| [
"tf_gnns.Node",
"tf_gnns.make_keras_simple_agg",
"numpy.abs",
"tensorflow.keras.layers.Dense",
"tensorflow.identity",
"numpy.ones",
"tf_gnns.make_graph_tuple_from_graph_list",
"tf_gnns.graphnet_utils.make_full_graphnet_functions",
"numpy.linalg.norm",
"unittest.main",
"tf_gnns.GraphNet",
"nump... | [((21438, 21464), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (21451, 21464), False, 'import unittest\n'), ((434, 456), 'tf_gnns.Graph', 'Graph', (['[n1, n2]', '[e12]'], {}), '([n1, n2], [e12])\n', (439, 456), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((516, 539), 'numpy.random.randn', 'np.random.randn', (['(10)', '(10)'], {}), '(10, 10)\n', (531, 539), True, 'import numpy as np\n'), ((552, 575), 'numpy.random.randn', 'np.random.randn', (['(10)', '(10)'], {}), '(10, 10)\n', (567, 575), True, 'import numpy as np\n'), ((588, 596), 'tf_gnns.Node', 'Node', (['r1'], {}), '(r1)\n', (592, 596), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((610, 618), 'tf_gnns.Node', 'Node', (['r2'], {}), '(r2)\n', (614, 618), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((2089, 2111), 'tf_gnns.Graph', 'Graph', (['[n1, n2]', '[e12]'], {}), '([n1, n2], [e12])\n', (2094, 2111), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((2123, 2168), 'tf_gnns.Graph', 'Graph', (['[n1, n2, n3, n4]', '[e12, e21, e23, e34]'], {}), '([n1, n2, n3, n4], [e12, e21, e23, e34])\n', (2128, 2168), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((2175, 2197), 'tf_gnns.Graph', 'Graph', (['[n3, n4]', '[e34]'], {}), '([n3, n4], [e34])\n', (2180, 2197), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((2389, 2438), 'tf_gnns.make_graph_tuple_from_graph_list', 'make_graph_tuple_from_graph_list', (['old_graphs_list'], {}), 
'(old_graphs_list)\n', (2421, 2438), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((2978, 3025), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(node_input_size,)'}), '(shape=(node_input_size,))\n', (2999, 3025), True, 'import tensorflow as tf\n'), ((3049, 3096), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(edge_input_size,)'}), '(shape=(edge_input_size,))\n', (3070, 3096), True, 'import tensorflow as tf\n'), ((3380, 3427), 'tf_gnns.make_keras_simple_agg', 'make_keras_simple_agg', (['edge_output_size', '"""mean"""'], {}), "(edge_output_size, 'mean')\n", (3401, 3427), False, 'from tf_gnns import GraphNet, make_keras_simple_agg\n'), ((3446, 3569), 'tf_gnns.GraphNet', 'GraphNet', ([], {'node_function': 'node_function', 'edge_function': 'edge_function', 'edge_aggregation_function': 'edge_aggregation_function'}), '(node_function=node_function, edge_function=edge_function,\n edge_aggregation_function=edge_aggregation_function)\n', (3454, 3569), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((3828, 3850), 'tf_gnns.Graph', 'Graph', (['[n1, n2]', '[e12]'], {}), '([n1, n2], [e12])\n', (3833, 3850), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((4909, 4985), 'tf_gnns.graphnet_utils._aggregation_function_factory', '_aggregation_function_factory', (['(None, edge_output_size)'], {'agg_type': '"""mean_max"""'}), "((None, edge_output_size), agg_type='mean_max')\n", (4938, 4985), False, 'from tf_gnns.graphnet_utils import _aggregation_function_factory, make_full_graphnet_functions\n'), ((5010, 5076), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(node_input_size,)', 'name': '"""node_state"""'}), "(shape=(node_input_size,), 
name='node_state')\n", (5031, 5076), True, 'import tensorflow as tf\n'), ((5112, 5180), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(message_shape,)', 'name': '"""edge_state_agg"""'}), "(shape=(message_shape,), name='edge_state_agg')\n", (5133, 5180), True, 'import tensorflow as tf\n'), ((5206, 5272), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(edge_input_size,)', 'name': '"""edge_state"""'}), "(shape=(edge_input_size,), name='edge_state')\n", (5227, 5272), True, 'import tensorflow as tf\n'), ((5564, 5687), 'tf_gnns.GraphNet', 'GraphNet', ([], {'node_function': 'node_function', 'edge_function': 'edge_function', 'edge_aggregation_function': 'edge_aggregation_function'}), '(node_function=node_function, edge_function=edge_function,\n edge_aggregation_function=edge_aggregation_function)\n', (5572, 5687), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((5736, 5767), 'tf_gnns.Graph', 'Graph', (['[n1, n2, n3]', '[e21, e31]'], {}), '([n1, n2, n3], [e21, e31])\n', (5741, 5767), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((6590, 6628), 'tensorflow.keras.backend.set_floatx', 'tf.keras.backend.set_floatx', (['"""float64"""'], {}), "('float64')\n", (6617, 6628), True, 'import tensorflow as tf\n'), ((7330, 7366), 'tf_gnns.Graph', 'Graph', (['[n1, n2, n3]', '[e12, e21, e23]'], {}), '([n1, n2, n3], [e12, e21, e23])\n', (7335, 7366), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((7479, 7602), 'tf_gnns.make_mlp_graphnet_functions', 'make_mlp_graphnet_functions', (['(150)'], {'node_input_size': 'node_input_size', 'node_output_size': 'node_output_size', 'graph_indep': '(False)'}), '(150, node_input_size=node_input_size,\n node_output_size=node_output_size, 
graph_indep=False)\n', (7506, 7602), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((7752, 7773), 'tf_gnns.GraphNet', 'GraphNet', ([], {}), '(**graph_fcn)\n', (7760, 7773), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((8415, 8570), 'tf_gnns.make_mlp_graphnet_functions', 'make_mlp_graphnet_functions', (['(150)'], {'node_input_size': 'node_input_size', 'node_output_size': 'node_input_size', 'graph_indep': 'gi', 'use_edge_state_agg_input': '(False)'}), '(150, node_input_size=node_input_size,\n node_output_size=node_input_size, graph_indep=gi,\n use_edge_state_agg_input=False)\n', (8442, 8570), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((8685, 8706), 'tf_gnns.GraphNet', 'GraphNet', ([], {}), '(**graph_fcn)\n', (8693, 8706), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((9538, 9634), 'tf_gnns.make_mlp_graphnet_functions', 'make_mlp_graphnet_functions', (['(150)'], {'node_input_size': '(10)', 'node_output_size': '(10)', 'graph_indep': '(False)'}), '(150, node_input_size=10, node_output_size=10,\n graph_indep=False)\n', (9565, 9634), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((9648, 9669), 'tf_gnns.GraphNet', 'GraphNet', ([], {}), '(**graph_fcn)\n', (9656, 9669), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((9722, 9761), 'tf_gnns.GraphNet.make_from_path', 'GraphNet.make_from_path', (['"""/tmp/test_gn"""'], {}), "('/tmp/test_gn')\n", (9745, 9761), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, 
Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((11635, 11684), 'tf_gnns.make_graph_tuple_from_graph_list', 'make_graph_tuple_from_graph_list', (['old_graphs_list'], {}), '(old_graphs_list)\n', (11667, 11684), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((11939, 12064), 'tf_gnns.make_mlp_graphnet_functions', 'make_mlp_graphnet_functions', (['(150)'], {'node_input_size': '(10)', 'node_output_size': '(10)', 'graph_indep': '(False)', 'aggregation_function': '"""mean"""'}), "(150, node_input_size=10, node_output_size=10,\n graph_indep=False, aggregation_function='mean')\n", (11966, 12064), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((12080, 12101), 'tf_gnns.GraphNet', 'GraphNet', ([], {}), '(**graph_fcn)\n', (12088, 12101), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((14241, 14290), 'tf_gnns.make_graph_tuple_from_graph_list', 'make_graph_tuple_from_graph_list', (['old_graphs_list'], {}), '(old_graphs_list)\n', (14273, 14290), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((14426, 14691), 'tf_gnns.make_mlp_graphnet_functions', 'make_mlp_graphnet_functions', (['(150)'], {'node_input_size': '(10)', 'node_output_size': '(10)', 'graph_indep': '(False)', 'use_global_to_edge': '(True)', 'use_global_to_node': '(True)', 'use_global_input': '(True)', 'global_input_size': 'global_attr_size', 'global_output_size': '(10)', 'create_global_function': '(True)'}), '(150, node_input_size=10, node_output_size=10,\n graph_indep=False, use_global_to_edge=True, use_global_to_node=True,\n use_global_input=True, global_input_size=global_attr_size,\n global_output_size=10, create_global_function=True)\n', (14453, 14691), 
False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((14856, 14877), 'tf_gnns.GraphNet', 'GraphNet', ([], {}), '(**graph_fcn)\n', (14864, 14877), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((16601, 16641), 'tf_gnns.make_graph_tuple_from_graph_list', 'make_graph_tuple_from_graph_list', (['graphs'], {}), '(graphs)\n', (16633, 16641), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((16894, 17168), 'tf_gnns.make_mlp_graphnet_functions', 'make_mlp_graphnet_functions', (['(45)', 'gi_node_input_size', 'gn_core_size'], {'edge_input_size': 'gi_edge_input_size', 'edge_output_size': 'gn_core_size', 'create_global_function': '(True)', 'global_input_size': 'None', 'use_global_input': '(False)', 'global_output_size': 'gn_core_size', 'graph_indep': '(False)'}), '(45, gi_node_input_size, gn_core_size,\n edge_input_size=gi_edge_input_size, edge_output_size=gn_core_size,\n create_global_function=True, global_input_size=None, use_global_input=\n False, global_output_size=gn_core_size, graph_indep=False)\n', (16921, 17168), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((17928, 17948), 'tf_gnns.GraphNet', 'GraphNet', ([], {}), '(**gn_input)\n', (17936, 17948), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((19209, 19249), 'tf_gnns.make_graph_tuple_from_graph_list', 'make_graph_tuple_from_graph_list', (['graphs'], {}), '(graphs)\n', (19241, 19249), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((19503, 19777), 'tf_gnns.make_mlp_graphnet_functions', 
'make_mlp_graphnet_functions', (['(45)', 'gi_node_input_size', 'gn_core_size'], {'edge_input_size': 'gi_edge_input_size', 'edge_output_size': 'gn_core_size', 'create_global_function': '(True)', 'global_input_size': 'None', 'use_global_input': '(False)', 'global_output_size': 'gn_core_size', 'graph_indep': '(False)'}), '(45, gi_node_input_size, gn_core_size,\n edge_input_size=gi_edge_input_size, edge_output_size=gn_core_size,\n create_global_function=True, global_input_size=None, use_global_input=\n False, global_output_size=gn_core_size, graph_indep=False)\n', (19530, 19777), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((20120, 20169), 'tf_gnns.graphnet_utils.make_full_graphnet_functions', 'make_full_graphnet_functions', (['units', 'gn_core_size'], {}), '(units, gn_core_size)\n', (20148, 20169), False, 'from tf_gnns.graphnet_utils import _aggregation_function_factory, make_full_graphnet_functions\n'), ((20617, 20642), 'tf_gnns.GraphNet', 'GraphNet', ([], {}), '(**gn_input_args)\n', (20625, 20642), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((20661, 20685), 'tf_gnns.GraphNet', 'GraphNet', ([], {}), '(**gn_core_args)\n', (20669, 20685), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((20958, 21013), 'numpy.linalg.norm', 'np.linalg.norm', (["(gt_out.edges - tensor_dict_out['edges'])"], {}), "(gt_out.edges - tensor_dict_out['edges'])\n", (20972, 21013), True, 'import numpy as np\n'), ((21035, 21090), 'numpy.linalg.norm', 'np.linalg.norm', (["(gt_out.nodes - tensor_dict_out['nodes'])"], {}), "(gt_out.nodes - tensor_dict_out['nodes'])\n", (21049, 21090), True, 'import numpy as np\n'), ((21112, 21179), 'numpy.linalg.norm', 'np.linalg.norm', (["(gt_out.global_attr - tensor_dict_out['global_attr'])"], 
{}), "(gt_out.global_attr - tensor_dict_out['global_attr'])\n", (21126, 21179), True, 'import numpy as np\n'), ((308, 331), 'numpy.random.randn', 'np.random.randn', (['(10)', '(10)'], {}), '(10, 10)\n', (323, 331), True, 'import numpy as np\n'), ((350, 373), 'numpy.random.randn', 'np.random.randn', (['(10)', '(10)'], {}), '(10, 10)\n', (365, 373), True, 'import numpy as np\n'), ((393, 415), 'numpy.random.randn', 'np.random.randn', (['(5)', '(10)'], {}), '(5, 10)\n', (408, 415), True, 'import numpy as np\n'), ((666, 745), 'numpy.linalg.norm', 'np.linalg.norm', (['(n2.node_attr_tensor + n1.node_attr_tensor - n3.node_attr_tensor)'], {}), '(n2.node_attr_tensor + n1.node_attr_tensor - n3.node_attr_tensor)\n', (680, 745), True, 'import numpy as np\n'), ((914, 937), 'numpy.random.randn', 'np.random.randn', (['(10)', '(10)'], {}), '(10, 10)\n', (929, 937), True, 'import numpy as np\n'), ((1304, 1348), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (1319, 1348), True, 'import numpy as np\n'), ((1367, 1411), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (1382, 1411), True, 'import numpy as np\n'), ((1431, 1475), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (1446, 1475), True, 'import numpy as np\n'), ((1495, 1539), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (1510, 1539), True, 'import numpy as np\n'), ((1559, 1603), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (1574, 1603), True, 'import numpy as np\n'), ((1625, 1669), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (1640, 1669), True, 'import numpy as np\n'), ((1718, 1762), 'numpy.random.randn', 
'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (1733, 1762), True, 'import numpy as np\n'), ((1811, 1855), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (1826, 1855), True, 'import numpy as np\n'), ((1904, 1948), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (1919, 1948), True, 'import numpy as np\n'), ((1999, 2043), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (2014, 2043), True, 'import numpy as np\n'), ((3614, 3658), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (3629, 3658), True, 'import numpy as np\n'), ((3677, 3721), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (3692, 3721), True, 'import numpy as np\n'), ((3742, 3786), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (3757, 3786), True, 'import numpy as np\n'), ((4452, 4460), 'tf_gnns.Node', 'Node', (['v_'], {}), '(v_)\n', (4456, 4460), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((5984, 6006), 'numpy.all', 'np.all', (['(m == m_correct)'], {}), '(m == m_correct)\n', (5990, 6006), True, 'import numpy as np\n'), ((6719, 6763), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (6734, 6763), True, 'import numpy as np\n'), ((6782, 6826), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (6797, 6826), True, 'import numpy as np\n'), ((6846, 6890), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'node_input_size'], {}), 
'(batch_size, node_input_size)\n', (6861, 6890), True, 'import numpy as np\n'), ((6917, 6943), 'numpy.abs', 'np.abs', (['n.node_attr_tensor'], {}), '(n.node_attr_tensor)\n', (6923, 6943), True, 'import numpy as np\n'), ((6985, 7029), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (7000, 7029), True, 'import numpy as np\n'), ((7078, 7122), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (7093, 7122), True, 'import numpy as np\n'), ((7171, 7215), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (7186, 7215), True, 'import numpy as np\n'), ((7270, 7291), 'numpy.abs', 'np.abs', (['e.edge_tensor'], {}), '(e.edge_tensor)\n', (7276, 7291), True, 'import numpy as np\n'), ((8034, 8055), 'numpy.min', 'np.min', (['node_abs_vals'], {}), '(node_abs_vals)\n', (8040, 8055), True, 'import numpy as np\n'), ((8181, 8202), 'numpy.min', 'np.min', (['edge_abs_vals'], {}), '(edge_abs_vals)\n', (8187, 8202), True, 'import numpy as np\n'), ((8962, 8983), 'numpy.min', 'np.min', (['node_abs_vals'], {}), '(node_abs_vals)\n', (8968, 8983), True, 'import numpy as np\n'), ((9105, 9126), 'numpy.min', 'np.min', (['edge_abs_vals'], {}), '(edge_abs_vals)\n', (9111, 9126), True, 'import numpy as np\n'), ((10490, 10534), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (10505, 10534), True, 'import numpy as np\n'), ((10553, 10597), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (10568, 10597), True, 'import numpy as np\n'), ((10617, 10661), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (10632, 10661), True, 'import numpy as np\n'), ((10681, 10725), 'numpy.random.randn', 'np.random.randn', 
(['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (10696, 10725), True, 'import numpy as np\n'), ((10745, 10789), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (10760, 10789), True, 'import numpy as np\n'), ((10811, 10855), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (10826, 10855), True, 'import numpy as np\n'), ((10904, 10948), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (10919, 10948), True, 'import numpy as np\n'), ((10997, 11041), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (11012, 11041), True, 'import numpy as np\n'), ((11090, 11134), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (11105, 11134), True, 'import numpy as np\n'), ((11185, 11229), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (11200, 11229), True, 'import numpy as np\n'), ((12476, 12520), 'tensorflow.stack', 'tf.stack', (['[x_.edge_tensor for x_ in x.edges]'], {}), '([x_.edge_tensor for x_ in x.edges])\n', (12484, 12520), True, 'import tensorflow as tf\n'), ((13096, 13140), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (13111, 13140), True, 'import numpy as np\n'), ((13159, 13203), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (13174, 13203), True, 'import numpy as np\n'), ((13223, 13267), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (13238, 13267), True, 'import numpy as np\n'), ((13287, 13331), 'numpy.random.randn', 'np.random.randn', 
(['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (13302, 13331), True, 'import numpy as np\n'), ((13351, 13395), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'node_input_size'], {}), '(batch_size, node_input_size)\n', (13366, 13395), True, 'import numpy as np\n'), ((13417, 13461), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (13432, 13461), True, 'import numpy as np\n'), ((13510, 13554), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (13525, 13554), True, 'import numpy as np\n'), ((13603, 13647), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (13618, 13647), True, 'import numpy as np\n'), ((13696, 13740), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (13711, 13740), True, 'import numpy as np\n'), ((13791, 13835), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'edge_input_size'], {}), '(batch_size, edge_input_size)\n', (13806, 13835), True, 'import numpy as np\n'), ((14325, 14380), 'numpy.random.randn', 'np.random.randn', (['graph_tuple.n_graphs', 'global_attr_size'], {}), '(graph_tuple.n_graphs, global_attr_size)\n', (14340, 14380), True, 'import numpy as np\n'), ((1019, 1061), 'numpy.linalg.norm', 'np.linalg.norm', (['(n1 - n2).node_attr_tensor'], {}), '((n1 - n2).node_attr_tensor)\n', (1033, 1061), True, 'import numpy as np\n'), ((4369, 4407), 'numpy.ones', 'np.ones', (['[batch_size, node_input_size]'], {}), '([batch_size, node_input_size])\n', (4376, 4407), True, 'import numpy as np\n'), ((4502, 4540), 'numpy.ones', 'np.ones', (['[batch_size, node_input_size]'], {}), '([batch_size, node_input_size])\n', (4509, 4540), True, 'import numpy as np\n'), ((4594, 4632), 'numpy.ones', 'np.ones', (['[batch_size, node_input_size]'], {}), 
'([batch_size, node_input_size])\n', (4601, 4632), True, 'import numpy as np\n'), ((5327, 5350), 'tensorflow.identity', 'tf.identity', (['node_input'], {}), '(node_input)\n', (5338, 5350), True, 'import tensorflow as tf\n'), ((5486, 5509), 'tensorflow.identity', 'tf.identity', (['edge_input'], {}), '(edge_input)\n', (5497, 5509), True, 'import tensorflow as tf\n'), ((11275, 11297), 'tf_gnns.Graph', 'Graph', (['[n1, n2]', '[e12]'], {}), '([n1, n2], [e12])\n', (11280, 11297), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((11316, 11361), 'tf_gnns.Graph', 'Graph', (['[n1, n2, n3, n4]', '[e12, e21, e23, e34]'], {}), '([n1, n2, n3, n4], [e12, e21, e23, e34])\n', (11321, 11361), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((11375, 11397), 'tf_gnns.Graph', 'Graph', (['[n3, n4]', '[e34]'], {}), '([n3, n4], [e34])\n', (11380, 11397), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((13881, 13903), 'tf_gnns.Graph', 'Graph', (['[n1, n2]', '[e12]'], {}), '([n1, n2], [e12])\n', (13886, 13903), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((13922, 13967), 'tf_gnns.Graph', 'Graph', (['[n1, n2, n3, n4]', '[e12, e21, e23, e34]'], {}), '([n1, n2, n3, n4], [e12, e21, e23, e34])\n', (13927, 13967), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((13981, 14003), 'tf_gnns.Graph', 'Graph', (['[n3, n4]', '[e34]'], {}), '([n3, n4], [e34])\n', (13986, 14003), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((16115, 16150), 'numpy.random.randn', 'np.random.randn', 
(['(1)', 'node_state_size'], {}), '(1, node_state_size)\n', (16130, 16150), True, 'import numpy as np\n'), ((16174, 16209), 'numpy.random.randn', 'np.random.randn', (['(1)', 'node_state_size'], {}), '(1, node_state_size)\n', (16189, 16209), True, 'import numpy as np\n'), ((16233, 16268), 'numpy.random.randn', 'np.random.randn', (['(1)', 'node_state_size'], {}), '(1, node_state_size)\n', (16248, 16268), True, 'import numpy as np\n'), ((16293, 16328), 'numpy.random.randn', 'np.random.randn', (['(1)', 'edge_state_size'], {}), '(1, edge_state_size)\n', (16308, 16328), True, 'import numpy as np\n'), ((16379, 16414), 'numpy.random.randn', 'np.random.randn', (['(1)', 'edge_state_size'], {}), '(1, edge_state_size)\n', (16394, 16414), True, 'import numpy as np\n'), ((16465, 16500), 'numpy.random.randn', 'np.random.randn', (['(1)', 'edge_state_size'], {}), '(1, edge_state_size)\n', (16480, 16500), True, 'import numpy as np\n'), ((16554, 16590), 'tf_gnns.Graph', 'Graph', (['[n1, n2, n3]', '[e12, e13, e23]'], {}), '([n1, n2, n3], [e12, e13, e23])\n', (16559, 16590), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((18723, 18758), 'numpy.random.randn', 'np.random.randn', (['(1)', 'node_state_size'], {}), '(1, node_state_size)\n', (18738, 18758), True, 'import numpy as np\n'), ((18782, 18817), 'numpy.random.randn', 'np.random.randn', (['(1)', 'node_state_size'], {}), '(1, node_state_size)\n', (18797, 18817), True, 'import numpy as np\n'), ((18841, 18876), 'numpy.random.randn', 'np.random.randn', (['(1)', 'node_state_size'], {}), '(1, node_state_size)\n', (18856, 18876), True, 'import numpy as np\n'), ((18901, 18936), 'numpy.random.randn', 'np.random.randn', (['(1)', 'edge_state_size'], {}), '(1, edge_state_size)\n', (18916, 18936), True, 'import numpy as np\n'), ((18987, 19022), 'numpy.random.randn', 'np.random.randn', (['(1)', 'edge_state_size'], {}), '(1, edge_state_size)\n', (19002, 
19022), True, 'import numpy as np\n'), ((19073, 19108), 'numpy.random.randn', 'np.random.randn', (['(1)', 'edge_state_size'], {}), '(1, edge_state_size)\n', (19088, 19108), True, 'import numpy as np\n'), ((19162, 19198), 'tf_gnns.Graph', 'Graph', (['[n1, n2, n3]', '[e12, e13, e23]'], {}), '([n1, n2, n3], [e12, e13, e23])\n', (19167, 19198), False, 'from tf_gnns import make_mlp_graphnet_functions, GraphNet, Node, Edge, Graph, GraphTuple, make_graph_tuple_from_graph_list\n'), ((3149, 3188), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['node_output_size'], {}), '(node_output_size)\n', (3170, 3188), True, 'import tensorflow as tf\n'), ((3271, 3310), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['edge_output_size'], {}), '(edge_output_size)\n', (3292, 3310), True, 'import tensorflow as tf\n'), ((5873, 5912), 'numpy.ones', 'np.ones', (['[batch_size, edge_output_size]'], {}), '([batch_size, edge_output_size])\n', (5880, 5912), True, 'import numpy as np\n'), ((5915, 5954), 'numpy.ones', 'np.ones', (['[batch_size, edge_output_size]'], {}), '([batch_size, edge_output_size])\n', (5922, 5954), True, 'import numpy as np\n'), ((7928, 7987), 'numpy.linalg.norm', 'np.linalg.norm', (['(n1_.node_attr_tensor - n2_.node_attr_tensor)'], {}), '(n1_.node_attr_tensor - n2_.node_attr_tensor)\n', (7942, 7987), True, 'import numpy as np\n'), ((8086, 8135), 'numpy.linalg.norm', 'np.linalg.norm', (['(e1_.edge_tensor - e2_.edge_tensor)'], {}), '(e1_.edge_tensor - e2_.edge_tensor)\n', (8100, 8135), True, 'import numpy as np\n'), ((8860, 8917), 'numpy.linalg.norm', 'np.linalg.norm', (['(n1.node_attr_tensor - n2.node_attr_tensor)'], {}), '(n1.node_attr_tensor - n2.node_attr_tensor)\n', (8874, 8917), True, 'import numpy as np\n'), ((9014, 9061), 'numpy.linalg.norm', 'np.linalg.norm', (['(e1.edge_tensor - e2.edge_tensor)'], {}), '(e1.edge_tensor - e2.edge_tensor)\n', (9028, 9061), True, 'import numpy as np\n'), ((9801, 9816), 'numpy.abs', 'np.abs', (['(w1 - w2)'], {}), 
'(w1 - w2)\n', (9807, 9816), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=10)
import classeval as cle
def test_summary():
X, y = cle.load_example('breast')
X_train, X_test, y_train, y_true = train_test_split(X, y, test_size=0.2, random_state=42)
# Prediction
model = clf.fit(X_train, y_train)
y_proba = model.predict_proba(X_test)
y_pred = model.predict(X_test)
# CHECK summary two class
results = cle.eval(y_true, y_proba[:,1], pos_label='malignant')
assert np.all(results['confmat']['confmat']==[[69,2],[3,40]])
assert results['f1'].astype(str)[0:4]=='0.94'
assert results['auc'].astype(str)[0:4]=='0.99'
assert results['kappa'].astype(str)[0:4]=='0.90'
assert results['MCC'].astype(str)[0:5]=='0.906'
assert results['average_precision'].astype(str)[0:4]=='0.99'
assert results['CAP'].astype(str)=='43'
# CHECK using bool as input
results = cle.eval(y_true=='malignant', y_proba[:,1])
assert np.all(results['confmat']['confmat']==[[69,2],[3,40]])
assert results['f1'].astype(str)[0:4]=='0.94'
assert results['auc'].astype(str)[0:4]=='0.99'
assert results['kappa'].astype(str)[0:4]=='0.90'
assert results['MCC'].astype(str)[0:5]=='0.906'
assert results['average_precision'].astype(str)[0:4]=='0.99'
assert results['CAP'].astype(str)=='43'
# CHECK dict output
assert np.all(np.isin([*results.keys()], ['class_names', 'pos_label', 'neg_label', 'y_true', 'y_pred', 'y_proba', 'auc', 'f1', 'kappa', 'report', 'thresholds', 'fpr', 'tpr', 'average_precision', 'precision', 'recall', 'MCC', 'CAP', 'TPFP', 'confmat', 'threshold']))
# CHECK plots
ax = cle.plot(results)
# TEST 2: Check model output is unchanged
X, y = cle.load_example('iris')
X_train, X_test, y_train, y_true = train_test_split(X, y, test_size=0.2, random_state=1)
model = clf.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_proba = model.predict_proba(X_test)
y_score = model.decision_function(X_test)
# CHECK confmat
out = cle.confmatrix.eval(y_true, y_pred, normalize=True)
assert np.all(out['confmat'].astype(str)==[['1.0', '0.0', '0.0'], ['0.0', '0.9230769230769231', '0.07692307692307693'], ['0.0', '0.0', '1.0']])
out = cle.confmatrix.eval(y_true, y_pred, normalize=False)
assert np.all(out['confmat']==[[11, 0, 0], [0, 12, 1], [0, 0, 6]])
results = cle.eval(y_true, y_proba, y_score, y_pred)
# CHECK output
assert np.all(np.isin([*results.keys()], ['y_true', 'y_pred', 'y_proba', 'threshold', 'class_names', 'ROCAUC', 'stackbar', 'confmat']))
# assert results['ROCAUC']['auc'].astype(str)[0:4]=='0.98'
# CHECK plot
ax = cle.plot(results)
| [
"sklearn.ensemble.RandomForestClassifier",
"classeval.eval",
"classeval.confmatrix.eval",
"sklearn.model_selection.train_test_split",
"classeval.plot",
"classeval.load_example",
"numpy.all"
] | [((130, 169), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (152, 169), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((227, 253), 'classeval.load_example', 'cle.load_example', (['"""breast"""'], {}), "('breast')\n", (243, 253), True, 'import classeval as cle\n'), ((293, 347), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, y, test_size=0.2, random_state=42)\n', (309, 347), False, 'from sklearn.model_selection import train_test_split\n'), ((526, 580), 'classeval.eval', 'cle.eval', (['y_true', 'y_proba[:, 1]'], {'pos_label': '"""malignant"""'}), "(y_true, y_proba[:, 1], pos_label='malignant')\n", (534, 580), True, 'import classeval as cle\n'), ((591, 650), 'numpy.all', 'np.all', (["(results['confmat']['confmat'] == [[69, 2], [3, 40]])"], {}), "(results['confmat']['confmat'] == [[69, 2], [3, 40]])\n", (597, 650), True, 'import numpy as np\n'), ((1008, 1054), 'classeval.eval', 'cle.eval', (["(y_true == 'malignant')", 'y_proba[:, 1]'], {}), "(y_true == 'malignant', y_proba[:, 1])\n", (1016, 1054), True, 'import classeval as cle\n'), ((1063, 1122), 'numpy.all', 'np.all', (["(results['confmat']['confmat'] == [[69, 2], [3, 40]])"], {}), "(results['confmat']['confmat'] == [[69, 2], [3, 40]])\n", (1069, 1122), True, 'import numpy as np\n'), ((1756, 1773), 'classeval.plot', 'cle.plot', (['results'], {}), '(results)\n', (1764, 1773), True, 'import classeval as cle\n'), ((1832, 1856), 'classeval.load_example', 'cle.load_example', (['"""iris"""'], {}), "('iris')\n", (1848, 1856), True, 'import classeval as cle\n'), ((1896, 1949), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(X, y, test_size=0.2, random_state=1)\n', (1912, 1949), False, 'from sklearn.model_selection import train_test_split\n'), ((2142, 2193), 
'classeval.confmatrix.eval', 'cle.confmatrix.eval', (['y_true', 'y_pred'], {'normalize': '(True)'}), '(y_true, y_pred, normalize=True)\n', (2161, 2193), True, 'import classeval as cle\n'), ((2352, 2404), 'classeval.confmatrix.eval', 'cle.confmatrix.eval', (['y_true', 'y_pred'], {'normalize': '(False)'}), '(y_true, y_pred, normalize=False)\n', (2371, 2404), True, 'import classeval as cle\n'), ((2416, 2477), 'numpy.all', 'np.all', (["(out['confmat'] == [[11, 0, 0], [0, 12, 1], [0, 0, 6]])"], {}), "(out['confmat'] == [[11, 0, 0], [0, 12, 1], [0, 0, 6]])\n", (2422, 2477), True, 'import numpy as np\n'), ((2491, 2533), 'classeval.eval', 'cle.eval', (['y_true', 'y_proba', 'y_score', 'y_pred'], {}), '(y_true, y_proba, y_score, y_pred)\n', (2499, 2533), True, 'import classeval as cle\n'), ((2784, 2801), 'classeval.plot', 'cle.plot', (['results'], {}), '(results)\n', (2792, 2801), True, 'import classeval as cle\n')] |
from datetime import datetime as date
import json, random, sys, time
from os import listdir, makedirs, mkdir
from os.path import join as path_join
import imageio
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.nn.functional import relu as relu_func
from tqdm import tqdm, trange
import wandb
from load import load_data_from_args
from utils.nerf_helpers import *
from utils.parser import config_parser
np.random.seed(0)
DEBUG = False
def batchify(fn, chunk):
"""
Constructs a version of 'fn' that applies to smaller batches.
"""
if chunk is None:
return fn
def ret(inputs):
return torch.cat([fn(inputs[i:i+chunk]) for i in range(0, inputs.shape[0], chunk)], 0)
return ret
def run_network(inputs, viewdirs, fn, embed_fn, embeddirs_fn, netchunk=1024*64):
"""
Prepares inputs and applies network 'fn'.
"""
inputs_flat = torch.reshape(inputs, [-1, inputs.shape[-1]])
embedded = embed_fn(inputs_flat)
if viewdirs is not None:
input_dirs = viewdirs[:,None].expand(inputs.shape)
input_dirs_flat = torch.reshape(input_dirs, [-1, input_dirs.shape[-1]])
embedded_dirs = embeddirs_fn(input_dirs_flat)
embedded = torch.cat([embedded, embedded_dirs], -1)
outputs_flat = batchify(fn, netchunk)(embedded)
outputs = torch.reshape(outputs_flat, list(inputs.shape[:-1]) + [outputs_flat.shape[-1]])
return outputs
def batchify_rays(rays_flat, chunk=1024*32, **kwargs):
"""
Render rays in smaller minibatches to avoid OOM.
"""
all_ret = {}
for i in range(0, rays_flat.shape[0], chunk):
ret = render_rays(rays_flat[i:i+chunk], **kwargs)
for k in ret:
if k not in all_ret:
all_ret[k] = []
all_ret[k].append(ret[k])
all_ret = {k : torch.cat(all_ret[k], 0) for k in all_ret}
return all_ret
def render(H, W, K, chunk=1024*32, rays=None, c2w=None, ndc=True,
near=0., far=1.,
use_viewdirs=False, c2w_staticcam=None,
**kwargs):
"""
Render rays
Args:
H: int. Height of image in pixels.
W: int. Width of image in pixels.
focal: float. Focal length of pinhole camera.
chunk: int. Maximum number of rays to process simultaneously. Used to
control maximum memory usage. Does not affect final results.
rays: array of shape [2, batch_size, 3]. Ray origin and direction for
each example in batch.
c2w: array of shape [3, 4]. Camera-to-world transformation matrix.
ndc: bool. If True, represent ray origin, direction in NDC coordinates.
near: float or array of shape [batch_size]. Nearest distance for a ray.
far: float or array of shape [batch_size]. Farthest distance for a ray.
use_viewdirs: bool. If True, use viewing direction of a point in space in model.
c2w_staticcam: array of shape [3, 4]. If not None, use this transformation matrix for
camera while using other c2w argument for viewing directions.
Returns:
rgb_map: [batch_size, 3]. Predicted RGB values for rays.
disp_map: [batch_size]. Disparity map. Inverse of depth.
acc_map: [batch_size]. Accumulated opacity (alpha) along a ray.
extras: dict with everything returned by render_rays().
"""
if c2w is not None:
# special case to render full image
rays_o, rays_d = get_rays(H, W, K, c2w)
else:
# use provided ray batch
rays_o, rays_d = rays
if use_viewdirs:
# provide ray directions as input
viewdirs = rays_d
if c2w_staticcam is not None:
# special case to visualize effect of viewdirs
rays_o, rays_d = get_rays(H, W, K, c2w_staticcam)
viewdirs = viewdirs / torch.norm(viewdirs, dim=-1, keepdim=True)
viewdirs = torch.reshape(viewdirs, [-1,3]).float()
sh = rays_d.shape # [..., 3]
if ndc:
# for forward facing scenes
rays_o, rays_d = ndc_rays(H, W, K[0][0], 1., rays_o, rays_d)
# Create ray batch
rays_o = torch.reshape(rays_o, [-1,3]).float()
rays_d = torch.reshape(rays_d, [-1,3]).float()
near, far = near * torch.ones_like(rays_d[...,:1]), far * torch.ones_like(rays_d[...,:1])
rays = torch.cat([rays_o, rays_d, near, far], -1)
if use_viewdirs:
rays = torch.cat([rays, viewdirs], -1)
# Render and reshape
all_ret = batchify_rays(rays, chunk, **kwargs)
for k in all_ret:
k_sh = list(sh[:-1]) + list(all_ret[k].shape[1:])
all_ret[k] = torch.reshape(all_ret[k], k_sh)
k_extract = ['rgb_map', 'disp_map', 'acc_map']
ret_list = [all_ret[k] for k in k_extract]
ret_dict = {k : all_ret[k] for k in all_ret if k not in k_extract}
return ret_list + [ret_dict]
def render_path(render_poses, hwf, K, chunk, render_kwargs,
gt_imgs=None,
savedir=None,
render_factor=0,
img_prefix='',
img_suffix='',
save_depths=False
):
H, W, focal = hwf
if render_factor!=0:
# Render downsampled for speed
H = H//render_factor
W = W//render_factor
focal = focal/render_factor
rgbs = []
disps = []
depths = []
t = time.time()
for i, c2w in enumerate(tqdm(render_poses,desc='Rendering poses: ')):
if DEBUG:
print(i, time.time() - t)
t = time.time()
rgb, disp, acc, ret = render(H, W, K, chunk=chunk, c2w=c2w[:3,:4], **render_kwargs)
rgbs.append(rgb.cpu().numpy())
disps.append(disp.cpu().numpy())
depths.append(ret['depth_map'].cpu().numpy())
# print(torch.max(rgb), type(rgb))
# print(torch.max(disp), type(disp))
# print(ret['depth_map'].shape, type(ret['depth_map']))
# print(torch.max(ret['depth_map']))
# assert False, "BREAK"
if DEBUG and i==0:
print(rgb.shape, disp.shape)
if savedir is not None:
rgb8 = to8b(rgbs[-1])
filename = path_join(savedir, img_prefix+'{:03d}.png'.format(i))
imageio.imwrite(filename, rgb8)
wandb.log({img_prefix+'{:03d}.png'.format(i): wandb.Image(filename)})
if save_depths:
filename = path_join(savedir, img_prefix+'{:03d}_depth.png'.format(i))
imageio.imwrite(filename, to8b(depths[-1]))
wandb.log({
img_prefix+'{:03d}_depth.png'.format(i): wandb.Image(filename)
})
rgbs = np.stack(rgbs, 0)
disps = np.stack(disps, 0)
depths = np.stack(depths, 0)
if gt_imgs is not None and render_factor==0:
with torch.no_grad():
if isinstance(gt_imgs,torch.Tensor):
gt_imgs = gt_imgs.cpu()
gts = np.stack(gt_imgs,0)
val_loss = np.mean((rgbs-gts)**2)
val_psnr = -10. * np.log10(val_loss)
output = f'[{img_prefix}] Iter: {img_suffix} Loss: {val_loss:.3f} {img_prefix} PSNR: {val_psnr:.3f}'
print(output)
wandb.log({
f'{img_prefix}/Iter': img_suffix,
f'{img_prefix}/Loss': val_loss,
f'{img_prefix}/PSNR': val_psnr
})
return rgbs, disps, depths
def create_nerf(args):
    """Instantiate NeRF's MLP model.

    Builds the positional encoders, the coarse (and optionally fine) MLPs,
    the Adam optimizer, and the render kwargs dicts, reloading the latest
    checkpoint when one exists.

    Returns:
        (render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer)
        where `start` is the global step restored from the checkpoint (0 if
        none) and `grad_vars` lists every trainable parameter.
    """
    # Positional encoding for 3D sample points and, optionally, view dirs.
    embed_fn, input_ch = get_embedder(args.multires, args.i_embed)
    embeddirs_fn, input_ch_views = None, 0
    if args.use_viewdirs:
        embeddirs_fn, input_ch_views = get_embedder(args.multires_views, args.i_embed)

    # One extra output channel when hierarchical sampling is enabled.
    output_ch = 5 if args.N_importance > 0 else 4
    skips = [4]

    # Coarse network.
    model = NeRF(D=args.netdepth, W=args.netwidth,
                 input_ch=input_ch, output_ch=output_ch, skips=skips,
                 input_ch_views=input_ch_views, use_viewdirs=args.use_viewdirs).to(device)
    grad_vars = list(model.parameters())

    # Optional fine network for importance sampling.
    model_fine = None
    if args.N_importance > 0:
        model_fine = NeRF(D=args.netdepth_fine, W=args.netwidth_fine,
                          input_ch=input_ch, output_ch=output_ch, skips=skips,
                          input_ch_views=input_ch_views, use_viewdirs=args.use_viewdirs).to(device)
        grad_vars += list(model_fine.parameters())

    def network_query_fn(inputs, viewdirs, network_fn):
        # Closure that runs a network on encoded inputs, in chunks.
        return run_network(inputs, viewdirs, network_fn,
                           embed_fn=embed_fn,
                           embeddirs_fn=embeddirs_fn,
                           netchunk=args.netchunk)

    # Create optimizer
    optimizer = torch.optim.Adam(params=grad_vars, lr=args.lrate, betas=(0.9, 0.999))

    start = 0
    basedir = args.basedir
    expname = args.expname

    ##########################
    # Load checkpoints: an explicit ft_path wins, otherwise pick up every
    # '*tar*' file in the experiment directory (sorted, newest used last).
    if args.ft_path is not None and args.ft_path!='None':
        ckpts = [args.ft_path]
    else:
        ckpts = [path_join(basedir, expname, f)
                 for f in sorted(listdir(path_join(basedir, expname)))
                 if 'tar' in f]
    if len(ckpts) > 0 and not args.no_reload:
        print('Found ckpts')
        ckpt_path = ckpts[-1]
        print('Reloading from', ckpt_path)
        wandb.log({'Reloading from': ckpt_path})
        ckpt = torch.load(ckpt_path)
        start = ckpt['global_step']
        optimizer.load_state_dict(ckpt['optimizer_state_dict'])
        # Load model
        model.load_state_dict(ckpt['network_fn_state_dict'])
        if model_fine is not None:
            model_fine.load_state_dict(ckpt['network_fine_state_dict'])
    ##########################

    render_kwargs_train = {
        'network_query_fn' : network_query_fn,
        'perturb' : args.perturb,
        'N_importance' : args.N_importance,
        'network_fine' : model_fine,
        'N_samples' : args.N_samples,
        'network_fn' : model,
        'use_viewdirs' : args.use_viewdirs,
        'white_bkgd' : args.white_bkgd,
        'raw_noise_std' : args.raw_noise_std,
    }
    # NDC only good for LLFF-style forward facing data
    if args.dataset_type != 'llff' or args.no_ndc:
        print('Not ndc!')
        render_kwargs_train['ndc'] = False
        render_kwargs_train['lindisp'] = args.lindisp

    # Test-time kwargs: same settings, but deterministic sampling, no noise.
    render_kwargs_test = dict(render_kwargs_train)
    render_kwargs_test['perturb'] = False
    render_kwargs_test['raw_noise_std'] = 0.

    wandb.watch(model, log='all')
    return render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer
def raw2outputs(raw, z_vals, rays_d, raw_noise_std=0, white_bkgd=False, pytest=False):
    """
    Transforms model's predictions to semantically meaningful values.
    Args:
        raw: [num_rays, num_samples along ray, 4]. Prediction from model.
        z_vals: [num_rays, num_samples along ray]. Integration time.
        rays_d: [num_rays, 3]. Direction of each ray.
        raw_noise_std: std of noise added to raw density (regularizer).
        white_bkgd: if True, composite the result onto a white background.
        pytest: if True, use numpy's seeded RNG for deterministic noise.
    Returns:
        rgb_map: [num_rays, 3]. Estimated RGB color of a ray.
        disp_map: [num_rays]. Disparity map. Inverse of depth map.
        acc_map: [num_rays]. Sum of weights along each ray.
        weights: [num_rays, num_samples]. Weights assigned to each sampled color.
        depth_map: [num_rays]. Estimated distance to object.
    """
    # Segment lengths between adjacent samples; the last one is effectively
    # infinite so the final sample absorbs remaining transmittance.
    deltas = z_vals[..., 1:] - z_vals[..., :-1]
    far_cap = torch.Tensor([1e10]).expand(deltas[..., :1].shape)
    deltas = torch.cat([deltas, far_cap], -1)  # [N_rays, N_samples]
    # Scale by the (possibly non-unit) ray direction norm -> metric distances.
    deltas = deltas * torch.norm(rays_d[..., None, :], dim=-1)

    # Color is the sigmoid of the first three channels. [N_rays, N_samples, 3]
    rgb = torch.sigmoid(raw[..., :3])

    sigma = raw[..., 3]
    noise = 0.
    if raw_noise_std > 0.:
        noise = torch.randn(sigma.shape) * raw_noise_std
        # Overwrite randomly sampled data if pytest
        if pytest:
            np.random.seed(0)
            noise = torch.Tensor(np.random.rand(*list(sigma.shape)) * raw_noise_std)

    # alpha_i = 1 - exp(-act(sigma_i) * delta_i)   [N_rays, N_samples]
    alpha = 1. - torch.exp(-relu_func(sigma + noise) * deltas)
    # Exclusive cumulative product gives per-sample transmittance
    # (the 1e-10 keeps the product away from exact zero).
    leading_ones = torch.ones((alpha.shape[0], 1))
    transmittance = torch.cumprod(
        torch.cat([leading_ones, 1. - alpha + 1e-10], -1), -1)[:, :-1]
    weights = alpha * transmittance

    rgb_map = torch.sum(weights[..., None] * rgb, -2)  # [N_rays, 3]
    depth_map = torch.sum(weights * z_vals, -1)
    disp_map = depth2dist(depth_map, weights)
    acc_map = torch.sum(weights, -1)

    if white_bkgd:
        # Fill the unaccumulated opacity with white.
        rgb_map = rgb_map + (1. - acc_map[..., None])

    return rgb_map, disp_map, acc_map, weights, depth_map
def render_rays(ray_batch,
                network_fn,
                network_query_fn,
                N_samples,
                ret_raw=False,
                lindisp=False,
                perturb=0.,
                N_importance=0,
                network_fine=None,
                white_bkgd=False,
                raw_noise_std=0.,
                verbose=False,
                pytest=False):
    """Volumetric rendering.
    Args:
      ray_batch: array of shape [batch_size, ...]. All information necessary
        for sampling along a ray, including: ray origin, ray direction, min
        dist, max dist, and unit-magnitude viewing direction.
      network_fn: function. Model for predicting RGB and density at each point
        in space.
      network_query_fn: function used for passing queries to network_fn.
      N_samples: int. Number of different times to sample along each ray.
      ret_raw: bool. If True, include model's raw, unprocessed predictions.
      lindisp: bool. If True, sample linearly in inverse depth rather than in depth.
      perturb: float, 0 or 1. If non-zero, each ray is sampled at stratified
        random points in time.
      N_importance: int. Number of additional times to sample along each ray.
        These samples are only passed to network_fine.
      network_fine: "fine" network with same spec as network_fn.
      white_bkgd: bool. If True, assume a white background.
      raw_noise_std: std of noise added to raw densities (regularizer).
      verbose: bool. If True, print more debugging info.
      pytest: bool. If True, use numpy's seeded RNG for deterministic sampling.
    Returns a dict with:
      rgb_map: [num_rays, 3]. Estimated RGB color of a ray. Comes from fine model.
      disp_map: [num_rays]. Disparity map. 1 / depth.
      acc_map: [num_rays]. Accumulated opacity along each ray. Comes from fine model.
      depth_map: [num_rays]. Estimated distance to object.
      raw: [num_rays, num_samples, 4]. Raw predictions from model (if ret_raw).
      rgb0: See rgb_map. Output for coarse model.
      disp0: See disp_map. Output for coarse model.
      acc0: See acc_map. Output for coarse model.
      z_std: [num_rays]. Standard deviation of distances along ray for each
        sample.
    """
    N_rays = ray_batch.shape[0]
    # Unpack the flat ray layout: origin (0:3), direction (3:6), bounds (6:8),
    # optional view direction in the trailing 3 entries.
    rays_o, rays_d = ray_batch[:,0:3], ray_batch[:,3:6] # [N_rays, 3] each
    viewdirs = ray_batch[:,-3:] if ray_batch.shape[-1] > 8 else None
    bounds = torch.reshape(ray_batch[...,6:8], [-1,1,2])
    near, far = bounds[...,0], bounds[...,1] # [-1,1]
    # Coarse sample depths: linear in depth, or linear in disparity.
    t_vals = torch.linspace(0., 1., steps=N_samples)
    if not lindisp:
        z_vals = near * (1.-t_vals) + far * (t_vals)
    else:
        z_vals = 1./(1./near * (1.-t_vals) + 1./far * (t_vals))
    z_vals = z_vals.expand([N_rays, N_samples])
    if perturb > 0.:
        # get intervals between samples
        mids = .5 * (z_vals[...,1:] + z_vals[...,:-1])
        upper = torch.cat([mids, z_vals[...,-1:]], -1)
        lower = torch.cat([z_vals[...,:1], mids], -1)
        # stratified samples in those intervals
        t_rand = torch.rand(z_vals.shape)
        # Pytest, overwrite u with numpy's fixed random numbers
        if pytest:
            np.random.seed(0)
            t_rand = np.random.rand(*list(z_vals.shape))
            t_rand = torch.Tensor(t_rand)
        z_vals = lower + (upper - lower) * t_rand
    # Sample points in world space.
    pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None] # [N_rays, N_samples, 3]
    raw = network_query_fn(pts, viewdirs, network_fn)
    rgb_map, disp_map, acc_map, weights, depth_map = raw2outputs(raw, z_vals, rays_d, raw_noise_std, white_bkgd, pytest=pytest)
    if N_importance > 0:
        # Keep the coarse outputs so they can be returned as rgb0/disp0/acc0.
        rgb_map_0, disp_map_0, acc_map_0 = rgb_map, disp_map, acc_map
        # Hierarchical sampling: draw extra depths from the coarse weight PDF,
        # detach so the sampling does not backprop into the coarse network.
        z_vals_mid = .5 * (z_vals[...,1:] + z_vals[...,:-1])
        z_samples = sample_pdf(z_vals_mid, weights[...,1:-1], N_importance, det=(perturb==0.), pytest=pytest)
        z_samples = z_samples.detach()
        z_vals, _ = torch.sort(torch.cat([z_vals, z_samples], -1), -1)
        pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None] # [N_rays, N_samples + N_importance, 3]
        # Fall back to the coarse network when no fine network was built.
        run_fn = network_fn if network_fine is None else network_fine
        raw = network_query_fn(pts, viewdirs, run_fn)
        rgb_map, disp_map, acc_map, weights, depth_map = raw2outputs(raw, z_vals, rays_d, raw_noise_std, white_bkgd, pytest=pytest)
    ret = {'rgb_map' : rgb_map, 'disp_map' : disp_map, 'acc_map' : acc_map, 'depth_map' : depth_map}
    if ret_raw:
        ret['raw'] = raw
    if N_importance > 0:
        ret['rgb0'] = rgb_map_0
        ret['disp0'] = disp_map_0
        ret['acc0'] = acc_map_0
        ret['z_std'] = torch.std(z_samples, dim=-1, unbiased=False) # [N_rays]
    # Numerical sanity check (debug builds only).
    for k in ret:
        if (torch.isnan(ret[k]).any() or torch.isinf(ret[k]).any()) and DEBUG:
            print(f"! [Numerical Error] {k} contains nan or inf.")
    return ret
def train(args):
    """Main training loop: load data, set up logging/model, optimize, and
    periodically checkpoint, render videos, and evaluate test/val sets.

    Reads the module-level `device` global. All side effects (files under
    args.basedir/args.expname, wandb runs) are keyed by args.expname.
    """
    # TODO: add depthmapping
    # https://keras.io/examples/vision/nerf/
    images, poses, render_poses, hwf, K, i_split, near, far = load_data_from_args(args)
    i_train, i_val, i_test = i_split
    H, W, _ = hwf
    # Fail fast on an out-of-range pose filter before any setup work.
    if args.render_poses_filter and np.max(args.render_poses_filter) > len(i_test):
        raise ValueError(f"args.render_poses_filter must be <= len(i_test)")
    # Create log dir and copy the config file
    basedir = args.basedir
    expname = args.expname
    makedirs(path_join(basedir, expname), exist_ok=True)
    file_path = path_join(basedir, expname, 'args.txt')
    with open(file_path, 'w') as file:
        for arg in sorted(vars(args)):
            attr = getattr(args, arg)
            file.write('{} = {}\n'.format(arg, attr))
    if args.config is not None:
        file_path = path_join(basedir, expname, 'config.txt')
        with open(file_path, 'w') as file:
            file.write(open(args.config, 'r').read())
    wandb.init(
        project='C291 NeRF',
        name=expname,
        dir=path_join(basedir, expname),
        config=vars(args)
    )
    # Create nerf model
    # grad_vars unused
    render_kwargs_train, render_kwargs_test, start, _, nerf_optimizer = create_nerf(args)
    global_step = start
    # Scene depth bounds are shared by train and test render kwargs.
    bds_dict = {
        'near' : near,
        'far' : far,
    }
    render_kwargs_train.update(bds_dict)
    render_kwargs_test.update(bds_dict)
    # Move testing data to GPU
    render_poses = torch.Tensor(render_poses).to(device)
    # Short circuit if only rendering out from trained model
    if args.render_only:
        print('RENDER ONLY')
        with torch.no_grad():
            if args.render_test:
                # render_test switches to test poses
                images = images[i_test]
            else:
                # Default is smoother render_poses path
                images = None
            testsavedir = path_join(basedir, expname, 'renderonly_{}_{:06d}'.format('test' if args.render_test else 'path', start))
            makedirs(testsavedir, exist_ok=True)
            print('test poses shape', render_poses[args.render_poses_filter].shape)
            rgbs, _, depths = render_path(render_poses[args.render_poses_filter], hwf, K, args.chunk, render_kwargs_test, gt_imgs=images, savedir=testsavedir, render_factor=args.render_factor)
            # Replicate the single depth channel to 3 channels for video export.
            depths = np.repeat(np.expand_dims(depths,axis=3),3,axis=3)
            # because rgbs may be slightly over 1
            imageio.mimwrite(path_join(testsavedir, 'rbgs_video.mp4'), to8b(rgbs/np.max(rgbs)), fps=30, quality=8)
            imageio.mimwrite(path_join(testsavedir, 'depths_video.mp4'), to8b(depths/np.max(depths)), fps=30, quality=8)
            # logs
            wandb.log(
                {
                    "render_only_rbgs_gif": wandb.Video(rgbs, fps=30, format='gif'),
                    "render_only_depths_gif": wandb.Video(depths, fps=30, format='gif'),
                    "render_only_rbgs_mp4": wandb.Video(path_join(testsavedir, 'rbgs_video.mp4'), fps=10, format='mp4'),
                    "render_only_depths_mp4": wandb.Video(path_join(testsavedir, 'depths_video.mp4'), fps=10, format='mp4'),
                }
            )
            # early break
            return
    # Prepare raybatch tensor if batching random rays
    N_rand = args.N_rand
    use_batching = not args.no_batching
    if use_batching:
        # For random ray batching
        print('get rays')
        rays = np.stack([get_rays_np(H, W, K, p) for p in poses[:,:3,:4]], 0) # [N, ro+rd, H, W, 3]
        print('done, concats')
        # added for bottles dataset
        if rays.shape[0] > images[:,None].shape[0]:
            rays = rays[:images[:,None].shape[0]]
        rays_rgb = np.concatenate([rays, images[:,None]], 1) # [N, ro+rd+rgb, H, W, 3]
        rays_rgb = np.transpose(rays_rgb, [0,2,3,1,4]) # [N, H, W, ro+rd+rgb, 3]
        rays_rgb = np.stack([rays_rgb[i] for i in i_train], 0) # train images only
        rays_rgb = np.reshape(rays_rgb, [-1,3,3]) # [(N-1)*H*W, ro+rd+rgb, 3]
        rays_rgb = rays_rgb.astype(np.float32)
        print('shuffle rays')
        np.random.shuffle(rays_rgb)
        print('done')
        i_batch = 0
        # Move training data to GPU
        # NOTE(review): images/rays_rgb are only tensors in batching mode;
        # in the no-batching path images stay as loaded — confirm intent.
        images = torch.Tensor(images).to(device)
        rays_rgb = torch.Tensor(rays_rgb).to(device)
    poses = torch.Tensor(poses).to(device)
    if args.i_val_eval > 0:
        # Fixed validation subset used for periodic evaluation below.
        val_imgs = images[i_val[:args.i_val_set]]
        val_poses = poses[i_val[:args.i_val_set]]
    N_iters = args.n_iters + 1
    print('Begin')
    print('TRAIN views are', i_train)
    print('TEST views are', i_test)
    print('VAL views are', i_val)
    # Summary writers
    # writer = SummaryWriter(path_join(basedir, 'summaries', expname))
    start = start + 1
    for i in range(start, N_iters):
        time0 = time.time()
        # Sample random ray batch
        if use_batching:
            # Random over all images
            batch = rays_rgb[i_batch:i_batch+N_rand] # [B, 2+1, 3*?]
            batch = torch.transpose(batch, 0, 1)
            batch_rays, target_s = batch[:2], batch[2]
            i_batch += N_rand
            if i_batch >= rays_rgb.shape[0]:
                print("Shuffle data after an epoch!")
                rand_idx = torch.randperm(rays_rgb.shape[0])
                rays_rgb = rays_rgb[rand_idx]
                i_batch = 0
        else:
            # Random from one image
            img_i = np.random.choice(i_train)
            target = images[img_i]
            target = torch.Tensor(target).to(device)
            pose = poses[img_i, :3,:4]
            if N_rand is not None:
                rays_o, rays_d = get_rays(H, W, K, torch.Tensor(pose)) # (H, W, 3), (H, W, 3)
                if i < args.precrop_iters:
                    # Early iters: restrict sampling to a central crop.
                    dH = int(H//2 * args.precrop_frac)
                    dW = int(W//2 * args.precrop_frac)
                    coords = torch.stack(
                        torch.meshgrid(
                            torch.linspace(H//2 - dH, H//2 + dH - 1, 2*dH),
                            torch.linspace(W//2 - dW, W//2 + dW - 1, 2*dW),
                            indexing = 'ij'
                        ), -1)
                    if i == start:
                        print(f"[Config] Center cropping of size {2*dH} x {2*dW} is enabled until iter {args.precrop_iters}")
                else:
                    coords = torch.stack(torch.meshgrid(
                        torch.linspace(0, H-1, H),
                        torch.linspace(0, W-1, W),
                        indexing='ij')
                        , -1)  # (H, W, 2)
                coords = torch.reshape(coords, [-1,2])  # (H * W, 2)
                select_inds = np.random.choice(coords.shape[0], size=[N_rand], replace=False)  # (N_rand,)
                select_coords = coords[select_inds].long()  # (N_rand, 2)
                rays_o = rays_o[select_coords[:, 0], select_coords[:, 1]]  # (N_rand, 3)
                rays_d = rays_d[select_coords[:, 0], select_coords[:, 1]]  # (N_rand, 3)
                batch_rays = torch.stack([rays_o, rays_d], 0)
                target_s = target[select_coords[:, 0], select_coords[:, 1]]  # (N_rand, 3)
        ##### Core optimization loop #####
        rgb, disp, acc_map, extras = render(H, W, K, chunk=args.chunk, rays=batch_rays,
                                            verbose=i < 10, ret_raw=True,
                                            **render_kwargs_train)
        nerf_optimizer.zero_grad()
        img_loss = img2mse(rgb, target_s)
        trans = extras['raw'][...,-1]  # NOTE(review): unused below — kept for parity
        train_loss = img_loss
        train_psnr = mse2psnr(img_loss)
        if 'rgb0' in extras:
            # Also supervise the coarse network's prediction.
            img_loss0 = img2mse(extras['rgb0'], target_s)
            train_loss = train_loss + img_loss0
            psnr0 = mse2psnr(img_loss0)
        train_loss.backward()
        nerf_optimizer.step()
        # NOTE: IMPORTANT!
        ### update learning rate ###
        # Exponential decay: lr = lrate * 0.1 ** (step / (lrate_decay * 1000))
        decay_rate = 0.1
        decay_steps = args.lrate_decay * 1000
        new_lrate = args.lrate * (decay_rate ** (global_step / decay_steps))
        for param_group in nerf_optimizer.param_groups:
            param_group['lr'] = new_lrate
        ################################
        dt = time.time()-time0
        ##### end #####
        ##### Rest is logging
        if i%args.i_print==0:
            outstring =f"[TRAIN] Iter: {i} Loss: {train_loss.item()} PSNR: {train_psnr.item()} Iter time: {dt:.05f}"
            tqdm.write(outstring)
            wandb.log({
                "TRAIN/Iter": i,
                "TRAIN/Loss": train_loss.item(),
                "TRAIN/PSNR": train_psnr.item(),
                "TRAIN/Iter time": dt
            })
        # logging weights
        if i%args.i_weights==0:
            path = path_join(basedir, expname, '{:06d}.tar'.format(i))
            # NOTE(review): when args.N_importance == 0,
            # render_kwargs_train['network_fine'] is None, so the
            # .state_dict() call below would raise — confirm N_importance > 0
            # is always set for runs that reach this branch.
            torch.save({
                'global_step': global_step,
                'network_fn_state_dict': render_kwargs_train['network_fn'].state_dict(),
                'network_fine_state_dict': render_kwargs_train['network_fine'].state_dict(),
                'optimizer_state_dict': nerf_optimizer.state_dict(),
            }, path)
            wandb.save("model_iter_{:06d}.tar".format(i))
            print('Saved checkpoints at', path)
        # TODO: Consolidate code
        if i%args.i_video==0 and i > 0:
            # Turn on testing mode
            print('video')
            with torch.no_grad():
                rgbs, _, depths = render_path(render_poses, hwf, K, args.chunk, render_kwargs_test)
            print('Done, saving', rgbs.shape, depths.shape)
            moviebase = path_join(basedir, expname, '{}_spiral_{:06d}_'.format(expname, i))
            imageio.mimwrite(moviebase + 'rgb.mp4', to8b(rgbs), fps=30, quality=8)
            imageio.mimwrite(moviebase + 'depth.mp4', to8b(depths / np.max(depths)), fps=30, quality=8)
            wandb.log({
                '{}_spiral_{:06d}_'.format(expname, i)+'rgb.gif': wandb.Video(moviebase + 'rgb.mp4', format='gif'),
                '{}_spiral_{:06d}_'.format(expname, i)+'disp.gif': wandb.Video(moviebase + 'depth.mp4', format='gif'),
                '{}_spiral_{:06d}_'.format(expname, i)+'rgb.mp4': wandb.Video(moviebase + 'rgb.mp4'),
                '{}_spiral_{:06d}_'.format(expname, i)+'depth.mp4': wandb.Video(moviebase + 'depth.mp4'),
            })
            if args.use_viewdirs:
                # Render a fixed-viewpoint video to visualize view dependence.
                print('static video')
                render_kwargs_test['c2w_staticcam'] = render_poses[30][:3,:4]
                with torch.no_grad():
                    rgbs_still, *_ = render_path(render_poses, hwf, K ,args.chunk, render_kwargs_test)
                render_kwargs_test['c2w_staticcam'] = None
                imageio.mimwrite(moviebase + 'rgb_still.mp4', to8b(rgbs_still), fps=30, quality=8)
                wandb.log({
                    '{}_spiral_{:06d}_'.format(expname, i)+'rgb_still.gif': wandb.Video(moviebase + 'rgb_still.mp4', format='gif'),
                    '{}_spiral_{:06d}_'.format(expname, i)+'rgb_still.mp4': wandb.Video(moviebase + 'rgb_still.mp4'),
                })
        if i%args.i_testset==0 and i > 0:
            # Render the (optionally filtered) held-out test poses to disk.
            testsavedir = path_join(basedir, expname, 'testset_{:06d}'.format(i))
            makedirs(testsavedir, exist_ok=True)
            inds = i_test
            if args.render_poses_filter:
                inds = i_test[args.render_poses_filter]
            with torch.no_grad():
                pose_filter = torch.Tensor(poses[inds]).to(device)
                render_path(pose_filter, hwf, K, args.chunk, render_kwargs_test,
                            # gt_imgs=images[i_test],
                            savedir=testsavedir)
            # Release the GPU copy of the test poses explicitly.
            pose_filter = pose_filter.cpu()
            del pose_filter
            print('Saved test set')
        if args.i_val_eval and i%args.i_val_eval==0 and i > 0:
            print("Evaluating on validation set")
            filename = path_join(basedir, expname, 'val_eval_{:06d}'.format(i))
            mkdir(filename)
            with torch.no_grad():
                render_path(val_poses, hwf, K, args.chunk, render_kwargs_train,
                            gt_imgs=val_imgs,
                            img_prefix=f'VAL',
                            img_suffix=i,
                            savedir=filename
                            )
        # Dead code below: legacy TensorFlow summary logging kept as a bare
        # string literal (never executed).
        """
        with tf.contrib.summary.record_summaries_every_n_global_steps(args.i_print):
            tf.contrib.summary.scalar('loss', loss)
            tf.contrib.summary.scalar('psnr', psnr)
            tf.contrib.summary.histogram('tran', trans)
            if args.N_importance > 0:
                tf.contrib.summary.scalar('psnr0', psnr0)
        if i%args.i_img==0:
            # Log a rendered validation view to Tensorboard
            img_i=np.random.choice(i_val)
            target = images[img_i]
            pose = poses[img_i, :3,:4]
            with torch.no_grad():
                rgb, disp, acc, extras = render(H, W, focal, chunk=args.chunk, c2w=pose,
                                                    **render_kwargs_test)
            psnr = mse2psnr(img2mse(rgb, target))
            with tf.contrib.summary.record_summaries_every_n_global_steps(args.i_img):
                tf.contrib.summary.image('rgb', to8b(rgb)[tf.newaxis])
                tf.contrib.summary.image('disp', disp[tf.newaxis,...,tf.newaxis])
                tf.contrib.summary.image('acc', acc[tf.newaxis,...,tf.newaxis])
                tf.contrib.summary.scalar('psnr_holdout', psnr)
                tf.contrib.summary.image('rgb_holdout', target[tf.newaxis])
            if args.N_importance > 0:
                with tf.contrib.summary.record_summaries_every_n_global_steps(args.i_img):
                    tf.contrib.summary.image('rgb0', to8b(extras['rgb0'])[tf.newaxis])
                    tf.contrib.summary.image('disp0', extras['disp0'][tf.newaxis,...,tf.newaxis])
                    tf.contrib.summary.image('z_std', extras['z_std'][tf.newaxis,...,tf.newaxis])
        """
        global_step += 1
if __name__=='__main__':
    # Script entry point. Note: `args` and `device` are module-level globals;
    # `device` is read by create_nerf() and train() above.
    parser = config_parser()
    args = parser.parse_args()
    device = torch.device((f'cuda:{args.gpu}' if torch.cuda.is_available() else 'cpu'))
    print(f'using device {device}')
    # NOTE(review): even when CUDA is unavailable this still enters
    # torch.cuda.device(args.gpu) and sets a CUDA default tensor type —
    # confirm CPU-only runs are actually supported.
    with torch.cuda.device(args.gpu):
        # New tensors default to the selected CUDA device inside this context.
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        train(args)
| [
"wandb.log",
"os.mkdir",
"numpy.random.seed",
"wandb.watch",
"torch.cat",
"torch.set_default_tensor_type",
"torch.randn",
"torch.std",
"numpy.mean",
"torch.no_grad",
"os.path.join",
"torch.isnan",
"torch.ones",
"wandb.Video",
"torch.isinf",
"torch.load",
"numpy.transpose",
"numpy.m... | [((432, 449), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (446, 449), True, 'import numpy as np\n'), ((907, 952), 'torch.reshape', 'torch.reshape', (['inputs', '[-1, inputs.shape[-1]]'], {}), '(inputs, [-1, inputs.shape[-1]])\n', (920, 952), False, 'import torch\n'), ((4337, 4379), 'torch.cat', 'torch.cat', (['[rays_o, rays_d, near, far]', '(-1)'], {}), '([rays_o, rays_d, near, far], -1)\n', (4346, 4379), False, 'import torch\n'), ((5371, 5382), 'time.time', 'time.time', ([], {}), '()\n', (5380, 5382), False, 'import json, random, sys, time\n'), ((6664, 6681), 'numpy.stack', 'np.stack', (['rgbs', '(0)'], {}), '(rgbs, 0)\n', (6672, 6681), True, 'import numpy as np\n'), ((6694, 6712), 'numpy.stack', 'np.stack', (['disps', '(0)'], {}), '(disps, 0)\n', (6702, 6712), True, 'import numpy as np\n'), ((6726, 6745), 'numpy.stack', 'np.stack', (['depths', '(0)'], {}), '(depths, 0)\n', (6734, 6745), True, 'import numpy as np\n'), ((8795, 8864), 'torch.optim.Adam', 'torch.optim.Adam', ([], {'params': 'grad_vars', 'lr': 'args.lrate', 'betas': '(0.9, 0.999)'}), '(params=grad_vars, lr=args.lrate, betas=(0.9, 0.999))\n', (8811, 8864), False, 'import torch\n'), ((10569, 10598), 'wandb.watch', 'wandb.watch', (['model'], {'log': '"""all"""'}), "(model, log='all')\n", (10580, 10598), False, 'import wandb\n'), ((11749, 11776), 'torch.sigmoid', 'torch.sigmoid', (['raw[..., :3]'], {}), '(raw[..., :3])\n', (11762, 11776), False, 'import torch\n'), ((12405, 12444), 'torch.sum', 'torch.sum', (['(weights[..., None] * rgb)', '(-2)'], {}), '(weights[..., None] * rgb, -2)\n', (12414, 12444), False, 'import torch\n'), ((12476, 12507), 'torch.sum', 'torch.sum', (['(weights * z_vals)', '(-1)'], {}), '(weights * z_vals, -1)\n', (12485, 12507), False, 'import torch\n'), ((12669, 12691), 'torch.sum', 'torch.sum', (['weights', '(-1)'], {}), '(weights, -1)\n', (12678, 12691), False, 'import torch\n'), ((15142, 15188), 'torch.reshape', 'torch.reshape', (['ray_batch[..., 
6:8]', '[-1, 1, 2]'], {}), '(ray_batch[..., 6:8], [-1, 1, 2])\n', (15155, 15188), False, 'import torch\n'), ((15254, 15295), 'torch.linspace', 'torch.linspace', (['(0.0)', '(1.0)'], {'steps': 'N_samples'}), '(0.0, 1.0, steps=N_samples)\n', (15268, 15295), False, 'import torch\n'), ((17860, 17885), 'load.load_data_from_args', 'load_data_from_args', (['args'], {}), '(args)\n', (17879, 17885), False, 'from load import load_data_from_args\n'), ((18290, 18329), 'os.path.join', 'path_join', (['basedir', 'expname', '"""args.txt"""'], {}), "(basedir, expname, 'args.txt')\n", (18299, 18329), True, 'from os.path import join as path_join\n'), ((32374, 32389), 'utils.parser.config_parser', 'config_parser', ([], {}), '()\n', (32387, 32389), False, 'from utils.parser import config_parser\n'), ((1105, 1158), 'torch.reshape', 'torch.reshape', (['input_dirs', '[-1, input_dirs.shape[-1]]'], {}), '(input_dirs, [-1, input_dirs.shape[-1]])\n', (1118, 1158), False, 'import torch\n'), ((1232, 1272), 'torch.cat', 'torch.cat', (['[embedded, embedded_dirs]', '(-1)'], {}), '([embedded, embedded_dirs], -1)\n', (1241, 1272), False, 'import torch\n'), ((1835, 1859), 'torch.cat', 'torch.cat', (['all_ret[k]', '(0)'], {}), '(all_ret[k], 0)\n', (1844, 1859), False, 'import torch\n'), ((4416, 4447), 'torch.cat', 'torch.cat', (['[rays, viewdirs]', '(-1)'], {}), '([rays, viewdirs], -1)\n', (4425, 4447), False, 'import torch\n'), ((4626, 4657), 'torch.reshape', 'torch.reshape', (['all_ret[k]', 'k_sh'], {}), '(all_ret[k], k_sh)\n', (4639, 4657), False, 'import torch\n'), ((5411, 5455), 'tqdm.tqdm', 'tqdm', (['render_poses'], {'desc': '"""Rendering poses: """'}), "(render_poses, desc='Rendering poses: ')\n", (5415, 5455), False, 'from tqdm import tqdm, trange\n'), ((5525, 5536), 'time.time', 'time.time', ([], {}), '()\n', (5534, 5536), False, 'import json, random, sys, time\n'), ((9367, 9407), 'wandb.log', 'wandb.log', (["{'Reloading from': ckpt_path}"], {}), "({'Reloading from': ckpt_path})\n", (9376, 
9407), False, 'import wandb\n'), ((9423, 9444), 'torch.load', 'torch.load', (['ckpt_path'], {}), '(ckpt_path)\n', (9433, 9444), False, 'import torch\n'), ((11699, 11739), 'torch.norm', 'torch.norm', (['rays_d[..., None, :]'], {'dim': '(-1)'}), '(rays_d[..., None, :], dim=-1)\n', (11709, 11739), False, 'import torch\n'), ((15623, 15662), 'torch.cat', 'torch.cat', (['[mids, z_vals[..., -1:]]', '(-1)'], {}), '([mids, z_vals[..., -1:]], -1)\n', (15632, 15662), False, 'import torch\n'), ((15678, 15716), 'torch.cat', 'torch.cat', (['[z_vals[..., :1], mids]', '(-1)'], {}), '([z_vals[..., :1], mids], -1)\n', (15687, 15716), False, 'import torch\n'), ((15781, 15805), 'torch.rand', 'torch.rand', (['z_vals.shape'], {}), '(z_vals.shape)\n', (15791, 15805), False, 'import torch\n'), ((17466, 17510), 'torch.std', 'torch.std', (['z_samples'], {'dim': '(-1)', 'unbiased': '(False)'}), '(z_samples, dim=-1, unbiased=False)\n', (17475, 17510), False, 'import torch\n'), ((18225, 18252), 'os.path.join', 'path_join', (['basedir', 'expname'], {}), '(basedir, expname)\n', (18234, 18252), True, 'from os.path import join as path_join\n'), ((18552, 18593), 'os.path.join', 'path_join', (['basedir', 'expname', '"""config.txt"""'], {}), "(basedir, expname, 'config.txt')\n", (18561, 18593), True, 'from os.path import join as path_join\n'), ((21523, 21565), 'numpy.concatenate', 'np.concatenate', (['[rays, images[:, None]]', '(1)'], {}), '([rays, images[:, None]], 1)\n', (21537, 21565), True, 'import numpy as np\n'), ((21610, 21649), 'numpy.transpose', 'np.transpose', (['rays_rgb', '[0, 2, 3, 1, 4]'], {}), '(rays_rgb, [0, 2, 3, 1, 4])\n', (21622, 21649), True, 'import numpy as np\n'), ((21691, 21734), 'numpy.stack', 'np.stack', (['[rays_rgb[i] for i in i_train]', '(0)'], {}), '([rays_rgb[i] for i in i_train], 0)\n', (21699, 21734), True, 'import numpy as np\n'), ((21774, 21806), 'numpy.reshape', 'np.reshape', (['rays_rgb', '[-1, 3, 3]'], {}), '(rays_rgb, [-1, 3, 3])\n', (21784, 21806), True, 
'import numpy as np\n'), ((21918, 21945), 'numpy.random.shuffle', 'np.random.shuffle', (['rays_rgb'], {}), '(rays_rgb)\n', (21935, 21945), True, 'import numpy as np\n'), ((22634, 22645), 'time.time', 'time.time', ([], {}), '()\n', (22643, 22645), False, 'import json, random, sys, time\n'), ((32555, 32582), 'torch.cuda.device', 'torch.cuda.device', (['args.gpu'], {}), '(args.gpu)\n', (32572, 32582), False, 'import torch\n'), ((32592, 32647), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['"""torch.cuda.FloatTensor"""'], {}), "('torch.cuda.FloatTensor')\n", (32621, 32647), False, 'import torch\n'), ((3852, 3894), 'torch.norm', 'torch.norm', (['viewdirs'], {'dim': '(-1)', 'keepdim': '(True)'}), '(viewdirs, dim=-1, keepdim=True)\n', (3862, 3894), False, 'import torch\n'), ((4142, 4172), 'torch.reshape', 'torch.reshape', (['rays_o', '[-1, 3]'], {}), '(rays_o, [-1, 3])\n', (4155, 4172), False, 'import torch\n'), ((4193, 4223), 'torch.reshape', 'torch.reshape', (['rays_d', '[-1, 3]'], {}), '(rays_d, [-1, 3])\n', (4206, 4223), False, 'import torch\n'), ((4255, 4287), 'torch.ones_like', 'torch.ones_like', (['rays_d[..., :1]'], {}), '(rays_d[..., :1])\n', (4270, 4287), False, 'import torch\n'), ((4294, 4326), 'torch.ones_like', 'torch.ones_like', (['rays_d[..., :1]'], {}), '(rays_d[..., :1])\n', (4309, 4326), False, 'import torch\n'), ((6225, 6256), 'imageio.imwrite', 'imageio.imwrite', (['filename', 'rgb8'], {}), '(filename, rgb8)\n', (6240, 6256), False, 'import imageio\n'), ((6809, 6824), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6822, 6824), False, 'import torch\n'), ((6933, 6953), 'numpy.stack', 'np.stack', (['gt_imgs', '(0)'], {}), '(gt_imgs, 0)\n', (6941, 6953), True, 'import numpy as np\n'), ((6976, 7002), 'numpy.mean', 'np.mean', (['((rgbs - gts) ** 2)'], {}), '((rgbs - gts) ** 2)\n', (6983, 7002), True, 'import numpy as np\n'), ((7200, 7313), 'wandb.log', 'wandb.log', (["{f'{img_prefix}/Iter': img_suffix, f'{img_prefix}/Loss': 
val_loss,\n f'{img_prefix}/PSNR': val_psnr}"], {}), "({f'{img_prefix}/Iter': img_suffix, f'{img_prefix}/Loss': val_loss,\n f'{img_prefix}/PSNR': val_psnr})\n", (7209, 7313), False, 'import wandb\n'), ((9106, 9136), 'os.path.join', 'path_join', (['basedir', 'expname', 'f'], {}), '(basedir, expname, f)\n', (9115, 9136), True, 'from os.path import join as path_join\n'), ((11860, 11890), 'torch.randn', 'torch.randn', (['raw[..., 3].shape'], {}), '(raw[..., 3].shape)\n', (11871, 11890), False, 'import torch\n'), ((11990, 12007), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (12004, 12007), True, 'import numpy as np\n'), ((12104, 12123), 'torch.Tensor', 'torch.Tensor', (['noise'], {}), '(noise)\n', (12116, 12123), False, 'import torch\n'), ((15902, 15919), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (15916, 15919), True, 'import numpy as np\n'), ((15998, 16018), 'torch.Tensor', 'torch.Tensor', (['t_rand'], {}), '(t_rand)\n', (16010, 16018), False, 'import torch\n'), ((16720, 16754), 'torch.cat', 'torch.cat', (['[z_vals, z_samples]', '(-1)'], {}), '([z_vals, z_samples], -1)\n', (16729, 16754), False, 'import torch\n'), ((17978, 18010), 'numpy.max', 'np.max', (['args.render_poses_filter'], {}), '(args.render_poses_filter)\n', (17984, 18010), True, 'import numpy as np\n'), ((18772, 18799), 'os.path.join', 'path_join', (['basedir', 'expname'], {}), '(basedir, expname)\n', (18781, 18799), True, 'from os.path import join as path_join\n'), ((19195, 19221), 'torch.Tensor', 'torch.Tensor', (['render_poses'], {}), '(render_poses)\n', (19207, 19221), False, 'import torch\n'), ((19362, 19377), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19375, 19377), False, 'import torch\n'), ((19754, 19790), 'os.makedirs', 'makedirs', (['testsavedir'], {'exist_ok': '(True)'}), '(testsavedir, exist_ok=True)\n', (19762, 19790), False, 'from os import listdir, makedirs, mkdir\n'), ((22142, 22161), 'torch.Tensor', 'torch.Tensor', (['poses'], {}), 
'(poses)\n', (22154, 22161), False, 'import torch\n'), ((22832, 22860), 'torch.transpose', 'torch.transpose', (['batch', '(0)', '(1)'], {}), '(batch, 0, 1)\n', (22847, 22860), False, 'import torch\n'), ((23252, 23277), 'numpy.random.choice', 'np.random.choice', (['i_train'], {}), '(i_train)\n', (23268, 23277), True, 'import numpy as np\n'), ((26183, 26194), 'time.time', 'time.time', ([], {}), '()\n', (26192, 26194), False, 'import json, random, sys, time\n'), ((26504, 26525), 'tqdm.tqdm.write', 'tqdm.write', (['outstring'], {}), '(outstring)\n', (26514, 26525), False, 'from tqdm import tqdm, trange\n'), ((29287, 29323), 'os.makedirs', 'makedirs', (['testsavedir'], {'exist_ok': '(True)'}), '(testsavedir, exist_ok=True)\n', (29295, 29323), False, 'from os import listdir, makedirs, mkdir\n'), ((30067, 30082), 'os.mkdir', 'mkdir', (['filename'], {}), '(filename)\n', (30072, 30082), False, 'from os import listdir, makedirs, mkdir\n'), ((32471, 32496), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (32494, 32496), False, 'import torch\n'), ((3914, 3946), 'torch.reshape', 'torch.reshape', (['viewdirs', '[-1, 3]'], {}), '(viewdirs, [-1, 3])\n', (3927, 3946), False, 'import torch\n'), ((7029, 7047), 'numpy.log10', 'np.log10', (['val_loss'], {}), '(val_loss)\n', (7037, 7047), True, 'import numpy as np\n'), ((20101, 20131), 'numpy.expand_dims', 'np.expand_dims', (['depths'], {'axis': '(3)'}), '(depths, axis=3)\n', (20115, 20131), True, 'import numpy as np\n'), ((20220, 20260), 'os.path.join', 'path_join', (['testsavedir', '"""rbgs_video.mp4"""'], {}), "(testsavedir, 'rbgs_video.mp4')\n", (20229, 20260), True, 'from os.path import join as path_join\n'), ((20335, 20377), 'os.path.join', 'path_join', (['testsavedir', '"""depths_video.mp4"""'], {}), "(testsavedir, 'depths_video.mp4')\n", (20344, 20377), True, 'from os.path import join as path_join\n'), ((22043, 22063), 'torch.Tensor', 'torch.Tensor', (['images'], {}), '(images)\n', (22055, 22063), False, 
'import torch\n'), ((22094, 22116), 'torch.Tensor', 'torch.Tensor', (['rays_rgb'], {}), '(rays_rgb)\n', (22106, 22116), False, 'import torch\n'), ((23073, 23106), 'torch.randperm', 'torch.randperm', (['rays_rgb.shape[0]'], {}), '(rays_rgb.shape[0])\n', (23087, 23106), False, 'import torch\n'), ((24543, 24573), 'torch.reshape', 'torch.reshape', (['coords', '[-1, 2]'], {}), '(coords, [-1, 2])\n', (24556, 24573), False, 'import torch\n'), ((24617, 24680), 'numpy.random.choice', 'np.random.choice', (['coords.shape[0]'], {'size': '[N_rand]', 'replace': '(False)'}), '(coords.shape[0], size=[N_rand], replace=False)\n', (24633, 24680), True, 'import numpy as np\n'), ((24975, 25007), 'torch.stack', 'torch.stack', (['[rays_o, rays_d]', '(0)'], {}), '([rays_o, rays_d], 0)\n', (24986, 25007), False, 'import torch\n'), ((27464, 27479), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (27477, 27479), False, 'import torch\n'), ((29464, 29479), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (29477, 29479), False, 'import torch\n'), ((30100, 30115), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (30113, 30115), False, 'import torch\n'), ((5496, 5507), 'time.time', 'time.time', ([], {}), '()\n', (5505, 5507), False, 'import json, random, sys, time\n'), ((6315, 6336), 'wandb.Image', 'wandb.Image', (['filename'], {}), '(filename)\n', (6326, 6336), False, 'import wandb\n'), ((11600, 11629), 'torch.Tensor', 'torch.Tensor', (['[10000000000.0]'], {}), '([10000000000.0])\n', (11612, 11629), False, 'import torch\n'), ((20531, 20570), 'wandb.Video', 'wandb.Video', (['rgbs'], {'fps': '(30)', 'format': '"""gif"""'}), "(rgbs, fps=30, format='gif')\n", (20542, 20570), False, 'import wandb\n'), ((20618, 20659), 'wandb.Video', 'wandb.Video', (['depths'], {'fps': '(30)', 'format': '"""gif"""'}), "(depths, fps=30, format='gif')\n", (20629, 20659), False, 'import wandb\n'), ((23334, 23354), 'torch.Tensor', 'torch.Tensor', (['target'], {}), '(target)\n', (23346, 23354), False, 'import 
torch\n'), ((23492, 23510), 'torch.Tensor', 'torch.Tensor', (['pose'], {}), '(pose)\n', (23504, 23510), False, 'import torch\n'), ((28011, 28059), 'wandb.Video', 'wandb.Video', (["(moviebase + 'rgb.mp4')"], {'format': '"""gif"""'}), "(moviebase + 'rgb.mp4', format='gif')\n", (28022, 28059), False, 'import wandb\n'), ((28128, 28178), 'wandb.Video', 'wandb.Video', (["(moviebase + 'depth.mp4')"], {'format': '"""gif"""'}), "(moviebase + 'depth.mp4', format='gif')\n", (28139, 28178), False, 'import wandb\n'), ((28246, 28280), 'wandb.Video', 'wandb.Video', (["(moviebase + 'rgb.mp4')"], {}), "(moviebase + 'rgb.mp4')\n", (28257, 28280), False, 'import wandb\n'), ((28350, 28386), 'wandb.Video', 'wandb.Video', (["(moviebase + 'depth.mp4')"], {}), "(moviebase + 'depth.mp4')\n", (28361, 28386), False, 'import wandb\n'), ((28574, 28589), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (28587, 28589), False, 'import torch\n'), ((6603, 6624), 'wandb.Image', 'wandb.Image', (['filename'], {}), '(filename)\n', (6614, 6624), False, 'import wandb\n'), ((9161, 9188), 'os.path.join', 'path_join', (['basedir', 'expname'], {}), '(basedir, expname)\n', (9170, 9188), True, 'from os.path import join as path_join\n'), ((12322, 12353), 'torch.ones', 'torch.ones', (['(alpha.shape[0], 1)'], {}), '((alpha.shape[0], 1))\n', (12332, 12353), False, 'import torch\n'), ((17554, 17573), 'torch.isnan', 'torch.isnan', (['ret[k]'], {}), '(ret[k])\n', (17565, 17573), False, 'import torch\n'), ((17583, 17602), 'torch.isinf', 'torch.isinf', (['ret[k]'], {}), '(ret[k])\n', (17594, 17602), False, 'import torch\n'), ((20272, 20284), 'numpy.max', 'np.max', (['rgbs'], {}), '(rgbs)\n', (20278, 20284), True, 'import numpy as np\n'), ((20391, 20405), 'numpy.max', 'np.max', (['depths'], {}), '(depths)\n', (20397, 20405), True, 'import numpy as np\n'), ((20717, 20757), 'os.path.join', 'path_join', (['testsavedir', '"""rbgs_video.mp4"""'], {}), "(testsavedir, 'rbgs_video.mp4')\n", (20726, 20757), True, 'from 
os.path import join as path_join\n'), ((20840, 20882), 'os.path.join', 'path_join', (['testsavedir', '"""depths_video.mp4"""'], {}), "(testsavedir, 'depths_video.mp4')\n", (20849, 20882), True, 'from os.path import join as path_join\n'), ((27885, 27899), 'numpy.max', 'np.max', (['depths'], {}), '(depths)\n', (27891, 27899), True, 'import numpy as np\n'), ((28957, 29011), 'wandb.Video', 'wandb.Video', (["(moviebase + 'rgb_still.mp4')"], {'format': '"""gif"""'}), "(moviebase + 'rgb_still.mp4', format='gif')\n", (28968, 29011), False, 'import wandb\n'), ((29089, 29129), 'wandb.Video', 'wandb.Video', (["(moviebase + 'rgb_still.mp4')"], {}), "(moviebase + 'rgb_still.mp4')\n", (29100, 29129), False, 'import wandb\n'), ((29511, 29536), 'torch.Tensor', 'torch.Tensor', (['poses[inds]'], {}), '(poses[inds])\n', (29523, 29536), False, 'import torch\n'), ((23800, 23852), 'torch.linspace', 'torch.linspace', (['(H // 2 - dH)', '(H // 2 + dH - 1)', '(2 * dH)'], {}), '(H // 2 - dH, H // 2 + dH - 1, 2 * dH)\n', (23814, 23852), False, 'import torch\n'), ((23877, 23929), 'torch.linspace', 'torch.linspace', (['(W // 2 - dW)', '(W // 2 + dW - 1)', '(2 * dW)'], {}), '(W // 2 - dW, W // 2 + dW - 1, 2 * dW)\n', (23891, 23929), False, 'import torch\n'), ((24300, 24327), 'torch.linspace', 'torch.linspace', (['(0)', '(H - 1)', 'H'], {}), '(0, H - 1, H)\n', (24314, 24327), False, 'import torch\n'), ((24372, 24399), 'torch.linspace', 'torch.linspace', (['(0)', '(W - 1)', 'W'], {}), '(0, W - 1, W)\n', (24386, 24399), False, 'import torch\n')] |
#!/usr/bin/env python
import math
import mpmath
import numpy as np
from floripy.mathutils.xform import shift_tensor2_dcm, shift_tensor3_dcm
from .hydrodynamicsbase import HydrodynamicsBase
class Ellipsoids_hydrodynamics(HydrodynamicsBase):
    """Hydrodynamics of a collection of rigid ellipsoids.

    Per-body resistance sub-blocks (A, C and the third-order tensor
    H_tilde) are computed once from the ellipsoid semi-axes using Carlson
    elliptic integrals (mpmath.elliprj / mpmath.elliprf) and then assembled
    into either a grand resistance matrix ('resistance' form) or a grand
    mobility matrix ('mobility' form — not implemented yet).
    """

    def __init__(self, model, flowfield, kwargs):
        """Build the hydrodynamics operator.

        Args:
            model: body model; must expose ``num_bodies``,
                ``get_all_ellipsoid_a/b/c()`` and
                ``get_all_ellipsoid_shifters()``.
            flowfield: ambient flow field object (stored for later use).
            kwargs: dict with keys ``'viscosity'`` and ``'form'``
                (``'resistance'`` or ``'mobility'``).

        Raises:
            ValueError: if ``kwargs['form']`` is neither 'resistance'
                nor 'mobility'.
        """
        self._model = model
        self._num_bodies = self._model.num_bodies
        self._all_ellipsoid_a = model.get_all_ellipsoid_a()
        self._all_ellipsoid_b = model.get_all_ellipsoid_b()
        self._all_ellipsoid_c = model.get_all_ellipsoid_c()
        # Single-body resistance factors depend only on geometry; compute once.
        self._calc_rf()
        self._flowfield = flowfield
        self._viscosity = kwargs['viscosity']
        form = kwargs['form']
        if form == 'resistance':
            # 6 rigid-body rows and 15 columns per body — presumably the
            # ambient-field expansion components; confirm against the
            # HydrodynamicsBase contract.
            self._grm = np.zeros((6*self._num_bodies,15*self._num_bodies))
            self._calc_grm()
        elif form == 'mobility':
            self._gmm = np.zeros((6*self._num_bodies,15*self._num_bodies))
            self._calc_gmm()
        else:
            # BUG FIX: corrected the misspelled 'Unkown' in the message.
            raise ValueError('Unknown form ', form)
    def _calc_rf(self):
        """Compute the body-frame resistance factors A, C and H_tilde for
        every ellipsoid from its semi-axes (a, b, c).

        A 'zero' value for semi-axis b denotes a degenerate (flat) ellipsoid
        and is treated as b == 0 in the elliptic integrals.
        """
        self._rf = np.zeros(self._num_bodies,
                dtype=[('A','f8',(3,3)), ('C','f8',(3,3)),
                    ('H_tilde','f8',(3,3,3))])
        for i in range(self._num_bodies):
            a = self._all_ellipsoid_a[i]
            b = self._all_ellipsoid_b[i]
            c = self._all_ellipsoid_c[i]
            asq = a**2
            bsq = 0.0 if b == 'zero' else b**2
            csq = c**2
            #The integrals alpha_i (Carlson R_J form, scaled by the squared axis)
            asq_alpha_a = (2*asq/3.0)*mpmath.elliprj(asq, bsq, csq, asq)
            csq_alpha_c = (2*csq/3.0)*mpmath.elliprj(asq, bsq, csq, csq)
            if b == 'zero':
                bsq_alpha_b = 0.0
            else:
                bsq_alpha_b = (2*bsq/3.0)*mpmath.elliprj(asq, bsq, csq, bsq)
            #The integral chi (Carlson R_F form)
            chi = 2*mpmath.elliprf(asq, bsq, csq)
            sxp = 16.0*math.pi
            sxp3 = sxp/3.0
            #Submatrix A in the resistance matrix (translation; diagonal in body frame)
            self._rf[i]['A'][0,0] = sxp/(chi + asq_alpha_a)
            self._rf[i]['A'][1,1] = sxp/(chi + bsq_alpha_b)
            self._rf[i]['A'][2,2] = sxp/(chi + csq_alpha_c)
            #Submatrix C in the resistance matrix (rotation; diagonal in body frame)
            self._rf[i]['C'][0,0] = sxp3*(bsq+csq)/(bsq_alpha_b + csq_alpha_c)
            self._rf[i]['C'][1,1] = sxp3*(csq+asq)/(asq_alpha_a + csq_alpha_c)
            self._rf[i]['C'][2,2] = sxp3*(asq+bsq)/(asq_alpha_a + bsq_alpha_b)
            #Tensor (third order) H_tilde in the resistance matrix
            #(antisymmetric in its last two indices within each axis pair)
            self._rf[i]['H_tilde'][0,1,2] = sxp3*bsq/(bsq_alpha_b + csq_alpha_c)
            self._rf[i]['H_tilde'][0,2,1] = -sxp3*csq/(bsq_alpha_b + csq_alpha_c)
            self._rf[i]['H_tilde'][1,2,0] = sxp3*csq/(asq_alpha_a + csq_alpha_c)
            self._rf[i]['H_tilde'][1,0,2] = -sxp3*asq/(asq_alpha_a + csq_alpha_c)
            self._rf[i]['H_tilde'][2,0,1] = sxp3*asq/(asq_alpha_a + bsq_alpha_b)
            self._rf[i]['H_tilde'][2,1,0] = -sxp3*bsq/(asq_alpha_a + bsq_alpha_b)
    def _calc_grm(self):
        """Assemble the grand resistance matrix from the per-body factors,
        rotating each body-frame block into the global frame via the body's
        shifter (direction-cosine matrix)."""
        shifters = self._model.get_all_ellipsoid_shifters()
        for i in range(self._num_bodies):
            shifter = shifters[i]
            dcm = shifter.T
            rm = np.zeros((6,15))
            A = self._rf[i]['A']
            C = self._rf[i]['C']
            H_tilde = self._rf[i]['H_tilde']
            #Submatrix C in the resistance matrix
            rm[0:3,0:3] = shift_tensor2_dcm(C, dcm, forward=False)
            #Matrix representation of Htilde in the resistance matrix
            H_tilde = shift_tensor3_dcm(H_tilde, dcm, forward=False)
            rm[0:3,6:15] = H_tilde.reshape((3,9))
            #Submatrix A in the resistance matrix
            rm[3:6,3:6] = shift_tensor2_dcm(A, dcm, forward=False)
            self._grm[6*i:6*i+6,15*i:15*i+15] = rm
    def _calc_gmm(self):
        '''
        Returns the mobility matrix for a set of point particles.

        Not implemented yet; selecting form='mobility' will therefore fail
        at construction time.
        '''
        raise NotImplementedError
    def update(self, time):
        """Recompute whichever grand matrix this instance owns.

        Args:
            time: current simulation time (unused here; the shifters are
                re-read from the model, which is what may have changed).
        """
        if hasattr(self, '_grm'):
            self._calc_grm()
        if hasattr(self, '_gmm'):
            self._calc_gmm()
| [
"mpmath.elliprf",
"mpmath.elliprj",
"floripy.mathutils.xform.shift_tensor2_dcm",
"floripy.mathutils.xform.shift_tensor3_dcm",
"numpy.zeros"
] | [((1072, 1182), 'numpy.zeros', 'np.zeros', (['self._num_bodies'], {'dtype': "[('A', 'f8', (3, 3)), ('C', 'f8', (3, 3)), ('H_tilde', 'f8', (3, 3, 3))]"}), "(self._num_bodies, dtype=[('A', 'f8', (3, 3)), ('C', 'f8', (3, 3)),\n ('H_tilde', 'f8', (3, 3, 3))])\n", (1080, 1182), True, 'import numpy as np\n'), ((745, 800), 'numpy.zeros', 'np.zeros', (['(6 * self._num_bodies, 15 * self._num_bodies)'], {}), '((6 * self._num_bodies, 15 * self._num_bodies))\n', (753, 800), True, 'import numpy as np\n'), ((3252, 3269), 'numpy.zeros', 'np.zeros', (['(6, 15)'], {}), '((6, 15))\n', (3260, 3269), True, 'import numpy as np\n'), ((3456, 3496), 'floripy.mathutils.xform.shift_tensor2_dcm', 'shift_tensor2_dcm', (['C', 'dcm'], {'forward': '(False)'}), '(C, dcm, forward=False)\n', (3473, 3496), False, 'from floripy.mathutils.xform import shift_tensor2_dcm, shift_tensor3_dcm\n'), ((3589, 3635), 'floripy.mathutils.xform.shift_tensor3_dcm', 'shift_tensor3_dcm', (['H_tilde', 'dcm'], {'forward': '(False)'}), '(H_tilde, dcm, forward=False)\n', (3606, 3635), False, 'from floripy.mathutils.xform import shift_tensor2_dcm, shift_tensor3_dcm\n'), ((3764, 3804), 'floripy.mathutils.xform.shift_tensor2_dcm', 'shift_tensor2_dcm', (['A', 'dcm'], {'forward': '(False)'}), '(A, dcm, forward=False)\n', (3781, 3804), False, 'from floripy.mathutils.xform import shift_tensor2_dcm, shift_tensor3_dcm\n'), ((882, 937), 'numpy.zeros', 'np.zeros', (['(6 * self._num_bodies, 15 * self._num_bodies)'], {}), '((6 * self._num_bodies, 15 * self._num_bodies))\n', (890, 937), True, 'import numpy as np\n'), ((1559, 1593), 'mpmath.elliprj', 'mpmath.elliprj', (['asq', 'bsq', 'csq', 'asq'], {}), '(asq, bsq, csq, asq)\n', (1573, 1593), False, 'import mpmath\n'), ((1632, 1666), 'mpmath.elliprj', 'mpmath.elliprj', (['asq', 'bsq', 'csq', 'csq'], {}), '(asq, bsq, csq, csq)\n', (1646, 1666), False, 'import mpmath\n'), ((1876, 1905), 'mpmath.elliprf', 'mpmath.elliprf', (['asq', 'bsq', 'csq'], {}), '(asq, bsq, csq)\n', (1890, 
1905), False, 'import mpmath\n'), ((1789, 1823), 'mpmath.elliprj', 'mpmath.elliprj', (['asq', 'bsq', 'csq', 'bsq'], {}), '(asq, bsq, csq, bsq)\n', (1803, 1823), False, 'import mpmath\n')] |
import asyncio
import numpy as np
from poke_env.player.random_player import RandomPlayer
from tabulate import tabulate
from threading import Thread
from poke_env.utils import to_id_str
from poke_env.player.env_player import (
Gen8EnvSinglePlayer,
)
from poke_env.player.utils import cross_evaluate
from poke_env.teambuilder.constant_teambuilder import ConstantTeambuilder
from poke_env.player_configuration import PlayerConfiguration
# Number of battles each agent thread plays per run.
NUM_BATTLES = 1
# Indices into the per-agent lists kept in SharedInfo.
AGENT_0_ID = 0
AGENT_1_ID = 1
# Sentinel recorded when one agent had no action on a given turn.
NULL_ACTION_ID = -1
# Fixed three-Pokemon team in Showdown export format. The <NAME>
# placeholders come from upstream anonymization — presumably held items;
# verify before using this team on a real server.
team = """
Garchomp (M) @ <NAME>
Ability: Rough Skin
EVs: 248 HP / 252 SpA / 8 Spe
Adamant Nature
- Dragon Claw
- Fire Fang
- Shadow Claw
Lucario (M) @ <NAME>
Ability: Inner Focus
EVs: 248 HP / 252 SpA / 8 Spe
Adamant Nature
- Close Combat
- Earthquake
- Crunch
Tyranitar (M) @ <NAME>
Ability: Sand Stream
EVs: 248 HP / 252 SpA / 8 Spe
Adamant Nature
- Rock Slide
- Thunder Fang
- Stone Edge
"""
# return turn number in state
class RandomGen8EnvPlayer(Gen8EnvSinglePlayer):
    """Gen-8 environment player whose whole state embedding is the battle's
    turn counter."""

    def embed_battle(self, battle):
        """Return a 1-element numpy vector holding the current turn number."""
        current_turn = battle.turn
        return np.array([current_turn])
class SharedInfo():
    """Mutable state shared between the two agent threads: per-agent action
    and observation logs plus completed-battle/turn counters."""

    def __init__(self):
        self.num_agents = 2
        self.num_completed_batches = [0] * self.num_agents
        self.actions = [[], []]
        self.states = [[], []]
        self.num_completed_battles = [0, 0]
        self.num_completed_turns = [0, 0]

    def num_completed_battles_equal(self):
        """True when both agents have finished the same number of battles."""
        battles = self.num_completed_battles
        return battles[AGENT_0_ID] == battles[AGENT_1_ID]

    def num_completed_turns_equal(self):
        """True when both agents have completed the same number of turns."""
        turns = self.num_completed_turns
        return turns[AGENT_0_ID] == turns[AGENT_1_ID]

    def reset(self):
        """Clear the action logs and turn counters (states and battle
        counters are intentionally left untouched)."""
        self.actions = [[], []]
        self.num_completed_turns = [0, 0]
def get_turn(action):
    """Extract the turn number from an (action_id, turn) record."""
    turn_field = 1
    return action[turn_field]
def action_length_balancer(action_0, action_1):
    """Merge two per-agent action lists into turn-aligned lists of equal
    length.

    Both inputs are lists of (action_id, turn) pairs ordered by turn.
    Whenever one agent has no entry for a turn the other agent acted on, a
    (NULL_ACTION_ID, turn) placeholder is inserted on that agent's side, so
    the two returned lists line up index-by-index.
    """
    balanced_0 = []
    balanced_1 = []
    i, j = 0, 0
    len_0, len_1 = len(action_0), len(action_1)
    while i < len_0 or j < len_1:
        turn_0 = get_turn(action_0[i]) if i < len_0 else None
        turn_1 = get_turn(action_1[j]) if j < len_1 else None
        # An agent's real entry is consumed when the other side is exhausted
        # or its own turn number is not behind the other side's.
        take_0 = turn_1 is None or (turn_0 is not None and turn_0 <= turn_1)
        take_1 = turn_0 is None or (turn_1 is not None and turn_1 <= turn_0)
        if take_0:
            balanced_0.append(action_0[i])
            i += 1
        else:
            # Agent 0 skipped this turn: pad with a null action.
            balanced_0.append((NULL_ACTION_ID, turn_1))
        if take_1:
            balanced_1.append(action_1[j])
            j += 1
        else:
            # Agent 1 skipped this turn: pad with a null action.
            balanced_1.append((NULL_ACTION_ID, turn_0))
    return balanced_0, balanced_1
def env_algorithm(env, n_battles, id, shared_info):
    """Play n_battles in `env`, always choosing action 0, logging each
    agent's observations and actions into the shared SharedInfo object.

    Args:
        env: gym-like poke-env player environment (reset/step API).
        n_battles: number of battles to play.
        id: this agent's index (AGENT_0_ID or AGENT_1_ID). NOTE: shadows
            the `id` builtin.
        shared_info: SharedInfo instance shared by both agent threads.
    """
    for b in range(n_battles):
        done = False
        observation = env.reset()
        while not done:
            # Fixed policy: always pick action 0 (scripted baseline).
            action = 0
            print(observation)
            other_id = AGENT_1_ID if id == AGENT_0_ID else AGENT_0_ID
            observation, reward, done, _ = env.step(action)
            shared_info.states[id].append(observation)
            # Keep the two action logs roughly aligned: if this agent has
            # pulled 2+ actions ahead, pad the other agent's log with a null.
            if len(shared_info.actions[id]) > len(shared_info.actions[other_id]) + 1:
                shared_info.actions[other_id].append(NULL_ACTION_ID)
            shared_info.actions[id].append(action)
            shared_info.num_completed_turns[id] += 1
        # NOTE(review): num_completed_battles is never incremented in this
        # function, so this check is always 0 == 0 here — confirm whether the
        # counters were meant to be updated after each battle.
        if shared_info.num_completed_battles_equal():
            print(f'Battle #{b}')
            print(f'Number of actions Agent 0 took: {len(shared_info.actions[AGENT_0_ID])}')
            print(f'Number of actions Agent 1 took: {len(shared_info.actions[AGENT_1_ID])}')
            print(f'Number of unique actions: {len(set(shared_info.actions[AGENT_0_ID]))}')
            for i in range(len(shared_info.actions)):
                print(f'{i}: {shared_info.actions[i]}')
            shared_info.reset()
def env_algorithm_wrapper(player, num_battles, id, shared_info):
    """Run env_algorithm for this agent, then signal completion and drain
    any remaining battles until the player's connection is gone (OSError)."""
    env_algorithm(player, num_battles, id, shared_info)
    # Tells the main thread's pairing loop to stop launching new battles.
    player._start_new_battle = False
    keep_draining = True
    while keep_draining:
        try:
            player.complete_current_battle()
            player.reset()
        except OSError:
            keep_draining = False
async def launch_battles(player, opponent):
    """Pair `player` and `opponent` into one battle: `player` sends a single
    challenge while `opponent` concurrently accepts it."""
    battles_coroutine = asyncio.gather(
        player.send_challenges(
            opponent=to_id_str(opponent.username),
            n_challenges=1,
            # Hold the challenge until the opponent has logged in.
            to_wait=opponent.logged_in,
        ),
        opponent.accept_challenges(opponent=to_id_str(player.username), n_challenges=1),
    )
    await battles_coroutine
# Build both players from the same fixed team so battles are symmetric.
teambuilder = ConstantTeambuilder(team)
p1 = RandomGen8EnvPlayer(battle_format="gen8ou", log_level=40, team=teambuilder)
p2 = RandomGen8EnvPlayer(battle_format="gen8ou", log_level=40, team=teambuilder)
# Polled by the pairing loop below; cleared by env_algorithm_wrapper when
# an agent's env_algorithm finishes.
p1._start_new_battle = True
p2._start_new_battle = True
loop = asyncio.get_event_loop()
# NOTE(review): env_algorithm_kwargs is defined but appears unused in this
# script — confirm it can be removed.
env_algorithm_kwargs = {"n_battles": 1}
shared_info = SharedInfo()
# One worker thread per agent; both share the same SharedInfo instance.
t1 = Thread(target=lambda: env_algorithm_wrapper(p1, NUM_BATTLES, AGENT_0_ID, shared_info))
t1.start()
t2 = Thread(target=lambda: env_algorithm_wrapper(p2, NUM_BATTLES, AGENT_1_ID, shared_info))
t2.start()
# Keep pairing the two players into battles until agent 0 signals it is done.
while p1._start_new_battle:
    loop.run_until_complete(launch_battles(p1, p2))
t1.join()
t2.join() | [
"numpy.array",
"poke_env.teambuilder.constant_teambuilder.ConstantTeambuilder",
"asyncio.get_event_loop",
"poke_env.utils.to_id_str"
] | [((5016, 5041), 'poke_env.teambuilder.constant_teambuilder.ConstantTeambuilder', 'ConstantTeambuilder', (['team'], {}), '(team)\n', (5035, 5041), False, 'from poke_env.teambuilder.constant_teambuilder import ConstantTeambuilder\n'), ((5270, 5294), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (5292, 5294), False, 'import asyncio\n'), ((1087, 1110), 'numpy.array', 'np.array', (['[battle.turn]'], {}), '([battle.turn])\n', (1095, 1110), True, 'import numpy as np\n'), ((4769, 4797), 'poke_env.utils.to_id_str', 'to_id_str', (['opponent.username'], {}), '(opponent.username)\n', (4778, 4797), False, 'from poke_env.utils import to_id_str\n'), ((4922, 4948), 'poke_env.utils.to_id_str', 'to_id_str', (['player.username'], {}), '(player.username)\n', (4931, 4948), False, 'from poke_env.utils import to_id_str\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext as _build_ext
# cython compile
try:
    from Cython.Build import cythonize
except ImportError:
    # Cython may not be installed when setup.py is first imported; defer the
    # real import until the stand-in is actually called at build time, by
    # which point the build dependencies are available.
    def cythonize(*args, **kwargs):
        """Lazy stand-in for Cython.Build.cythonize."""
        from Cython.Build import cythonize
        return cythonize(*args, **kwargs)
class CustomBuildExt(_build_ext):
    """build_ext subclass that adds numpy's C header directory to the
    include path, importing numpy only at build time."""
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # Prevent numpy from thinking it is still in its setup process:
        __builtins__.__NUMPY_SETUP__ = False
        import numpy
        self.include_dirs.append(numpy.get_include())
# The Cython extension is compiled as C++11.
compile_extra_args = ["-std=c++11"]
link_extra_args = []
extensions = [
    Extension(
        "gammagl.sample",
        sources=[os.path.join("gammagl", "sample.pyx")],
        language="c++",
        extra_compile_args=compile_extra_args,
        extra_link_args=link_extra_args, ),
]
install_requires = ['numpy', 'scipy', 'pytest', 'cython', 'tensorflow', 'tensorlayerx']
classifiers = [
    'Development Status :: 3 - Alpha',
    'License :: OSI Approved :: Apache Software License',
]
setup(
    name="gammagl",
    version="0.0.1",
    author="BUPT-GAMMA LAB",
    author_email="<EMAIL>",
    maintainer="<NAME>",
    license="Apache-2.0 License",
    # CustomBuildExt injects numpy's headers when compiling the extension.
    cmdclass={'build_ext': CustomBuildExt},
    ext_modules=extensions,
    description=" ",
    url="https://github.com/BUPT-GAMMA/GammaGL",
    download_url="https://github.com/BUPT-GAMMA/GammaGL",
    python_requires='>=3.7',
    packages=find_packages(),
    install_requires=install_requires,
    include_package_data=True,
    classifiers=classifiers
)
# python setup.py build_ext --inplace
# python setup.py install
| [
"Cython.Build.cythonize",
"setuptools.command.build_ext.build_ext.finalize_options",
"numpy.get_include",
"os.path.join",
"setuptools.find_packages"
] | [((506, 539), 'setuptools.command.build_ext.build_ext.finalize_options', '_build_ext.finalize_options', (['self'], {}), '(self)\n', (533, 539), True, 'from setuptools.command.build_ext import build_ext as _build_ext\n'), ((1638, 1653), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1651, 1653), False, 'from setuptools import setup, find_packages, Extension\n'), ((377, 403), 'Cython.Build.cythonize', 'cythonize', (['*args'], {}), '(*args, **kwargs)\n', (386, 403), False, 'from Cython.Build import cythonize\n'), ((711, 730), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (728, 730), False, 'import numpy\n'), ((864, 901), 'os.path.join', 'os.path.join', (['"""gammagl"""', '"""sample.pyx"""'], {}), "('gammagl', 'sample.pyx')\n", (876, 901), False, 'import os\n')] |
""" Test functionality to analyse bias triangles."""
import numpy as np
import unittest
from qtt.algorithms.bias_triangles import lever_arm
class TestBiasTriangles(unittest.TestCase):
    """Unit tests for qtt.algorithms.bias_triangles.lever_arm."""

    def test_lever_arm(self):
        clicked = np.array([[24., 38., 40.], [135., 128., 111.]])
        intersection = np.array([[40.4], [127.]])
        fit_data = {
            'clicked_points': clicked,
            'distance': 15.0,
            'intersection_point': intersection,
        }
        computed = lever_arm(-800, fit_data)
        self.assertAlmostEqual(computed, 53.3, 1)
| [
"qtt.algorithms.bias_triangles.lever_arm",
"numpy.array"
] | [((450, 480), 'qtt.algorithms.bias_triangles.lever_arm', 'lever_arm', (['(-800)', 'lever_arm_fit'], {}), '(-800, lever_arm_fit)\n', (459, 480), False, 'from qtt.algorithms.bias_triangles import lever_arm\n'), ((274, 327), 'numpy.array', 'np.array', (['[[24.0, 38.0, 40.0], [135.0, 128.0, 111.0]]'], {}), '([[24.0, 38.0, 40.0], [135.0, 128.0, 111.0]])\n', (282, 327), True, 'import numpy as np\n'), ((387, 414), 'numpy.array', 'np.array', (['[[40.4], [127.0]]'], {}), '([[40.4], [127.0]])\n', (395, 414), True, 'import numpy as np\n')] |
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numpy import genfromtxt
from prediction.GAN_Regression import *
from prediction.LR_XR_Regression import *
import argparse
# Command-line configuration for the quantitative-evaluation experiment.
parser = argparse.ArgumentParser(description='Quantitative Evaluation')
parser.add_argument('--input', type=str, help='ibm', required=True)
parser.add_argument('--export_plot', type=int, help='to export plot', default=0, required=False)
parser.add_argument('--op_classifier', type=int, default= 2, help='0: DDM(Data-driven model), 1: EI, 2: DDM_EI (GAN), 3: LR_XR, '
                    '4: DDM_EI (linear), 5: DDM_EI (functional alpha, GAN), 6: DDM_EI (functional alpha, linear)', required=False)
parser.add_argument('--sigmoid_coeff', type=float, default= 8., help='coefficient for sigmoid', required=False)
parser.add_argument('--prior_weight', type=float, default= 0.5, help='the weight of prior if we use maual alpha (op_classifier=2, 4)', required=False)
parser.add_argument('--confusion', type=float, default= 0.5, help='difference for judging confusion if we use maual alpha (op_classifier=2, 4)', required=False)
parser.add_argument('--num-steps', type=int, default=1000,
                    help='the number of training steps to take')
parser.add_argument('--hidden-size', type=int, default=16,
                    help='MLP hidden size')
# NOTE(review): '--batch-size' here and '--batch_size' below both map to
# args.batch_size (argparse turns dashes into underscores) — confirm which
# default (128 vs 939) is intended to win.
parser.add_argument('--batch-size', type=int, default=128,
                    help='the batch size')
parser.add_argument('--log-every', type=int, default=10,
                    help='print loss after this many steps')
parser.add_argument('--dc_weight', type=float, default=1.1,
                    help='weight in discriminator')
parser.add_argument('--op_valid', type=int, default=1,
                    help='1: from major cluster, 2: from minor cluster, 3: from major + minor clusters')
parser.add_argument('--debug', type=int, default=0,
                    help='0: false, 1: true')
parser.add_argument('--outlier_ratio', type=float, default=0.25,
                    help='0: from major cluster, 1: from minor cluster')
parser.add_argument('--show_trend', type=int, default=0,
                    help='0: no plotting, 1: plotting')
# FOR LR_XR
parser.add_argument('--batch_size', type=int, default= 939, help='batch size for training', required=False)
parser.add_argument('--learning_rate', type=float, default= 0.1, help='learning rate for optimization', required=False)
parser.add_argument('--reg_rate', type=float, default= 100, help='learning rate for optimization', required=False)
parser.add_argument('--training_epochs', type=int, default= 600, help='max_training_size', required=False)
args = parser.parse_args()
print(args)
# get feature
if args.input == "CREDIT":
    # Layout assumed: feature columns, then two trailing columns of which
    # the last is the label; column 38 is the intuition/prior feature
    # (guarantor) and is removed from the feature matrix.
    dataset = genfromtxt('data/Input_credit.csv', delimiter=',')
    features = dataset[:, :-2] # we will use 38 (guarantor)
    all_feature = np.hstack([features[:, :38], features[:, 39:]])
    df_label = dataset[:, -1]
    all_label = np.array(df_label, dtype=int)
    all_intuition = features[:, 38]
    # Keep the intuition feature as a column vector (m, 1).
    all_intuition = np.reshape(all_intuition, (len(all_intuition), 1))
    n_classes = len(set(list(all_label)))
else:
    print ("# dataname error!")
    exit()
# For Output
str_output = ""
l_result = []
# To have data for Intuition: split samples into a major (inlier) and a
# minor (outlier) cluster via isolation forest.
id_major, id_minor = split_data_byClustering(all_feature, ratio=args.outlier_ratio, option=1) # using isolation forest
major_data = all_feature[id_major]
minor_data = all_feature[id_minor]
# random validation
# Repeat the evaluation over 10 random 80/20 train/test splits.
for i in range(10):
    ids_train, ids_test = split_data(all_feature.shape[0], seed=i, ratio=0.8)
    # set0: train from major, set1, test from minor
    # set2, set3 = split_data(id_minor.shape[0], seed=i, ratio=0.8) # set2: train from major, set3, test from minor
    # set2 = id_minor
    if args.show_trend:
        # Trend mode: pick ONE test point, replicate it 100 times, and sweep
        # the price/intuition value across the training range.
        num_repeats = 100
        train_feature = all_feature[ids_train]
        # randomly pick a data point in test data
        test_idx = np.random.randint(len(ids_test), size=1)
        #print(test_idx)
        # create data points using same feature values
        test_feature = np.repeat(all_feature[ids_test[test_idx]], num_repeats, axis=0)
        train_label = all_label[ids_train]
        # create repeated/same test label corresponding to the 'repeated/same' features
        test_label = np.repeat(all_label[ids_test[test_idx]], num_repeats, axis=0)
        train_price = all_intuition[ids_train]
        # how the test prices were generated:
        # using max and min as start and end, evenly spaced to yield num of data points needed (100 in this experiment)
        test_price = np.linspace(train_price.min(axis=0), train_price.max(axis=0), num=num_repeats)
    else:
        # Normal mode: plain train/test splits of features, labels and prices.
        train_feature = all_feature[ids_train]
        test_feature = all_feature[ids_test]
        train_label = all_label[ids_train]
        test_label = all_label[ids_test]
        train_price = all_intuition[ids_train]
        test_price = all_intuition[ids_test]
    if args.op_classifier == 0: # LR with no intuition
        result, _, probs_0 = GAN_WinPrediction_withOutliers(np.array([]),
                              train_feature, train_label, train_price,
                              test_feature, test_label, test_price,
                              weight=args.prior_weight,
                              op_prior=0, op_plot=args.export_plot,
                              op_diff=args.confusion,
                              op_valid=args.op_valid, op_classifier=args.op_classifier,
                              debug=args.debug)
    elif args.op_classifier == 1: # Intuition Only
        result, _, probs_1 = GAN_WinPrediction_withOutliers(np.array([]),
                              train_feature, train_label, train_price,
                              test_feature, test_label, test_price,
                              weight=args.prior_weight,
                              op_prior=3, op_plot=args.export_plot,
                              op_diff=args.confusion,
                              op_valid=args.op_valid, op_classifier=args.op_classifier,
                              debug=args.debug)
    elif args.op_classifier == 2: # GAN_REG_MANUAL_WEIGHTS (ALPHA, DIFF) another GAN that is not as good as 5
        # GAN(s, x) ->s
        test_GAN_price = GANRegression(args, np.concatenate((train_price, train_feature), -1),
                                       np.concatenate((test_price, test_feature), -1),
                                       pricedim=1, debug=args.debug)
        result, _, probs_2 = GAN_WinPrediction_withOutliers(test_GAN_price,
                              train_feature, train_label, train_price,
                              test_feature, test_label, test_price,
                              weight=args.prior_weight,
                              op_prior=1, op_plot=args.export_plot, op_diff = args.confusion,
                              op_valid = args.op_valid, op_classifier= args.op_classifier, debug = args.debug)
    elif args.op_classifier == 3: # LR_XR (Linear Regression with Expectation Regularization)
        result, _, probs_3 = LR_XR_WinPrediction_withOutliers(args,
                              train_feature, train_label, train_price,
                              test_feature, test_label, test_price,
                              class_dim=n_classes,
                              op_plot=args.export_plot, debug=args.debug)
    elif args.op_classifier == 4: # Linear Regression for price detection
        reg_model = PriceRegression(train_feature, train_price) # G(x) ->s*
        test_LR_price = reg_model.predict(test_feature)
        test_LR_price = np.reshape(np.array(test_LR_price), (len(test_LR_price), 1))
        result, _, probs_4 = GAN_WinPrediction_withOutliers(test_LR_price,
                              train_feature, train_label, train_price,
                              test_feature, test_label, test_price,
                              weight=args.prior_weight,
                              op_prior=1, op_plot=args.export_plot,
                              op_diff=args.confusion,
                              op_valid=args.op_valid, op_classifier=args.op_classifier,
                              debug=args.debug)
    elif args.op_classifier == 5: # GAN_REG_FUNCTIONAL_WEIGHTS / Our Method (GAN)
        # GAN(s, x) ->s*
        test_GAN_price = GANRegression(args, np.concatenate((train_price, train_feature), -1),
                                       np.concatenate((test_price, test_feature), -1),
                                       pricedim=1, debug=args.debug)
        result, _, probs_5 = GAN_WinPrediction_difffunc_withOutliers(test_GAN_price,
                              train_feature, train_label,
                              train_price,
                              test_feature, test_label, test_price,
                              op_coeff=args.sigmoid_coeff,
                              op_plot=args.export_plot,
                              op_valid=args.op_valid, debug=args.debug)
    elif args.op_classifier == 6: # Linear_REG_MANUAL_WEIGHTS (ALPHA, DIFF) / Our Method (Linear Reg)
        reg_model = PriceRegression(train_feature, train_price) # G(x) ->s*
        test_LR_price = reg_model.predict(test_feature)
        test_LR_price = np.reshape(np.array(test_LR_price), (len(test_LR_price), 1))
        result, _, probs_6 = GAN_WinPrediction_difffunc_withOutliers(test_LR_price,
                              train_feature, train_label, train_price,
                              test_feature, test_label, test_price,
                              op_coeff=args.sigmoid_coeff,
                              op_plot=args.export_plot,
                              op_valid=args.op_valid, debug=args.debug)
    # Persist per-sample probabilities for the classifiers compared later
    # (assumes a 'probs/' directory already exists).
    if args.op_classifier == 0:
        with open('probs/probs_0_{}.txt'.format(i), 'w') as f:
            f.write('\n'.join(map(str, probs_0)))
    elif args.op_classifier == 5:
        with open('probs/probs_5_{}_{}.txt'.format(int(args.sigmoid_coeff), i), 'w') as f:
            f.write('\n'.join(map(str, probs_5)))
    elif args.op_classifier == 2:
        with open('probs/probs_2_{}.txt'.format(i), 'w') as f:
            f.write('\n'.join(map(str, probs_2)))
    l_result.append(result)
# printing results
# Format one tab-separated summary row: classifier id, outlier ratio,
# alpha-related settings, validation option, then mean/std/raw accuracies.
if args.op_classifier == 5 or args.op_classifier == 6:
    str_output += str(args.op_classifier) + "\t" + str(args.outlier_ratio) + "\t" + "\t" + str(args.sigmoid_coeff) + "\t" +\
                  str(args.confusion) + "\t" + str(args.sigmoid_coeff) + "\t" + str(args.op_valid) + "\t" + \
                  str(np.mean(l_result)) + "\t" + str(np.std(l_result)) + "\t" + str(l_result) + "\n"
else:
    str_output += str(args.op_classifier) + "\t" + str(args.outlier_ratio) + "\t" + "\t" + str(args.prior_weight) + \
                  "\t" + str(args.confusion) + "\t" + str(args.prior_weight) + "\t" + str(args.op_valid) + "\t" + \
                  str(np.mean(l_result)) + "\t" + str(np.std(l_result)) + "\t" + str(l_result) + "\n"
print("###############################################################")
print('##op_classifier\toutlier_ratio\tprior_weight\top_diff\tweight\top_valid\taccuracy\tstd\tall_results')
print(str_output)
| [
"argparse.ArgumentParser",
"numpy.std",
"numpy.genfromtxt",
"numpy.hstack",
"numpy.mean",
"numpy.array",
"numpy.concatenate",
"numpy.repeat"
] | [((733, 795), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Quantitative Evaluation"""'}), "(description='Quantitative Evaluation')\n", (756, 795), False, 'import argparse\n'), ((3278, 3328), 'numpy.genfromtxt', 'genfromtxt', (['"""data/Input_credit.csv"""'], {'delimiter': '""","""'}), "('data/Input_credit.csv', delimiter=',')\n", (3288, 3328), False, 'from numpy import genfromtxt\n'), ((3408, 3455), 'numpy.hstack', 'np.hstack', (['[features[:, :38], features[:, 39:]]'], {}), '([features[:, :38], features[:, 39:]])\n', (3417, 3455), True, 'import numpy as np\n'), ((3503, 3532), 'numpy.array', 'np.array', (['df_label'], {'dtype': 'int'}), '(df_label, dtype=int)\n', (3511, 3532), True, 'import numpy as np\n'), ((4618, 4681), 'numpy.repeat', 'np.repeat', (['all_feature[ids_test[test_idx]]', 'num_repeats'], {'axis': '(0)'}), '(all_feature[ids_test[test_idx]], num_repeats, axis=0)\n', (4627, 4681), True, 'import numpy as np\n'), ((4835, 4896), 'numpy.repeat', 'np.repeat', (['all_label[ids_test[test_idx]]', 'num_repeats'], {'axis': '(0)'}), '(all_label[ids_test[test_idx]], num_repeats, axis=0)\n', (4844, 4896), True, 'import numpy as np\n'), ((5609, 5621), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5617, 5621), True, 'import numpy as np\n'), ((6316, 6328), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6324, 6328), True, 'import numpy as np\n'), ((7089, 7137), 'numpy.concatenate', 'np.concatenate', (['(train_price, train_feature)', '(-1)'], {}), '((train_price, train_feature), -1)\n', (7103, 7137), True, 'import numpy as np\n'), ((7178, 7224), 'numpy.concatenate', 'np.concatenate', (['(test_price, test_feature)', '(-1)'], {}), '((test_price, test_feature), -1)\n', (7192, 7224), True, 'import numpy as np\n'), ((11877, 11893), 'numpy.std', 'np.std', (['l_result'], {}), '(l_result)\n', (11883, 11893), True, 'import numpy as np\n'), ((12212, 12228), 'numpy.std', 'np.std', (['l_result'], {}), '(l_result)\n', (12218, 12228), 
True, 'import numpy as np\n'), ((8478, 8501), 'numpy.array', 'np.array', (['test_LR_price'], {}), '(test_LR_price)\n', (8486, 8501), True, 'import numpy as np\n'), ((9336, 9384), 'numpy.concatenate', 'np.concatenate', (['(train_price, train_feature)', '(-1)'], {}), '((train_price, train_feature), -1)\n', (9350, 9384), True, 'import numpy as np\n'), ((9425, 9471), 'numpy.concatenate', 'np.concatenate', (['(test_price, test_feature)', '(-1)'], {}), '((test_price, test_feature), -1)\n', (9439, 9471), True, 'import numpy as np\n'), ((11845, 11862), 'numpy.mean', 'np.mean', (['l_result'], {}), '(l_result)\n', (11852, 11862), True, 'import numpy as np\n'), ((12180, 12197), 'numpy.mean', 'np.mean', (['l_result'], {}), '(l_result)\n', (12187, 12197), True, 'import numpy as np\n'), ((10418, 10441), 'numpy.array', 'np.array', (['test_LR_price'], {}), '(test_LR_price)\n', (10426, 10441), True, 'import numpy as np\n')] |
# **************************************************************************** #
# #
# ::: :::::::: #
# linear_mse.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: ciglesia <<EMAIL>> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2020/11/23 13:49:26 by ciglesia #+# #+# #
# Updated: 2020/11/23 14:15:25 by ciglesia ### ########.fr #
# #
# **************************************************************************** #
import numpy as np
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent.absolute()))
import ex04.dot as d
import ex00.sum as s
def linear_mse(x, y, theta):
    """
    Computes the mean squared error of three non-empty numpy.ndarray,
    using a for-loop. The three arrays must have compatible dimensions.
    Args:
        y: has to be an numpy.ndarray, a vector of dimension m * 1.
        x: has to be an numpy.ndarray, a matrix of dimension m * n.
        theta: has to be an numpy.ndarray, a vector of dimension n * 1.
    Returns:
        The mean squared error as a float.
        None if y, x, or theta are empty numpy.ndarray.
        None if y, x or theta does not share compatibles dimensions.
    Raises:
        This function should not raise any Exception.
    """
    # Reject empty inputs.
    if (not s.elements(x) or not s.elements(y) or not s.elements(theta)):
        return (None)
    # Every row of x must have the same length as theta.
    if (len(list(filter(lambda l: len(l) == len(theta), x))) != len(x)):
        return (None)
    # BUG FIX: the original summed over the module-level globals X and Y
    # instead of the parameters x and y, so the function only worked by
    # accident when called from this file's __main__ section.
    ss = s.sum_(np.array([(d.dot(theta, i) - j)**2 for i, j in zip(x, y)]), lambda l: l)
    if (ss != None):
        return (ss/len(y))
    return (0)
if __name__ == "__main__":
X = np.array([0, 15, -9, 7, 12, 3, -21])
X = np.array([
[ -6, -7, -9],
[ 13, -2, 14],
[ -7, 14, -1],
[ -8, -4, 6],
[ -5, -9, 6],
[ 1, -5, 11],
[ 9, -11, 8]])
Y = np.array([2, 14, -13, 5, 12, 4, -19])
Z = np.array([3,0.5,-6])
print(linear_mse(X, Y, Z))
W = np.array([0,0,0])
print(linear_mse(X, Y, W))
| [
"pathlib.Path",
"numpy.array",
"ex00.sum.elements",
"ex04.dot.dot"
] | [((2040, 2076), 'numpy.array', 'np.array', (['[0, 15, -9, 7, 12, 3, -21]'], {}), '([0, 15, -9, 7, 12, 3, -21])\n', (2048, 2076), True, 'import numpy as np\n'), ((2085, 2194), 'numpy.array', 'np.array', (['[[-6, -7, -9], [13, -2, 14], [-7, 14, -1], [-8, -4, 6], [-5, -9, 6], [1, -5,\n 11], [9, -11, 8]]'], {}), '([[-6, -7, -9], [13, -2, 14], [-7, 14, -1], [-8, -4, 6], [-5, -9, 6\n ], [1, -5, 11], [9, -11, 8]])\n', (2093, 2194), True, 'import numpy as np\n'), ((2262, 2299), 'numpy.array', 'np.array', (['[2, 14, -13, 5, 12, 4, -19]'], {}), '([2, 14, -13, 5, 12, 4, -19])\n', (2270, 2299), True, 'import numpy as np\n'), ((2308, 2330), 'numpy.array', 'np.array', (['[3, 0.5, -6]'], {}), '([3, 0.5, -6])\n', (2316, 2330), True, 'import numpy as np\n'), ((2368, 2387), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (2376, 2387), True, 'import numpy as np\n'), ((1672, 1685), 'ex00.sum.elements', 's.elements', (['x'], {}), '(x)\n', (1682, 1685), True, 'import ex00.sum as s\n'), ((1693, 1706), 'ex00.sum.elements', 's.elements', (['y'], {}), '(y)\n', (1703, 1706), True, 'import ex00.sum as s\n'), ((1714, 1731), 'ex00.sum.elements', 's.elements', (['theta'], {}), '(theta)\n', (1724, 1731), True, 'import ex00.sum as s\n'), ((952, 966), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (956, 966), False, 'from pathlib import Path\n'), ((1878, 1893), 'ex04.dot.dot', 'd.dot', (['theta', 'i'], {}), '(theta, i)\n', (1883, 1893), True, 'import ex04.dot as d\n')] |
#!/usr/bin/env python3
"""
Author : <NAME>, <NAME>, <NAME>, <NAME>
Note : Parts of this code was initially developed by the AgPipeline and TERRA-REF teams.
Date : 2020-07-09
Purpose: Convert FLIR .bin files to .tif (Season 11)
"""
import argparse
import os
import sys
import logging
import json
import numpy as np
import glob
from terrautils.spatial import scanalyzer_to_utm, scanalyzer_to_latlon, geojson_to_tuples
from terrautils.formats import create_geotiff
import matplotlib.pyplot as plt
from osgeo import gdal, osr
# --------------------------------------------------
def get_args():
    """Parse and return command-line arguments.

    Returns:
        argparse.Namespace with:
            bin      -- path of the FLIR .bin file to convert
            metadata -- path of the cleaned metadata JSON file
            zoffset  -- Z-axis offset in meters (float, default 0.76)
            outdir   -- output directory (a '/' is appended if absent)
    """
    parser = argparse.ArgumentParser(
        description='Season 11 flir2tif',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('bin',
                        metavar='str',
                        help='Bin file to be converted to TIF')

    parser.add_argument('-m',
                        '--metadata',
                        help='Cleaned metadata file',
                        metavar='metadata',
                        type=str,
                        required=True)
                        #default='cleanmetadata_out')

    parser.add_argument('-z',
                        '--zoffset',
                        help='Z-axis offset',
                        metavar='z-offset',
                        # BUG FIX: the default (0.76) is fractional, but the
                        # original declared type=int, so any fractional value
                        # passed on the command line was rejected by argparse.
                        type=float,
                        default=0.76)#0.76

    parser.add_argument('-o',
                        '--outdir',
                        help='Output directory where .tif files will be saved',
                        metavar='str',
                        type=str,
                        default='flir2tif_out')

    args = parser.parse_args()

    # Normalize outdir so later path concatenation works.
    if '/' not in args.outdir:
        args.outdir = args.outdir + '/'

    return args
# --------------------------------------------------
def get_boundingbox(metadata, z_offset):
    """Compute the lat/lon bounding box of a FLIR frame from gantry metadata.

    Args:
        metadata -- path to the cleaned metadata JSON file.
        z_offset -- extra height (meters) added to the camera Z position.
    Returns:
        (b_box, img_height, img_width) where b_box is
        (lat_min, lat_max, lon_min, lon_max).
    """
    with open(metadata) as fh:
        meta = json.load(fh)['lemnatec_measurement_metadata']

    fixed = meta['sensor_fixed_metadata']
    gantry = meta['gantry_system_variable_metadata']

    # Camera position = gantry position plus the in-camera-box offsets.
    loc_gantry_x = float(fixed['location in camera box x [m]'])
    loc_gantry_y = float(fixed['location in camera box y [m]'])
    loc_gantry_z = float(fixed['location in camera box z [m]'])
    gantry_x = float(gantry['position x [m]']) + loc_gantry_x
    gantry_y = float(gantry['position y [m]']) + loc_gantry_y
    gantry_z = float(gantry['position z [m]']) + loc_gantry_z + z_offset  # offset in m

    fov_x = float(fixed['field of view x [m]'])
    fov_y = float(fixed['field of view y [m]'])
    img_height, img_width = 640, 480

    # Ground footprint extents from the field of view and camera height.
    height = gantry_z
    half_angle_x = np.arctan((0.5 * float(fov_x)) / 2)
    half_angle_y = np.arctan((0.5 * float(fov_y)) / 2)
    extent_x = 2 * height * np.tan(half_angle_x)
    extent_y = 2 * height * np.tan(half_angle_y)

    north = gantry_x + (extent_x / 2)
    south = gantry_x - (extent_x / 2)
    west = gantry_y + (extent_y / 2)
    east = gantry_y - (extent_y / 2)

    nw_latlon = scanalyzer_to_latlon(north, west)
    se_latlon = scanalyzer_to_latlon(south, east)

    # Empirical shifts: TERRA-REF (longitude) and drone survey (latitude).
    lon_shift = 0.000020308287
    lat_shift = 0.000018292  # 0.000015258894

    b_box = (se_latlon[0] - lat_shift,
             nw_latlon[0] - lat_shift,
             nw_latlon[1] + lon_shift,
             se_latlon[1] + lon_shift)

    return b_box, img_height, img_width
# --------------------------------------------------
def flirRawToTemperature(rawData, calibP):
    """Convert raw FLIR sensor counts into calibrated per-pixel values.

    A cubic polynomial a*x^3 + b*x^2 + c*x + d maps raw counts to
    temperature. Its four coefficients were fitted at eight reference
    shutter temperatures and are linearly interpolated to the actual
    shutter temperature found in the metadata.

    Args:
        rawData -- numpy array of raw sensor counts.
        calibP -- metadata dict providing
            ['sensor_variable_metadata']['shutter temperature [K]'].
    Returns:
        numpy array with the same shape as rawData.
    """
    shutter_kelvin = calibP['sensor_variable_metadata']['shutter temperature [K]']
    sensor_temp_c = float(shutter_kelvin) - 273.15

    # One [a, b, c, d] coefficient row per reference temperature in ref_temps.
    ref_temps = [5, 15, 20, 25, 30, 35, 40, 45]
    coeff_rows = [
        [1.137440642331793e-11, -7.151963918140453e-07, 2.040023288027391e-02, -1.480567234537099e+02],
        [1.081311914979629e-11, -7.016010881023338e-07, 2.054630019627413e-02, -1.521561215301546e+02],
        [7.884866004076222e-12, -5.627752964123624e-07, 1.841833557270094e-02, -1.424489740528044e+02],
        [9.583147873422692e-12, -6.411047671547955e-07, 1.957403307722059e-02, -1.488744387542483e+02],
        [7.731929583673130e-12, -5.450000399690083e-07, 1.788280850465480e-02, -1.397155089900219e+02],
        [9.979352154351443e-12, -6.638673059086900e-07, 2.015587753410061e-02, -1.556220395053390e+02],
        [1.113388420010232e-11, -7.376131006851630e-07, 2.162806444290634e-02, -1.657425341330783e+02],
        [8.689237696307418e-12, -6.008401296566917e-07, 1.914217995514052e-02, -1.514361986681356e+02],
    ]

    # Interpolate each coefficient column to the actual sensor temperature.
    P_val = [np.interp(sensor_temp_c, ref_temps, [row[k] for row in coeff_rows])
             for k in range(4)]

    im = rawData
    pxl_temp = P_val[0]*im**3 + P_val[1]*im**2 + P_val[2]*im + P_val[3]
    return pxl_temp
# --------------------------------------------------
def main():
    """Convert the FLIR .bin file given on the command line to a GeoTIFF."""
    args = get_args()

    if not os.path.isdir(args.outdir):
        os.makedirs(args.outdir)

    bin_file = args.bin
    if bin_file is not None:
        with open(args.metadata, 'r') as mdf:
            full_md = json.load(mdf)['lemnatec_measurement_metadata']
    # No extractor metadata is recorded in the output GeoTIFF.
    extractor_info = None

    # NOTE(review): full_md is only bound in the "bin_file is not None" branch
    # above; argparse makes 'bin' a required positional, so in practice it is
    # always set — verify if get_args ever changes.
    if full_md:
        if bin_file is not None:
            out_file = os.path.join(args.outdir, bin_file.split('/')[-1].replace(".bin", ".tif"))
            gps_bounds_bin, img_height, img_width = get_boundingbox(args.metadata, args.zoffset)
            # Raw frames are little-endian uint16 stored row-major as 480x640.
            raw_data = np.fromfile(bin_file, np.dtype('<u2')).reshape(
                [480, 640]).astype('float')
            # Three 90-degree rotations (270 CCW) — presumably to match the
            # bounding-box orientation; confirm against downstream consumers.
            raw_data = np.rot90(raw_data, 3)
            tc = flirRawToTemperature(raw_data, full_md)
            create_geotiff(tc, gps_bounds_bin, out_file, None,
                           True, extractor_info, None, compress=True)

    print(f'Done. See output in {args.outdir}')
# --------------------------------------------------
# Script entry point.
if __name__ == '__main__':
    main()
| [
"json.load",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.isdir",
"numpy.dtype",
"numpy.tan",
"terrautils.spatial.scanalyzer_to_latlon",
"numpy.rot90",
"numpy.interp",
"terrautils.formats.create_geotiff"
] | [((649, 767), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Season 11 flir2tif"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Season 11 flir2tif', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (672, 767), False, 'import argparse\n'), ((3074, 3104), 'terrautils.spatial.scanalyzer_to_latlon', 'scanalyzer_to_latlon', (['x_n', 'y_w'], {}), '(x_n, y_w)\n', (3094, 3104), False, 'from terrautils.spatial import scanalyzer_to_utm, scanalyzer_to_latlon, geojson_to_tuples\n'), ((3126, 3156), 'terrautils.spatial.scanalyzer_to_latlon', 'scanalyzer_to_latlon', (['x_s', 'y_e'], {}), '(x_s, y_e)\n', (3146, 3156), False, 'from terrautils.spatial import scanalyzer_to_utm, scanalyzer_to_latlon, geojson_to_tuples\n'), ((2897, 2908), 'numpy.tan', 'np.tan', (['A_x'], {}), '(A_x)\n', (2903, 2908), True, 'import numpy as np\n'), ((2923, 2934), 'numpy.tan', 'np.tan', (['A_y'], {}), '(A_y)\n', (2929, 2934), True, 'import numpy as np\n'), ((5916, 5939), 'numpy.interp', 'np.interp', (['T', 'T_list', 'a'], {}), '(T, T_list, a)\n', (5925, 5939), True, 'import numpy as np\n'), ((5941, 5964), 'numpy.interp', 'np.interp', (['T', 'T_list', 'b'], {}), '(T, T_list, b)\n', (5950, 5964), True, 'import numpy as np\n'), ((5989, 6012), 'numpy.interp', 'np.interp', (['T', 'T_list', 'c'], {}), '(T, T_list, c)\n', (5998, 6012), True, 'import numpy as np\n'), ((6014, 6037), 'numpy.interp', 'np.interp', (['T', 'T_list', 'd'], {}), '(T, T_list, d)\n', (6023, 6037), True, 'import numpy as np\n'), ((6314, 6340), 'os.path.isdir', 'os.path.isdir', (['args.outdir'], {}), '(args.outdir)\n', (6327, 6340), False, 'import os\n'), ((6350, 6374), 'os.makedirs', 'os.makedirs', (['args.outdir'], {}), '(args.outdir)\n', (6361, 6374), False, 'import os\n'), ((1974, 1986), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1983, 1986), False, 'import json\n'), ((6499, 6513), 'json.load', 'json.load', (['mdf'], {}), '(mdf)\n', (6508, 6513), 
False, 'import json\n'), ((7021, 7042), 'numpy.rot90', 'np.rot90', (['raw_data', '(3)'], {}), '(raw_data, 3)\n', (7029, 7042), True, 'import numpy as np\n'), ((7130, 7227), 'terrautils.formats.create_geotiff', 'create_geotiff', (['tc', 'gps_bounds_bin', 'out_file', 'None', '(True)', 'extractor_info', 'None'], {'compress': '(True)'}), '(tc, gps_bounds_bin, out_file, None, True, extractor_info,\n None, compress=True)\n', (7144, 7227), False, 'from terrautils.formats import create_geotiff\n'), ((6912, 6927), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (6920, 6927), True, 'import numpy as np\n')] |
from unittest import TestCase
import numpy as np
from nirmapper.camera import Camera
from nirmapper.model import Texture
class TestTexture(TestCase):
    """Unit tests for Texture.remove_triangle_with_index."""

    def setUp(self):
        """Build a Texture with two visible triangles, one flagged as a duplicate."""
        # Create Cam1
        location = [0, 7, 0]
        rotation = [-90, 180, 0]
        focal_length = 35
        sensor_width = 32
        sensor_height = 18
        screen_width = 1920
        screen_height = 1080

        cam1 = Camera(focal_length, screen_width, screen_height, sensor_width, sensor_height, location, rotation,
                      "EULER")

        texture = Texture('/fake_path', cam1)
        # Two triangles (three vertices each) sharing an edge.
        texture.visible_vertices = np.array([
            [1, 1, -1],
            [-1, 1, 1],
            [1, 1, 1],
            [1, 1, -1],
            [-1, 1, -1],
            [-1, 1, 1]
        ])
        texture.normal_indices = np.array(
            [[0, 7, 4],
             [0, 3, 7]
             ])
        texture.uv_coords = np.array(
            [[0.31770833, 0.17592593],
             [0.68229167, 0.82407407],
             [0.31770833, 0.82407407],
             [0.31770833, 0.17592593],
             [0.68229167, 0.17592593],
             [0.68229167, 0.82407407]]
        )
        texture.arange_uv_indices()
        texture.verts_indices = [0, 7, 4, 0, 3, 7]
        texture.counts = [667, 665]
        # Triangle 11 is both visible and marked as a duplicate.
        texture.vis_triangle_indices = [5, 11]
        texture.duplicate_triangle_indices = [11]

        self.texture = texture

    def test_remove_triangle_with_index(self):
        """Removing triangle 11 must drop its vertices, UVs, indices and counts."""
        expected_visible_vertices = np.array([
            [1, 1, -1],
            [-1, 1, 1],
            [1, 1, 1]
        ])
        expected_uv_coords = np.array(
            [[0.31770833, 0.17592593],
             [0.68229167, 0.82407407],
             [0.31770833, 0.82407407]]
        )
        expected_vis_vert_indices = np.array([[0, 7, 4]])
        expected_normal_indices = np.array([[0, 7, 4]])
        expected_uv_indices = np.array([0, 1, 2])
        expected_counts = [667]
        expected_vis_triangle_ids = [5]
        expected_dup_triangle_ids = []

        self.texture.remove_triangle_with_index(11)

        try:
            np.testing.assert_equal(self.texture.visible_vertices, expected_visible_vertices)
            np.testing.assert_equal(self.texture.verts_indices, expected_vis_vert_indices)
            np.testing.assert_equal(self.texture.normal_indices, expected_normal_indices)
            np.testing.assert_equal(self.texture.uv_coords, expected_uv_coords)
            np.testing.assert_equal(self.texture.uv_indices, expected_uv_indices)
            np.testing.assert_equal(self.texture.counts, expected_counts)
            np.testing.assert_equal(self.texture.vis_triangle_indices, expected_vis_triangle_ids)
            np.testing.assert_equal(self.texture.duplicate_triangle_indices, expected_dup_triangle_ids)
            res = True
        except AssertionError as err:
            # Collapse all comparison failures into one boolean so the numpy
            # diff message is printed before the final assertTrue fires.
            res = False
            print(err)

        self.assertTrue(res)
| [
"numpy.array",
"nirmapper.model.Texture",
"nirmapper.camera.Camera",
"numpy.testing.assert_equal"
] | [((413, 524), 'nirmapper.camera.Camera', 'Camera', (['focal_length', 'screen_width', 'screen_height', 'sensor_width', 'sensor_height', 'location', 'rotation', '"""EULER"""'], {}), "(focal_length, screen_width, screen_height, sensor_width,\n sensor_height, location, rotation, 'EULER')\n", (419, 524), False, 'from nirmapper.camera import Camera\n'), ((562, 589), 'nirmapper.model.Texture', 'Texture', (['"""/fake_path"""', 'cam1'], {}), "('/fake_path', cam1)\n", (569, 589), False, 'from nirmapper.model import Texture\n'), ((625, 712), 'numpy.array', 'np.array', (['[[1, 1, -1], [-1, 1, 1], [1, 1, 1], [1, 1, -1], [-1, 1, -1], [-1, 1, 1]]'], {}), '([[1, 1, -1], [-1, 1, 1], [1, 1, 1], [1, 1, -1], [-1, 1, -1], [-1, \n 1, 1]])\n', (633, 712), True, 'import numpy as np\n'), ((824, 856), 'numpy.array', 'np.array', (['[[0, 7, 4], [0, 3, 7]]'], {}), '([[0, 7, 4], [0, 3, 7]])\n', (832, 856), True, 'import numpy as np\n'), ((926, 1102), 'numpy.array', 'np.array', (['[[0.31770833, 0.17592593], [0.68229167, 0.82407407], [0.31770833, \n 0.82407407], [0.31770833, 0.17592593], [0.68229167, 0.17592593], [\n 0.68229167, 0.82407407]]'], {}), '([[0.31770833, 0.17592593], [0.68229167, 0.82407407], [0.31770833, \n 0.82407407], [0.31770833, 0.17592593], [0.68229167, 0.17592593], [\n 0.68229167, 0.82407407]])\n', (934, 1102), True, 'import numpy as np\n'), ((1517, 1562), 'numpy.array', 'np.array', (['[[1, 1, -1], [-1, 1, 1], [1, 1, 1]]'], {}), '([[1, 1, -1], [-1, 1, 1], [1, 1, 1]])\n', (1525, 1562), True, 'import numpy as np\n'), ((1638, 1731), 'numpy.array', 'np.array', (['[[0.31770833, 0.17592593], [0.68229167, 0.82407407], [0.31770833, 0.82407407]]'], {}), '([[0.31770833, 0.17592593], [0.68229167, 0.82407407], [0.31770833, \n 0.82407407]])\n', (1646, 1731), True, 'import numpy as np\n'), ((1812, 1833), 'numpy.array', 'np.array', (['[[0, 7, 4]]'], {}), '([[0, 7, 4]])\n', (1820, 1833), True, 'import numpy as np\n'), ((1868, 1889), 'numpy.array', 'np.array', (['[[0, 7, 4]]'], {}), '([[0, 
7, 4]])\n', (1876, 1889), True, 'import numpy as np\n'), ((1920, 1939), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (1928, 1939), True, 'import numpy as np\n'), ((2131, 2216), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['self.texture.visible_vertices', 'expected_visible_vertices'], {}), '(self.texture.visible_vertices,\n expected_visible_vertices)\n', (2154, 2216), True, 'import numpy as np\n'), ((2225, 2303), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['self.texture.verts_indices', 'expected_vis_vert_indices'], {}), '(self.texture.verts_indices, expected_vis_vert_indices)\n', (2248, 2303), True, 'import numpy as np\n'), ((2316, 2393), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['self.texture.normal_indices', 'expected_normal_indices'], {}), '(self.texture.normal_indices, expected_normal_indices)\n', (2339, 2393), True, 'import numpy as np\n'), ((2406, 2473), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['self.texture.uv_coords', 'expected_uv_coords'], {}), '(self.texture.uv_coords, expected_uv_coords)\n', (2429, 2473), True, 'import numpy as np\n'), ((2486, 2555), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['self.texture.uv_indices', 'expected_uv_indices'], {}), '(self.texture.uv_indices, expected_uv_indices)\n', (2509, 2555), True, 'import numpy as np\n'), ((2568, 2629), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['self.texture.counts', 'expected_counts'], {}), '(self.texture.counts, expected_counts)\n', (2591, 2629), True, 'import numpy as np\n'), ((2642, 2731), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['self.texture.vis_triangle_indices', 'expected_vis_triangle_ids'], {}), '(self.texture.vis_triangle_indices,\n expected_vis_triangle_ids)\n', (2665, 2731), True, 'import numpy as np\n'), ((2740, 2835), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['self.texture.duplicate_triangle_indices', 'expected_dup_triangle_ids'], {}), 
'(self.texture.duplicate_triangle_indices,\n expected_dup_triangle_ids)\n', (2763, 2835), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Code for performing reconstruction using pmvs2
import numpy
import pathlib
from pathlib import Path
import os
import inspect
# Absolute directory of this source file (inspect.stack()[0][1] is this file's path).
pwd = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
# Location of the bundled pmvs2 binary built by bootstrap.sh.
pmvs2Path = Path(pwd) / 'extern/CMVS-PMVS/program/main/pmvs2'
assert pmvs2Path.is_file(), "pmvs2 binary not found. Try running bootstrap.sh?"
from .load_camera_info import load_intrinsics, load_extrinsics
from .load_ply import load_ply
def set_up_visualize_subdirectory(images=None, inputPath=None, destPath=None):
    """
    Create the "visualize" subdirectory required by PMVS.
    This directory contains the actual source images.
    Inputs:
        images -- iterable of in-memory images (2-D grayscale or 3-channel
                  arrays); mutually exclusive with inputPath
        inputPath -- full path to a directory containing undistorted images
        destPath -- full path to a directory where the "visualize" subdir will be
                 created
    """
    assert destPath is not None, "destPath is a required argument!"
    assert (images is None)!=(inputPath is None), 'Please pass either "images" or "inputPath" but not both!'
    visualizePath = destPath / 'visualize'
    if not visualizePath.is_dir():
        visualizePath.mkdir()
    print('Setting up visualize subdirectory in ' + str(visualizePath)+'...')
    if images is not None:
        from PIL import Image
        for i,image in enumerate(images):
            assert len(image.shape) in (2,3), 'image shape does not make sense for an image!'
            # Single channel image, needs to be converted to color image or PMVS2 will complain!
            if len(image.shape)==2 or image.shape[2]==1:
                import cv2
                color_image = cv2.cvtColor(image,cv2.COLOR_GRAY2RGB)
            else:
                assert image.shape[2]==3, 'image has more than one but not 3 channels!'
                color_image = image
            # PMVS takes .ppm files... maybe png too, but we don't want compression overhead
            destFilename = "%08i.ppm"%(i)
            destFilePath = visualizePath / destFilename
            Image.fromarray(color_image).save(destFilePath)
    else:
        # use imageMagick to convert the format of the files to .ppm
        import glob
        numCameras = len(list((inputPath.glob("*.png"))))
        # NOTE(review): assumes source files are named image_camera01.png,
        # image_camera02.png, ... with no gaps — verify against the capture
        # pipeline before changing the camera count.
        for i in range(numCameras):
            sourceFilename = "image_camera%02i.png"%(i+1)
            destFilename = "%08i.ppm"%(i)
            sourcePath = inputPath / sourceFilename
            destFilePath = visualizePath / destFilename
            # Call image magick as a binary to convert the images
            args = ['convert',str(sourcePath),str(destFilePath)]
            print('Running command: ' + ' '.join(args)+' ...')
            import subprocess
            subprocess.check_output(args=args)
def set_up_txt_subdirectory(inputPath=None,all_camera_parameters=None,destPath=None):
    """ Generate the txt/*.txt files that PMVS uses to
    input the projection matrices for the images it runs on.
    Input:
        inputPath -- The directory containing the undistorted images,
                     and HALCON text files containing the intrinsics
                     and extrinsics of the images.
                     The names of the .txt files must be based on
                     names of the image files.
                     Mutually exclusive with all_camera_parameters.
        all_camera_parameters -- pre-loaded list of per-camera dicts with
                     'camera_matrix', 'R' and 'T' entries.
        destPath -- full path to a directory where the "txt" subdir will be
                    created
    """
    assert destPath is not None, "destPath is a required argument!"
    assert (inputPath is None)!=(all_camera_parameters is None), 'Please pass either "images" or "inputPath" but not both!'
    txtPath = destPath / 'txt'
    if not txtPath.is_dir():
        txtPath.mkdir()
    if all_camera_parameters is None:
        # BUG FIX: use the package-relative import, consistent with the other
        # load_camera_info imports in this module; the absolute form fails
        # when this file is imported as part of a package.
        from .load_camera_info import load_all_camera_parameters
        all_camera_parameters = load_all_camera_parameters(inputPath, throw_error_if_radial_distortion=True)
    numCameras = len(all_camera_parameters)
    for i in range(numCameras):
        camera_parameters = all_camera_parameters[i]
        cameraMatrix = camera_parameters['camera_matrix']
        R = camera_parameters['R']
        T = camera_parameters['T']
        # Compute the projection matrix pmvs2 wants,
        # which combines intrinsics and extrinsics: P = K @ [R|T]
        temp = numpy.hstack((R,numpy.reshape(T,(3,1)))) # 3x4
        P = numpy.dot(cameraMatrix,temp) # 3x4
        outFilePath = txtPath / ('%08i.txt'%i)
        # PMVS requires the CONTOUR header line before the matrix rows.
        numpy.savetxt(str(outFilePath),P,'%f',header='CONTOUR',comments='')
class PMVS2Options():
    """Holds the user-tunable options for PMVS2.

    Defaults are sane; override individual fields at construction time.
    For argument descriptions see http://www.di.ens.fr/pmvs/documentation.html
    """
    def __init__(self,
                 numCameras,
                 level=2,  # 0 is full resolution
                 csize=2,  # cell size
                 threshold=0.5,
                 wsize=8,  # window size
                 minImageNum=2,
                 CPU=40,  # tuned for a 20-core machine with hyperthreading
                 useVisData=1,
                 sequence=-1,
                 timages=None,
                 oimages=0,
                 numNeighbors=2):
        # NOTE: attribute assignment order determines the line order in the
        # option file written by write_options_file.
        self.level = level
        self.csize = csize
        self.threshold = threshold
        self.wsize = wsize
        self.minImageNum = minImageNum
        self.CPU = CPU
        self.useVisData = useVisData
        self.sequence = sequence
        self.numNeighbors = numNeighbors
        # Default timages selects all cameras via the range form (-1, 0, N).
        self.timages = (-1, 0, numCameras) if timages is None else timages
        self.oimages = oimages

    def __hash__(self):
        # Hash on the sorted option name/value pairs so identical
        # configurations hash identically.
        return hash(tuple(sorted(self.__dict__.items())))

    def write_options_file(self,
                           optionsDir=Path('.'),
                           optionsFile=Path('option.txt')):
        """Serialize all options (except numNeighbors) into a PMVS2 option file."""
        with (optionsDir / optionsFile).open('w') as fd:
            for name, value in vars(self).items():
                if name == 'numNeighbors':
                    # numNeighbors only shapes vis.dat, not the option file.
                    continue
                if type(value) in (int, float):
                    fd.write('%s %s\n' % (name, str(value)))
                elif type(value) in (list, tuple):
                    fd.write(name + ' ' + ' '.join(map(str, value)) + '\n')
def write_vis_file_ring(numCameras,numNeighbors=1,visFilePath=Path('vis.dat')):
    """ Generate a vis.dat file that pmvs2 expects for a camera array with
    ring topology and a configurable number of neighbors to be used for reconstruction.
    Inputs:
        numCameras -- The number of cameras in the ring
        numNeighbors -- For any camera, the number of other adjacent cameras to use for matching
                        i.e. 1 for stereo, 2 for trinocular...
        visFilePath -- Path of the vis.dat file to write.
    """
    assert(numNeighbors >= 1)
    # BUG FIX: the original "assert(numNeighbors+1)" was a no-op (always
    # truthy for numNeighbors >= 1). The intended check is that the ring can
    # actually contain a camera plus its neighbors. Validate before opening
    # the file so a bad call does not leave a half-written vis.dat behind.
    assert numNeighbors + 1 <= numCameras, "numNeighbors must be smaller than numCameras!"
    with visFilePath.open('w') as fd:
        fd.write('VISDATA\n')
        fd.write(str(numCameras)+'\n')
        for center_camera in range(numCameras):
            # Split the neighbor budget around the ring; an odd count favors
            # the "positive" (increasing index) direction.
            numPositiveNeighbors = int(numNeighbors)//2 + numNeighbors%2
            numNegativeNeighbors = int(numNeighbors)//2
            fd.write(str(center_camera)+' ')
            fd.write(str(numNeighbors)+' ')
            for i in range(numPositiveNeighbors):
                neighbor_camera = (center_camera+i+1)%numCameras
                fd.write(str(neighbor_camera) + ' ')
            for i in range(numNegativeNeighbors):
                neighbor_camera = (center_camera-i-1)%numCameras
                fd.write(str(neighbor_camera) + ' ')
            fd.write('\n')
def write_vis_file_sphere(numCameras, visFilePath=None, destPath=None, match_between_pairs=True):
    """ Generate a vis.dat file that pmvs2 expects for a camera with
    cameras arranged in stereo pairs.
    Can also work for a ring as a special case, but there would be no matching between the pairs.
    Inputs:
        numCameras -- The number of cameras in the ring
        visFilePath -- Path of the vis file to be created. Pass this or:
        destPath -- Path of the directory where the vis.dat file will be created.
        match_between_pairs -- also match each camera with its hard-coded
            partner camera in the other ring.
    NOTE(review): the hard-coded pairings below assume exactly 12 cameras
    (two rings of 6); verify before using match_between_pairs=True with any
    other rig.
    """
    assert (visFilePath is None) != (destPath is None), 'Please pass one of visFilePath or destPath!'
    visFileName=Path('vis.dat')
    if visFilePath is None:
        visFilePath = destPath / visFileName
    assert numCameras%2==0, "write_vis_file_sphere expects an even number of cameras!"
    with visFilePath.open('w') as fd:
        fd.write('VISDATA\n')
        fd.write(str(numCameras)+'\n')
        if match_between_pairs:
            #above_pairings = ((1,8),(2,7),(3,12),(4,11),(5,10),(6,9)) # one based camera indeces
            above_pairings = ((0,7),(1,6),(2,11),(3,10),(4,9),(5,8)) # zero based camera indeces
            below_pairings = ( # Manually re-order by camera index
                (6,1),
                (7,0),
                (8,5),
                (9,4),
                (10,3),
                (11,2),
            )
            for camera_index in range(numCameras):
                fd.write(str(camera_index)+' ')
                numNeighbors=3
                fd.write(str(numNeighbors)+' ')
                if camera_index < 6:
                    # Upper ring: both ring neighbors plus the paired camera below.
                    fd.write(str((camera_index-1)%6)+' ')
                    fd.write(str((camera_index+1)%6)+' ')
                    fd.write(str(above_pairings[camera_index][1])+' ')
                else:
                    # Lower ring: both ring neighbors plus the paired camera above.
                    fd.write(str((camera_index-6-1)%6+6)+' ')
                    fd.write(str((camera_index-6+1)%6+6)+' ')
                    fd.write(str(below_pairings[camera_index-6][1])+' ')
                fd.write('\n')
        else:
            # Pure stereo pairs (0,1), (2,3), ...: each camera matches only
            # its partner.
            for camera_index in range(numCameras):
                fd.write(str(camera_index)+' ')
                numNeighbors=1
                fd.write(str(numNeighbors)+' ')
                if camera_index%2==0:
                    fd.write(str(camera_index+1)+' ')
                else:
                    fd.write(str(camera_index-1)+' ')
                fd.write('\n')
def set_up_pmvs_tree(images=None, all_camera_parameters=None, inputPath=None, destPath=None, options=None):
    """ Set up a PMVS style file tree in destPath.

    Either pass inputPath (a directory of images plus HALCON camera parameter
    files), or pass both images (in-memory) and all_camera_parameters.
    """
    assert destPath is not None, "set_up_pmvs_tree requires a destination path!"
    assert (images is None)==(all_camera_parameters is None), "Please pass both or neither of images and all_camera_parameters"
    assert (images is None)!=(inputPath is None), 'Please pass either "images" or "inputPath" but not both!'
    set_up_visualize_subdirectory(images=images,inputPath=inputPath,destPath=destPath)
    # BUG FIX: forward all_camera_parameters as well. The original passed only
    # inputPath, so the in-memory (images=...) code path always tripped the
    # exactly-one-of assertion inside set_up_txt_subdirectory.
    set_up_txt_subdirectory(inputPath=inputPath,all_camera_parameters=all_camera_parameters,destPath=destPath)

    # Generate the empty directory where pmvs puts its ply files
    modelsDir = destPath / 'models'
    if not modelsDir.is_dir():
        modelsDir.mkdir()

    if all_camera_parameters is not None:
        numCameras = len(all_camera_parameters)
    else:
        numCameras = len(list(inputPath.glob('*.png')))

    # Generate PMVS options file
    if options is None:
        options = PMVS2Options(numCameras=numCameras)
    options.write_options_file(optionsDir=destPath,
                               optionsFile='option.txt')

    # Generate PMVS vis.dat file
    write_vis_file_ring(numCameras=numCameras,
                        numNeighbors=options.numNeighbors,
                        visFilePath=destPath / 'vis.dat')
def run_pmvs(imagesPath, destDir=None, destFile=None, options=None, workDirectory=None, runtimeFile=None):
    """ Run PMVS2 on a directory full of images.
        The images must ALREADY be radially undistorted!
        This single function interface is convenient, but does all I/O every time.
    Arguments:
        imagesPath -- A directory full of source images
        destDir -- The destination directory of the ply file. (default current directory)
        destFile -- The destination name of the ply file. (default <name of the directory>.ply)
        options -- An instance of PMVS2Options
        workDirectory -- Existing directory where pmvs will work. (default generates a temp directory)
        runtimeFile -- The name of a file where info regarding the runtime will be stored.
    """
    # By default, work in a temporary directory.
    # "with...as" ensures the temp directory is cleared even if there is an error below.
    if workDirectory is None:
        from tempfile import TemporaryDirectory
        # Recurse once with a concrete temp directory so the cleanup is
        # handled by the context manager.
        with TemporaryDirectory(dir=str(Path(pwd)/'tmp')) as workDirectory:
            run_pmvs(imagesPath=imagesPath,
                 destDir=destDir,
                 destFile=destFile,
                 options=options,
                 runtimeFile=runtimeFile,
                 workDirectory=Path(workDirectory))
        return
    if not workDirectory.is_dir():
        workDirectory.mkdir()

    imagesPath = imagesPath.resolve()

    set_up_pmvs_tree(inputPath=imagesPath,
                     destPath=workDirectory,
                     options=options)

    # Run PMVS2
    import subprocess
    from time import time
    args = [str(pmvs2Path), './', str('option.txt')] # Careful! That damn slash after the dot is CRITICAL
    print('Running command ', ' '.join(args))
    t1 = time()
    #result = subprocess.run(args=args, cwd=str(workDirectory), stdout=subprocess.PIPE) # Python 3.5
    #stdout = result.stdout
    #returncode = result.returncode
    proc = subprocess.Popen(args=args, cwd=str(workDirectory), stdout=subprocess.PIPE) # Python 3.4
    stdout, stderr = proc.communicate()
    returncode = proc.returncode
    t2 = time()
    dt = t2-t1 # seconds. TODO: scrape more accurate timing from PMVS shell output

    print("pmvs2 output:")
    print(stdout.decode('utf8'))
    if returncode != 0:
        print("WARNING! pmvs2 returned a non-zero return value!")

    # Copy the file to the appropriate destination
    if destDir is None:
        destDir = Path.cwd()
    if destFile is None:
        destFile = 'reconstruction.ply'
    destPath = destDir / destFile
    if runtimeFile is None:
        runtimeFile = destPath.parent / (destPath.stem +'_runtime.txt')
    with open(str(runtimeFile), 'w') as fd:
        fd.write(str(dt)) # seconds

    # PMVS2 names its output after the option file; move it to the destination.
    modelsDir = workDirectory / 'models'
    plyPath = modelsDir / Path('option.txt' + '.ply')
    if plyPath.is_file():
        plyPath.rename(destPath)
    else:
        print(".ply file wasn't generated!")
        print('modelsDir: ' + str(modelsDir))
        print('plyPath: ' + str(plyPath))
        assert False
class PMVS2StereoMatcher():
    """ Wrapper class that calls PMVS2 on an array of cameras.

    Usage: Re-instantiate each time the camera geometry changes with a new
    calibration_path. For each reconstruction, call either run_from_memory or
    run_from_disk depending on your use case.
    """
    def __init__(self,
                 options=None,
                 calibration_path=None,
                 all_camera_parameters=None,
                 work_directory=Path('pmvs2_work_directory'),
                 ):
        """Set up the on-disk PMVS2 work tree (txt/, models/, option.txt, vis.dat).

        Exactly one of calibration_path / all_camera_parameters must be given.
        """
        assert (calibration_path is None) != (all_camera_parameters is None), "Please pass exactly one of all_camera_parameters or calibration_path!"
        if calibration_path is not None:
            from .load_camera_info import load_all_camera_parameters
            self.all_camera_parameters = load_all_camera_parameters(calibration_path)
        else:
            self.all_camera_parameters = all_camera_parameters
        # BUG FIX: read self.all_camera_parameters here. The original used the
        # local argument, which is None when calibration_path was supplied, so
        # len() raised TypeError in that code path.
        self.num_cameras = len(self.all_camera_parameters)
        if options is None:
            self.options = PMVS2Options(numCameras=self.num_cameras)
        else:
            self.options = options
        if not work_directory.is_dir():
            work_directory.mkdir()
        self.work_directory = work_directory

        # set_up_pmvs_tree (BUG FIX: pass self.all_camera_parameters for the
        # same reason as above).
        set_up_txt_subdirectory(all_camera_parameters=self.all_camera_parameters,destPath=work_directory)
        # Generate the empty directory where pmvs puts its ply files
        modelsDir = work_directory / 'models'
        if not modelsDir.is_dir():
            modelsDir.mkdir()
        self.options.write_options_file(optionsDir=work_directory)

        # TODO I didn't do good bookkeeping yet in the database for the topology.
        # There should be a list of sensible matching topologies in all_camera_parameters,
        # and it shoud be added to the on-disk format.
        write_vis_file_sphere(self.num_cameras, destPath=work_directory, match_between_pairs=True)

    def run_from_memory(self, images, foreground_masks=None, dump_ply_files=False):
        """
        Run PMVS2 on images that are already in memory.
        Unfortunately, I still have to dump to disk, but at least I can hit the disk
        as little as possible.
        Inputs:
            images -- The ALREADY UNDISTORTED images
            foreground_masks -- unused, for API compatibility
            dump_ply_files -- also unused, for API compatiblity
        Returns:
            (xyz, dt) -- N x 3 array of reconstructed points and PMVS2's
            wall-clock runtime in seconds.
        """
        # TODO Blow away previous images just to be safe?
        # TODO Blow away previous reconstruction just to be safe?

        # Dump out the new images into the already existing PMVS2 file tree
        set_up_visualize_subdirectory(images=images, destPath=self.work_directory)

        # Run PMVS2
        import subprocess
        from time import time
        args = [str(pmvs2Path), './', str('option.txt')] # Careful! That damn slash after the dot is CRITICAL
        print('Running command ', ' '.join(args))
        t1 = time()
        proc = subprocess.Popen(args=args, cwd=str(self.work_directory), stdout=subprocess.PIPE) # Python 3.4
        stdout, stderr = proc.communicate()
        t2 = time()
        dt = t2-t1
        returncode = proc.returncode
        print("pmvs2 output:")
        print(stdout.decode('utf8'))
        if returncode != 0:
            print("WARNING! pmvs2 returned a non-zero return value!")

        # Load the ply file from disk and return the xyz part
        modelsDir = self.work_directory / 'models'
        plyPath = modelsDir / Path('option.txt' + '.ply')
        assert plyPath.is_file(), 'PMVS2 did not generate a .ply file!'
        data, columnnames, columntypes = load_ply(plyPath, enableCaching=False)
        assert columnnames[0:3] == ['x', 'y', 'z']
        xyz = data[:, 0:3]
        return xyz,dt
# Some hard-coded options, roughly slow to fast.
# Only 'pmvs_medium' is active; the commented presets are kept as a record of
# previously-tried parameter combinations (denser / slower settings first).
pmvsOptionsDict = {
    #'pmvs_tuned1': PMVS2Options(minImageNum=3,
    #CPU=7,
    #useVisData=1,
    #numNeighbors=2,
    #oimages=0,
    #sequence=-1,
    #wsize=8,
    #numCameras=12,
    #timages=None,
    #level=3,
    #threshold=0.7,
    #csize=6),
    #'pmvs_tuned2': PMVS2Options(numCameras=12,
    #level=3,
    #csize=5,
    #threshold=0.6,
    #wsize=6,
    #minImageNum=3,
    #CPU=7,
    #useVisData=1,
    #sequence=-1,
    #timages=None,
    #oimages=0,
    #numNeighbors=2),
    'pmvs_medium': PMVS2Options(
        numCameras=12, level=1, csize=4,
        numNeighbors=2)
    #,
    #'pmvs_2_2_1': PMVS2Options(
    #numCameras=12, level=2, csize=2,
    #numNeighbors=1),
    #'pmvs_2_4_1': PMVS2Options(
    #numCameras=12, level=2, csize=4,
    #numNeighbors=1),
    #'pmvs_2_8_1': PMVS2Options(
    #numCameras=12, level=2, csize=8,
    #numNeighbors=1),
    #'pmvs_2_2_2': PMVS2Options(
    #numCameras=12, level=2, csize=2,
    #numNeighbors=2),
    #'pmvs_2_4_2': PMVS2Options(
    #numCameras=12, level=2, csize=4,
    #numNeighbors=2),
    #'pmvs_2_8_2': PMVS2Options(
    #numCameras=12, level=2, csize=8,
    #numNeighbors=2),
    #'pmvs_1_4_2': PMVS2Options(
    #numCameras=12, level=1, csize=4,
    #numNeighbors=2)
    #,
    #'pmvs_0_4_2': PMVS2Options(
    #numCameras=12, level=0, csize=4,
    #numNeighbors=2
    #) # Used for generating the references (followed by hand cleanup)
}
# Names of the available presets (a dict view over the keys above).
pmvsOptionNames = pmvsOptionsDict.keys()
if __name__ == '__main__':
    # Smoke test: reconstruct a canned dataset with the 'pmvs_medium' preset.
    print('Attempting to run a reconstruction using pmvs')
    image_directory = Path('data/undistorted_images/2016_10_24__17_43_02')
    scratch_directory = Path('working_directory_pmvs')
    # Other presets in pmvsOptionsDict (e.g. 'pmvs_tuned1') can be swapped in here.
    run_pmvs(image_directory,
             workDirectory=scratch_directory,
             options=pmvsOptionsDict['pmvs_medium'])
| [
"cv2.cvtColor",
"subprocess.check_output",
"time.time",
"pathlib.Path",
"numpy.reshape",
"PIL.Image.fromarray",
"numpy.dot",
"pathlib.Path.cwd",
"inspect.stack",
"load_camera_info.load_all_camera_parameters"
] | [((227, 236), 'pathlib.Path', 'Path', (['pwd'], {}), '(pwd)\n', (231, 236), False, 'from pathlib import Path\n'), ((6901, 6916), 'pathlib.Path', 'Path', (['"""vis.dat"""'], {}), "('vis.dat')\n", (6905, 6916), False, 'from pathlib import Path\n'), ((8764, 8779), 'pathlib.Path', 'Path', (['"""vis.dat"""'], {}), "('vis.dat')\n", (8768, 8779), False, 'from pathlib import Path\n'), ((13819, 13825), 'time.time', 'time', ([], {}), '()\n', (13823, 13825), False, 'from time import time\n'), ((14174, 14180), 'time.time', 'time', ([], {}), '()\n', (14178, 14180), False, 'from time import time\n'), ((21182, 21234), 'pathlib.Path', 'Path', (['"""data/undistorted_images/2016_10_24__17_43_02"""'], {}), "('data/undistorted_images/2016_10_24__17_43_02')\n", (21186, 21234), False, 'from pathlib import Path\n'), ((21253, 21283), 'pathlib.Path', 'Path', (['"""working_directory_pmvs"""'], {}), "('working_directory_pmvs')\n", (21257, 21283), False, 'from pathlib import Path\n'), ((3769, 3845), 'load_camera_info.load_all_camera_parameters', 'load_all_camera_parameters', (['inputPath'], {'throw_error_if_radial_distortion': '(True)'}), '(inputPath, throw_error_if_radial_distortion=True)\n', (3795, 3845), False, 'from load_camera_info import load_all_camera_parameters\n'), ((4284, 4313), 'numpy.dot', 'numpy.dot', (['cameraMatrix', 'temp'], {}), '(cameraMatrix, temp)\n', (4293, 4313), False, 'import numpy\n'), ((6228, 6237), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (6232, 6237), False, 'from pathlib import Path\n'), ((6278, 6296), 'pathlib.Path', 'Path', (['"""option.txt"""'], {}), "('option.txt')\n", (6282, 6296), False, 'from pathlib import Path\n'), ((14508, 14518), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (14516, 14518), False, 'from pathlib import Path\n'), ((14867, 14894), 'pathlib.Path', 'Path', (["('option.txt' + '.ply')"], {}), "('option.txt' + '.ply')\n", (14871, 14894), False, 'from pathlib import Path\n'), ((15576, 15604), 'pathlib.Path', 'Path', 
(['"""pmvs2_work_directory"""'], {}), "('pmvs2_work_directory')\n", (15580, 15604), False, 'from pathlib import Path\n'), ((18071, 18077), 'time.time', 'time', ([], {}), '()\n', (18075, 18077), False, 'from time import time\n'), ((18245, 18251), 'time.time', 'time', ([], {}), '()\n', (18249, 18251), False, 'from time import time\n'), ((2687, 2721), 'subprocess.check_output', 'subprocess.check_output', ([], {'args': 'args'}), '(args=args)\n', (2710, 2721), False, 'import subprocess\n'), ((15923, 15967), 'load_camera_info.load_all_camera_parameters', 'load_all_camera_parameters', (['calibration_path'], {}), '(calibration_path)\n', (15949, 15967), False, 'from load_camera_info import load_all_camera_parameters\n'), ((18618, 18645), 'pathlib.Path', 'Path', (["('option.txt' + '.ply')"], {}), "('option.txt' + '.ply')\n", (18622, 18645), False, 'from pathlib import Path\n'), ((191, 206), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (204, 206), False, 'import inspect\n'), ((1618, 1657), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_GRAY2RGB'], {}), '(image, cv2.COLOR_GRAY2RGB)\n', (1630, 1657), False, 'import cv2\n'), ((4240, 4264), 'numpy.reshape', 'numpy.reshape', (['T', '(3, 1)'], {}), '(T, (3, 1))\n', (4253, 4264), False, 'import numpy\n'), ((2002, 2030), 'PIL.Image.fromarray', 'Image.fromarray', (['color_image'], {}), '(color_image)\n', (2017, 2030), False, 'from PIL import Image\n'), ((13326, 13345), 'pathlib.Path', 'Path', (['workDirectory'], {}), '(workDirectory)\n', (13330, 13345), False, 'from pathlib import Path\n'), ((13049, 13058), 'pathlib.Path', 'Path', (['pwd'], {}), '(pwd)\n', (13053, 13058), False, 'from pathlib import Path\n')] |
import numpy as np
def Minax(X, Y):
    """Return the five largest and five smallest (x, y) points of a curve.

    Parameters
    ----------
    X, Y : array-like of equal length (at least 5 elements)
        Sample coordinates; Y is the quantity being ranked.

    Returns
    -------
    tuple
        ('TOP 5 MAX:', (x, y), ..., 'TOP 5 MIN:', (x, y), ...) — five maxima
        ordered from largest to smallest y, then five minima ordered from
        smallest to largest y.

    Raises
    ------
    ValueError
        If fewer than 5 samples are supplied (the old code crashed with an
        opaque IndexError instead).
    """
    new_X = np.array(X)
    new_Y = np.array(Y)
    if new_Y.size < 5:
        raise ValueError("Minax needs at least 5 samples, got %d" % new_Y.size)
    # argsort gives the true positions directly. The previous implementation
    # looked each sorted value up with Y.index(value), which returns the
    # *first* occurrence and therefore paired top-k values with the wrong x
    # whenever Y contained duplicates.
    order = np.argsort(new_Y, kind='stable')
    min_indices = order[:5]          # ascending y
    max_indices = order[::-1][:5]    # descending y
    maxima = tuple((new_X[i], new_Y[i]) for i in max_indices)
    minima = tuple((new_X[i], new_Y[i]) for i in min_indices)
    return ('TOP 5 MAX:',) + maxima + ('TOP 5 MIN:',) + minima
| [
"numpy.sort",
"numpy.array",
"numpy.argmin",
"numpy.argmax"
] | [((48, 59), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (56, 59), True, 'import numpy as np\n'), ((72, 83), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (80, 83), True, 'import numpy as np\n'), ((103, 119), 'numpy.argmax', 'np.argmax', (['new_Y'], {}), '(new_Y)\n', (112, 119), True, 'import numpy as np\n'), ((134, 150), 'numpy.argmin', 'np.argmin', (['new_Y'], {}), '(new_Y)\n', (143, 150), True, 'import numpy as np\n'), ((280, 294), 'numpy.sort', 'np.sort', (['new_Y'], {}), '(new_Y)\n', (287, 294), True, 'import numpy as np\n')] |
import numpy as np
import os
import re
import itertools
import json
import requests
import scipy.sparse as sp
import pickle
from collections import Counter
from nltk.corpus import stopwords
from tqdm import tqdm
import ast
from hybrid_xml import arr_length
cachedStopWords = stopwords.words("english")
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold, StratifiedKFold, StratifiedShuffleSplit, ShuffleSplit
def clean_str(string):
    """Normalize a raw document for tokenization.

    Drops unexpected characters, splits common English contractions into
    separate tokens, pads punctuation with spaces, collapses runs of
    whitespace, and lowercases the result.
    """
    substitutions = (
        (r"[^A-Za-z0-9(),!?\'\`]", " "),
        (r"\'s", " \'s"),
        (r"\'ve", " \'ve"),
        (r"n\'t", " n\'t"),
        (r"\'re", " \'re"),
        (r"\'d", " \'d"),
        (r"\'ll", " \'ll"),
        (r",", " , "),
        (r"!", " ! "),
        (r"\(", " \( "),
        (r"\)", " \) "),
        (r"\?", " \? "),
        (r"\s{2,}", " "),
    )
    for pattern, replacement in substitutions:
        string = re.sub(pattern, replacement, string)
    return string.strip().lower()
def pad_sentences(sentences, padding_word="<PAD/>", max_length=500):
    """Bring every sentence (token list) to a common length.

    The target length is the longest sentence, capped at max_length.
    Sentences shorter than max_length are right-padded with padding_word up
    to the target; sentences of max_length or more tokens are truncated to
    max_length.
    """
    target_length = min(max(len(tokens) for tokens in sentences), max_length)
    padded = []
    for tokens in sentences:
        if len(tokens) < max_length:
            padded.append(tokens + [padding_word] * (target_length - len(tokens)))
        else:
            padded.append(tokens[:max_length])
    return padded
def load_data_and_labels(data):
    """Tokenize documents and build a sparse multi-hot label matrix.

    Each doc['catgy'] holds one label-index list per object; labels of
    object i are shifted by 19 * i so every object owns its own block of 19
    columns, and column 18 + 19*i is set as an always-on marker for that
    object's block.

    Returns [x_text, Y]: the token lists and a scipy CSR matrix of shape
    (len(data), arr_length).
    """
    x_text = [clean_str(doc['text']) for doc in data]
    x_text = [s.split(" ") for s in x_text]
    # labels = [doc['catgy'] for doc in data]
    labels = []
    # for doc in data:
    #     labels.append([tuple(tuple(a) for a in doc['catgy'])])
    # labels = [doc['catgy'][0] +[x + 19 for x in doc['catgy'][1]] for doc in data] # add 18 to the second object to have a 1d array
    # for label in labels:
    labels = []
    for doc in data:
        tmp_list = []
        # tmp_list =doc['catgy'][0]
        # tmp_list.append(18)
        for i in range(len(doc['catgy'])):
            # Offset each object's labels into its own 19-column block.
            tmp_list += [x + 19 * (i) for x in doc['catgy'][i]]
            # tmp_list.append(doc['catgy'][i]+19*i)
            tmp_list.append(18 + 19 * (i))
        labels.append(tmp_list)
    # Build COO triplets for the sparse label matrix.
    row_idx, col_idx, val_idx = [], [], []
    for i in tqdm(range(len(labels))):
        l_list = list(set(labels[i]))  # remove duplicate cateories to avoid double count
        # l_list = labels[i] # remove duplicate cateories to avoid double count
        # l_list = {tuple(i) for i in labels}
        for y in l_list:
            row_idx.append(i)
            col_idx.append(y)
            val_idx.append(1)
    m = max(row_idx) + 1
    n = max(col_idx) + 1
    # Column count is forced to the project-wide label-space size.
    n= arr_length # TODO: make this adaptive
    # n = max([max(x) for x in col_idx])
    Y = sp.csr_matrix((val_idx, (row_idx, col_idx)), shape=(m, n))
    # Y = col_idx
    return [x_text, Y]
def load_data_and_labels_n(data):
    """Variant of load_data_and_labels that uses only the *first* object's
    label list (doc['catgy'][i][0] indexing below takes labels[i][0]) and
    applies no per-object column offsets.

    Returns [x_text, Y]: token lists and a CSR matrix of shape
    (len(data), arr_length).
    """
    x_text = [clean_str(doc['text']) for doc in data]
    x_text = [s.split(" ") for s in x_text]
    labels = [doc['catgy'] for doc in data]
    row_idx, col_idx, val_idx = [], [], []
    for i in tqdm(range(len(labels))):
        l_list = list(set(labels[i][0]))  # remove duplicate cateories to avoid double count
        for y in l_list:
            row_idx.append(i)
            col_idx.append(y)
            val_idx.append(1)
    m = max(row_idx) + 1
    n = max(col_idx) + 1
    # Column count is forced to the project-wide label-space size.
    n = arr_length  # TODO: make this adaptive
    # n = max([max(x) for x in col_idx])
    Y = sp.csr_matrix((val_idx, (row_idx, col_idx)), shape=(m, n))
    # Y = col_idx
    return [x_text, Y]
def build_vocab(sentences, vocab_size=50000):
    """Build a token -> index mapping from the vocab_size most frequent
    tokens, with a trailing '<UNK/>' entry for out-of-vocabulary words.

    Returns [vocabulary, vocabulary_inv]: the dict and the index -> token list.
    """
    counts = Counter(itertools.chain.from_iterable(sentences))
    vocabulary_inv = [token for token, _ in counts.most_common(vocab_size)]
    vocabulary_inv.append('<UNK/>')
    vocabulary = {token: index for index, token in enumerate(vocabulary_inv)}
    return [vocabulary, vocabulary_inv]
def build_input_data(sentences, vocabulary):
    """Map every token to its vocabulary index, substituting the '<UNK/>'
    index for out-of-vocabulary tokens.

    Returns a 2-D numpy array of shape (num_sentences, sentence_length);
    sentences are expected to share a common length (see pad_sentences).
    """
    unknown_index = vocabulary['<UNK/>']
    return np.array([[vocabulary.get(token, unknown_index) for token in sentence]
                     for sentence in sentences])
def get_valid_df():
    """Build the external validation set from a local Baserow table dump.

    Reads 'dataset_baserow.json' (a saved Baserow API response; the live
    request code is kept commented out for reference), filters degenerate or
    duplicate descriptions, and converts each row's boolean solution matrix
    into lists of true-label indices (with an extra always-True marker
    appended per object).

    Returns a list of dicts with keys 'text', 'Id', 'split', 'catgy',
    'num_words'.
    """
    test_data = []
    #elements = requests.get(
    #     "https://api.baserow.io/api/database/rows/table/17789/",
    #     headers={
    #         "Authorization": "Token <KEY>"
    #     }
    # )
    #data = elements.json()
    # FIX: the file handle was previously opened without ever being closed;
    # a context manager guarantees it is released.
    with open("dataset_baserow.json", "r") as f:
        data = json.loads(f.read())
    for element in data['results']:
        # field_93157 / field_93158 are Baserow column ids — presumably the
        # filename and the annotation payload; verify against the table schema.
        filename = element['field_93157'].split(':')[0]
        inner_data = ast.literal_eval(element['field_93158'])
        description = inner_data['description']
        # Skip too-short / unwanted descriptions.
        if len(description)<10 or 'computational' in description:
            continue
        # Skip exact duplicates of an already collected description.
        if True in [description in dicti.values() for dicti in test_data]:
            continue
        given_exp = ast.literal_eval(inner_data['given_exp'])
        if len(given_exp[1])==18:
            continue
        true_exp = ast.literal_eval(inner_data['true_exp'])[1]
        categories = []
        for obj in true_exp:
            # Append a sentinel True so every object contributes a marker index.
            obj = obj + [True]
            categories.append([i for i, x in enumerate(obj) if x])
        test_data.append({'text':description,'Id':filename,'split':'val',
                          'catgy':categories, 'num_words': len(description)
                          })
    return test_data
def load_data(data_path, max_length=500, vocab_size=50000, split=0):
    """Load the JSON dataset at data_path and build index-encoded splits.

    The frame is shuffled into train/test (70/30) and the training part is
    split again into train/validation indices; the validation *set* actually
    used comes from get_valid_df(). Vocabulary is built over all padded
    sentences (train + test + val).

    Parameters
    ----------
    data_path : str
        JSON file readable by pandas with 'description', 'solution_matrix'
        and 'file_name' columns.
    max_length : int
        Maximum (and padded) sentence length in tokens.
    vocab_size : int
        Number of most-frequent tokens kept in the vocabulary.
    split : int
        Which of the 5 shuffle-splits to use for train/test.

    Returns
    -------
    X_trn, Y_trn, X_tst, Y_tst, X_val, Y_val, vocabulary, vocabulary_inv
    """
    # FIX: the original wrapped this whole body in
    # `with open(os.path.join(data_path), 'rb') as fin:` without ever reading
    # from fin — pandas opens the file itself, so the redundant second file
    # handle was removed.
    df = pd.read_json(data_path)
    df.head()
    df['labels'] = df[df.columns[2:]].values.tolist()
    new_df = df[['description', 'solution_matrix', 'file_name']].copy()
    new_df.head()
    # Split train_val vs test, then train vs val, reproducibly (fixed seed).
    sss = ShuffleSplit(n_splits=5, test_size=0.3, random_state=5)
    splits = [(train, test) for train, test in sss.split(new_df.description, new_df.solution_matrix)]
    train_val_index, test_index = splits[split]
    splits = [(train, test) for train, test in sss.split(new_df.description.iloc[train_val_index],
                                                         new_df.solution_matrix.iloc[train_val_index])]
    tmp_idx_train, tmp_idx_val = splits[0]
    train_index = train_val_index[tmp_idx_train]
    # NOTE: the in-frame validation indices are not used further; the
    # validation set is pulled from the external Baserow dump instead.
    train_df = new_df.iloc[train_index].reset_index(drop=True)
    test_df = new_df.iloc[test_index].reset_index(drop=True)
    valid = get_valid_df()
    # Turn the frames into lists of dicts with keys:
    # 'split', 'text', 'Id', 'catgy' (per-object true-label indices), 'num_words'.
    train = []
    test = []
    for index, row in train_df.iterrows():
        categories = []
        for obj in row['solution_matrix']:
            categories.append([i for i, x in enumerate(obj) if x])
        train.append({'split': 'train', 'text': row['description'], 'Id': row['file_name'], 'catgy': categories,
                      'num_words': len(row['description'])})
    for index, row in test_df.iterrows():
        categories = []
        for obj in row['solution_matrix']:
            categories.append([i for i, x in enumerate(obj) if x])
        test.append(
            {'split': 'test', 'text': row['description'], 'Id': row['file_name'], 'catgy': categories,
             'num_words': len(row['description'])})
    if len(test) == 0:
        test[:5] = train[:5]
    trn_sents, Y_trn = load_data_and_labels(train)
    tst_sents, Y_tst = load_data_and_labels(test)
    val_sents, Y_val = load_data_and_labels(valid)
    trn_sents_padded = pad_sentences(trn_sents, max_length=max_length)
    tst_sents_padded = pad_sentences(tst_sents, max_length=max_length)
    val_sents_padded = pad_sentences(val_sents, max_length=max_length)
    print("len:", len(trn_sents_padded), len(tst_sents_padded))
    vocabulary, vocabulary_inv = build_vocab(trn_sents_padded + tst_sents_padded + val_sents_padded, vocab_size=vocab_size)
    X_trn = build_input_data(trn_sents_padded, vocabulary)
    X_tst = build_input_data(tst_sents_padded, vocabulary)
    X_val = build_input_data(val_sents_padded, vocabulary)
    return X_trn, Y_trn, X_tst, Y_tst, X_val, Y_val, vocabulary, vocabulary_inv
def load_data_2_obj(data_path, max_length=500, vocab_size=50000, split=0):
    """Load the JSON dataset at data_path and build index-encoded train/test
    splits (no external validation set).

    Uses the offset multi-object label encoding when arr_length > 18 and the
    single-object encoding otherwise.

    Parameters
    ----------
    data_path : str
        JSON file readable by pandas with 'description', 'solution_matrix'
        and 'file_name' columns.
    max_length : int
        Maximum (and padded) sentence length in tokens.
    vocab_size : int
        Number of most-frequent tokens kept in the vocabulary.
    split : int
        Which of the 5 shuffle-splits to use for train/test.

    Returns
    -------
    X_trn, Y_trn, X_tst, Y_tst, vocabulary, vocabulary_inv
    """
    # FIX: same as load_data — the body used to sit inside a
    # `with open(data_path, 'rb')` that never read from its handle; pandas
    # opens the file itself, so the redundant handle was removed.
    df = pd.read_json(data_path)
    df.head()
    df['labels'] = df[df.columns[2:]].values.tolist()
    new_df = df[['description', 'solution_matrix', 'file_name']].copy()
    new_df.head()
    # Split train_val vs test reproducibly (fixed seed).
    sss = ShuffleSplit(n_splits=5, test_size=0.3, random_state=52)
    splits = [(train, test) for train, test in sss.split(new_df.description, new_df.solution_matrix)]
    train_val_index, test_index = splits[split]
    splits = [(train, test) for train, test in sss.split(new_df.description.iloc[train_val_index],
                                                         new_df.solution_matrix.iloc[train_val_index])]
    tmp_idx_train, tmp_idx_val = splits[0]
    train_index = train_val_index[tmp_idx_train]
    train_df = new_df.iloc[train_index].reset_index(drop=True)
    test_df = new_df.iloc[test_index].reset_index(drop=True)
    # Turn the frames into lists of dicts with keys:
    # 'split', 'text', 'Id', 'catgy' (per-object true-label indices), 'num_words'.
    train = []
    test = []
    for index, row in train_df.iterrows():
        categories = []
        for obj in row['solution_matrix']:
            categories.append([i for i, x in enumerate(obj) if x])
        train.append({'split': 'train', 'text': row['description'], 'Id': row['file_name'], 'catgy': categories,
                      'num_words': len(row['description'])})
    for index, row in test_df.iterrows():
        categories = []
        for obj in row['solution_matrix']:
            categories.append([i for i, x in enumerate(obj) if x])
        test.append(
            {'split': 'test', 'text': row['description'], 'Id': row['file_name'], 'catgy': categories,
             'num_words': len(row['description'])})
    if len(test) == 0:
        test[:5] = train[:5]
    # Choose the label encoding matching the configured label-space size.
    if arr_length > 18:
        trn_sents, Y_trn = load_data_and_labels(train)
        tst_sents, Y_tst = load_data_and_labels(test)
    else:
        trn_sents, Y_trn = load_data_and_labels_n(train)
        tst_sents, Y_tst = load_data_and_labels_n(test)
    trn_sents_padded = pad_sentences(trn_sents, max_length=max_length)
    tst_sents_padded = pad_sentences(tst_sents, max_length=max_length)
    print("len:", len(trn_sents_padded), len(tst_sents_padded))
    vocabulary, vocabulary_inv = build_vocab(trn_sents_padded + tst_sents_padded, vocab_size=vocab_size)
    X_trn = build_input_data(trn_sents_padded, vocabulary)
    X_tst = build_input_data(tst_sents_padded, vocabulary)
    return X_trn, Y_trn, X_tst, Y_tst, vocabulary, vocabulary_inv
# def load_data_n(data_path, max_length=500, vocab_size=50000,split=0):
# # Load and preprocess data
# with open(os.path.join(data_path), 'rb') as fin:
# # df = pd.read_json("../MasterThesis/one_to_4_25noise_shuffled order.json")
# df = pd.read_json(data_path)
# # df = pd.read_json("../MasterThesis/two_objects.json")
# # df = pd.read_json("../MasterThesis/edited_files/all_edited.json")
#
# df.head()
# df['labels'] = df[df.columns[2:]].values.tolist()
# new_df = df[['description', 'solution_matrix', 'file_name']].copy()
# new_df.head()
#
# sss = ShuffleSplit(n_splits=5, test_size=0.3, random_state=5)
#
# splits = [(train, test) for train, test in sss.split(new_df.description, new_df.solution_matrix)]
# train_val_index, test_index = splits[split]
#
# train_df = new_df.iloc[train_val_index].reset_index(drop=True)
# test_df = new_df.iloc[test_index].reset_index(drop=True)
# train =[]
# test = []
# for index, row in train_df.iterrows():
# # categories = [i for i, x in enumerate(row['solution_matrix']) if x]
# categories = []
# for obj in row['solution_matrix']:
# categories.append([i for i, x in enumerate(obj) if x])
# train.append({'split': 'train', 'text': row['description'], 'Id': row['file_name'], 'catgy': categories,
# 'num_words': len(row['description'])})
# for index, row in test_df.iterrows():
# categories = []
# for obj in row['solution_matrix']:
# categories.append([i for i, x in enumerate(obj) if x])
#
# test.append(
# {'split': 'test', 'text': row['description'], 'Id': row['file_name'], 'catgy': categories,
# 'num_words': len(row['description'])})
#
# trn_sents, Y_trn = load_data_and_labels_n(train)
# print('1.2')
#
# tst_sents, Y_tst = load_data_and_labels_n(test)
# print('1.3')
# trn_sents_padded = pad_sentences(trn_sents, max_length=max_length)
# tst_sents_padded = pad_sentences(tst_sents, max_length=max_length)
# print("len:", len(trn_sents_padded), len(tst_sents_padded))
# vocabulary, vocabulary_inv = build_vocab(trn_sents_padded + tst_sents_padded, vocab_size=vocab_size)
# X_trn = build_input_data(trn_sents_padded, vocabulary)
# X_tst = build_input_data(tst_sents_padded, vocabulary)
# return X_trn, Y_trn, X_tst, Y_tst, vocabulary, vocabulary_inv
# # return X_trn, Y_trn, vocabulary, vocabulary_inv
def batch_iter(data, batch_size, num_epochs):
    """
    Generates a batch iterator for a dataset.

    Yields num_epochs passes over data; the order is reshuffled at the start
    of every epoch, and the final batch of an epoch may be smaller than
    batch_size.
    """
    data = np.array(data)
    data_size = len(data)
    # Ceiling division. The previous `int(len(data) / batch_size) + 1`
    # produced one extra *empty* batch whenever data_size was an exact
    # multiple of batch_size (and one empty batch per epoch for empty data).
    num_batches_per_epoch = (data_size + batch_size - 1) // batch_size
    for epoch in range(num_epochs):
        # Shuffle the data at each epoch
        shuffled_data = data[np.random.permutation(data_size)]
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min(start_index + batch_size, data_size)
            yield shuffled_data[start_index:end_index]
| [
"os.path.join",
"pandas.read_json",
"scipy.sparse.csr_matrix",
"numpy.array",
"numpy.arange",
"nltk.corpus.stopwords.words",
"itertools.chain",
"ast.literal_eval",
"sklearn.model_selection.ShuffleSplit",
"re.sub"
] | [((277, 303), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (292, 303), False, 'from nltk.corpus import stopwords\n'), ((624, 670), 're.sub', 're.sub', (['"""[^A-Za-z0-9(),!?\\\\\'\\\\`]"""', '""" """', 'string'], {}), '("[^A-Za-z0-9(),!?\\\\\'\\\\`]", \' \', string)\n', (630, 670), False, 'import re\n'), ((683, 712), 're.sub', 're.sub', (['"""\\\\\'s"""', '""" \'s"""', 'string'], {}), '("\\\\\'s", " \'s", string)\n', (689, 712), False, 'import re\n'), ((727, 758), 're.sub', 're.sub', (['"""\\\\\'ve"""', '""" \'ve"""', 'string'], {}), '("\\\\\'ve", " \'ve", string)\n', (733, 758), False, 'import re\n'), ((773, 804), 're.sub', 're.sub', (['"""n\\\\\'t"""', '""" n\'t"""', 'string'], {}), '("n\\\\\'t", " n\'t", string)\n', (779, 804), False, 'import re\n'), ((819, 850), 're.sub', 're.sub', (['"""\\\\\'re"""', '""" \'re"""', 'string'], {}), '("\\\\\'re", " \'re", string)\n', (825, 850), False, 'import re\n'), ((865, 894), 're.sub', 're.sub', (['"""\\\\\'d"""', '""" \'d"""', 'string'], {}), '("\\\\\'d", " \'d", string)\n', (871, 894), False, 'import re\n'), ((909, 940), 're.sub', 're.sub', (['"""\\\\\'ll"""', '""" \'ll"""', 'string'], {}), '("\\\\\'ll", " \'ll", string)\n', (915, 940), False, 'import re\n'), ((955, 981), 're.sub', 're.sub', (['""","""', '""" , """', 'string'], {}), "(',', ' , ', string)\n", (961, 981), False, 'import re\n'), ((996, 1022), 're.sub', 're.sub', (['"""!"""', '""" ! """', 'string'], {}), "('!', ' ! ', string)\n", (1002, 1022), False, 'import re\n'), ((1037, 1067), 're.sub', 're.sub', (['"""\\\\("""', '""" \\\\( """', 'string'], {}), "('\\\\(', ' \\\\( ', string)\n", (1043, 1067), False, 'import re\n'), ((1080, 1110), 're.sub', 're.sub', (['"""\\\\)"""', '""" \\\\) """', 'string'], {}), "('\\\\)', ' \\\\) ', string)\n", (1086, 1110), False, 'import re\n'), ((1123, 1153), 're.sub', 're.sub', (['"""\\\\?"""', '""" \\\\? """', 'string'], {}), "('\\\\?', ' \\\\? 
', string)\n", (1129, 1153), False, 'import re\n'), ((1166, 1196), 're.sub', 're.sub', (['"""\\\\s{2,}"""', '""" """', 'string'], {}), "('\\\\s{2,}', ' ', string)\n", (1172, 1196), False, 'import re\n'), ((3132, 3190), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['(val_idx, (row_idx, col_idx))'], {'shape': '(m, n)'}), '((val_idx, (row_idx, col_idx)), shape=(m, n))\n', (3145, 3190), True, 'import scipy.sparse as sp\n'), ((3849, 3907), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['(val_idx, (row_idx, col_idx))'], {'shape': '(m, n)'}), '((val_idx, (row_idx, col_idx)), shape=(m, n))\n', (3862, 3907), True, 'import scipy.sparse as sp\n'), ((4405, 4537), 'numpy.array', 'np.array', (["[[(vocabulary[word] if word in vocabulary else vocabulary['<UNK/>']) for\n word in sentence] for sentence in sentences]"], {}), "([[(vocabulary[word] if word in vocabulary else vocabulary['<UNK/>'\n ]) for word in sentence] for sentence in sentences])\n", (4413, 4537), True, 'import numpy as np\n'), ((6588, 6643), 'sklearn.model_selection.ShuffleSplit', 'ShuffleSplit', ([], {'n_splits': '(5)', 'test_size': '(0.3)', 'random_state': '(5)'}), '(n_splits=5, test_size=0.3, random_state=5)\n', (6600, 6643), False, 'from sklearn.model_selection import KFold, StratifiedKFold, StratifiedShuffleSplit, ShuffleSplit\n'), ((9567, 9623), 'sklearn.model_selection.ShuffleSplit', 'ShuffleSplit', ([], {'n_splits': '(5)', 'test_size': '(0.3)', 'random_state': '(52)'}), '(n_splits=5, test_size=0.3, random_state=52)\n', (9579, 9623), False, 'from sklearn.model_selection import KFold, StratifiedKFold, StratifiedShuffleSplit, ShuffleSplit\n'), ((14452, 14466), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (14460, 14466), True, 'import numpy as np\n'), ((4022, 4049), 'itertools.chain', 'itertools.chain', (['*sentences'], {}), '(*sentences)\n', (4037, 4049), False, 'import itertools\n'), ((5132, 5172), 'ast.literal_eval', 'ast.literal_eval', (["element['field_93158']"], {}), "(element['field_93158'])\n", 
(5148, 5172), False, 'import ast\n'), ((5426, 5467), 'ast.literal_eval', 'ast.literal_eval', (["inner_data['given_exp']"], {}), "(inner_data['given_exp'])\n", (5442, 5467), False, 'import ast\n'), ((6183, 6206), 'pandas.read_json', 'pd.read_json', (['data_path'], {}), '(data_path)\n', (6195, 6206), True, 'import pandas as pd\n'), ((9302, 9325), 'pandas.read_json', 'pd.read_json', (['data_path'], {}), '(data_path)\n', (9314, 9325), True, 'import pandas as pd\n'), ((5542, 5582), 'ast.literal_eval', 'ast.literal_eval', (["inner_data['true_exp']"], {}), "(inner_data['true_exp'])\n", (5558, 5582), False, 'import ast\n'), ((6027, 6050), 'os.path.join', 'os.path.join', (['data_path'], {}), '(data_path)\n', (6039, 6050), False, 'import os\n'), ((9230, 9253), 'os.path.join', 'os.path.join', (['data_path'], {}), '(data_path)\n', (9242, 9253), False, 'import os\n'), ((14678, 14698), 'numpy.arange', 'np.arange', (['data_size'], {}), '(data_size)\n', (14687, 14698), True, 'import numpy as np\n')] |
from .setup_scripts import run_setup as run1
from .analysis_scripts import run_analysis as run2
from .community_scripts import run_community as run3
import hydra
from omegaconf import DictConfig, OmegaConf
import logging
import socket
import time
import random
import numpy as np
@hydra.main(config_path="../../data/hydra", config_name="config.yaml")
def run_ncmw(cfg: DictConfig) -> None:
    """Entry point of the ncmw workflow.

    Logs the resolved hydra config and host, seeds both RNGs for
    reproducibility, then runs the setup / analysis / community stages that
    are enabled in the config, reporting the total wall time.
    """
    logger = logging.getLogger(__name__)
    logger.info(OmegaConf.to_yaml(cfg))
    logger.info(f"Hostname: {socket.gethostname()}")

    # Seed both the stdlib and numpy RNGs so runs are reproducible.
    seed = cfg.seed
    random.seed(seed)
    np.random.seed(seed)
    logger.info(f"Random seed: {seed}")

    started = time.time()
    if cfg.run_setup:
        logger.info("Running setup...")
        run1(cfg)
    if cfg.run_analysis:
        logger.info("Running analysis...")
        run2(cfg)
    if cfg.run_community:
        logger.info("Running community...")
        run3(cfg)
    runtime = time.time() - started
    logger.info(f"Finished Workflow in {runtime} seconds")
| [
"omegaconf.OmegaConf.to_yaml",
"numpy.random.seed",
"time.time",
"socket.gethostname",
"random.seed",
"hydra.main",
"logging.getLogger"
] | [((286, 355), 'hydra.main', 'hydra.main', ([], {'config_path': '"""../../data/hydra"""', 'config_name': '"""config.yaml"""'}), "(config_path='../../data/hydra', config_name='config.yaml')\n", (296, 355), False, 'import hydra\n'), ((405, 432), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (422, 432), False, 'import logging\n'), ((545, 562), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (556, 562), False, 'import random\n'), ((567, 587), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (581, 587), True, 'import numpy as np\n'), ((643, 654), 'time.time', 'time.time', ([], {}), '()\n', (652, 654), False, 'import time\n'), ((918, 929), 'time.time', 'time.time', ([], {}), '()\n', (927, 929), False, 'import time\n'), ((446, 468), 'omegaconf.OmegaConf.to_yaml', 'OmegaConf.to_yaml', (['cfg'], {}), '(cfg)\n', (463, 468), False, 'from omegaconf import DictConfig, OmegaConf\n'), ((496, 516), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (514, 516), False, 'import socket\n')] |
"""
An example of extensible machine behavior. Random noise is added to machine health index
readings and monitored periodically instead of continuously.
"""
import random
import numpy as np
from simantha import Source, Machine, Sink, Maintainer, System, simulation, utils
class SensingEvent(simulation.Event):
    """Simulation event type for periodic condition monitoring.

    A dedicated event class exists only so sensing gets the right slot in
    the event queue: priority 5.5 schedules sensing just after machine
    degradation events, so each reading sees the freshly updated health.
    """

    def get_action_priority(self):
        return 5.5
class ConditionMonitoredMachine(Machine):
    """A Machine whose health index is sampled periodically, with additive
    Gaussian sensor noise, instead of being observed continuously.

    Readings are accumulated in self.sensor_data as parallel 'time' and
    'reading' lists.
    """
    def __init__(self, sensing_interval=1, sensor_noise=0, **kwargs):
        # sensing_interval: simulation time between consecutive readings.
        # sensor_noise: std. dev. of the zero-mean Gaussian added to health.
        self.sensing_interval = sensing_interval
        self.sensor_noise = sensor_noise
        self.sensor_data = {'time': [], 'reading': []}
        super().__init__(**kwargs)
    def initialize_addon_processes(self):
        """Kick off the periodic sensing loop when the simulation starts."""
        self.env.schedule_event(
            time=self.env.now,
            location=self,
            action=self.sense,
            source=f'{self.name} initial addon process',
            event_type=SensingEvent
        )
    def repair_addon_processes(self):
        """Restart the sensing loop after a repair completes."""
        self.env.schedule_event(
            time=self.env.now,
            location=self,
            action=self.sense,
            source=f'{self.name} repair addon process at {self.env.now}',
            event_type=SensingEvent
        )
    def sense(self):
        """Record one noisy health reading and schedule the next one."""
        self.sensor_reading = self.health + np.random.normal(0, self.sensor_noise)
        self.sensor_data['time'].append(self.env.now)
        self.sensor_data['reading'].append(self.sensor_reading)
        # Self-rescheduling: each reading books the next sensing event.
        self.env.schedule_event(
            time=self.env.now+self.sensing_interval,
            location=self,
            action=self.sense,
            source=f'{self.name} sensing at {self.env.now}',
            event_type=SensingEvent
        )
def main():
    """Simulate a source -> machine -> sink line for six hours and print the
    machine's true health next to the noisy periodic sensor readings."""
    health_transitions = utils.generate_degradation_matrix(h_max=10, p=0.1)
    repair_distribution = {'geometric': 0.1}

    source = Source()
    M1 = ConditionMonitoredMachine(
        name='M1',
        cycle_time=2,
        degradation_matrix=health_transitions,
        cm_distribution=repair_distribution,
        sensing_interval=2,
        sensor_noise=1
    )
    sink = Sink()

    # Wire the line: source -> M1 -> sink.
    source.define_routing(downstream=[M1])
    M1.define_routing(upstream=[source], downstream=[sink])
    sink.define_routing(upstream=[M1])

    system = System(objects=[source, M1, sink])
    random.seed(1)
    system.simulate(simulation_time=6*60)

    # Print true health and corresponding sensor reading
    n_rows = 12
    print('\ntime    health    sensor reading')
    for t, reading in zip(
        M1.sensor_data['time'][:n_rows], M1.sensor_data['reading'][:n_rows]
    ):
        # Latest health record at or before the sensing time.
        stamp = max([s for s in M1.health_data['time'] if s <= t])
        health = M1.health_data['health'][M1.health_data['time'].index(stamp)]
        print(f'{t:<4}  {health:<3}  {reading:>8.4f}')


if __name__ == '__main__':
    main()
| [
"simantha.Sink",
"simantha.utils.generate_degradation_matrix",
"simantha.System",
"random.seed",
"simantha.Source",
"numpy.random.normal"
] | [((1942, 1992), 'simantha.utils.generate_degradation_matrix', 'utils.generate_degradation_matrix', ([], {'h_max': '(10)', 'p': '(0.1)'}), '(h_max=10, p=0.1)\n', (1975, 1992), False, 'from simantha import Source, Machine, Sink, Maintainer, System, simulation, utils\n'), ((2048, 2056), 'simantha.Source', 'Source', ([], {}), '()\n', (2054, 2056), False, 'from simantha import Source, Machine, Sink, Maintainer, System, simulation, utils\n'), ((2290, 2296), 'simantha.Sink', 'Sink', ([], {}), '()\n', (2294, 2296), False, 'from simantha import Source, Machine, Sink, Maintainer, System, simulation, utils\n'), ((2454, 2488), 'simantha.System', 'System', ([], {'objects': '[source, M1, sink]'}), '(objects=[source, M1, sink])\n', (2460, 2488), False, 'from simantha import Source, Machine, Sink, Maintainer, System, simulation, utils\n'), ((2494, 2508), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (2505, 2508), False, 'import random\n'), ((1478, 1516), 'numpy.random.normal', 'np.random.normal', (['(0)', 'self.sensor_noise'], {}), '(0, self.sensor_noise)\n', (1494, 1516), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import sys, os, pdb
import numpy as np
from scipy import special
def GaussianSpot2D(x, y, A, sigma, x0, y0):
term0 = np.square(y - y0) + np.square(x-x0)
z = A*np.exp(-term0/(2*sigma*sigma))
return z
def GaussianLine2D(x, y, A, sigma, x0, y0, L, theta):
term0 = (y-y0)*np.cos(theta)+(x0-x)*np.sin(-theta)
expterm = A*np.exp(-term0*term0/(2*sigma*sigma))
erfterm = (special.erf((L+(x0-x)*np.cos(theta)+(y0-y)*np.sin(-theta))/(np.sqrt(2)*sigma)) \
- special.erf(((x0-x)*np.cos(theta)+(y0-y)*np.sin(-theta))/(np.sqrt(2)*sigma)))
z = expterm*erfterm
return z
def GaussianLine3D(A, sigmaxy, sigmaz, x, y, z, x0, y0, z0, x1, y1, z1):
    """
    Evaluate a 3D Gaussian line-segment model at the point (x, y, z).

    The segment runs from (x0, y0, z0) to (x1, y1, z1); ``sigmaxy`` is the
    lateral (xy) Gaussian width and ``sigmaz`` the axial (z) width, with
    amplitude scale ``A``.  The closed form is a Gaussian transverse
    profile multiplied by an erf-difference term along the segment,
    normalized by the segment length.
    """
    # Segment components along each axis.
    seg_x = x1 - x0
    seg_y = y1 - y0
    seg_z = z1 - z0
    # Evaluation point relative to the segment start.
    rel_x = x - x0
    rel_y = y - y0
    rel_z = z - z0
    # Numerator terms of the Gaussian exponent.
    g1 = seg_z**2 * (rel_x**2 + rel_y**2) * sigmaxy**2
    g2 = -2*seg_z * (seg_x*rel_x + seg_y*rel_y)*rel_z * sigmaxy**2
    g3 = (seg_x**2 + seg_y**2)*(rel_z**2)*sigmaxy**2
    g4 = (seg_y*rel_x - seg_x*rel_y)**2*sigmaz**2
    g_denom = 2*seg_z**2*sigmaxy**4 + 2*(seg_x**2 + seg_y**2)*sigmaxy**2*sigmaz**2
    gauss = np.exp(-(g1 + g2 + g3 + g4)/g_denom)
    # erf terms bounding the segment ends.
    erf_scale = sigmaxy * sigmaz * np.sqrt(2*seg_z**2*sigmaxy**2 + 2*(seg_x**2 + seg_y**2)*sigmaz**2)
    erf_a = -seg_z*rel_z*sigmaxy**2 + (-seg_x*rel_x - seg_y*rel_y)*sigmaz**2
    erf_b = seg_z*(seg_z - rel_z)*sigmaxy**2 + (seg_x*(seg_x - rel_x) + seg_y*(seg_y - rel_y))*sigmaz**2
    lower = special.erf(erf_a/erf_scale)
    upper = special.erf(erf_b/erf_scale)
    # Amplitude normalization involving the segment length.
    seg_len = np.sqrt(seg_x**2 + seg_y**2 + seg_z**2)
    scale = A * seg_len / (4 * np.pi * sigmaxy * np.sqrt(seg_z**2 * sigmaxy**2 + seg_x**2 * sigmaz**2 + seg_y**2 * sigmaz**2))
    return scale * gauss * (-lower + upper)
| [
"numpy.square",
"scipy.special.erf",
"numpy.sin",
"numpy.exp",
"numpy.cos",
"numpy.sqrt"
] | [((1184, 1247), 'numpy.exp', 'np.exp', (['(-(expterm1 + expterm2 + expterm3 + expterm4) / expdenom)'], {}), '(-(expterm1 + expterm2 + expterm3 + expterm4) / expdenom)\n', (1190, 1247), True, 'import numpy as np\n'), ((1513, 1544), 'scipy.special.erf', 'special.erf', (['(erf1top / erfdenom)'], {}), '(erf1top / erfdenom)\n', (1524, 1544), False, 'from scipy import special\n'), ((1554, 1585), 'scipy.special.erf', 'special.erf', (['(erf2top / erfdenom)'], {}), '(erf2top / erfdenom)\n', (1565, 1585), False, 'from scipy import special\n'), ((1618, 1654), 'numpy.sqrt', 'np.sqrt', (['(Lx ** 2 + Ly ** 2 + Lz ** 2)'], {}), '(Lx ** 2 + Ly ** 2 + Lz ** 2)\n', (1625, 1654), True, 'import numpy as np\n'), ((145, 162), 'numpy.square', 'np.square', (['(y - y0)'], {}), '(y - y0)\n', (154, 162), True, 'import numpy as np\n'), ((165, 182), 'numpy.square', 'np.square', (['(x - x0)'], {}), '(x - x0)\n', (174, 182), True, 'import numpy as np\n'), ((191, 227), 'numpy.exp', 'np.exp', (['(-term0 / (2 * sigma * sigma))'], {}), '(-term0 / (2 * sigma * sigma))\n', (197, 227), True, 'import numpy as np\n'), ((362, 406), 'numpy.exp', 'np.exp', (['(-term0 * term0 / (2 * sigma * sigma))'], {}), '(-term0 * term0 / (2 * sigma * sigma))\n', (368, 406), True, 'import numpy as np\n'), ((1291, 1366), 'numpy.sqrt', 'np.sqrt', (['(2 * Lz ** 2 * sigmaxy ** 2 + 2 * (Lx ** 2 + Ly ** 2) * sigmaz ** 2)'], {}), '(2 * Lz ** 2 * sigmaxy ** 2 + 2 * (Lx ** 2 + Ly ** 2) * sigmaz ** 2)\n', (1298, 1366), True, 'import numpy as np\n'), ((310, 323), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (316, 323), True, 'import numpy as np\n'), ((331, 345), 'numpy.sin', 'np.sin', (['(-theta)'], {}), '(-theta)\n', (337, 345), True, 'import numpy as np\n'), ((1695, 1774), 'numpy.sqrt', 'np.sqrt', (['(Lz ** 2 * sigmaxy ** 2 + Lx ** 2 * sigmaz ** 2 + Ly ** 2 * sigmaz ** 2)'], {}), '(Lz ** 2 * sigmaxy ** 2 + Lx ** 2 * sigmaz ** 2 + Ly ** 2 * sigmaz ** 2)\n', (1702, 1774), True, 'import numpy as np\n'), ((474, 484), 
'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (481, 484), True, 'import numpy as np\n'), ((568, 578), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (575, 578), True, 'import numpy as np\n'), ((457, 471), 'numpy.sin', 'np.sin', (['(-theta)'], {}), '(-theta)\n', (463, 471), True, 'import numpy as np\n'), ((530, 543), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (536, 543), True, 'import numpy as np\n'), ((551, 565), 'numpy.sin', 'np.sin', (['(-theta)'], {}), '(-theta)\n', (557, 565), True, 'import numpy as np\n'), ((436, 449), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (442, 449), True, 'import numpy as np\n')] |
####################################################################################################
# CSE 6140 - Fall 2019
# <NAME>
# <NAME>
# <NAME>
# <NAME>
####################################################################################################
"""
This file has the implementation of the Simualted Annealing (SA) algorithm with local and global restarts.
SA probabilistically decides to move from a state to a candidate neighbor state in order to
minimize the thermodynamic energy of the system. This iterative process stops once an acceptable
solution is found, or until a computational budget is exceeded. In general, the probability that
simulated annealing terminates with the global minima converges to 1 as the cooling schedule is lengthened.
For our algorithm, we start with an initial guess of temperature, then restart with a slightly higher
temperature, reducing by the same geometric ratio every iteration. Once the number of local restarts has
reached convergence, we restart the whole problem with a new initial guess. In a sense,
"""
import util as ut
import time
import random
import math
import matplotlib.pyplot as plt
import pandas as pd
import statistics
import numpy as np
from scipy.spatial import distance_matrix
class SimulatedAnnealing(object):
    """
    Simulated annealing solver for the travelling salesman problem, with
    temperature "reheating" on local restarts.

    The solver starts from an initial tour (greedy nearest-neighbor or
    random, depending on ``tour_flag``), perturbs it with random 2-opt
    segment reversals, and accepts worse candidates with Metropolis
    probability exp(delta / temperature).  The temperature is reduced
    geometrically by ``alpha`` every iteration.  A stalled run may be
    restarted at a higher temperature via ``simulated_annealing(restart=True)``.
    """
    def __init__(self, name, coordinates, random_seed=0, temperature=1e+10,
                 alpha=0.995, max_iterations=1000000, stop_temp=1e-8,
                 time_start=None, max_time=600, tour_flag=0):
        '''
        Constructor for Simulated Annealing problem.
        Parameters:
            name: String for the name of the file
            coordinates: dictionary of node IDs to coordinates.
        Optional:
            random_seed: random seed for Python random number generator
            temperature: initial annealing temperature
            alpha: temperature reduction factor
            max_iterations: maximum number of iterations before stopping
            stop_temp: temperature at which to stop
            time_start: wall-clock start time in seconds (defaults to now)
            max_time: wall-clock budget in seconds
            tour_flag: 0 -> nearest-neighbor initial tour, else random tour
        '''
        # BUG FIX: the original signature used ``time_start=time.time()`` as a
        # default, which is evaluated once at class-definition (import) time
        # rather than at construction time.  Use a None sentinel instead.
        if time_start is None:
            time_start = time.time()
        # Problem parameters
        self.time_start = time_start
        self.time_delta = max_time / 100
        self.initial_temperature = temperature
        self.coordinates = coordinates
        self.distance_matrix = ut.calculate_distance_matrix(self.coordinates)
        self.name = name
        self.path = []
        self.N = len(coordinates)
        self.nodes = list(self.coordinates.keys())
        self.max_time = max_time
        # Known optimal tour lengths for the benchmark instances; reaching
        # one of these lets the solver stop early (see simulated_annealing()).
        self.solutions = [2003763, 7542, 893536, 52643, 277952, 100431, 1555060,
                          1395981, 655454, 810196, 1176151, 62962, 132709]
        # Seed random number generator
        random.seed(random_seed)
        # Annealing parameters
        self.iteration = 1
        self.initial_alpha = alpha
        self.alpha = self.initial_alpha
        self.stop = max_iterations
        self.stop_temp = stop_temp
        self.temperature = temperature
        # Solutions
        self.best_solution = None
        self.global_best_fit = float("Inf")
        self.best_fit = float("Inf")
        # Restarts
        self.trace = []
        self.result = []
        self.convergence = min(self.N, 30)
        self.restart_count = 0
        self.tour_flag = tour_flag
    def random_tour(self):
        '''
        Random tour generated. Identifies all nodes, then shuffles order.
        Returns:
            path: tour path
            fitness: tour length
        '''
        path = list(self.coordinates.keys())
        random.shuffle(path)
        fitness = ut.get_tour_distance(path, self.coordinates)
        return path, fitness
    def nearest_neighbors_tour(self):
        '''
        Tour generated with greedy heuristic: picks a random starting node,
        then repeatedly appends the nearest unvisited neighbor.
        Also updates best_fit/best_solution if this tour improves on them.
        Returns:
            solution: tour path
            fitness: tour length
        '''
        solution = []
        unassigned_nodes = set(self.nodes)
        node = random.choice(self.nodes)
        solution.append(node)
        unassigned_nodes.remove(node)
        while unassigned_nodes:
            # Renamed from ``next`` to avoid shadowing the builtin.
            nearest = min(unassigned_nodes, key=lambda x: self.distance(node, x))
            unassigned_nodes.remove(nearest)
            solution.append(nearest)
            node = nearest
        fitness = ut.get_tour_distance(solution, self.coordinates)
        if fitness < self.best_fit:
            self.best_fit = fitness
            self.best_solution = solution
        return solution, fitness
    def simulated_annealing(self, restart=False, current_solution=None, current_fit=None):
        '''
        Simulate the annealing process:
            1. Generate a candidate solution via the 2-opt strategy
            2. Accept the candidate if it is better, or probabilistically
               (Metropolis criterion) based on the current temperature
            3. Iterate while lowering the temperature
        With ``restart=True``, recurses with a reheated temperature until
        converged() or until the wall-clock budget is nearly spent.
        '''
        # Bail out if there is not enough wall-clock budget left for a pass.
        if self.max_time - (time.time() - self.time_start) < 2 * self.time_delta:
            return
        t1 = time.time()
        # Start with a tour
        if current_solution is None:
            if self.tour_flag == 0:
                self.current_solution, self.current_fit = self.nearest_neighbors_tour()
            else:
                self.current_solution, self.current_fit = self.random_tour()
        else:
            self.current_solution = current_solution
            self.current_fit = current_fit
        # While annealing conditions are still met...
        while (self.temperature >= self.stop_temp and self.iteration < self.stop
               and self.max_time - (time.time() - self.time_start) > 2 * self.time_delta):
            candidate = list(self.current_solution)
            # Generate next candidate using 2-opt: reverse a random segment.
            seg_len = random.randint(2, self.N - 1)
            start = random.randint(0, self.N - seg_len)
            candidate[start:(start + seg_len)] = reversed(candidate[start:(start + seg_len)])
            # Keep the candidate if it is better than the current solution,
            # or probabilistically (annealing) if it is worse.
            candidate_fit = ut.get_tour_distance(candidate, self.coordinates)
            if candidate_fit < self.current_fit:
                self.current_fit = candidate_fit
                self.current_solution = candidate
            else:
                p = math.exp((self.current_fit - candidate_fit) / self.temperature)
                if random.random() < p:
                    self.current_fit = candidate_fit
                    self.current_solution = candidate
            if self.current_fit < self.best_fit:
                self.restart_count = 0
                self.best_fit = self.current_fit
                self.best_solution = self.current_solution
                if self.best_fit < self.global_best_fit:
                    self.global_best_fit = self.best_fit
                self.trace.append([(time.time() - self.time_start), self.global_best_fit, self.best_solution])
            # Cooling for next iteration
            self.temperature *= self.alpha
            self.iteration += 1
        # Track the longest observed pass duration for the budget check above.
        self.time_delta = max(self.time_delta, time.time() - t1)
        # Early exit if a known-optimal tour length was reached.
        if self.best_fit in self.solutions:
            return
        # Restart from the best solution with a reheated temperature?
        if restart and not self.converged():
            self.restart_count += 1
            self.temperature = self.initial_temperature * (10 ** self.restart_count)
            self.iteration = 1
            self.simulated_annealing(restart=True, current_solution=self.best_solution,
                                     current_fit=self.best_fit)
    def converged(self):
        '''
        True once the number of local restarts exceeds the convergence bound
        or the iteration budget has been exhausted.
        '''
        return self.restart_count > self.convergence or self.iteration >= self.stop
    def distance(self, n1, n2):
        '''
        Calculates the distance between nodes.
        Parameters:
            n1: Node 1 ID (1-indexed)
            n2: Node 2 ID (1-indexed)
        Returns: floating point Euclidean distance between the two nodes
        '''
        return self.distance_matrix[n1 - 1, n2 - 1]
def simulated_annealing_single(file_path, random_seed, time_start, max_time, test_quality=None):
    '''
    Run one full simulated-annealing session on a TSP instance file.

    Repeatedly re-initializes the solver with fresh random seeds until the
    wall-clock budget is nearly spent or a known-optimal tour length is hit,
    keeping the best tour seen across runs.

    Parameters:
        file_path: path of the TSP instance file
        random_seed: seed for the top-level random number generator
        time_start: wall-clock start time in seconds
        max_time: wall-clock budget in seconds
        test_quality: optional extra tour length treated as "good enough"
    Returns: (best_fit, best_solution as 1-indexed list or None, trace)
    '''
    random.seed(random_seed)
    coordinates = ut.read_tsp_file(file_path)
    sa = SimulatedAnnealing(file_path, coordinates, stop_temp=1e-6, temperature=1e+10,
                            random_seed=random.randint(0, 100000), alpha=0.999,
                            time_start=time_start, max_time=max_time)
    if test_quality is not None:
        sa.solutions.append(test_quality)
    sa.simulated_annealing(restart=True)
    best_fit = sa.best_fit
    best_solution = sa.best_solution
    # Keep restarting with fresh seeds while budget remains and no known
    # optimum has been reached; carry the trace and global best across runs.
    while (max_time - (time.time() - time_start) > 2 * sa.time_delta
           and sa.best_fit not in sa.solutions):
        trace = sa.trace
        sa.__init__(file_path, coordinates, stop_temp=1e-6, temperature=1e+10,
                    random_seed=random.randint(0, 100000), alpha=0.999,
                    time_start=time_start, max_time=max_time, tour_flag=0)
        sa.trace = trace
        sa.global_best_fit = best_fit
        sa.simulated_annealing(restart=True)
        if sa.best_fit < best_fit:
            best_fit = sa.best_fit
            best_solution = sa.best_solution
    if best_solution is None:
        return best_fit, None, sa.trace
    else:
        # Convert to 1-indexed node IDs for output.
        return best_fit, list(np.asarray(best_solution) + 1), sa.trace
if __name__ == "__main__":
a = [4,5,6]
print(a)
print(list(np.asarray(a)-1))
| [
"math.exp",
"random.randint",
"util.calculate_distance_matrix",
"random.shuffle",
"numpy.asarray",
"random.choice",
"time.time",
"util.get_tour_distance",
"random.random",
"util.read_tsp_file",
"random.seed"
] | [((8701, 8725), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (8712, 8725), False, 'import random\n'), ((8789, 8816), 'util.read_tsp_file', 'ut.read_tsp_file', (['file_path'], {}), '(file_path)\n', (8805, 8816), True, 'import util as ut\n'), ((1461, 1472), 'time.time', 'time.time', ([], {}), '()\n', (1470, 1472), False, 'import time\n'), ((2180, 2226), 'util.calculate_distance_matrix', 'ut.calculate_distance_matrix', (['self.coordinates'], {}), '(self.coordinates)\n', (2208, 2226), True, 'import util as ut\n'), ((2580, 2604), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (2591, 2604), False, 'import random\n'), ((3491, 3511), 'random.shuffle', 'random.shuffle', (['path'], {}), '(path)\n', (3505, 3511), False, 'import random\n'), ((3530, 3574), 'util.get_tour_distance', 'ut.get_tour_distance', (['path', 'self.coordinates'], {}), '(path, self.coordinates)\n', (3550, 3574), True, 'import util as ut\n'), ((3942, 3967), 'random.choice', 'random.choice', (['self.nodes'], {}), '(self.nodes)\n', (3955, 3967), False, 'import random\n'), ((4265, 4313), 'util.get_tour_distance', 'ut.get_tour_distance', (['solution', 'self.coordinates'], {}), '(solution, self.coordinates)\n', (4285, 4313), True, 'import util as ut\n'), ((4996, 5007), 'time.time', 'time.time', ([], {}), '()\n', (5005, 5007), False, 'import time\n'), ((5728, 5757), 'random.randint', 'random.randint', (['(2)', '(self.N - 1)'], {}), '(2, self.N - 1)\n', (5742, 5757), False, 'import random\n'), ((5774, 5803), 'random.randint', 'random.randint', (['(0)', '(self.N - l)'], {}), '(0, self.N - l)\n', (5788, 5803), False, 'import random\n'), ((6101, 6150), 'util.get_tour_distance', 'ut.get_tour_distance', (['candidate', 'self.coordinates'], {}), '(candidate, self.coordinates)\n', (6121, 6150), True, 'import util as ut\n'), ((8922, 8947), 'random.randint', 'random.randint', (['(0)', '(100000)'], {}), '(0, 100000)\n', (8936, 8947), False, 'import random\n'), ((6337, 
6400), 'math.exp', 'math.exp', (['((self.current_fit - candidate_fit) / self.temperature)'], {}), '((self.current_fit - candidate_fit) / self.temperature)\n', (6345, 6400), False, 'import math\n'), ((6418, 6433), 'random.random', 'random.random', ([], {}), '()\n', (6431, 6433), False, 'import random\n'), ((7167, 7178), 'time.time', 'time.time', ([], {}), '()\n', (7176, 7178), False, 'import time\n'), ((9440, 9465), 'random.randint', 'random.randint', (['(0)', '(100000)'], {}), '(0, 100000)\n', (9454, 9465), False, 'import random\n'), ((10196, 10209), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (10206, 10209), True, 'import numpy as np\n'), ((4872, 4883), 'time.time', 'time.time', ([], {}), '()\n', (4881, 4883), False, 'import time\n'), ((9208, 9219), 'time.time', 'time.time', ([], {}), '()\n', (9217, 9219), False, 'import time\n'), ((10085, 10110), 'numpy.asarray', 'np.asarray', (['best_solution'], {}), '(best_solution)\n', (10095, 10110), True, 'import numpy as np\n'), ((5559, 5570), 'time.time', 'time.time', ([], {}), '()\n', (5568, 5570), False, 'import time\n'), ((6917, 6928), 'time.time', 'time.time', ([], {}), '()\n', (6926, 6928), False, 'import time\n')] |
from beluga.bvpsol.BaseAlgorithm import BaseAlgorithm, BVPResult
import numpy as np
import copy
from scipy.optimize import minimize
class Collocation(BaseAlgorithm):
    """
    Collocation algorithm for solving boundary-value problems.

    The trajectory is discretized on a fixed mesh; the defect between the
    Hermite-cubic midpoint derivative and the dynamics is driven to zero by
    SLSQP, subject to the boundary and (optional) path constraints.

    :param args: Unused
    :param kwargs: Additional parameters accepted by the solver.
    :return: Collocation object.
    +------------------------+-----------------+-----------------+
    | Valid kwargs           | Default Value   | Valid Values    |
    +========================+=================+=================+
    | adaptive_mesh          | False           | Bool            |
    +------------------------+-----------------+-----------------+
    | cached                 | True            | Bool            |
    +------------------------+-----------------+-----------------+
    | tolerance              | 1e-4            | > 0             |
    +------------------------+-----------------+-----------------+
    | max_error              | 100             | > 0             |
    +------------------------+-----------------+-----------------+
    | max_iterations         | 100             | > 0             |
    +------------------------+-----------------+-----------------+
    | number_of_nodes        | 30              | >= 4            |
    +------------------------+-----------------+-----------------+
    | use_numba              | False           | Bool            |
    +------------------------+-----------------+-----------------+
    | verbose                | False           | Bool            |
    +------------------------+-----------------+-----------------+
    """
    def __init__(self, *args, **kwargs):
        BaseAlgorithm.__init__(self, *args, **kwargs)
        adaptive_mesh = kwargs.get('adaptive_mesh', False)
        cached = kwargs.get('cached', True)
        tolerance = kwargs.get('tolerance', 1e-4)
        max_error = kwargs.get('max_error', 100)
        max_iterations = kwargs.get('max_iterations', 100)
        number_of_nodes = kwargs.get('number_of_nodes', 30)
        use_numba = kwargs.get('use_numba', False)
        verbose = kwargs.get('verbose', False)
        self.adaptive_mesh = adaptive_mesh
        self.cached = cached
        self.tolerance = tolerance
        self.max_error = max_error
        self.max_iterations = max_iterations
        self.number_of_nodes = number_of_nodes
        self.use_numba = use_numba
        self.verbose = verbose
        # The remaining attributes describe the current problem and are
        # populated inside solve().
        self.constraint_midpoint = None
        self.constraint_boundary = None
        self.constraint_path = None
        self.number_of_dynamical_params = None
        self.number_of_nondynamical_params = None
        self.tspan = None
        self.number_of_odes = None
        self.number_of_controls = None
        self.const = None
        self.number_of_quads = None
    def solve(self, solinit, **kwargs):
        """
        Solve a two-point boundary value problem using the collocation method.
        :param solinit: An initial guess for a solution to the BVP.
        :return: A solution to the BVP.
        """
        sol = copy.deepcopy(solinit)
        sol.set_interpolate_function('cubic')
        number_of_datapoints = len(sol.t)
        # Default costs and quads to return nothing if not defined
        def return_nil(*_, **__):
            return np.array([])
        if self.quadrature_function is None:
            self.quadrature_function = return_nil
        if number_of_datapoints < 4:
            # Special case where polynomial interpolation fails. Use linear interpolation to get 4 nodes.
            t_new = np.linspace(sol.t[0], sol.t[-1], num=4)
            y_new = np.column_stack([np.interp(t_new, sol.t, sol.y[:, ii]) for ii in range(sol.y.shape[1])])
            if sol.q.size > 0:
                q_new = np.column_stack([np.interp(t_new, sol.t, sol.q[:, ii]) for ii in range(sol.q.shape[1])])
            else:
                q_new = np.array([])
            if sol.u.size > 0:
                # BUG FIX: this branch previously interpolated sol.q when
                # rebuilding the control history (copy-paste from the q_new
                # branch above); interpolate sol.u instead.
                u_new = np.column_stack([np.interp(t_new, sol.t, sol.u[:, ii]) for ii in range(sol.u.shape[1])])
            else:
                u_new = np.array([])
            sol.t = t_new
            sol.y = y_new
            sol.q = q_new
            sol.u = u_new
        if self.number_of_nodes > number_of_datapoints:
            # Densify the mesh by resampling the interpolated guess.
            new_t = np.linspace(sol.t[0], sol.t[-1], self.number_of_nodes)
            new_y, new_q, new_u = sol(new_t[0])
            for ti in new_t[1:]:
                yy, qq, uu = sol(ti)
                new_y = np.vstack((new_y, yy))
                new_q = np.vstack((new_q, qq))
                new_u = np.vstack((new_u, uu))
            sol.t = new_t
            sol.y = new_y
            sol.q = new_q
            sol.u = new_u
        else:
            self.number_of_nodes = number_of_datapoints
        self.constraint_midpoint = {'type': 'eq', 'fun': self._collocation_constraint_midpoint}
        self.constraint_boundary = {'type': 'eq', 'fun': self._collocation_constraint_boundary}
        self.constraint_path = {'type': 'ineq', 'fun': self._collocation_constraint_path}
        self.tspan = sol.t
        self.number_of_odes = sol.y.shape[1]
        if sol.u.size > 0:
            self.number_of_controls = sol.u.shape[1]
        else:
            self.number_of_controls = 0
        if sol.q.size == 0:
            self.number_of_quads = 0
            sol.q = np.array([]).reshape((self.number_of_nodes, 0))
        else:
            self.number_of_quads = sol.q.shape[1]
        if sol.dynamical_parameters is None:
            sol.dynamical_parameters = np.array([], dtype=np.float64)
        if sol.nondynamical_parameters is None:
            sol.nondynamical_parameters = np.array([], dtype=np.float64)
        self.number_of_dynamical_params = len(sol.dynamical_parameters)
        self.number_of_nondynamical_params = len(sol.nondynamical_parameters)
        # Flatten the whole guess into one decision vector for SLSQP.
        vectorized = self._wrap_params(sol.y, sol.q, sol.u, sol.dynamical_parameters, sol.nondynamical_parameters)
        self.const = sol.const
        sol.converged = False
        # noinspection PyTypeChecker
        xopt = minimize(
            self._collocation_cost, vectorized, args=(), method='SLSQP', jac=None,
            hessp=None, bounds=None,
            constraints=[self.constraint_midpoint, self.constraint_boundary, self.constraint_path],
            tol=self.tolerance, callback=None, options={'maxiter': self.max_iterations})
        sol.t = self.tspan
        sol.y, q0, sol.u, sol.dynamical_parameters, sol.nondynamical_parameters = self._unwrap_params(xopt['x'])
        if self.number_of_quads > 0:
            sol.q = self._integrate(self.quadrature_function, self.derivative_function, sol.y, sol.u,
                                    sol.dynamical_parameters, self.const, self.tspan, q0)
        if 'kkt' in xopt:
            sol.dual = self._kkt_to_dual(sol, xopt['kkt'][0])
        else:
            sol.dual = np.ones_like(sol.y)*np.nan
        sol.converged = xopt['success']
        out = BVPResult(sol=sol, success=xopt['success'], message=xopt['message'],
                        niter=xopt['nit'])
        return out
    @staticmethod
    def _kkt_to_dual(sol, kkt):
        """Convert SLSQP KKT multipliers into per-node dual estimates,
        scaling by the (negative) mesh spacing and padding the last node
        with zeros."""
        nodes = len(sol.t)
        dual = (kkt.reshape((nodes-1, sol.y.shape[1]), order='F').T/(sol.t[:-1]-sol.t[1:])).T
        dual = np.vstack((dual, np.zeros(sol.y.shape[1])))
        return dual
    def _collocation_constraint_path(self, vectorized):
        """Inequality constraint: the path constraint evaluated (negated)
        at every node; empty tuple when no path constraint is defined."""
        if self.inequality_constraint_function is None:
            return ()
        y, q0, u, params, nondynamical_params = self._unwrap_params(vectorized)
        if u.size > 0:
            cp = np.hstack([-self.inequality_constraint_function(y[ii], u[ii], params, self.const)
                       for ii in range(self.number_of_nodes)])
        else:
            cp = np.hstack([-self.inequality_constraint_function(y[ii], [], params, self.const)
                       for ii in range(self.number_of_nodes)])
        return cp
    def _collocation_constraint_midpoint(self, vectorized):
        """Equality constraint: defect between the Hermite-cubic midpoint
        derivative and the dynamics evaluated at that midpoint, for every
        mesh interval."""
        y, quads0, u, params, nondyn_params = self._unwrap_params(vectorized)
        # TODO: Vectorized our code compiler so this line works
        # dX1 = np.squeeze(self.derivative_function(y.T, u.T, params, self.const)).T
        dX = np.squeeze([self.derivative_function(yi, ui, params, self.const) for yi, ui in zip(y, u)])
        if len(dX.shape) == 1:
            dX = np.array([dX]).T
        dp0 = dX[:-1]
        dp1 = dX[1:]
        p0 = y[:-1]
        p1 = y[1:]
        t0 = self.tspan[:-1]
        t1 = self.tspan[1:]
        u0 = u[:-1]
        uf = u[1:]
        # Controls at the interval midpoints are taken as simple averages.
        u_midpoint = (u0 + uf)/2
        midpoint_predicted = self._midpoint(p0, p1, dp0, dp1, t0, t1)
        midpoint_derivative_predicted = self._midpoint_derivative(p0, p1, dp0, dp1, t0, t1)
        # TODO: Vectorize, so this one works as well
        midpoint_derivative_actual = np.squeeze(
            [self.derivative_function(yi, ui, params, self.const) for yi, ui in zip(midpoint_predicted, u_midpoint)])
        if len(midpoint_derivative_actual.shape) == 1:
            midpoint_derivative_actual = np.array([midpoint_derivative_actual]).T
        outvec = midpoint_derivative_predicted - midpoint_derivative_actual
        d2 = outvec.shape[1]
        # Flatten column-wise into the 1-D vector SLSQP expects.
        outvec = np.hstack([outvec[:, ii][:] for ii in range(d2)])
        return outvec
    def _collocation_constraint_boundary(self, vectorized):
        """Equality constraint: the boundary-condition residual, using the
        quadrature states integrated over the whole trajectory."""
        y, quads0, u, params, nondyn_params = self._unwrap_params(vectorized)
        qf = self._integrate(self.quadrature_function, self.derivative_function,
                             y, u, params, self.const, self.tspan, quads0)[-1]
        return self.boundarycondition_function(y[0], quads0, u[0], y[-1], qf, u[-1], params, nondyn_params, self.const)
    def _collocation_cost(self, vectorized):
        """Objective: initial cost + integrated path cost + terminal cost;
        each term defaults to 0 when the corresponding function is unset."""
        y, quads0, u, params, nondyn_params = self._unwrap_params(vectorized)
        if self.initial_cost_function is not None:
            c0 = self.initial_cost_function(y[0], u[0], params, self.const)
        else:
            c0 = 0
        if self.terminal_cost_function is not None:
            cf = self.terminal_cost_function(y[-1], u[-1], params, self.const)
        else:
            cf = 0
        cpath = 0
        if self.path_cost_function is not None:
            cpath = self._integrate(self.path_cost_function, self.derivative_function,
                                    y, u, params, self.const, self.tspan, 0)[-1]
        return c0 + cpath + cf
    def _unwrap_params(self, vectorized):
        """Split the flat decision vector back into states, initial quads,
        controls, dynamical parameters, and nondynamical parameters (the
        inverse of _wrap_params)."""
        X = vectorized[:self.number_of_odes * self.number_of_nodes].reshape([self.number_of_nodes, self.number_of_odes])
        vectorized = np.delete(vectorized, np.arange(0, self.number_of_odes * self.number_of_nodes))
        quads = vectorized[:self.number_of_quads]
        vectorized = np.delete(vectorized, np.arange(0, self.number_of_quads))
        u = vectorized[:self.number_of_controls * self.number_of_nodes].reshape([self.number_of_nodes,
                                                                                 self.number_of_controls])
        vectorized = np.delete(vectorized, np.arange(0, self.number_of_controls * self.number_of_nodes))
        dynamical_params = vectorized[:self.number_of_dynamical_params]
        vectorized = np.delete(vectorized, np.arange(0, self.number_of_dynamical_params))
        nondynamical_params = vectorized[:self.number_of_nondynamical_params]
        return X, quads, u, dynamical_params, nondynamical_params
    @staticmethod
    def _wrap_params(y, q, u, params, nondyn_params):
        """Flatten states, initial quads, controls, and parameters into the
        single 1-D decision vector handed to the optimizer."""
        return np.concatenate((y.flatten(), q[0], u.flatten(), params, nondyn_params))
    @staticmethod
    def _get_poly_coefficients_1_3(p0, dquads0, dquads12, dquads1):
        """Cubic coefficients from a value at the left end and derivatives
        at the left end, midpoint, and right end of a unit interval."""
        C1 = p0
        C2 = dquads0
        C3 = -3/2*dquads0 + 2*dquads12 - 1/2*dquads1
        C4 = 2/3*dquads0 - 4/3*dquads12 + 2/3*dquads1
        return C1, C2, C3, C4
    @staticmethod
    def _get_poly_coefficients_2_2(p0, dp0, p1, dp1, h):
        """Hermite cubic coefficients from values and derivatives at both
        ends of an interval of width h."""
        C1 = p0
        C2 = dp0
        C3 = -3*p0/h**2 - 2*dp0/h + 3*p1/h**2 - dp1/h
        C4 = 2*p0/h**3 + dp0/h**2 - 2*p1/h**3 + dp1/h**2
        return C1, C2, C3, C4
    @classmethod
    def _get_default_options(cls, options='default'):
        """ Default options structure for Collocation. """
        if options == 'default':
            return [1, 10]
        elif options == 'quiet':
            return [0, 10]
    # TODO: Cythonize _midpoint, _midpoint_derivative, _integrate
    @staticmethod
    def _midpoint(p0, p1, dp0, dp1, t0, t1):
        """Midpoint value of the Hermite cubic through (p0, dp0), (p1, dp1)."""
        return (1 / 2 * (p0 + p1).T + (t1 - t0) / 8 * (dp0 - dp1).T).T
    @staticmethod
    def _midpoint_derivative(p0, p1, dp0, dp1, t0, t1):
        """Midpoint derivative of the Hermite cubic through (p0, dp0), (p1, dp1)."""
        return (-3 / 2 * (p0 - p1).T / (t1 - t0) - 1 / 4 * (dp0 + dp1).T).T
    def _integrate(self, fun, base, y, u, params, c, t, val0):
        """Cumulatively integrate ``fun`` along the mesh with Simpson's rule
        on each interval, using Hermite-cubic midpoints of the state from
        ``base`` (the dynamics); returns the running integral per node."""
        val0 = np.array([val0])
        dX = np.squeeze([base(yi, ui, params, c) for yi, ui in zip(y, u)])
        if len(dX.shape) == 1:
            dX = np.array([dX]).T
        dp0 = dX[:-1]
        dp1 = dX[1:]
        p0 = y[:-1]
        p1 = y[1:]
        t0 = t[:-1]
        t1 = t[1:]
        u0 = u[:-1]
        uf = u[1:]
        u_midpoint = (u0 + uf) / 2
        y_midpoint = self._midpoint(p0, p1, dp0, dp1, t0, t1)
        for ii in range(len(t) - 1):
            # Simpson's rule: (f0 + 4*f_mid + f1)/6 over each interval.
            c0 = fun(y[ii], u[ii], params, c)
            c_mid = fun(y_midpoint[ii], u_midpoint[ii], params, c)
            c1 = fun(y[ii + 1], u[ii + 1], params, c)
            val0 = np.vstack((val0, val0[-1] + (1 / 6 * c0 + 4 / 6 * c_mid + 1 / 6 * c1) * (t[ii + 1] - t[ii])))
        return val0
| [
"scipy.optimize.minimize",
"copy.deepcopy",
"numpy.ones_like",
"numpy.zeros",
"beluga.bvpsol.BaseAlgorithm.BaseAlgorithm.__init__",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"numpy.interp",
"beluga.bvpsol.BaseAlgorithm.BVPResult",
"numpy.vstack"
] | [((1695, 1740), 'beluga.bvpsol.BaseAlgorithm.BaseAlgorithm.__init__', 'BaseAlgorithm.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (1717, 1740), False, 'from beluga.bvpsol.BaseAlgorithm import BaseAlgorithm, BVPResult\n'), ((3109, 3131), 'copy.deepcopy', 'copy.deepcopy', (['solinit'], {}), '(solinit)\n', (3122, 3131), False, 'import copy\n'), ((6220, 6502), 'scipy.optimize.minimize', 'minimize', (['self._collocation_cost', 'vectorized'], {'args': '()', 'method': '"""SLSQP"""', 'jac': 'None', 'hessp': 'None', 'bounds': 'None', 'constraints': '[self.constraint_midpoint, self.constraint_boundary, self.constraint_path]', 'tol': 'self.tolerance', 'callback': 'None', 'options': "{'maxiter': self.max_iterations}"}), "(self._collocation_cost, vectorized, args=(), method='SLSQP', jac=\n None, hessp=None, bounds=None, constraints=[self.constraint_midpoint,\n self.constraint_boundary, self.constraint_path], tol=self.tolerance,\n callback=None, options={'maxiter': self.max_iterations})\n", (6228, 6502), False, 'from scipy.optimize import minimize\n'), ((7119, 7211), 'beluga.bvpsol.BaseAlgorithm.BVPResult', 'BVPResult', ([], {'sol': 'sol', 'success': "xopt['success']", 'message': "xopt['message']", 'niter': "xopt['nit']"}), "(sol=sol, success=xopt['success'], message=xopt['message'], niter=\n xopt['nit'])\n", (7128, 7211), False, 'from beluga.bvpsol.BaseAlgorithm import BaseAlgorithm, BVPResult\n'), ((13089, 13105), 'numpy.array', 'np.array', (['[val0]'], {}), '([val0])\n', (13097, 13105), True, 'import numpy as np\n'), ((3341, 3353), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3349, 3353), True, 'import numpy as np\n'), ((3614, 3653), 'numpy.linspace', 'np.linspace', (['sol.t[0]', 'sol.t[-1]'], {'num': '(4)'}), '(sol.t[0], sol.t[-1], num=4)\n', (3625, 3653), True, 'import numpy as np\n'), ((4343, 4397), 'numpy.linspace', 'np.linspace', (['sol.t[0]', 'sol.t[-1]', 'self.number_of_nodes'], {}), '(sol.t[0], sol.t[-1], self.number_of_nodes)\n', (4354, 
4397), True, 'import numpy as np\n'), ((5685, 5715), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (5693, 5715), True, 'import numpy as np\n'), ((5807, 5837), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (5815, 5837), True, 'import numpy as np\n'), ((10828, 10884), 'numpy.arange', 'np.arange', (['(0)', '(self.number_of_odes * self.number_of_nodes)'], {}), '(0, self.number_of_odes * self.number_of_nodes)\n', (10837, 10884), True, 'import numpy as np\n'), ((10980, 11014), 'numpy.arange', 'np.arange', (['(0)', 'self.number_of_quads'], {}), '(0, self.number_of_quads)\n', (10989, 11014), True, 'import numpy as np\n'), ((11270, 11330), 'numpy.arange', 'np.arange', (['(0)', '(self.number_of_controls * self.number_of_nodes)'], {}), '(0, self.number_of_controls * self.number_of_nodes)\n', (11279, 11330), True, 'import numpy as np\n'), ((11448, 11493), 'numpy.arange', 'np.arange', (['(0)', 'self.number_of_dynamical_params'], {}), '(0, self.number_of_dynamical_params)\n', (11457, 11493), True, 'import numpy as np\n'), ((13726, 13824), 'numpy.vstack', 'np.vstack', (['(val0, val0[-1] + (1 / 6 * c0 + 4 / 6 * c_mid + 1 / 6 * c1) * (t[ii + 1] -\n t[ii]))'], {}), '((val0, val0[-1] + (1 / 6 * c0 + 4 / 6 * c_mid + 1 / 6 * c1) * (t[\n ii + 1] - t[ii])))\n', (13735, 13824), True, 'import numpy as np\n'), ((3949, 3961), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3957, 3961), True, 'import numpy as np\n'), ((4149, 4161), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4157, 4161), True, 'import numpy as np\n'), ((4540, 4562), 'numpy.vstack', 'np.vstack', (['(new_y, yy)'], {}), '((new_y, yy))\n', (4549, 4562), True, 'import numpy as np\n'), ((4587, 4609), 'numpy.vstack', 'np.vstack', (['(new_q, qq)'], {}), '((new_q, qq))\n', (4596, 4609), True, 'import numpy as np\n'), ((4634, 4656), 'numpy.vstack', 'np.vstack', (['(new_u, uu)'], {}), '((new_u, uu))\n', (4643, 4656), True, 'import numpy 
as np\n'), ((7036, 7055), 'numpy.ones_like', 'np.ones_like', (['sol.y'], {}), '(sol.y)\n', (7048, 7055), True, 'import numpy as np\n'), ((7455, 7479), 'numpy.zeros', 'np.zeros', (['sol.y.shape[1]'], {}), '(sol.y.shape[1])\n', (7463, 7479), True, 'import numpy as np\n'), ((8545, 8559), 'numpy.array', 'np.array', (['[dX]'], {}), '([dX])\n', (8553, 8559), True, 'import numpy as np\n'), ((9251, 9289), 'numpy.array', 'np.array', (['[midpoint_derivative_actual]'], {}), '([midpoint_derivative_actual])\n', (9259, 9289), True, 'import numpy as np\n'), ((13229, 13243), 'numpy.array', 'np.array', (['[dX]'], {}), '([dX])\n', (13237, 13243), True, 'import numpy as np\n'), ((3691, 3728), 'numpy.interp', 'np.interp', (['t_new', 'sol.t', 'sol.y[:, ii]'], {}), '(t_new, sol.t, sol.y[:, ii])\n', (3700, 3728), True, 'import numpy as np\n'), ((5488, 5500), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5496, 5500), True, 'import numpy as np\n'), ((3835, 3872), 'numpy.interp', 'np.interp', (['t_new', 'sol.t', 'sol.q[:, ii]'], {}), '(t_new, sol.t, sol.q[:, ii])\n', (3844, 3872), True, 'import numpy as np\n'), ((4035, 4072), 'numpy.interp', 'np.interp', (['t_new', 'sol.t', 'sol.q[:, ii]'], {}), '(t_new, sol.t, sol.q[:, ii])\n', (4044, 4072), True, 'import numpy as np\n')] |
import numpy as np
from menpo.image import Image
from menpofit.base import name_of_callable
from menpofit.aam.fitter import AAMFitter
from menpofit.clm.fitter import CLMFitter
from menpofit.fitter import MultilevelFitter
class SDFitter(MultilevelFitter):
    r"""
    Abstract base class for Supervised Descent fitters.

    Concrete subclasses supply the trained per-level regressors; this class
    only customises the default iteration budget of the fitting entry point.
    """
    def _set_up(self):
        r"""
        Hook for subclass-specific set-up; the SD family needs none.
        """
    def fit(self, image, initial_shape, max_iters=None, gt_shape=None,
            **kwargs):
        r"""
        Fit a single image starting from an initial shape estimate.

        Parameters
        ----------
        image : :map:`MaskedImage`
            The image to be fitted.
        initial_shape : :map:`PointCloud`
            The initial shape estimate from which the fitting procedure
            starts.
        max_iters : `int` or `list`, optional
            Maximum number of iterations. An `int` caps the whole procedure;
            a `list` gives a per-pyramidal-level budget. Defaults to one
            iteration per pyramidal level.
        gt_shape : :map:`PointCloud`, optional
            The ground truth shape of the image.
        **kwargs : `dict`
            Optional arguments passed through to the multilevel fitter.

        Returns
        -------
        fitting_list : :map:`FittingResultList`
            A fitting result object.
        """
        # Default budget: one descent step per trained pyramidal level.
        effective_iters = self.n_levels if max_iters is None else max_iters
        return MultilevelFitter.fit(self, image, initial_shape,
                                    max_iters=effective_iters,
                                    gt_shape=gt_shape, **kwargs)
class SDMFitter(SDFitter):
    r"""
    Supervised Descent Method.
    Parameters
    -----------
    regressors : :map:`RegressorTrainer`
        The trained regressors.
    n_training_images : `int`
        The number of images that were used to train the SDM fitter. It is
        only used for informational reasons.
    features : `callable` or ``[callable]``, optional
        If list of length ``n_levels``, feature extraction is performed at
        each level after downscaling of the image.
        The first element of the list specifies the features to be extracted at
        the lowest pyramidal level and so on.
        If ``callable`` the specified feature will be applied to the original
        image and pyramid generation will be performed on top of the feature
        image. Also see the `pyramid_on_features` property.
    reference_shape : :map:`PointCloud`
        The reference shape that was used to resize all training images to a
        consistent object size.
    downscale : `float`
        The downscale factor that will be used to create the different
        pyramidal levels. The scale factor will be::
            (downscale ** k) for k in range(n_levels)
    References
    ----------
    .. [XiongD13] Supervised Descent Method and its Applications to
       Face Alignment
       <NAME> and <NAME>
       IEEE International Conference on Computer Vision and Pattern Recognition
       May, 2013
    """
    def __init__(self, regressors, n_training_images, features,
                 reference_shape, downscale):
        # One trained regressor per pyramidal level (see n_levels below).
        self._fitters = regressors
        self._features = features
        self._reference_shape = reference_shape
        self._downscale = downscale
        # Kept for informational output in __str__ only.
        self._n_training_images = n_training_images
    @property
    def algorithm(self):
        r"""
        Returns a string containing the algorithm used from the SDM family.
        : str
        """
        return 'SDM-' + self._fitters[0].algorithm
    @property
    def reference_shape(self):
        r"""
        The reference shape used during training.
        :type: :map:`PointCloud`
        """
        return self._reference_shape
    @property
    def features(self):
        r"""
        The feature type per pyramid level. Note that they are stored from
        lowest to highest level resolution.
        :type: `list`
        """
        return self._features
    @property
    def n_levels(self):
        r"""
        The number of pyramidal levels used during training.
        : int
        """
        return len(self._fitters)
    @property
    def downscale(self):
        r"""
        The downscale per pyramidal level used during building the AAM.
        The scale factor is: (downscale ** k) for k in range(n_levels)
        :type: `float`
        """
        return self._downscale
    def __str__(self):
        # Human-readable summary: regressor type, training-set size,
        # pyramid structure and per-level feature/channel information.
        out = "Supervised Descent Method\n" \
              " - Non-Parametric '{}' Regressor\n" \
              " - {} training images.\n".format(
            name_of_callable(self._fitters[0].regressor),
            self._n_training_images)
        # small strings about number of channels, channels string and downscale
        down_str = []
        for j in range(self.n_levels):
            if j == self.n_levels - 1:
                down_str.append('(no downscale)')
            else:
                down_str.append('(downscale by {})'.format(
                    self.downscale**(self.n_levels - j - 1)))
        # Probe the feature extraction on a dummy 40x40 image just to count
        # the number of output channels per level.
        temp_img = Image(image_data=np.random.rand(40, 40))
        if self.pyramid_on_features:
            temp = self.features(temp_img)
            n_channels = [temp.n_channels] * self.n_levels
        else:
            n_channels = []
            for j in range(self.n_levels):
                temp = self.features[j](temp_img)
                n_channels.append(temp.n_channels)
        # string about features and channels
        if self.pyramid_on_features:
            feat_str = "- Feature is {} with ".format(
                name_of_callable(self.features))
            if n_channels[0] == 1:
                ch_str = ["channel"]
            else:
                ch_str = ["channels"]
        else:
            # Per-level feature description; entries may be a string, None,
            # or a callable (reported via __name__).
            feat_str = []
            ch_str = []
            for j in range(self.n_levels):
                if isinstance(self.features[j], str):
                    feat_str.append("- Feature is {} with ".format(
                        self.features[j]))
                elif self.features[j] is None:
                    feat_str.append("- No features extracted. ")
                else:
                    feat_str.append("- Feature is {} with ".format(
                        self.features[j].__name__))
                if n_channels[j] == 1:
                    ch_str.append("channel")
                else:
                    ch_str.append("channels")
        if self.n_levels > 1:
            out = "{} - Gaussian pyramid with {} levels and downscale " \
                  "factor of {}.\n".format(out, self.n_levels,
                                           self.downscale)
            if self.pyramid_on_features:
                out = "{} - Pyramid was applied on feature space.\n " \
                      "{}{} {} per image.\n".format(out, feat_str,
                                                    n_channels[0], ch_str[0])
            else:
                out = "{} - Features were extracted at each pyramid " \
                      "level.\n".format(out)
                # Levels are reported from lowest to highest resolution.
                for i in range(self.n_levels - 1, -1, -1):
                    out = "{} - Level {} {}: \n {}{} {} per " \
                          "image.\n".format(
                        out, self.n_levels - i, down_str[i], feat_str[i],
                        n_channels[i], ch_str[i])
        else:
            if self.pyramid_on_features:
                # Wrap so the single-level report can index feat_str[0].
                feat_str = [feat_str]
            out = "{0} - No pyramid used:\n {1}{2} {3} per image.\n".format(
                out, feat_str[0], n_channels[0], ch_str[0])
        return out
class SDAAMFitter(AAMFitter, SDFitter):
    r"""
    Supervised Descent fitter driving an Active Appearance Model.

    Parameters
    ----------
    aam : :map:`AAM`
        The Active Appearance Model to be used.
    regressors : :map:`RegressorTrainer`
        The trained regressors.
    n_training_images : `int`
        The number of training images used to train the SDM fitter.
    """
    def __init__(self, aam, regressors, n_training_images):
        super(SDAAMFitter, self).__init__(aam)
        # One trained regressor per pyramid level; the image count is kept
        # purely for the informational summary below.
        self._fitters = regressors
        self._n_training_images = n_training_images
    @property
    def algorithm(self):
        r"""
        Name of the SDM-family algorithm in use.
        :type: `string`
        """
        return 'SD-AAM-' + self._fitters[0].algorithm
    def __str__(self):
        regressor_name = name_of_callable(self._fitters[0].regressor)
        template = ("{}Supervised Descent Method for AAMs:\n"
                    " - Parametric '{}' Regressor\n"
                    " - {} training images.\n")
        return template.format(str(self.aam), regressor_name,
                               self._n_training_images)
class SDCLMFitter(CLMFitter, SDFitter):
    r"""
    Supervised Descent fitter driving a Constrained Local Model.

    Parameters
    ----------
    clm : :map:`CLM`
        The Constrained Local Model to be used.
    regressors : :map:`RegressorTrainer`
        The trained regressors.
    n_training_images : `int`
        The number of training images used to train the SDM fitter.

    References
    ----------
    .. [Asthana13] Robust Discriminative Response Map Fitting with Constrained
       Local Models
       IEEE Conference on Computer Vision and Pattern Recognition.
       Portland, Oregon, USA, June 2013.
    """
    def __init__(self, clm, regressors, n_training_images):
        super(SDCLMFitter, self).__init__(clm)
        # One trained regressor per pyramid level; the image count is kept
        # purely for the informational summary below.
        self._fitters = regressors
        self._n_training_images = n_training_images
    @property
    def algorithm(self):
        r"""
        Name of the SDM-family algorithm in use.
        :type: `string`
        """
        return 'SD-CLM-' + self._fitters[0].algorithm
    def __str__(self):
        regressor_name = name_of_callable(self._fitters[0].regressor)
        template = ("{}Supervised Descent Method for CLMs:\n"
                    " - Parametric '{}' Regressor\n"
                    " - {} training images.\n")
        return template.format(str(self.clm), regressor_name,
                               self._n_training_images)
| [
"numpy.random.rand",
"menpofit.fitter.MultilevelFitter.fit",
"menpofit.base.name_of_callable"
] | [((1494, 1596), 'menpofit.fitter.MultilevelFitter.fit', 'MultilevelFitter.fit', (['self', 'image', 'initial_shape'], {'max_iters': 'max_iters', 'gt_shape': 'gt_shape'}), '(self, image, initial_shape, max_iters=max_iters,\n gt_shape=gt_shape, **kwargs)\n', (1514, 1596), False, 'from menpofit.fitter import MultilevelFitter\n'), ((4689, 4733), 'menpofit.base.name_of_callable', 'name_of_callable', (['self._fitters[0].regressor'], {}), '(self._fitters[0].regressor)\n', (4705, 4733), False, 'from menpofit.base import name_of_callable\n'), ((8696, 8740), 'menpofit.base.name_of_callable', 'name_of_callable', (['self._fitters[0].regressor'], {}), '(self._fitters[0].regressor)\n', (8712, 8740), False, 'from menpofit.base import name_of_callable\n'), ((10057, 10101), 'menpofit.base.name_of_callable', 'name_of_callable', (['self._fitters[0].regressor'], {}), '(self._fitters[0].regressor)\n', (10073, 10101), False, 'from menpofit.base import name_of_callable\n'), ((5178, 5200), 'numpy.random.rand', 'np.random.rand', (['(40)', '(40)'], {}), '(40, 40)\n', (5192, 5200), True, 'import numpy as np\n'), ((5680, 5711), 'menpofit.base.name_of_callable', 'name_of_callable', (['self.features'], {}), '(self.features)\n', (5696, 5711), False, 'from menpofit.base import name_of_callable\n')] |
import groot.datasets as datasets_module
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import numpy as np
# Benchmark dataset names; names use '-' while loader functions use '_'.
datasets = ["banknote-authentication", "blood-transfusion", "breast-cancer", "cylinder-bands", "diabetes", "haberman", "ionosphere", "wine"]
data_dir = "data/"
for dataset in datasets:
    # From the groot.datasets module, find the function with name load_<selected_dataset> then execute it
    # NOTE(review): positions 1 and 2 of the loader's return value are taken
    # as X and y — confirm against groot.datasets' loader signature.
    X, y = getattr(datasets_module, f"load_{dataset.replace('-', '_')}")()[1:3]
    scaler = MinMaxScaler()
    # Perform a 80%/20% train/test split
    X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=0.2, random_state=0)
    # Fit the min-max scaler on the training portion only.
    X_train = scaler.fit_transform(X_train)
    # Make sure not to cheat on the test set when scaling
    X_test = np.clip(scaler.transform(X_test), 0.0, 1.0)
    # Persist the four splits as .npy files, one set per dataset.
    np.save(data_dir + f"X_train_{dataset}.npy", X_train, allow_pickle=False)
    np.save(data_dir + f"X_test_{dataset}.npy", X_test, allow_pickle=False)
    np.save(data_dir + f"y_train_{dataset}.npy", y_train, allow_pickle=False)
    np.save(data_dir + f"y_test_{dataset}.npy", y_test, allow_pickle=False)
| [
"sklearn.model_selection.train_test_split",
"numpy.save",
"sklearn.preprocessing.MinMaxScaler"
] | [((548, 562), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (560, 562), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((644, 709), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'stratify': 'y', 'test_size': '(0.2)', 'random_state': '(0)'}), '(X, y, stratify=y, test_size=0.2, random_state=0)\n', (660, 709), False, 'from sklearn.model_selection import train_test_split\n'), ((875, 948), 'numpy.save', 'np.save', (["(data_dir + f'X_train_{dataset}.npy')", 'X_train'], {'allow_pickle': '(False)'}), "(data_dir + f'X_train_{dataset}.npy', X_train, allow_pickle=False)\n", (882, 948), True, 'import numpy as np\n'), ((953, 1024), 'numpy.save', 'np.save', (["(data_dir + f'X_test_{dataset}.npy')", 'X_test'], {'allow_pickle': '(False)'}), "(data_dir + f'X_test_{dataset}.npy', X_test, allow_pickle=False)\n", (960, 1024), True, 'import numpy as np\n'), ((1030, 1103), 'numpy.save', 'np.save', (["(data_dir + f'y_train_{dataset}.npy')", 'y_train'], {'allow_pickle': '(False)'}), "(data_dir + f'y_train_{dataset}.npy', y_train, allow_pickle=False)\n", (1037, 1103), True, 'import numpy as np\n'), ((1108, 1179), 'numpy.save', 'np.save', (["(data_dir + f'y_test_{dataset}.npy')", 'y_test'], {'allow_pickle': '(False)'}), "(data_dir + f'y_test_{dataset}.npy', y_test, allow_pickle=False)\n", (1115, 1179), True, 'import numpy as np\n')] |
# Dataset Size: 46469
# Unique Labels: 195
# Dataset Shape: (46469, 89, 89)
# Labels Shape: (46469,)
import gdown
import numpy as np
import os
# Seed NumPy's global RNG so any downstream randomness is reproducible.
np.random.seed(1234)
def get_raw_dataset():
    """Download the raw dataset text file from Google Drive into ../output/."""
    source_url = 'https://drive.google.com/uc?id=1-IqQIFQ8X2wM0KU1qyIYZOvur8C_KdVh'
    destination = '../output/dataset.txt'
    gdown.download(source_url, destination)
def get_learning_dataset(is_downloaded=False):
    """Load the data/label arrays and split them into train/dev/test sets.

    Parameters
    ----------
    is_downloaded : bool
        When False, first fetch data.npy and labels.npy from Google Drive
        into ../output/.

    Returns
    -------
    tuple
        Three (data, labels) pairs: (train, dev, test). The full set is split
        80/20 into training/test, and the training portion is further split
        90/10 into train/dev.
    """
    data_output = '../output/data.npy'
    labels_output = '../output/labels.npy'
    if is_downloaded is False:
        data_url = 'https://drive.google.com/uc?id=1-91tO2R6_TKBTYAe0o4DnRQGqbUcmoyk'
        labels_url = 'https://drive.google.com/uc?id=1hx1Li3u-R_EbpJttZtrcgBzkC9tX4aCj'
        gdown.download(data_url, data_output)
        gdown.download(labels_url, labels_output)
    data = np.load(data_output, allow_pickle=True)
    labels = np.load(labels_output, allow_pickle=True)
    # 80% of the full set for training, remainder for testing.
    train_frac = 0.8
    n_train = int(len(data) * train_frac)
    n_train_labels = int(len(labels) * train_frac)
    training_data, testing_data = data[:n_train], data[n_train:]
    training_labels, testing_labels = labels[:n_train_labels], labels[n_train_labels:]
    # BUG FIX: the dev split was previously sliced from the *full* arrays
    # (data/labels), so the dev set overlapped the test partition. It must be
    # the tail of the training portion only.
    true_training_frac = 0.9
    n_real = int(len(training_data) * true_training_frac)
    n_real_labels = int(len(training_labels) * true_training_frac)
    real_training, dev_data = training_data[:n_real], training_data[n_real:]
    real_training_labels, dev_labels = (training_labels[:n_real_labels],
                                        training_labels[n_real_labels:])
    print('Train Size: {} with shape: {}'.format(len(real_training), real_training.shape))
    print('Dev Size: {} with shape: {}'.format(len(dev_data), dev_data.shape))
    print('Test Size: {} with shape: {}'.format(len(testing_data), testing_data.shape))
    return (real_training, real_training_labels), (dev_data, dev_labels), (testing_data, testing_labels)
def get_grouped_labels():
    """Fetch the grouped train/dev/test label files from Google Drive,
    unless any of them already exists under ../output/."""
    downloads = [
        ('https://drive.google.com/uc?id=1lWsvwDWQ_syd2bWgnkyWze6BjzVvWe57',
         '../output/grouped_train_labels.npy'),
        ('https://drive.google.com/uc?id=17VAlNtL4hHcBH3Ibs2emzxnBTbdxoM0O',
         '../output/grouped_dev_labels.npy'),
        ('https://drive.google.com/uc?id=1kmuf84Le-vER4IolgJCfZPy-1J-NSNtr',
         '../output/grouped_test_labels.npy'),
    ]
    # Download only when none of the three target files is present.
    if not any(os.path.exists(path) for _, path in downloads):
        print('Downloading Grouped Labels to ../output/')
        for url, path in downloads:
            gdown.download(url, path)
def main():
    """Load (or download) the dataset splits and persist them to ../output/."""
    data_file = '../output/data.npy'
    labels_file = '../output/labels.npy'
    already_local = os.path.exists(data_file) and os.path.exists(labels_file)
    if already_local:
        print('Data and Labels files found. Loading from local disk')
        train, dev, test = get_learning_dataset(is_downloaded=True)
    else:
        train, dev, test = get_learning_dataset()
    # Only persist the splits when they were freshly downloaded.
    save = not already_local
    get_grouped_labels()
    train_set_data, train_set_label = train[0], train[1]
    dev_set_data, dev_set_label = dev[0], dev[1]
    test_set_data, test_set_label = test[0], test[1]
    if save:
        # Save the train, test and dev numpy files
        print('Saving Files ...')
        outputs = [
            ('../output/train.npy', train_set_data),
            ('../output/train_labels.npy', train_set_label),
            ('../output/dev.npy', dev_set_data),
            ('../output/dev_labels.npy', dev_set_label),
            ('../output/test.npy', test_set_data),
            ('../output/test_labels.npy', test_set_label),
        ]
        for path, array in outputs:
            np.save(path, array, allow_pickle=True)
        print('All Files Saved Successfully!')
# Run the pipeline only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"numpy.load",
"numpy.save",
"numpy.random.seed",
"gdown.download",
"os.path.exists"
] | [((145, 165), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (159, 165), True, 'import numpy as np\n'), ((309, 336), 'gdown.download', 'gdown.download', (['url', 'output'], {}), '(url, output)\n', (323, 336), False, 'import gdown\n'), ((783, 822), 'numpy.load', 'np.load', (['data_output'], {'allow_pickle': '(True)'}), '(data_output, allow_pickle=True)\n', (790, 822), True, 'import numpy as np\n'), ((836, 877), 'numpy.load', 'np.load', (['labels_output'], {'allow_pickle': '(True)'}), '(labels_output, allow_pickle=True)\n', (843, 877), True, 'import numpy as np\n'), ((683, 720), 'gdown.download', 'gdown.download', (['data_url', 'data_output'], {}), '(data_url, data_output)\n', (697, 720), False, 'import gdown\n'), ((729, 770), 'gdown.download', 'gdown.download', (['labels_url', 'labels_output'], {}), '(labels_url, labels_output)\n', (743, 770), False, 'import gdown\n'), ((2667, 2706), 'gdown.download', 'gdown.download', (['train_', 'train_group_out'], {}), '(train_, train_group_out)\n', (2681, 2706), False, 'import gdown\n'), ((2715, 2750), 'gdown.download', 'gdown.download', (['dev_', 'dev_group_out'], {}), '(dev_, dev_group_out)\n', (2729, 2750), False, 'import gdown\n'), ((2759, 2796), 'gdown.download', 'gdown.download', (['test_', 'test_group_out'], {}), '(test_, test_group_out)\n', (2773, 2796), False, 'import gdown\n'), ((2915, 2940), 'os.path.exists', 'os.path.exists', (['data_file'], {}), '(data_file)\n', (2929, 2940), False, 'import os\n'), ((2945, 2972), 'os.path.exists', 'os.path.exists', (['labels_file'], {}), '(labels_file)\n', (2959, 2972), False, 'import os\n'), ((3488, 3553), 'numpy.save', 'np.save', (['"""../output/train.npy"""', 'train_set_data'], {'allow_pickle': '(True)'}), "('../output/train.npy', train_set_data, allow_pickle=True)\n", (3495, 3553), True, 'import numpy as np\n'), ((3562, 3635), 'numpy.save', 'np.save', (['"""../output/train_labels.npy"""', 'train_set_label'], {'allow_pickle': '(True)'}), 
"('../output/train_labels.npy', train_set_label, allow_pickle=True)\n", (3569, 3635), True, 'import numpy as np\n'), ((3644, 3705), 'numpy.save', 'np.save', (['"""../output/dev.npy"""', 'dev_set_data'], {'allow_pickle': '(True)'}), "('../output/dev.npy', dev_set_data, allow_pickle=True)\n", (3651, 3705), True, 'import numpy as np\n'), ((3714, 3783), 'numpy.save', 'np.save', (['"""../output/dev_labels.npy"""', 'dev_set_label'], {'allow_pickle': '(True)'}), "('../output/dev_labels.npy', dev_set_label, allow_pickle=True)\n", (3721, 3783), True, 'import numpy as np\n'), ((3792, 3855), 'numpy.save', 'np.save', (['"""../output/test.npy"""', 'test_set_data'], {'allow_pickle': '(True)'}), "('../output/test.npy', test_set_data, allow_pickle=True)\n", (3799, 3855), True, 'import numpy as np\n'), ((3864, 3935), 'numpy.save', 'np.save', (['"""../output/test_labels.npy"""', 'test_set_label'], {'allow_pickle': '(True)'}), "('../output/test_labels.npy', test_set_label, allow_pickle=True)\n", (3871, 3935), True, 'import numpy as np\n'), ((2473, 2504), 'os.path.exists', 'os.path.exists', (['train_group_out'], {}), '(train_group_out)\n', (2487, 2504), False, 'import os\n'), ((2515, 2544), 'os.path.exists', 'os.path.exists', (['dev_group_out'], {}), '(dev_group_out)\n', (2529, 2544), False, 'import os\n'), ((2568, 2598), 'os.path.exists', 'os.path.exists', (['test_group_out'], {}), '(test_group_out)\n', (2582, 2598), False, 'import os\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import cv2 as cv
import numpy as np
import onnxruntime
class MiDaSPredictor(object):
    """ONNX Runtime wrapper around a MiDaS monocular depth model."""

    # Square network input resolution per supported model variant.
    _INPUT_SIZES = {"large": (384, 384), "small": (256, 256)}

    def __init__(
        self,
        model_path='midas_predictor/midas_v2_1_small.onnx',
        model_type='small',
    ):
        if model_type not in self._INPUT_SIZES:
            print(f"model_type '{model_type}' not implemented")
            assert False
        self._net_w, self._net_h = self._INPUT_SIZES[model_type]
        session = onnxruntime.InferenceSession(model_path)
        self._model = session
        self._input_name = session.get_inputs()[0].name
        self._output_name = session.get_outputs()[0].name

    def __call__(
        self,
        image,
    ):
        """Run depth inference on an image; returns a (net_h, net_w) array.

        NOTE(review): cv.resize expects (width, height); here both sizes are
        equal, so the order is harmless — confirm if non-square variants are
        ever added.
        """
        frame = copy.deepcopy(image)
        frame = cv.resize(frame, (self._net_h, self._net_w))
        frame = frame[:, :, [2, 1, 0]]  # BGR2RGB
        # NHWC batch of one, scaled to [0, 1] as float32.
        frame = frame.reshape(1, self._net_h, self._net_w, 3)
        frame = frame.astype('float32')
        frame /= 255.0
        outputs = self._model.run([self._output_name], {self._input_name: frame})
        depth = np.array(outputs[0]).reshape(self._net_h, self._net_w)
        return depth
| [
"onnxruntime.InferenceSession",
"copy.deepcopy",
"numpy.array",
"cv2.resize"
] | [((565, 605), 'onnxruntime.InferenceSession', 'onnxruntime.InferenceSession', (['model_path'], {}), '(model_path)\n', (593, 605), False, 'import onnxruntime\n'), ((795, 815), 'copy.deepcopy', 'copy.deepcopy', (['image'], {}), '(image)\n', (808, 815), False, 'import copy\n'), ((829, 869), 'cv2.resize', 'cv.resize', (['x', '(self._net_h, self._net_w)'], {}), '(x, (self._net_h, self._net_w))\n', (838, 869), True, 'import cv2 as cv\n'), ((1115, 1131), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (1123, 1131), True, 'import numpy as np\n')] |
from CGATReport.Tracker import *
import numpy as np
class AlignmentSummary(TrackerSQL):
    '''
    Return every row of the Picard alignment summary metrics table.
    '''
    def __call__(self, track, slice=None):
        statement = """SELECT * FROM picard_stats_alignment_summary_metrics"""
        return self.getAll(statement)
class ReadsAligned(TrackerSQL):
    '''
    Percent of PF reads aligned, keyed by track.
    '''
    def __call__(self, track, slice=None):
        statement = """SELECT track, PCT_PF_READS_ALIGNED FROM picard_stats_alignment_summary_metrics"""
        return {name: value for name, value in self.execute(statement)}
class ReadsAlignedInPairs(TrackerSQL):
    '''
    Percent of reads aligned in pairs, keyed by track.
    '''
    def __call__(self, track, slice=None):
        statement = """SELECT track, PCT_READS_ALIGNED_IN_PAIRS FROM picard_stats_alignment_summary_metrics"""
        return {name: value for name, value in self.execute(statement)}
class MismatchRate(TrackerSQL):
    '''
    PF mismatch rate, keyed by track.
    '''
    def __call__(self, track, slice=None):
        statement = """SELECT track, PF_MISMATCH_RATE FROM picard_stats_alignment_summary_metrics"""
        return {name: value for name, value in self.execute(statement)}
class InsertSizeSummary(TrackerSQL):
    '''
    Return every row of the Picard insert-size metrics table.
    '''
    def __call__(self, track, slice=None):
        statement = """SELECT * FROM picard_stats_insert_size_metrics"""
        return self.getAll(statement)
class CoverageSd(TrackerSQL):
    '''
    log2 of the per-contig coverage standard deviation
    (strictly positive entries only, so log2 is defined).
    '''
    pattern = "(.*)_coverage_stats"
    def __call__(self, track, slice=None):
        statement = """SELECT cov_sd FROM %(track)s_coverage_stats WHERE cov_sd > 0""" % locals()
        values = [row[0] for row in self.execute(statement).fetchall()]
        return np.log2(values)
class CoverageMean(TrackerSQL):
    '''
    log2 of the per-contig mean coverage
    (strictly positive entries only, so log2 is defined).
    '''
    pattern = "(.*)_coverage_stats"
    def __call__(self, track, slice=None):
        statement = """SELECT cov_mean FROM %(track)s_coverage_stats WHERE cov_mean > 0""" % locals()
        values = [row[0] for row in self.execute(statement).fetchall()]
        return np.log2(values)
| [
"numpy.log2"
] | [((1949, 1964), 'numpy.log2', 'np.log2', (['result'], {}), '(result)\n', (1956, 1964), True, 'import numpy as np\n'), ((2383, 2398), 'numpy.log2', 'np.log2', (['result'], {}), '(result)\n', (2390, 2398), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 15:47:55 2020
Analysing the DJS of Fukuchi's paper
@author: nikorose
"""
from DJSFunctions import extract_preprocess_data, ankle_DJS
from plot_dynamics import plot_ankle_DJS
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from scipy import stats
from utilities_QS import multi_idx, create_df, best_hyper, change_labels
#stats
import scipy.stats as stats
import researchpy as rp
import statsmodels.api as sm
from statsmodels.formula.api import ols
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.multicomp import MultiComparison
import seaborn as sns
from scipy.stats.mstats import kruskal
import scikit_posthocs as sp
# =============================================================================
# Settings
# =============================================================================
# Boolean switches selecting which analysis sections of this script run
# (each guards an `if <flag>:` block further down).
instances = False
Age_group = False
Age_group_mod = False
Age_group_gen = False
Gender = False
individuals = True
statistics = True
# Global seaborn styling applied to every figure the script produces.
sns.set_context('paper', font_scale=1.5)
sns.set_style("whitegrid")
# =============================================================================
# Helper functions
# =============================================================================
def power(col):
    """Ankle power for one gait-cycle column: joint moment times angular velocity."""
    moment = col['RAnkleMomentZ']
    angular_velocity = col['Angular vel [deg / GC]']
    return moment * angular_velocity
# =============================================================================
# Charging the folders
# =============================================================================
root_dir = os.getcwd()
# Absolute path to the raw Fukuchi treadmill/overground dataset on disk.
info_dir = '/home/nikorose/<EMAIL>/Tesis de Doctorado/Gait Analysis Data/' \
    + 'Downloaded/Fukuchi/TreadmillAndOverground/Dataset'
# Pre-averaged gait curves; two-level column header and two-level row index.
Fukuchi_df = pd.read_csv('Fukuchi/Fukuchi_mean.csv', header=[0,1], index_col=[0,1])
#Take care when filtering specific columns
idx = pd.IndexSlice
# Keep only ankle angle, ankle moment and vertical ground reaction force.
Fukuchi_df = Fukuchi_df.loc[idx[['RAnkleAngleZ', 'RAnkleMomentZ', 'RGRFY'],:],:]
# =============================================================================
# Adding power column by calculating through moment and ang vel dot product
# =============================================================================
#According to Winter https://ouhsc.edu/bserdac/dthompso/web/gait/epow/pow1.htm
#in order to calculate the power plot we should do the following:
#Obtain the derivative of the angle plot to obtain angular velocity
#make the product between the moment and the ang vel, then P=Mw
# Numerical derivative of the ankle angle gives the angular velocity.
ang_vel = Fukuchi_df.loc['RAnkleAngleZ',:].apply(lambda x: np.gradient(x), axis=0)
ang_vel = multi_idx('Angular vel [deg / GC]', ang_vel)
Fukuchi_df = pd.concat([Fukuchi_df, ang_vel], axis=0)
# P = M * w, computed column-wise via the `power` helper defined above.
power_df = Fukuchi_df.apply(power, axis=0)
power_df = multi_idx('Ankle Power [W]', power_df)
Fukuchi_df = pd.concat([Fukuchi_df, power_df], axis=0)
#Removing angular vel
Fukuchi_df = Fukuchi_df.drop(['Angular vel [deg / GC]'], axis =0)
#Replacing index names to be similar with Ferrarin
idx_names = ['Ankle Dorsi/Plantarflexion ', 'Ankle Dorsi/Plantarflexion',
             'Vertical Force', 'Ankle'][::-1]
idx_old = list(Fukuchi_df.index.get_level_values(0).unique())
# Rename each first-level index entry to its Ferrarin-style label.
for num, name in enumerate(idx_names[::-1]):
    Fukuchi_df.index = Fukuchi_df.index.set_levels(\
        Fukuchi_df.index.levels[0].str.replace(idx_old[num], name), level=0)
# =============================================================================
# Performing the Shapiro Wilk test per category in order to see normal distributions
# =============================================================================
#Removing nan due to shapiro is sensitive to it.
Fukuchi_df_nan = Fukuchi_df.dropna(axis=1, how='all')
#In order to fill the few nan spaces
Fukuchi_df_nan = Fukuchi_df_nan.interpolate(axis=1)
shapiro = {}
# One Shapiro-Wilk table per second-level column label: test statistic and
# p-value for every row of the filtered data.
for col in Fukuchi_df.columns.get_level_values(1).unique():
    shapiro_t = pd.DataFrame(Fukuchi_df_nan.loc[:, idx[:,col]].apply(stats.shapiro, axis=1),
                             columns= ['res'])
    shapiro_t['stats'] = shapiro_t.apply(lambda x: x.res[0], axis=1)
    shapiro_t['p value'] = shapiro_t.apply(lambda x: x.res[1], axis=1)
    shapiro_t = shapiro_t.drop(['res'], axis=1)
    shapiro.update({'{}'.format(col):shapiro_t})
# Average statistic / p-value per first index level, one column set per label.
resume_shapiro = pd.concat([item.mean(level=0) for item in shapiro.values()], axis=1)
# =============================================================================
# Plotting dynamic parameters
# =============================================================================
# plot_dyn_gen = plot_dynamic(SD=True, save=True, plt_style='bmh')
# # # Plotting power information
# fig1 = plot_dyn_gen.gait_plot(Fukuchi_df,
# cols = np.r_[0:1],
# rows = np.r_[0:3],
# title='Ankle Dynamics Right Foot Fukuchi data at instances')
# =============================================================================
# Anthropometric information
meta_info = pd.read_excel('Fukuchi/WBDSinfo.xlsx')
# Drop the first and last columns of the metadata sheet.
meta_info = meta_info.drop(meta_info.iloc[:, [0,-1]].columns, axis=1)
anthro_info = meta_info.iloc[:,np.r_[2:6]]
anthro_info = anthro_info[['Mass','Age','Gender', 'Height']]
anthro_info.columns = ['mass','age','gender', 'height']
# =============================================================================
# we are going to analyze only angles and kinetic information
# =============================================================================
processed_dir = info_dir + '/WBDSascii'
# Index where the dynamic information is
index_angtxt = [j for j, i in enumerate(meta_info['FileName']) if 'ang.txt' in i]
index_knttxt = [j for j, i in enumerate(meta_info['FileName']) if 'knt.txt' in i]
#Index labels
labels_ang = meta_info['FileName'][index_angtxt]
labels_knt = meta_info['FileName'][index_knttxt]
# Keep only the rows describing angle files and tag them with the subject /
# mode labels taken from the gait-curve columns.
meta_info_red = meta_info.filter(index_angtxt, axis=0)
meta_info_red['Subject'] = Fukuchi_df.columns.get_level_values(0)
meta_info_red['Mode'] = Fukuchi_df.columns.get_level_values(1)
# =============================================================================
# Obtaining the gait speed on each
# ============================================================================
#replacing with Nan
meta_info_valid = meta_info.replace('--', np.nan)
#dropping non nan values
meta_info_valid = meta_info_valid.dropna(axis=0, subset= ['GaitSpeed(m/s)'])
#Dropping non useful columns
meta_info_valid = meta_info_valid.dropna(axis=1)
# Extracting only the subject
meta_info_valid['Subject'] = meta_info_valid['FileName'].apply(lambda x: x[3:6])
#Index ending in .c3d
index_c3d = [j for j, i in zip(meta_info_valid.index, meta_info_valid['FileName']) if '.c3d' in i]
#Erasing all non c3d indexes
meta_info_valid = meta_info_valid.loc[index_c3d]
# Extracting only the trial
meta_info_valid['Trial'] = meta_info_valid['FileName'].apply(lambda x: x[10:-4])
#Mode column
meta_info_valid['Mode'] = meta_info_valid['Trial'].apply(lambda x: x[0])
meta_info_valid['Mode'] = meta_info_valid['Mode'].replace(['T', 'O'], ['Treadmill', 'Overground'])
#Type column
meta_info_valid['Type'] = meta_info_valid['Trial'].apply(lambda x: x[-1])
#Obtaining fraude number
# NOTE(review): 'Fraude' is the dimensionless Froude number v / sqrt(g * L),
# with g = 9.81 m/s^2 and L the leg length.
meta_info_valid['Fraude'] = meta_info_valid['GaitSpeed(m/s)']/(np.sqrt(9.81*meta_info_valid['LegLength']))
#Obtaining Stansfiels number
meta_info_valid['Stansfield'] = 100*meta_info_valid['GaitSpeed(m/s)']/meta_info_valid['Height']
#Fraude vs Stansfield proportion Females: 0.5617, Male:0.5856, overall: 0.5747
# =============================================================================
# Building filters by speed
# =============================================================================
# Overground trials bucketed into Froude-number ('Fraude') speed bands.
meta_very_slow = meta_info_valid.query("Mode == 'Overground' & Fraude < 0.227")
meta_slow = meta_info_valid.query("Mode == 'Overground' & Fraude >= 0.227 & Fraude < 0.363")
meta_free = meta_info_valid.query("Mode == 'Overground' & Fraude >= 0.363 & Fraude < 0.5")
meta_fast = meta_info_valid.query("Mode == 'Overground' & Fraude >= 0.5 & Fraude < 0.636")
meta_very_fast = meta_info_valid.query("Mode == 'Overground' & Fraude > 0.636")
#Unique trials
unique_type = meta_info_valid['Type'].unique()
# Mean and std of the Froude number per trial type, rounded to 2 decimals.
mean_T = {i:[np.round(meta_info_valid.query("Type == '{}'".format(i))['Fraude'].mean(),2)] for i in unique_type}
std_T = {i:np.round(meta_info_valid.query("Type == '{}'".format(i))['Fraude'].std(),2) for i in unique_type}
for key in mean_T.keys():
    mean_T[key].append(std_T[key]) #Including std in mean
new_dict_labels= ['T01','T02','T03','T04','T05','T06','T07','T08','OS','OC','OF']
vel_labels = [r'$v*={}({})$'.format(i,j) for i,j in mean_T.values()]
# NOTE(review): mutating mean_T's keys while iterating mean_T.keys() depends
# on CPython tolerating same-size key replacement — verify the relabelling
# is applied in the intended order.
for num, old_key in enumerate(mean_T.keys()):
    mean_T[new_dict_labels[num]] = mean_T.pop(old_key)
#As is the same and generate some errors
vel_labels[-3] = '$v*=0.30(0.04)$'
#Reordering Fukuchi_df_nan
Fukuchi_df_nan = Fukuchi_df_nan.reindex(new_dict_labels, level=1, axis=1)
#Params for all comparatives plots
color_labels = ['blue','red','green','violet','orange','grey','goldenrod']
color_regs = ['dark'+i for i in color_labels]
Colors_tab = [i[1] for i in mcolors.TABLEAU_COLORS.items()]*3
Color_DJS = [mcolors.CSS4_COLORS[item] for item in color_labels]*3
Color_reg = [mcolors.CSS4_COLORS[item] for item in color_regs]*3
# Shared keyword arguments for the DJS comparison plots below.
params = {'sharex':False, 'sharey':False, 'left_margin': 0.2, 'arr_size':12,
          'yticks': np.arange(-0.25, 1.80, 0.25), 'xticks':None,
          'color_reg':Color_DJS, 'color_symbols': Color_reg, #'color_DJS': Colors_tab,
          'alpha_prod': 0.3, 'alpha_absorb': 0.0, 'DJS_linewidth': 1.5,
          'sd_linewidth': 0.08,'reg_linewidth': 1.0}
# Interpolation factor and hyper-parameter grids used by later sections.
times=3
smooth_ = [2,3,4]
cluster_ = range(15*times, 20*times, times)
# Overground-vs-treadmill comparison on condition-averaged curves.
if instances:
    # =============================================================================
    # Let us keep only the piece of df and change labels
    # =============================================================================
    # Keep only the three matched overground/treadmill speed conditions.
    Fukuchi_df_modes = Fukuchi_df_nan.loc[:,idx[:,['T03','OS','T05','OC','T07','OF']]]
    # =============================================================================
    # Performing overall average over modes
    # =============================================================================
    Fukuchi_mean_modes = Fukuchi_df_modes.groupby(level=1, axis=1).mean()
    Fukuchi_sd_modes = Fukuchi_df_modes.groupby(level=1, axis=1).std()
    Fukuchi_modes = create_df(Fukuchi_mean_modes, Fukuchi_sd_modes)
    # =============================================================================
    # Setting variables and plotting in individuals
    # =============================================================================
    Fukuchi_instance = ankle_DJS(Fukuchi_modes,
                               features= ['Ankle Dorsi/Plantarflexion ',
                                          'Ankle Dorsi/Plantarflexion',
                                          'Vertical Force',
                                           'Ankle'],
                               exp_name = 'Fukuchi Instances variation analysis')
    all_dfs_instance = Fukuchi_instance.extract_df_DJS_data(idx=[0,2,1,3])
    all_dfs_instance = Fukuchi_instance.interpolate_ankledf(times=times, replace=True)
    all_dfs_instance = Fukuchi_instance.change_labels(["Free O", "Fast O", 'Slow O', 'Slow T',
                                                       'Free T', 'Fast T'])
    # Search for the best smoothing/clustering radii for the turning-point fit.
    df_turn_instance = best_hyper(all_dfs_instance, save='Fukuchi/best_params_instance.csv',
                              smooth_radius=smooth_,
                              cluster_radius=cluster_, verbose=False,
                              rows=[0,1])
    Fukuchi_instance.energy_calculation()
    #Sensitive results may vary when integrating degrees, the best is to do in radians
    Fukuchi_instance.deg_to_rad()
    total_work_instance = Fukuchi_instance.total_work()
    # =============================================================================
    # Obtaining the mechanical work through power instances in regular walking
    # =============================================================================
    # Maps figure header -> (overground col, treadmill col, hide-labels flag).
    cols_to_joint ={r'$v* = 0.3 \pm 0.04$': (2,3, False),
                    r'$v* = 0.43 \pm 0.05$': (0,4, True),
                    r'$v* = 0.55 \pm 0.06$': (1,5, True)}
    # One figure per speed pair; regression and work tables accumulate
    # across iterations into reg_info_ins / work_ins.
    for num, key in enumerate(cols_to_joint.keys()):
        params.update({'hide_labels': (False, cols_to_joint[key][-1])})
        DJS_instances = plot_ankle_DJS(SD=True, save=True, plt_style='bmh', sep=False,
                              alpha=1.5, fig_size=[3.0,2.5], params=params)
        fig5 = DJS_instances.plot_DJS(all_dfs_instance,
                            cols=list(cols_to_joint[key][:2]), rows= np.r_[0,2],
                            title="Ankle DJS OvsT group comparison at {}".format(key),
                            legend=True, reg=df_turn_instance.loc[idx[:,'mean'],:],
                            integration= True, rad = True, header= key)
        if num == 0:
            reg_info_ins = pd.DataFrame(DJS_instances.reg_info_df)
            work_ins = pd.DataFrame(DJS_instances.areas)
        else:
            reg_info_ins = pd.concat([reg_info_ins, DJS_instances.reg_info_df])
            work_ins = pd.concat([work_ins, DJS_instances.areas])
    reg_info_ins = reg_info_ins.round(3)
    work_ins = work_ins.round(3)
    # Restore the shared params mutated inside the loop above.
    params.update({'hide_labels': (False, False)})
    # =============================================================================
    # Plotting dynamic parameters
    # =============================================================================
    # plot_dyn_gen = plot_dynamic(SD=True, save=True, plt_style='bmh')
    # # # Plotting power information
    # fig1 = plot_dyn_gen.gait_plot(Fukuchi_instance.all_dfs_ankle,
    #                     cols = None,
    #                     rows = None,
    #                     title='Ankle Dynamics Right Foot Fukuchi data at instances')
# Legend strings '$v* = mean \pm sd$' per trial label, from mean_T.
velocities = {k: r'$v* = {} \pm {}$'.format(round(j[0],2),round(j[1],2)) for k, j in mean_T.items()}
velocities_text=['Very Slow','Slow', 'Free', 'Fast', 'Very Fast']
# =============================================================================
# Comparing DJS of adults and old people
# =============================================================================
# Which subjects are adults
adult_info = meta_info_red[meta_info_red['AgeGroup'] == 'Young']
adult_group = {i:'Adult' for i in adult_info['Subject']}
# Which subjects are old
old_info = meta_info_red[meta_info_red['AgeGroup'] == 'Older']
old_group = {i:'Elderly' for i in old_info['Subject']}
# Subject id -> 'Adult'/'Elderly' mapping used to relabel column level 0.
age_dict = dict(adult_group, **old_group)
# Creating the df for Age population
# NOTE(review): DataFrame.mean(level=..., axis=1) is the pre-pandas-2.0 idiom
# (removed in pandas >= 2.0); the modern equivalent is
# .groupby(level=[0,1], axis=1).mean() — left as-is for compatibility.
Fukuchi_mean_age = Fukuchi_df.rename(columns= age_dict, level=0).mean(level=[0,1], axis=1)
#Merging column labels
# Flatten the 2-level columns into single strings like 'Adult T01'.
Fukuchi_mean_age.columns = Fukuchi_mean_age.columns.map('{0[0]} {0[1]}'.format)
Fukuchi_sd_age = Fukuchi_df.rename(columns= age_dict, level=0).std(level=[0,1], axis=1)
#Merging column labels
Fukuchi_sd_age.columns = Fukuchi_sd_age.columns.map('{0[0]} {0[1]}'.format)
Fukuchi_age = create_df(Fukuchi_mean_age, Fukuchi_sd_age)
# Fill internal NaNs along the gait cycle axis.
Fukuchi_age = Fukuchi_age.interpolate(axis=1)
# Adult-vs-Elderly DJS comparison, one figure per walking condition.
if Age_group:
    # =============================================================================
    # Setting variables and plotting in individuals
    # =============================================================================
    Fukuchi_ages = ankle_DJS(Fukuchi_age,
                               features= ['Ankle Dorsi/Plantarflexion ',
                                          'Vertical Force',
                                          'Ankle Dorsi/Plantarflexion',
                                           'Ankle'],
                               exp_name = 'Fukuchi Instances variation analysis')
    all_dfs_ages = Fukuchi_ages.extract_df_DJS_data(idx=[0,2,1,3])
    all_dfs_ages = Fukuchi_ages.interpolate_ankledf(times=times, replace=True)
    df_turn_ages = best_hyper(all_dfs_ages, save='Fukuchi/best_params_ages.csv',
                              smooth_radius=smooth_,
                              cluster_radius=cluster_, verbose=False)
    Fukuchi_ages.energy_calculation()
    #Sensitive results may vary when integrating degrees, the best is to do in radians
    Fukuchi_ages.deg_to_rad()
    total_work_ages = Fukuchi_ages.total_work()
    # =============================================================================
    # Plotting QS one by one comparing adults and elderly people
    # =============================================================================
    # Column num pairs with num+11: Adult condition vs the matching Elderly one
    # (11 conditions per age group after flattening).
    cols_to_joint ={item: (num, num+11) for num, item in enumerate(meta_info_red['Mode'].unique())}
    for num, key in enumerate(cols_to_joint.keys()):
        # Show full axis labels only on selected panels of the figure grid.
        if num == 2 or num == 3 or num == 7:
            params.update({'hide_labels':(False, False)})
        else:
            params.update({'hide_labels':(False, True)})
        DJS_comp = plot_ankle_DJS(SD=True, save=True, plt_style='bmh', sep=False,
                              alpha=1.5, fig_size=[3.0,2.5], params=params)
        fig5 = DJS_comp.plot_DJS(Fukuchi_ages.all_dfs_ankle,
                            cols=list(cols_to_joint[key]), rows= np.r_[0,2],
                            title="Ankle DJS age group comparison at {}".format(key),
                            legend=True, reg=df_turn_ages, header=velocities[key],
                            integration= True, rad = True)
    # Restore the shared params mutated inside the loop above.
    params.update({'hide_labels': (False, False)})
# Adult-vs-Elderly comparison on treadmill trials merged into 5 speed bands.
if Age_group_mod:
    # =============================================================================
    # Modifying Fukuchi ages
    # =============================================================================
    # Drop overground conditions; only treadmill speeds are compared here.
    Fukuchi_age_mod = Fukuchi_age.drop(['Adult OC','Adult OS', 'Adult OF',
                                    'Elderly OC','Elderly OS', 'Elderly OF'], level=0, axis=1)
    order_labels = ['{} {}'.format(A, T) for A in ['Adult', 'Elderly'] for T in [1,3,6]]
    order_labels.extend(['{} {}'.format(A, T) for A in ['Adult', 'Elderly'] for T in [5,8]])
    #Groups average 01 and 02, 03 and 04, 06 and 07
    # Average adjacent treadmill speeds (T01+T02, T03+T04, T06+T07) per age group.
    Fukuchi_vel = {'{} {}'.format(j,k): Fukuchi_age_mod.loc[:,idx[['{} T0{}'.format(j,k),
                    '{} T0{}'.format(j,k+1)],:]].mean(axis=1,
                   level=1) for j,k in [(A,T) for A in ['Adult', 'Elderly'] for T in [1,3,6]]}
    # T05 and T08 stand alone (no adjacent speed to merge with).
    Fukuchi_vel.update({'{} {}'.format(j,k): Fukuchi_age_mod.loc[:,idx['{} T0{}'.format(j,k),:]].mean(axis=1,
                   level=1) for j,k in [(A,T) for A in ['Adult', 'Elderly'] for T in [5,8]]})
    # NOTE(review): '$0.544 < v* < 0.0.689$' below contains a typo ('0.0.689');
    # it is a runtime label string, so it is only flagged here — TODO fix upstream.
    labels_vel = pd.MultiIndex.from_product([['{} {}'.format(V,A) for A in ['A', 'E'] for V in [r'$0.151 < v* < 0.265$', r'$0.263 < v* < 0.407$',
                                             r'$0.378 < v* < 0.478$', r'$0.434 < v* < 0.623$',
                                             r'$0.544 < v* < 0.0.689$']],['-1sd','mean','+1sd']])
    Fukuchi_vel_df = pd.concat([Fukuchi_vel[key] for key in sorted(order_labels)], axis=1)
    Fukuchi_vel_df.columns = labels_vel
    # =============================================================================
    # Setting variables and plotting in individuals
    # =============================================================================
    Fukuchi_ages_mod = ankle_DJS(Fukuchi_vel_df,
                               features= ['Ankle Dorsi/Plantarflexion ',
                                          'Vertical Force',
                                          'Ankle Dorsi/Plantarflexion',
                                           'Ankle'],
                               exp_name = 'Fukuchi age mode variation analysis')
    all_dfs_ages_mod = Fukuchi_ages_mod.extract_df_DJS_data(idx=[0,2,1,3])
    all_dfs_ages_mod = Fukuchi_ages_mod.interpolate_ankledf(times=times, replace=True)
    df_turn_ages_mod = best_hyper(all_dfs_ages_mod, save='Fukuchi/best_params_ages_mod.csv',
                              smooth_radius=smooth_,
                              cluster_radius=cluster_, verbose=False)
    Fukuchi_ages_mod.energy_calculation()
    #Sensitive results may vary when integrating degrees, the best is to do in radians
    Fukuchi_ages_mod.deg_to_rad()
    total_work_ages = Fukuchi_ages_mod.total_work()
    # =============================================================================
    # Plotting QS one by one comparing adults and elderly people
    # =============================================================================
    # Column num pairs with num+5: Adult band vs the matching Elderly band.
    cols_to_joint = [(num, num+5) for num in range(5)]
    # Regression and work tables accumulate into reg_info_mode / work_mode.
    for num, key in enumerate(cols_to_joint):
        if num == 0:
            params.update({'hide_labels':(False, False)})
        else:
            params.update({'hide_labels':(False, True)})
        DJS_mod = plot_ankle_DJS(SD=True, save=True, plt_style='bmh', sep=False,
                              alpha=1.5, fig_size=[3.0,2.5], params=params)
        fig5 = DJS_mod.plot_DJS(Fukuchi_ages_mod.all_dfs_ankle,
                            cols=list(key), rows= np.r_[0,2],
                            title="Ankle DJS AvsE group comparison at {}".format(velocities_text[num]),
                            legend=True, reg=df_turn_ages_mod, header=velocities_text[num],
                            integration= True, rad = True)
        if num == 0:
            reg_info_mode = pd.DataFrame(DJS_mod.reg_info_df)
            work_mode = pd.DataFrame(DJS_mod.areas)
        else:
            reg_info_mode = pd.concat([reg_info_mode, DJS_mod.reg_info_df])
            work_mode = pd.concat([work_mode, DJS_mod.areas])
    reg_info_mode = reg_info_mode.round(3)
    work_mode = work_mode.round(3)
    # Restore the shared params mutated inside the loop above.
    params.update({'hide_labels': (False, False)})
# Adult-vs-Elderly comparison restricted to the overground conditions.
if Age_group_gen:
    # =============================================================================
    # Modifying Fukuchi ages
    # =============================================================================
    # Keep only the overground conditions for both age groups.
    Fukuchi_age_gen = Fukuchi_age.loc[:,idx[['Adult OC','Adult OS', 'Adult OF',
                                    'Elderly OC','Elderly OS', 'Elderly OF'],:]]
    # =============================================================================
    # Setting variables and plotting in individuals
    # =============================================================================
    Fukuchi_ages_gen = ankle_DJS(Fukuchi_age_gen,
                               features= ['Ankle Dorsi/Plantarflexion ',
                                          'Vertical Force',
                                          'Ankle Dorsi/Plantarflexion',
                                           'Ankle'],
                               exp_name = 'Fukuchi Overground and gender variation analysis')
    all_dfs_ages_gen = Fukuchi_ages_gen.extract_df_DJS_data(idx=[0,2,1,3])
    all_dfs_ages_gen = Fukuchi_ages_gen.interpolate_ankledf(times=times,
                                                            replace=True)
    # Different hyper-parameter grid here: no smoothing, narrow cluster range.
    df_turn_ages_gen = best_hyper(all_dfs_ages_gen, save='Fukuchi/best_params_ages_gen.csv',
                              smooth_radius=[False],
                              cluster_radius=range(32,35), verbose=False)
    Fukuchi_ages_gen.energy_calculation()
    #Sensitive results may vary when integrating degrees, the best is to do in radians
    Fukuchi_ages_gen.deg_to_rad()
    total_work_gen = Fukuchi_ages_gen.total_work()
    # =============================================================================
    # Plotting QS one by one comparing adults and elderly people
    # =============================================================================
    # Maps figure header -> (Adult col, Elderly col, hide-labels flag).
    cols_to_joint ={r'$v* = 0.3 \pm 0.04$': (2,5, False), r'$v* = 0.43 \pm 0.05$': (0,3, True),
                    r'$v* = 0.55 \pm 0.06$': (1,4, True)}
    # Regression and work tables accumulate into reg_info_gen / work_gen.
    # NOTE(review): these names are reused by the Gender branch below; if both
    # flags are enabled the later branch overwrites them — confirm intended.
    for num, key in enumerate(cols_to_joint.keys()):
        params.update({'hide_labels': (False, cols_to_joint[key][-1])})
        DJS_age_gen = plot_ankle_DJS(SD=True, save=True, plt_style='bmh', sep=False,
                              alpha=1.5, fig_size=[3.0,2.5], params=params)
        fig5 = DJS_age_gen.plot_DJS(Fukuchi_ages_gen.all_dfs_ankle,
                            cols=list(cols_to_joint[key][:2]), rows= np.r_[0,2],
                            title="Ankle DJS age group comparison at {}".format(key),
                            legend=True, reg=df_turn_ages_gen,
                            integration= True, rad = True, header= key)
        if num == 0:
            reg_info_gen = pd.DataFrame(DJS_age_gen.reg_info_df)
            work_gen = pd.DataFrame(DJS_age_gen.areas)
        else:
            reg_info_gen = pd.concat([reg_info_gen, DJS_age_gen.reg_info_df])
            work_gen = pd.concat([work_gen, DJS_age_gen.areas])
    reg_info_gen = reg_info_gen.round(3)
    work_gen = work_gen.round(3)
    # Restore the shared params mutated inside the loop above.
    params.update({'hide_labels': (False, False)})
# Male-vs-Female DJS comparison across all walking conditions.
if Gender:
    # =============================================================================
    # Comparing DJS of adults and old people
    # =============================================================================
    # Which subjects are males
    M_info = meta_info_red[meta_info_red['Gender'] == 'M']
    M_group = {i:'Male' for i in M_info['Subject']}
    # Which subjects are females
    F_info = meta_info_red[meta_info_red['Gender'] == 'F']
    F_group = {i:'Female' for i in F_info['Subject']}
    # NOTE(review): 'age_dict' is reused here for a subject -> gender mapping
    # (misleading name, and it shadows the age mapping built above) — confirm.
    age_dict = dict(M_group, **F_group)
    # Creating the df for Age population
    # NOTE(review): .mean(level=..., axis=1) is the pre-pandas-2.0 idiom
    # (removed in pandas >= 2.0) — left as-is for compatibility.
    Fukuchi_mean_gender = Fukuchi_df.rename(columns= age_dict, level=0).mean(level=[0,1], axis=1)
    #Merging column labels
    # Flatten the 2-level columns into single strings like 'Male T01'.
    Fukuchi_mean_gender.columns = Fukuchi_mean_gender.columns.map('{0[0]} {0[1]}'.format)
    Fukuchi_sd_gender = Fukuchi_df.rename(columns= age_dict, level=0).std(level=[0,1], axis=1)
    #Merging column labels
    Fukuchi_sd_gender.columns = Fukuchi_sd_gender.columns.map('{0[0]} {0[1]}'.format)
    Fukuchi_gender = create_df(Fukuchi_mean_gender, Fukuchi_sd_gender)
    # The best are 4 and 13
    # df_turn_gender = hyperparams(Fukuchi_gender)
    # =============================================================================
    # Setting variables and plotting in individuals
    # =============================================================================
    Fukuchi_gen = ankle_DJS(Fukuchi_gender,
                               features= ['Ankle Dorsi/Plantarflexion ',
                                          'Vertical Force',
                                          'Ankle Dorsi/Plantarflexion',
                                           'Ankle'],
                               exp_name = 'Fukuchi Gender Comparison analysis')
    all_dfs_gender = Fukuchi_gen.extract_df_DJS_data(idx=[0,2,1,3])
    all_dfs_gender = Fukuchi_gen.interpolate_ankledf(times, True)
    df_turn_gender = best_hyper(all_dfs_gender, save='Fukuchi/best_params_gender.csv',
                              smooth_radius=smooth_,
                              cluster_radius=cluster_, verbose=False)
    Fukuchi_gen.energy_calculation()
    #Sensitive results may vary when integrating degrees, the best is to do in radians
    Fukuchi_gen.deg_to_rad()
    total_work_gen = Fukuchi_gen.total_work()
    # =============================================================================
    # Plotting QS one by one comparing adults and elderly people
    # =============================================================================
    # Column num pairs with num+11: Female condition vs the matching Male one.
    cols_to_joint ={item: (num, num+11) for num, item in enumerate(meta_info_red['Mode'].unique())}
    for num, key in enumerate(cols_to_joint.keys()):
        # Show full axis labels only on selected panels of the figure grid.
        if num == 2 or num == 3 or num == 7:
            params.update({'hide_labels':(False, False)})
        else:
            params.update({'hide_labels':(False, True)})
        DJS_gender = plot_ankle_DJS(SD=True, save=True, plt_style='bmh', sep=False,
                              alpha=1.5, fig_size=[3.0,2.5], params=params)
        fig5 = DJS_gender.plot_DJS(Fukuchi_gen.all_dfs_ankle,
                            cols=list(cols_to_joint[key]), rows= np.r_[0,2],
                            title="Ankle DJS gender group comparison at {}".format(key),
                            legend=True, reg=df_turn_gender.loc[idx[:,'mean'],:], header=None,
                            integration= True, rad = True)
        if num == 0:
            reg_info_gen = pd.DataFrame(DJS_gender.reg_info_df)
            work_gen = pd.DataFrame(DJS_gender.areas)
        else:
            reg_info_gen = pd.concat([reg_info_gen, DJS_gender.reg_info_df])
            work_gen = pd.concat([work_gen, DJS_gender.areas])
    reg_info_gen = reg_info_gen.round(3)
    work_gen = work_gen.round(3)
    # Restore the shared params mutated inside the loop above.
    params.update({'hide_labels': (False, False)})
# Per-subject analysis: fit (or load cached) turning points and regressions
# for every individual trial, then screen the regression quality.
if individuals:
    # Plot options for the dense per-subject figure grids (one gray curve per
    # subject, black regression lines).
    params_ind = {'sharex':True, 'sharey':True, 'color_DJS':['slategray']*50,
                  'color_reg':['black']*50, 'color_symbols': ['slategray']*50,
                  'arr_size': 6, 'left_margin': 0.15, 'DJS_linewidth': 0.2,
                  'reg_linewidth': 1.0, 'grid': False, 'alpha_prod': 0.4,
                  'alpha_absorb': 0.1, 'yticks': np.arange(-0.25, 2.25, 0.25)}
    #Do not forget to interpolate and see if the error does not appear
    # NOTE: rebinds the module-level 'times' (was 3) for this branch onward.
    times=2
    # Compute-or-load switch: False loads the cached CSV results below.
    test_ind = False
    if test_ind:
        # One pass per trial condition (level-1 column label).
        for num, lev1 in enumerate(Fukuchi_df_nan.columns.get_level_values(1).unique()):
            Fukuchi_ind = ankle_DJS(Fukuchi_df_nan.loc[:,idx[:,lev1]],
                                  features= ['Ankle Dorsi/Plantarflexion ',
                                              'Vertical Force',
                                              'Ankle Dorsi/Plantarflexion',
                                              'Ankle'],
                                  exp_name = 'Fukuchi individuals Comparison analysis')
            all_dfs_ind = Fukuchi_ind.extract_df_DJS_data(idx=[0,2,1,3])
            all_dfs_ind = Fukuchi_ind.interpolate_ankledf(times=times, replace=True)
            Fukuchi_ind.energy_calculation()
            #Sensitive results may vary when integrating degrees, the best is to do in radians
            Fukuchi_ind.deg_to_rad()
            total_work_ind = Fukuchi_ind.total_work()
            # If calculating the best params is desired
            optimize_params = False
            if optimize_params:
                best_df_turn = best_hyper(all_dfs_ind, save='Fukuchi/best_params_{}.csv'.format(lev1),
                                      smooth_radius=[2,3,4],
                                      cluster_radius=range(25,30), verbose=False)
            else:
                # Reuse previously optimized hyper-parameters per condition.
                best_df_turn = pd.read_csv('Fukuchi/best_params_{}.csv'.format(lev1), index_col=[0,1])
            DJS_ind = plot_ankle_DJS(SD=False, save=True, plt_style='bmh', sep=[6,7],
                                  alpha=5.0, fig_size=[7*2,6*2], params=params_ind)
            fig5 = DJS_ind.plot_DJS(Fukuchi_ind.all_dfs_ankle,
                                cols=None, rows= np.r_[0,2],
                                title="Ankle DJS subject comparison at {}".format(lev1),
                                legend=True, reg=best_df_turn, header=lev1,
                                integration= True, rad = True)
            # Accumulate per-condition results across iterations.
            if num == 0:
                reg_info_ind = pd.DataFrame(DJS_ind.reg_info_df)
                work_ind = pd.DataFrame(DJS_ind.areas)
                df_turn_ind_all = best_df_turn
                total_work_ind_all = total_work_ind
            else:
                df_turn_ind_all = pd.concat([df_turn_ind_all, best_df_turn], axis = 0)
                total_work_ind_all = pd.concat([total_work_ind_all, total_work_ind], axis = 0)
                reg_info_ind = pd.concat([reg_info_ind, DJS_ind.reg_info_df])
                work_ind = pd.concat([work_ind, DJS_ind.areas])
        # Storing results
        df_turn_ind_all.to_csv('Fukuchi/best_df_turn_all.csv')
        total_work_ind_all.to_csv('Fukuchi/total_work_ind.csv')
        reg_info_ind.to_csv('Fukuchi/regression_ind.csv')
        work_ind.to_csv('Fukuchi/work_ind.csv')
    else:
        # Load the cached results written by a previous test_ind=True run.
        df_turn_ind_all = pd.read_csv('Fukuchi/best_df_turn_all.csv', index_col=[0,1])
        total_work_ind_all = pd.read_csv('Fukuchi/total_work_ind.csv', index_col=[0,1])
        reg_info_ind = pd.read_csv('Fukuchi/regression_ind.csv', index_col=[0,1,2])
        work_ind = pd.read_csv('Fukuchi/work_ind.csv', index_col=[0,1])
    reg_info_ind = reg_info_ind.round(3)
    #Adjusting bad R2 results with MSE
    work_ind = work_ind.round(3)
    #reordering levels
    reg_info_ind = reg_info_ind.sort_index(level=0, axis=0)
    work_ind = work_ind.sort_index(level=0, axis=0)
    df_turn_ind_all = df_turn_ind_all.sort_index(level=0, axis=0)
    df_turn_ind_all = df_turn_ind_all.reindex(new_dict_labels, level=1, axis=0)
    # Convert turning-point indices from interpolated samples to %GC.
    df_turn_ind_allGC = df_turn_ind_all.apply(lambda x: x/times)
    #How many samples have got a bad R2 but a good MSE (below 0.0001)
    reg_info_badR2 = reg_info_ind.query("R2 <= 0.2 and MSE <= 0.0001") # 55 samples with
    reg_info_wobad = reg_info_ind.drop(reg_info_badR2.index, axis=0)
    # Stiffness sub-phase labels: Controlled Plantarflexion, Early/Late
    # Response Phase, Descending Phase (per the surrounding usage).
    stiff_labels = ['CP', 'ERP', 'LRP','DP']
    R2mean_per_cat = pd.DataFrame({cat: reg_info_wobad.loc[idx[:,:,cat],
                                    :].mean() for cat in stiff_labels})
    R2std_per_cat = pd.DataFrame({cat: reg_info_wobad.loc[idx[:,:,cat],
                                    :].std() for cat in stiff_labels})
    #Final results CP: 0.81(0.22), ERP 0.96(0.06), LRP: 0.95(0.11), DP 0.95(0.06)
    # =============================================================================
    # plotting per subject at different instances
    # =============================================================================
    # per_subject = [do_plot, which_mode]; mode is 'over', 'tread' or 'all'.
    per_subject = [False, 'all'] #If plot, if Overground plotting, otherwise is Treadmill
    if per_subject[0]:
        if per_subject[1] == 'over':
            # Keep only overground trials (drop T01..T08).
            Fukuchi_df_T = Fukuchi_df_nan.drop(['T{:02d}'.format(ind) for ind in range(1,9)], level=1, axis=1)
            df_turn_ind_T =df_turn_ind_all.drop(['T{:02d}'.format(ind) for ind in range(1,9)], level=1, axis=0)
            mod = 'overground'
            fig_s = [2,4]
            sep_ = [1,3]
            alpha_ = 2.5
        elif per_subject[1] == 'tread':
            # Keep only treadmill trials (drop the overground conditions).
            Fukuchi_df_T = Fukuchi_df_nan.drop(['OC', 'OS', 'OF'], level=1, axis=1)
            df_turn_ind_T =df_turn_ind_all.drop(['OC', 'OS', 'OF'], level=1, axis=0)
            mod = 'treadmill'
            fig_s = [4,6]
            sep_ = [2,4]
            alpha_ = 2.5
        elif per_subject[1] == 'all':
            Fukuchi_df_T = Fukuchi_df_nan
            df_turn_ind_T =df_turn_ind_all
            mod = 'all'
            fig_s = [5,6]
            sep_ = [3,4]
            alpha_ = 3.0
        # One figure per subject; only the first 5 subjects are plotted here.
        for num, lev0 in enumerate(Fukuchi_df_T.columns.get_level_values(0).unique()[:5]):
            Fukuchi_ind2 = ankle_DJS(Fukuchi_df_T.loc[:,idx[lev0,:]],
                                  features= ['Ankle Dorsi/Plantarflexion ',
                                              'Vertical Force',
                                              'Ankle Dorsi/Plantarflexion',
                                              'Ankle'],
                                  exp_name = 'Fukuchi individuals Comparison analysis')
            #Sensitive results may vary when integrating degrees, the best is to do in radians
            Fukuchi_ind2.extract_df_DJS_data(idx=[0,2,1,3])
            # Relabel conditions with their velocity legend strings.
            if mod == 'overground':
                Fukuchi_ind2.change_labels([r'$v* = 0.3(0.04)$', r'$v* = 0.43(0.05)$',
                                            r'$v* = 0.55(0.06)$'], level=1)
            elif mod == 'treadmill':
                Fukuchi_ind2.change_labels(vel_labels[:Fukuchi_ind2.all_dfs_ankle.shape[1]], level=1)
            elif mod == 'all':
                Fukuchi_ind2.change_labels(vel_labels, level=1)
            Fukuchi_ind2.interpolate_ankledf(times, True)
            Fukuchi_ind2.deg_to_rad()
            DJS_ind2 = plot_ankle_DJS(SD=False, save=True, plt_style='bmh', sep=sep_,
                                  alpha=alpha_, fig_size=fig_s, params=params_ind)
            # Drop the subject level and add a dummy 'mean' level so plot_DJS
            # receives the (condition, stat) column shape it expects.
            df_sub = Fukuchi_ind2.all_dfs_ankle.droplevel(0, axis=1)
            df_sub.columns = pd.MultiIndex.from_product([list(df_sub.columns),['mean']])
            fig10 = DJS_ind2.plot_DJS(df_sub,
                                cols=None, rows= np.r_[0,2],
                                title="Ankle DJS subject comparison in subject {} on {}".format(lev0, mod),
                                legend=True, reg=df_turn_ind_T.loc[idx[lev0,:]], header=None,
                                integration= True, rad = True)
decimal = 2 #How many decimals you want to show
if statistics:
#Knowing which values were out of the confidence
summary_df_turn = rp.summary_cont(reg_info_ind, decimals=decimal)
#converting all to absolute values as we would like to compare stiffness magnitudes
# reg_info_ind['stiffness'] = reg_info_ind['stiffness'].abs()
#Do not do this so far, there are opposite stiffneses
#For high stiffnesses we are setting 25 as it was the maximum value found
reg_info_ind['stiffness'][reg_info_ind['stiffness'] >= 25.0] = 20.0
meta_info_anova = meta_info_red
#Creating a categorical value to define speeds from Very Slow to Very Fast
speed_cat = {'OC': 'Free', 'OS':'Slow', 'OF':'Fast', 'T01': 'Very Slow',
'T02': 'Very Slow', 'T03': 'Slow', 'T04': 'Slow', 'T05': 'Free',
'T06': 'Fast', 'T07': 'Fast', 'T08': 'Very Fast'}
meta_info_anova.index = Fukuchi_df.columns
meta_info_anova['speed'] = meta_info_anova.index.get_level_values(1).map(speed_cat)
#Creating categorical value for Overground and Treadmill
meta_info_anova['mode'] = [x[0] for x in meta_info_anova.index.get_level_values(1)]
meta_info_anova['mode'] = meta_info_anova['mode'].replace(['T', 'O'], ['Treadmill',
'Overground'])
# bad samples
bad_samples = [('S22', 'T07'),('S23', 'T06'), ('S27', 'T07'),('S36', 'T07'),
('S06', 'T08'),('S23', 'T06'), ('S15', 'OC'), #Bad regressions
('S36', 'T01'),('S15', 'T03') #Bad regressions
]
# =============================================================================
# #Metrics
# =============================================================================
metrics = pd.concat([reg_info_ind['R2'].unstack(),reg_info_ind['MSE'].unstack()], axis=1)
metrics.columns = pd.MultiIndex.from_product([['R2', 'MSE'], stiff_labels])
metrics = metrics.drop(bad_samples, axis=0)
#Verifying those with good MSE and bad R2
who_badR2 = {item: metrics[metrics.MSE[item] <= 1e-3][metrics.R2[item] <= 0.6] \
for item in stiff_labels}
metrics_wobad = {item: metrics.loc[:,idx[:,item]].drop(who_badR2[item].index, axis=0) \
for item in stiff_labels}
metrics_wobad_mean = pd.concat({item: metrics_wobad[item].mean() for item in stiff_labels}, axis=0)
metrics_wobad_std = pd.concat({item: metrics_wobad[item].std() for item in stiff_labels}, axis=0)
metrics_wobad_res = pd.concat([metrics_wobad_mean, metrics_wobad_std], axis=1)
metrics_wobad_res = metrics_wobad_res.droplevel(2, axis=0)
metrics_wobad_res.columns = ['Mean', 'SD']
# metrics_wobad_res.drop(['CP'], level=0).mean(axis=0,level=1) to know R2 for the rest of stiffness
meta_info_anova = meta_info_anova.reindex(Fukuchi_df_nan.columns, axis=0)
meta_info_anova = meta_info_anova.drop(meta_info_anova.columns[np.r_[0,8:15,16,17]], axis=1)
#Adding All results
results_df = pd.concat([df_turn_ind_allGC, work_ind,
reg_info_ind['stiffness'].unstack()], axis=1)
#Appending to anova df
meta_info_anova = pd.concat([meta_info_anova, results_df], axis=1)
#Removing samples that visually are not coherent
meta_info_anova = meta_info_anova.drop(bad_samples, axis=0)
Fukuchi_df_nan = Fukuchi_df_nan.drop(bad_samples, axis=1)
Fukuchi_df_export = Fukuchi_df_nan.drop(['Vertical Force','Ankle Power [W]'],level=0)
Fukuchi_DJS = ankle_DJS(Fukuchi_df_export, dir_loc = 'Fukuchi',
exp_name = 'Adults and Elderle in Over and Tread')
Fukuchi_DJS_QS = Fukuchi_DJS.extract_df_QS_data(idx=[0,1])
Fukuchi_DJS_QS = Fukuchi_DJS.interpolate_ankledf(replace=True)
Fukuchi_DJS_QS.to_csv("Fukuchi/dynamic_data_Fukuchi.csv")
# =============================================================================
# How many are negatives and in which cases
# =============================================================================
CP_negative = meta_info_anova[meta_info_anova.CP <= 0]
LRP_negative = meta_info_anova[meta_info_anova.LRP <= 0]
# =============================================================================
# About work, look for narrow loops and directions
# =============================================================================
work_ind_wobad = work_ind.drop(bad_samples, axis=0)
cw = meta_info_anova.query("direction == 'cw'")
ccw = meta_info_anova.query("direction == 'ccw'")
narrow = meta_info_anova[meta_info_anova['work prod'] <= 0.02]
#Let us continue here
# =============================================================================
# #Redifining labels
# =============================================================================
dep_vars = meta_info_anova.columns[np.r_[11:18,19:23]]
labels = ['Init {} '.format(i)+r'$[\%GC]$' for i in ['ERP', 'LRP', 'DP', 'S', 'TS']]
labels.extend(['Work Absorbed '+r'$\frac{J}{kg}$', 'Net Work '+r'$\frac{J}{kg}$'])
labels.extend(['Stiffness {}'.format(stiff)+r'$\frac{Nm}{kg \times rad}$' for stiff in stiff_labels])
#Change the column order
labels_complete = list(meta_info_anova.columns[:-4])
labels_complete.extend(stiff_labels)
meta_info_anova = meta_info_anova.reindex(columns=labels_complete)
# =============================================================================
# Applying statistics in gender
# =============================================================================
# Performing variance analysis
Fem = meta_info_anova[meta_info_anova['Gender'] == 'F']
Male = meta_info_anova[meta_info_anova['Gender']== 'M']
#Main information
summary_Fem = multi_idx('Female', rp.summary_cont(Fem.groupby(Fem['speed']), decimals=decimal).round(2).T,
idx=False)
summary_Male = multi_idx('Male', rp.summary_cont(Male.groupby(Male['speed']), decimals=decimal).round(2).T,
idx=False)
#Let's perform the Bartetts's test whose Null Hypothesis is that the
#variances are equal. We will use a significance level of 5.0%
var_gender = {item: stats.bartlett(Fem[item],
Male[item]).pvalue for item in dep_vars}
#Variances are equal, excepting point 1, 5, ERP, CP and DP, It means that the F-statistics
# is not reliable when ANOVA is applied
# =============================================================================
# ttest student analysis
# =============================================================================
# Assumptions:
# 1. Independent samples
# 2. Large enough sample size or observations come from a normally-distributed
# population
# 3. Variances are equal
ttest_gender = {item: stats.ttest_ind(Fem[item], Male[item],
equal_var=var_gender[item] > 0.05).pvalue for item in dep_vars}
#There is then a statistical significant difference between gender
#in the two analyzed groups. The DP, ERP, and CP, point 1 is significantly different
#We need to see which is higher
# summary content
summary_dep_variables_gen = rp.summary_cont(meta_info_anova.loc[:,dep_vars], decimals=decimal)
summary_gender = rp.summary_cont(meta_info_anova.groupby(meta_info_anova['Gender']), decimals=decimal).T
# =============================================================================
# Plot statistics
# =============================================================================
#Seeing statistical differences between gender and the significan
fig6, axes = plt.subplots(2,2, figsize = (8,8))
deps_gen = dep_vars[np.r_[1,7,8,9]]
labels_gen = np.array(labels)[np.r_[1,7,8,9]]
for num, ax in enumerate(np.ravel(axes)):
sns.boxplot(x='Gender', y=deps_gen[num], data=meta_info_anova, ax=ax)
ax.set_ylabel(labels_gen[num])
# fig6.suptitle('Variables with statistical differences in gender', fontsize = 18)
fig6.savefig('Fukuchi/stats_diff_gender.png')
# =============================================================================
# Applying statistics in Ages
# =============================================================================
# Performing variance analysis
# Split the meta-information table into the two age cohorts.
adults = meta_info_anova[meta_info_anova['AgeGroup']=='Young']
old = meta_info_anova[meta_info_anova['AgeGroup']=='Older']
#Let's perform the Bartlett's test whose Null Hypothesis is that the
#variances are equal. We will use a significance level of 5.0%
var_ages = {item: stats.bartlett(adults[item],
                old[item]).pvalue for item in dep_vars}
#Variances are unequal in point 0, work prod and DP
# =============================================================================
# ttest student analysis
# =============================================================================
# Assumptions:
# 1. Independent samples
# 2. Large enough sample size or observations come from a normally-distributed
# population
# 3. Variances are equal
# equal_var is chosen per variable from the Bartlett p-value: when the
# equal-variance hypothesis is rejected (p <= 0.05), Welch's t-test is used.
ttest_ages = {item: stats.ttest_ind(adults[item], old[item],
              equal_var=var_ages[item] > 0.05).pvalue for item in dep_vars}
#There is then a statistical significant difference between age groups
#in the two analyzed groups. The work prod, work abs and DP phase showed statistical differences
#We need to see which is higher
# summary content
summary_dep_variables_age = rp.summary_cont(meta_info_anova.loc[:,dep_vars], decimals=decimal)
summary_ages = rp.summary_cont(meta_info_anova.groupby(meta_info_anova['AgeGroup']), decimals=decimal)
summary_adults = multi_idx('Adults',
                rp.summary_cont(adults.groupby(adults['speed']), decimals=decimal).T, idx=False)
summary_old = multi_idx('Elderly',
                rp.summary_cont(old.groupby(old['speed']), decimals=decimal).T, idx=False)
#OLS method
# One ordinary-least-squares model per dependent variable, with AgeGroup as
# the categorical predictor (Q() quotes column names containing spaces).
results_ages = {item: ols("Q('{}') ~ C(AgeGroup)".format(item), data=meta_info_anova).fit() \
                for item in dep_vars}
#If p-value is greater than the alpha (0.05) value then there is no association between the
#dependent variable and AgeGroup
table_ANOVA_age = pd.Series({item: sm.stats.anova_lm(results_ages[item],
                        typ=2).iloc[0,-1] for item in dep_vars})
#Conclusion: there is an association between Agegroups and work abs, prod and stiffness DP.
# =============================================================================
# Tukey's Analysis
# Null hypothesis: There is no significant difference between groups
# =============================================================================
mc_ages = {item: MultiComparison(meta_info_anova[item],
                                 meta_info_anova['AgeGroup']) for item in dep_vars}
# Collect reject flag, p-value and mean difference of the (single) pairwise
# Tukey HSD comparison for each dependent variable.
mc_results_ages = pd.DataFrame({item: [mc_ages[item].tukeyhsd().reject[0],
                                     mc_ages[item].tukeyhsd().pvalues[0],
                                     mc_ages[item].tukeyhsd().meandiffs[0]]
                    for item in dep_vars}, index=['reject','p value','mean diff']).T
# conclusion: We can reject the null hypothesis in which there is no significant
# differences in age groups only for the dependent variables work prod, abs and DP,
# the remaining ones do not have statistical differences.
# =============================================================================
# Plot statistics for age groups
# =============================================================================
# Boxplots for the dependent variables that showed statistical differences
# between age groups.
fig7, axes = plt.subplots(2,2, figsize = (8,8))
deps_age = dep_vars[np.r_[5:8,9]]
labels_age = np.array(labels)[np.r_[5:8,9]]
for num, ax in enumerate(np.ravel(axes)):
    # BUGFIX: the original plotted deps_gen[num] (the gender variable set)
    # while labelling the axis with labels_age[num]; the age selection
    # deps_age defined above is the intended series.
    sns.boxplot(x='AgeGroup', y=deps_age[num], data=meta_info_anova, ax=ax)
    ax.set_ylabel(labels_age[num])
# fig7.suptitle('Variables with statistical differences in Age groups', fontsize = 18)
fig7.savefig('Fukuchi/stats_diff_ages.png')
# =============================================================================
# Applying statistics overground vs treadmill
# =============================================================================
# Performing variance analysis
overground = meta_info_anova[meta_info_anova['mode'] == 'Overground']
treadmill = meta_info_anova[meta_info_anova['mode'] == 'Treadmill']
#We need to remove the very fast and slow in order to homogenized the speed
#dropping T01, T02, T04, T06 and T08
treadmill_adj = treadmill.drop(['T01','T02','T04','T06','T08'], level=1,axis=0)
# concat the df for further operations
mode_df = pd.concat([overground, treadmill_adj], axis=0)
#Let's perform the Bartlett's test whose Null Hypothesis is that the
#variances are equal. We will use a significance level of 5.0%
var_mode = {item: stats.bartlett(overground[item],
                treadmill_adj[item]).pvalue for item in dep_vars}
#Variances are point 2 and work prod, rest of them are unequal
# =============================================================================
# ttest student analysis
# =============================================================================
# Assumptions:
# 1. Independent samples
# 2. Large enough sample size or observations come from a normally-distributed
# population
# 3. Variances are equal, if not apply weltch test
# Does the samples come from a normally distributed population
# As above, Welch's t-test is selected per variable when Bartlett rejects
# equality of variances.
ttest_mode = {item: stats.ttest_ind(overground[item], treadmill_adj[item],
              equal_var=var_mode[item] > 0.05).pvalue for item in dep_vars}
#There is then a statistical significant difference between modes
#in the two analyzed groups. Point 0, 2, work prod (0.054) do not show statistical differences
#The rest of them were statistically significant.
# summary content
summary_dep_variables_mode = rp.summary_cont(mode_df.loc[:,dep_vars], decimals=decimal)
summary_mode = rp.summary_cont(mode_df.groupby(mode_df['mode']), decimals=decimal)
# =============================================================================
# Statistical Analysis for AGes taking out VS and VF
# =============================================================================
# Restrict the age comparison to overground trials only.
adults_o = overground.query("AgeGroup == 'Young'")
olds_o = overground.query("AgeGroup == 'Older'")
var_ages_m = {item: stats.bartlett(adults_o[item],
                olds_o[item]).pvalue for item in dep_vars}
ttest_ages_m = {item: stats.ttest_ind(adults_o[item], olds_o[item],
              equal_var=var_ages_m[item] > 0.05).pvalue for item in dep_vars}
# THERE IS NO STATISTICAL DIFFERENCES BETWEEN ELDERLY AND ADULTS IN GENERAL
# Build the six age x speed subgroups used below.
olds_slow = olds_o.query("speed == 'Slow'")
adults_slow = adults_o.query("speed == 'Slow'")
olds_free = olds_o.query("speed == 'Free'")
adults_free = adults_o.query("speed == 'Free'")
olds_fast = olds_o.query("speed == 'Fast'")
adults_fast = adults_o.query("speed == 'Fast'")
groups_ages = [olds_slow, adults_slow, olds_free,
               adults_free, olds_fast, adults_fast]
# Let us determine the normality in subgroups
normality_ages = pd.concat([pd.DataFrame({item: stats.shapiro(data_ages[item]) \
                    for item in dep_vars}) for data_ages in groups_ages], axis=0)
normality_ages.index = pd.MultiIndex.from_product([['Slow', 'Free', 'Fast'],
                                           ['Olds','Adults'],['stats', 'p value']])
#MOST OF THE SUBGROUPS ARE NORMAL, HOWEVER A CONSIDERABLE PERCENTAGE IS NOT
# Determining statistical differences between speeds on subgroups
# Kruskal-Wallis (non-parametric) tests: old vs adult at each matched speed.
kruskal_speed_AS = {item: kruskal(olds_slow[item].values,
                             adults_slow[item].values).pvalue for item in dep_vars}
#Point 1
kruskal_speed_AC = {item: kruskal(olds_free[item].values,
                             adults_free[item].values).pvalue for item in dep_vars}
#Point 1 is the only one different (0.04)
kruskal_speed_AF = {item: kruskal(olds_fast[item].values,
                             adults_fast[item].values).pvalue for item in dep_vars}
#Point 5
#Analysis between
# Within-cohort speed effect for the elderly group.
kruskal_speed_olds = {item: kruskal(olds_o.query("speed == 'Slow'")[item].values,
                             olds_o.query("speed == 'Free'")[item].values,
                             olds_o.query("speed == 'Fast'")[item].values).pvalue for item in dep_vars}
#Stats diff in point 1,3,4,abs and prod, and CP
# Let us proceed with dunn analysis on those outputs in which
# Dunn post-hoc (Holm-adjusted) only on the variables flagged by Kruskal.
dunn_old = pd.concat([sp.posthoc_dunn(olds_o, val_col = item,
                group_col= 'speed', p_adjust='holm') for item in dep_vars[np.r_[0,2,3,4,5,6,7]]], axis=0)
dunn_old.index = pd.MultiIndex.from_product([dep_vars[np.r_[0,2,3,4,5,6,7]],
                                            list(dunn_old.index.unique())])
dunn_oldbool = dunn_old.apply(lambda x: x < 0.05)
# Within-cohort speed effect for the young adult group.
kruskal_speed_adults = {item: kruskal(adults_o.query("speed == 'Slow'")[item].values,
                             adults_o.query("speed == 'Free'")[item].values,
                             adults_o.query("speed == 'Fast'")[item].values).pvalue for item in dep_vars}
#Stats diff in point 3,4, abs and prod, CP and LRP
dunn_adults = pd.concat([sp.posthoc_dunn(adults_o, val_col = item, group_col = 'speed',
                p_adjust='holm') for item in dep_vars[np.r_[2,3,5,6,7,10]]], axis=0)
dunn_adults.index = pd.MultiIndex.from_product([dep_vars[np.r_[2,3,5,6,7,10]],list(dunn_adults.index.unique())])
dunn_adultsbool = dunn_adults.apply(lambda x: x < 0.05)
summary_adults_m = multi_idx('Adults',
                rp.summary_cont(adults_o.groupby(adults_o['speed']), decimals=decimal).T, idx=False)
summary_old_m = multi_idx('Elderly',
                           rp.summary_cont(olds_o.groupby(olds_o['speed']), decimals=decimal).T,
                           idx=False)
summary_over = multi_idx('Overground',
                rp.summary_cont(overground.groupby(overground['speed'])).round(2).T, idx=False)
summary_tread_adj = multi_idx('Treadmill',
                rp.summary_cont(treadmill_adj.groupby(treadmill_adj['speed'])).round(2).T, idx=False)
#OLS method
# One OLS model per dependent variable with walking mode (overground vs
# treadmill) as the categorical predictor.
results_mode = {item: ols("Q('{}') ~ C(mode)".format(item), data=mode_df).fit() \
                for item in dep_vars}
#If p-value is greater than the alpha (0.05) value then there is no association between the
#dependent variable and walking mode
table_ANOVA_mode = pd.Series({item: sm.stats.anova_lm(results_mode[item],
                        typ=2).iloc[0,-1] for item in dep_vars})
#Conclusion: there is an association between walking mode and work abs, prod and stiffness DP.
# =============================================================================
# Tukey's Analysis
# Null hypothesis: There is no significant difference between groups
# =============================================================================
mc_mode = {item: MultiComparison(mode_df[item],
                                 mode_df['mode']) for item in dep_vars}
# Reject flag, p-value and mean difference of the single overground-vs-
# treadmill Tukey HSD comparison per dependent variable.
mc_results_mode = pd.DataFrame({item: [mc_mode[item].tukeyhsd().reject[0],
                                     mc_mode[item].tukeyhsd().pvalues[0],
                                     mc_mode[item].tukeyhsd().meandiffs[0]]
                    for item in dep_vars}, index=['reject','p value','mean diff']).T
# conclusion: We can reject the null hypothesis in which there is no significant
# differences between walking modes only for the dependent variables work prod, abs and DP,
# the remaining ones do not have statistical differences.
# =============================================================================
# Plot statistics for walking mode
# =============================================================================
#Seeing statistical differences between modes and the significant variables
fig8, axes = plt.subplots(2,4, figsize = (16,8))
deps_mode = dep_vars[np.r_[1,3:10]]
labels_mode = np.array(labels)[np.r_[1,3:10]]
for num, ax in enumerate(np.ravel(axes)):
    sns.boxplot(x='mode', y=deps_mode[num], data=mode_df, ax=ax)
    ax.set_ylabel(labels_mode[num])
# fig8.suptitle('Variables with statistical differences in Overground vs Treadmill', fontsize = 18)
fig8.savefig('Fukuchi/stats_diff_mode.png')
# =============================================================================
# Differences within speed and mode
# =============================================================================
norm_speed_O = {item: stats.shapiro(overground[item]) for item in dep_vars}
norm_speed_T = {item: stats.shapiro(treadmill_adj[item]) for item in dep_vars}
#outputs are not normal distributed
# Per-speed subgroups: three overground speeds, five treadmill speeds
# (treadmill includes the Very Slow / Very Fast extremes).
O_slow = overground.query("speed == 'Slow'")
O_free = overground.query("speed == 'Free'")
O_fast = overground.query("speed == 'Fast'")
T_vslow = treadmill.query("speed == 'Very Slow'")
T_slow = treadmill.query("speed == 'Slow'")
T_free = treadmill.query("speed == 'Free'")
T_fast = treadmill.query("speed == 'Fast'")
T_vfast = treadmill.query("speed == 'Very Fast'")
#Let us see if we see same variances in overground
var_speed_O = {item: stats.bartlett(O_slow[item], O_free[item], O_fast[item]).pvalue for item in dep_vars}
#Now for treadmill
var_speed_T = {item: stats.bartlett(T_vslow[item], T_slow[item], T_free[item],
                              T_fast[item], T_vfast[item]).pvalue for item in dep_vars}
# As variances are different we would need to implement a non-parametric method
# We will apply kruskal wallis
#Null hypothesis
# the null hypothesis is that the medians of all groups are equal,
# and the alternative hypothesis is that at least one population median
# of one group is different from the population median of at least one other group.
kruskal_speed_O = {item: kruskal(O_slow[item].values, O_free[item].values,
                             O_fast[item].values).pvalue for item in dep_vars}
kruskal_speed_T = {item: kruskal(T_vslow[item].values, T_slow[item].values,
                                 T_free[item].values, T_fast[item].values,
                             T_vfast[item].values).pvalue for item in dep_vars}
# Treadmill speed effect restricted to the three speeds shared with overground.
kruskal_speed_T_adj = {item: kruskal(T_slow[item].values,
                                 T_free[item].values,
                             T_fast[item].values).pvalue for item in dep_vars}
# Mode effect at each matched speed (overground vs treadmill).
kruskal_speed_MS = {item: kruskal(O_slow[item].values,
                             T_slow[item].values).pvalue for item in dep_vars}
#Stat diff in points 4, work abs, CP, ERP, LRP, DP
kruskal_speed_MC = {item: kruskal(O_free[item].values,
                             T_free[item].values).pvalue for item in dep_vars}
#Stats diff in point 5, work prod and abs, CP and DP
kruskal_speed_MF = {item: kruskal(O_fast[item].values,
                             T_fast[item].values).pvalue for item in dep_vars}
#Stats diff in point 4,5, abs and prod, CP and DP.
#At 5% the null hypothesis is rejected for:
# Overground: point 0, point 2, point 3, work abs, and work prod
# Treadmill: all were rejected excepting DP (0.07)
# Let us proceed with dunn analysis on those outputs in which
# the Kruskal-Wallis test rejected the null (Holm-adjusted Dunn post-hoc).
dunn_O = pd.concat([sp.posthoc_dunn(overground, val_col = item,
                group_col= 'speed', p_adjust='holm') for item in dep_vars[np.r_[0,2:8,10]]], axis=0)
dunn_O.index = pd.MultiIndex.from_product([dep_vars[np.r_[0,2:8,10]],list(dunn_O.index.unique())])
dunn_Obool = dunn_O.apply(lambda x: x < 0.05)
dunn_T = pd.concat([sp.posthoc_dunn(treadmill_adj, val_col = item, group_col = 'speed', #Take out adj for Very classes
                p_adjust='holm') for item in dep_vars[np.r_[0,2,3,5,6]]], axis=0)
dunn_T.index = pd.MultiIndex.from_product([dep_vars[np.r_[0,2,3,5,6]],list(dunn_T.index.unique())])
dunn_Tbool = dunn_T.apply(lambda x: x < 0.05)
# 1 => drop the per-axes legend; only one axes per row keeps its legend.
legend_ = [1,1,0]*3
fig9, axes = plt.subplots(3,3, figsize = (12,12))
fig9.tight_layout()
fig9.subplots_adjust(wspace=.3, left=0.1)
deps_mod_ = dep_vars[np.r_[0:4,5:10]]
labels_mod_ = np.array(labels)[np.r_[0:4,5:10]]
for num, ax in enumerate(np.ravel(axes)):
    sns.boxplot(x='mode', y=deps_mod_[num], hue='speed',
                data=mode_df, ax=ax, hue_order = ['Slow', 'Free', 'Fast'])
    ax.set_ylabel(labels_mod_[num])
    if bool(legend_[num]):
        ax.get_legend().remove()
    else:
        ax.legend(loc='upper right')
    ax.set_xlabel('Environment')
# fig9.suptitle('Variables with statistical differences in OvsT and speed', fontsize = 18)
fig9.savefig('Fukuchi/stats_diff_mode_speed.png')
# =============================================================================
# Differences within speed and gender
# =============================================================================
Fem = mode_df[mode_df['Gender'] == 'F']
Male = mode_df[mode_df['Gender'] == 'M']
norm_speed_F = {item: stats.shapiro(Fem[item]) for item in dep_vars}
norm_speed_M = {item: stats.shapiro(Male[item]) for item in dep_vars}
#Main information
summary_Fem_m = multi_idx('Female', rp.summary_cont(Fem.groupby(Fem['speed']),
                                        decimals=decimal).T, idx=False)
summary_Male_m = multi_idx('Male', rp.summary_cont(Male.groupby(Male['speed']),
                                        decimals=decimal).T, idx=False)
# =============================================================================
# Summary concat
# =============================================================================
# Side-by-side descriptive statistics for all six population groups.
summary_concat = pd.concat([summary_adults_m, summary_old_m, summary_Fem_m,
                            summary_Male_m, summary_over, summary_tread_adj], axis=1)
trials_num = summary_concat.iloc[0,:].astype(np.int64)
trials_num.name = ('','N')
#Dropping non independent vars
summary_concat = summary_concat.loc[idx[dep_vars,:],:]
#Dropping N,and SE
summary_concat = summary_concat.loc[idx[:,['Mean', 'SD','95% Conf.', 'Interval']],:]
summary_concat = change_labels(summary_concat, ['Mean', 'SD','95% CI min', '95% CI max'],
                               index=True, level=1)
# summary_concat = change_labels(summary_concat, labels,
#                                index=True, level=0)
# Re-attach the trial counts as the first row after the filtering above.
summary_concat = pd.concat([pd.DataFrame(trials_num).T, summary_concat], axis=0)
#Changing order on level 1 columns
summary_concat = summary_concat.reindex(['Slow', 'Free', 'Fast'], axis=1, level=1)
# Export to latex
with open("Fukuchi/table2.tex", "w+") as pt:
    summary_concat.to_latex(buf=pt, col_space=10, longtable=True, multirow=True,
                caption='Cuantitative ankle DJS characteristics at different population groups'+\
                r' three different gait speeds: Slow ({})'.format(vel_labels[-3])+\
                r', Free ({}) and Fast({})'.format(vel_labels[-2], vel_labels[-1]),
                            label='tab:table2')
summary_N = summary_concat.iloc[0,:]
summary_N.name = ('Group', 'Speed')
# NOTE(review): summary_N is a Series; assigning .columns on a Series has no
# effect on the LaTeX output — confirm this line is intentional.
summary_N.columns = ['Number N']
mult_idx_N = pd.MultiIndex.from_product([['Young adult (A) (age $< 31$ years old)',
                                         'Elder adult (E) (age $\ge 54$ years old',
                                         'Female (F)',
                                         'Male (M)',
                                         'Overground (O)',
                                         'Treadmill (T)'],
                                        ['Slow (S) ($v^*\le 0.34$)',
                                         'Free (C) ($0.37 < v^* \le 0.48$)',
                                         'Fast (F) ($v^*>0.48$)']])
summary_N.index = mult_idx_N
with open("Fukuchi/tableN.tex", "w+") as pt:
    summary_N.to_latex(buf=pt, col_space=10, longtable=False, multirow=True,
                            caption='caption',
                            label='tab:tableN')
#To csv
summary_concat.to_csv('Fukuchi/summary_concat.csv')
#outputs are not normal distributed
# Gender x speed subgroups over the homogenized mode dataframe.
M_slow = mode_df.query("speed == 'Slow' & Gender == 'M'")
M_free = mode_df.query("speed == 'Free' & Gender == 'M'")
M_fast = mode_df.query("speed == 'Fast' & Gender == 'M'")
F_slow = mode_df.query("speed == 'Slow' & Gender == 'F'")
F_free = mode_df.query("speed == 'Free' & Gender == 'F'")
F_fast = mode_df.query("speed == 'Fast' & Gender == 'F'")
#Let us see if we see same variances in Female groups
var_speed_F = {item: stats.bartlett(F_slow[item], F_free[item], F_fast[item]).pvalue for item in dep_vars}
#Variances are equal in point 1, point 4, work abs and DP
#Now for Males
var_speed_M = {item: stats.bartlett(M_slow[item], M_free[item],
                                    M_fast[item]).pvalue for item in dep_vars}
#Variances are equal in work abs, DP and ERP
# As variances are different we would need to implement a non-parametric method
# We will apply kruskal wallis
#Null hypothesis
# the null hypothesis is that the medians of all groups are equal,
# and the alternative hypothesis is that at least one population median
# of one group is different from the population median of at least one other group.
# Speed effect within each gender.
kruskal_speed_F = {item: kruskal(F_slow[item].values, F_free[item].values,
                             F_fast[item].values).pvalue for item in dep_vars}
#Point 1, point 3, 4, 5, abs, prod, CP, and LRP
kruskal_speed_M = {item: kruskal(M_slow[item].values,
                                 M_free[item].values,
                             M_fast[item].values).pvalue for item in dep_vars}
# Points 1,2,3,4,abs, prod, LRP
# Omnibus test across all six gender x speed subgroups.
kruskal_speed_Gen = {item: kruskal(M_slow[item].values, M_free[item].values,
                                   M_fast[item].values,F_slow[item].values,
                                   F_free[item].values,
                             F_fast[item].values).pvalue for item in dep_vars}
#We will use kruskal wallis to see the differences between speeds
# Gender effect at each matched speed (F vs M).
kruskal_speed_GenS = {item: kruskal(F_slow[item].values,
                             M_slow[item].values).pvalue for item in dep_vars}
#Statistical differences in point 1, CP, DP, ERP, LRP
kruskal_speed_GenC = {item: kruskal(F_free[item].values,
                             M_free[item].values).pvalue for item in dep_vars}
# Stats diff DP and ERP
kruskal_speed_GenF = {item: kruskal(F_fast[item].values,
                             M_fast[item].values).pvalue for item in dep_vars}
# Stats diff DP and ERP
#At 5% the null hypothesis is rejected for:
# Females: point 0, point 1, point 2, point 3, work abs, and work prod
# Males: point 0, point 2, point 3, work abs, and work prod
# Overall: point 4 is the unique with no statistical differences
# =============================================================================
# Determining which values are statistically significant comparing classes at same speed
# =============================================================================
def which_significant(dic, thresholds=(0.001, 0.01, 0.05)):
    """Bucket (key, p-value) pairs by the strictest significance level met.

    Parameters
    ----------
    dic : dict
        Mapping from variable name to p-value.
    thresholds : tuple of float, optional
        Significance cut-offs; defaults to the conventional
        (0.001, 0.01, 0.05), preserving the original behavior.

    Returns
    -------
    dict
        ``{threshold: [(key, p_value), ...]}`` where each pair is placed
        under the smallest threshold it satisfies (``p <= threshold``).
        Pairs above the largest threshold are omitted entirely.
    """
    levels = sorted(thresholds)
    res = {level: [] for level in levels}
    for key, pval in dic.items():
        for level in levels:
            if pval <= level:
                res[level].append((key, pval))
                break  # only the most stringent matching bucket
    return res
# Labels for the nine class-vs-class Kruskal tests (3 factors x 3 speeds).
kruskal_etiquete_class = ['{} {}'.format(i,j) for i in ['Age', 'Gender', 'Mode'] for j in ['Slow', 'Free', 'Fast']]
kruskals_all_class = [kruskal_speed_AS, kruskal_speed_AC, kruskal_speed_AF,
                kruskal_speed_GenS, kruskal_speed_GenC, kruskal_speed_GenF,
                kruskal_speed_MS, kruskal_speed_MC, kruskal_speed_MF]
kruskals_speed = {key: which_significant(item) for key, item in zip(kruskal_etiquete_class, kruskals_all_class)}
#Now for classes within the group
kruskal_etiquete_group = ['Adults', 'Elder', 'Female', 'Male', 'Overground', 'Treadmill']
kruskal_all_group = [kruskal_speed_adults, kruskal_speed_olds,
                     kruskal_speed_F, kruskal_speed_M,
                     kruskal_speed_O, kruskal_speed_T_adj]
kruskals_group = {key: which_significant(item) for key, item in zip(kruskal_etiquete_group,
                                                                    kruskal_all_group)}
# Let us proceed with dunn analysis on those outputs in which
# Kruskal-Wallis flagged a gender/speed effect (Holm-adjusted post-hoc).
dunn_F = pd.concat([sp.posthoc_dunn(Fem, val_col = item,
                group_col= 'speed', p_adjust='holm') for item in dep_vars[np.r_[0,2,3,5,6,10]]], axis=0)
dunn_F.index = pd.MultiIndex.from_product([dep_vars[np.r_[0,2,3,5,6,10]],list(dunn_F.index.unique())])
dunn_Fbool = dunn_F.apply(lambda x: x < 0.05)
dunn_M = pd.concat([sp.posthoc_dunn(Male, val_col = item, group_col = 'speed',
                p_adjust='holm') for item in dep_vars[np.r_[0,2,3,5,6,7]]], axis=0)
dunn_M.index = pd.MultiIndex.from_product([dep_vars[np.r_[0,2,3,5,6,7]],list(dunn_M.index.unique())])
dunn_Mbool = dunn_M.apply(lambda x: x < 0.05)
# 1 => drop the per-axes legend; note the kept position differs from fig9.
legend_ = [1,0,1]*3
fig11, axes = plt.subplots(3,3, figsize = (12,12))
fig11.tight_layout()
fig11.subplots_adjust(wspace=.3, left=0.1)
for num, ax in enumerate(np.ravel(axes)):
    sns.boxplot(x='Gender', y=deps_mod_[num], hue='speed',
                data=mode_df, ax=ax, hue_order = ['Slow', 'Free', 'Fast'])
    ax.set_ylabel(labels_mod_[num])
    if bool(legend_[num]):
        ax.get_legend().remove()
    else:
        ax.legend(loc='upper right')
# fig11.suptitle('Variables with statistical differences in Gender and speed', fontsize = 18)
fig11.savefig('Fukuchi/stats_diff_gender_speed.png')
# Format: diagonal, non-significant, p<0.001, p<0.01, p<0.05
cmap = ['1', '#fb6a4a', '#08306b', '#4292c6', '#c6dbef']
heatmap_args = {'cmap': cmap, 'linewidths': 0.25, 'linecolor': '0.5',
                'clip_on': False, 'square': True, 'cbar_ax_bbox': [0.80, 0.35, 0.04, 0.3]}
concat_dunn = [dunn_adults, dunn_old, dunn_F,
               dunn_M, dunn_O, dunn_T]
# One significance heatmap per population group.
for num, info in enumerate(concat_dunn):
    fig12, axs = plt.subplots(1,1, figsize=[8,8])
    axs = sp.sign_plot(info, **heatmap_args)
    # axs.set_ylabel(kruskal_etiquete_group[num])
    fig12.savefig('Fukuchi/dunn_results_{}.png'.format(kruskal_etiquete_group[num]))
# Saving a table with the general statistical significance
#Saving the significance of the speed within the vars of the same class
significance_group = pd.concat([sp.sign_table(dunn_) for dunn_ in concat_dunn], axis=1)
significance_group = significance_group.fillna('NS')
significance_group = significance_group.replace('-', 'NS')
# Map the star notation emitted by sign_table to numeric alpha levels.
significance_group = significance_group.replace('NS', ' ') #Removing NS
significance_group = significance_group.replace('*', 0.05) #0.05
significance_group = significance_group.replace('**', 0.01) #0.01
significance_group = significance_group.replace('***', 0.001) #0.001
#Changing order on level 1 columns and index
significance_group.columns = pd.MultiIndex.from_product([kruskal_etiquete_group,['Fast', 'Free', 'Slow']])
significance_group = significance_group.reindex(['Slow', 'Free', 'Fast'], axis=1, level=1)
significance_group = significance_group.reindex(['Slow', 'Free', 'Fast'], axis=0, level=1)
# Reindexing significance
significance_group = significance_group.reindex(dep_vars[np.r_[0,2,3,4,5,6,7,10]], axis=0, level=0)
# significance_group = change_labels(significance_group, np.array(labels)[np.r_[0,2,3,4,5,6,7,10]],
#                                    level=0)
#Removing TS as they do not have good significance
significance_group = significance_group.drop(['point 5'], axis=0, level=0)
with open("Fukuchi/table3.tex", "w+") as pt:
    significance_group.to_latex(buf=pt, col_space=10, longtable=False, multirow=True,
                caption=r'Significant differences (p value) between '+\
                r'three different gait speeds: Slow ({})'.format(vel_labels[-3])+\
                r', Free ({}) and Fast({})'.format(vel_labels[-2], vel_labels[-1])+\
                    ' for each population group.',
                            label='tab:table3')
#Meta info_anova to csv
#Creating a categorical value to define speeds from Very Slow to Very Fast
speed_cat_simp = {'OC': 'C', 'OS':'S', 'OF':'F', 'T01': 'VS',
             'T02': 'VS', 'T03': 'S', 'T04': 'S', 'T05': 'C',
             'T06': 'F', 'T07': 'F', 'T08': 'VF'}
meta_info_anova['speed'] = meta_info_anova.index.get_level_values(1).map(speed_cat_simp)
meta_info_anova.to_csv("Fukuchi/meta_info_Fukuchi.csv")
| [
"utilities_QS.best_hyper",
"numpy.ravel",
"pandas.read_csv",
"researchpy.summary_cont",
"utilities_QS.create_df",
"statsmodels.api.stats.anova_lm",
"numpy.arange",
"DJSFunctions.ankle_DJS",
"scikit_posthocs.sign_plot",
"pandas.DataFrame",
"scipy.stats.mstats.kruskal",
"matplotlib.pyplot.subplo... | [((1101, 1141), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {'font_scale': '(1.5)'}), "('paper', font_scale=1.5)\n", (1116, 1141), True, 'import seaborn as sns\n'), ((1142, 1168), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (1155, 1168), True, 'import seaborn as sns\n'), ((1625, 1636), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1634, 1636), False, 'import os\n'), ((1794, 1866), 'pandas.read_csv', 'pd.read_csv', (['"""Fukuchi/Fukuchi_mean.csv"""'], {'header': '[0, 1]', 'index_col': '[0, 1]'}), "('Fukuchi/Fukuchi_mean.csv', header=[0, 1], index_col=[0, 1])\n", (1805, 1866), True, 'import pandas as pd\n'), ((2622, 2666), 'utilities_QS.multi_idx', 'multi_idx', (['"""Angular vel [deg / GC]"""', 'ang_vel'], {}), "('Angular vel [deg / GC]', ang_vel)\n", (2631, 2666), False, 'from utilities_QS import multi_idx, create_df, best_hyper, change_labels\n'), ((2680, 2720), 'pandas.concat', 'pd.concat', (['[Fukuchi_df, ang_vel]'], {'axis': '(0)'}), '([Fukuchi_df, ang_vel], axis=0)\n', (2689, 2720), True, 'import pandas as pd\n'), ((2777, 2815), 'utilities_QS.multi_idx', 'multi_idx', (['"""Ankle Power [W]"""', 'power_df'], {}), "('Ankle Power [W]', power_df)\n", (2786, 2815), False, 'from utilities_QS import multi_idx, create_df, best_hyper, change_labels\n'), ((2829, 2870), 'pandas.concat', 'pd.concat', (['[Fukuchi_df, power_df]'], {'axis': '(0)'}), '([Fukuchi_df, power_df], axis=0)\n', (2838, 2870), True, 'import pandas as pd\n'), ((5008, 5046), 'pandas.read_excel', 'pd.read_excel', (['"""Fukuchi/WBDSinfo.xlsx"""'], {}), "('Fukuchi/WBDSinfo.xlsx')\n", (5021, 5046), True, 'import pandas as pd\n'), ((15102, 15145), 'utilities_QS.create_df', 'create_df', (['Fukuchi_mean_age', 'Fukuchi_sd_age'], {}), '(Fukuchi_mean_age, Fukuchi_sd_age)\n', (15111, 15145), False, 'from utilities_QS import multi_idx, create_df, best_hyper, change_labels\n'), ((7274, 7318), 'numpy.sqrt', 'np.sqrt', (["(9.81 * 
meta_info_valid['LegLength'])"], {}), "(9.81 * meta_info_valid['LegLength'])\n", (7281, 7318), True, 'import numpy as np\n'), ((9398, 9425), 'numpy.arange', 'np.arange', (['(-0.25)', '(1.8)', '(0.25)'], {}), '(-0.25, 1.8, 0.25)\n', (9407, 9425), True, 'import numpy as np\n'), ((10449, 10496), 'utilities_QS.create_df', 'create_df', (['Fukuchi_mean_modes', 'Fukuchi_sd_modes'], {}), '(Fukuchi_mean_modes, Fukuchi_sd_modes)\n', (10458, 10496), False, 'from utilities_QS import multi_idx, create_df, best_hyper, change_labels\n'), ((10760, 10941), 'DJSFunctions.ankle_DJS', 'ankle_DJS', (['Fukuchi_modes'], {'features': "['Ankle Dorsi/Plantarflexion ', 'Ankle Dorsi/Plantarflexion',\n 'Vertical Force', 'Ankle']", 'exp_name': '"""Fukuchi Instances variation analysis"""'}), "(Fukuchi_modes, features=['Ankle Dorsi/Plantarflexion ',\n 'Ankle Dorsi/Plantarflexion', 'Vertical Force', 'Ankle'], exp_name=\n 'Fukuchi Instances variation analysis')\n", (10769, 10941), False, 'from DJSFunctions import extract_preprocess_data, ankle_DJS\n'), ((11441, 11590), 'utilities_QS.best_hyper', 'best_hyper', (['all_dfs_instance'], {'save': '"""Fukuchi/best_params_instance.csv"""', 'smooth_radius': 'smooth_', 'cluster_radius': 'cluster_', 'verbose': '(False)', 'rows': '[0, 1]'}), "(all_dfs_instance, save='Fukuchi/best_params_instance.csv',\n smooth_radius=smooth_, cluster_radius=cluster_, verbose=False, rows=[0, 1])\n", (11451, 11590), False, 'from utilities_QS import multi_idx, create_df, best_hyper, change_labels\n'), ((15468, 15647), 'DJSFunctions.ankle_DJS', 'ankle_DJS', (['Fukuchi_age'], {'features': "['Ankle Dorsi/Plantarflexion ', 'Vertical Force',\n 'Ankle Dorsi/Plantarflexion', 'Ankle']", 'exp_name': '"""Fukuchi Instances variation analysis"""'}), "(Fukuchi_age, features=['Ankle Dorsi/Plantarflexion ',\n 'Vertical Force', 'Ankle Dorsi/Plantarflexion', 'Ankle'], exp_name=\n 'Fukuchi Instances variation analysis')\n", (15477, 15647), False, 'from DJSFunctions import extract_preprocess_data, 
ankle_DJS\n'), ((15972, 16101), 'utilities_QS.best_hyper', 'best_hyper', (['all_dfs_ages'], {'save': '"""Fukuchi/best_params_ages.csv"""', 'smooth_radius': 'smooth_', 'cluster_radius': 'cluster_', 'verbose': '(False)'}), "(all_dfs_ages, save='Fukuchi/best_params_ages.csv', smooth_radius\n =smooth_, cluster_radius=cluster_, verbose=False)\n", (15982, 16101), False, 'from utilities_QS import multi_idx, create_df, best_hyper, change_labels\n'), ((19363, 19544), 'DJSFunctions.ankle_DJS', 'ankle_DJS', (['Fukuchi_vel_df'], {'features': "['Ankle Dorsi/Plantarflexion ', 'Vertical Force',\n 'Ankle Dorsi/Plantarflexion', 'Ankle']", 'exp_name': '"""Fukuchi age mode variation analysis"""'}), "(Fukuchi_vel_df, features=['Ankle Dorsi/Plantarflexion ',\n 'Vertical Force', 'Ankle Dorsi/Plantarflexion', 'Ankle'], exp_name=\n 'Fukuchi age mode variation analysis')\n", (19372, 19544), False, 'from DJSFunctions import extract_preprocess_data, ankle_DJS\n'), ((19894, 20030), 'utilities_QS.best_hyper', 'best_hyper', (['all_dfs_ages_mod'], {'save': '"""Fukuchi/best_params_ages_mod.csv"""', 'smooth_radius': 'smooth_', 'cluster_radius': 'cluster_', 'verbose': '(False)'}), "(all_dfs_ages_mod, save='Fukuchi/best_params_ages_mod.csv',\n smooth_radius=smooth_, cluster_radius=cluster_, verbose=False)\n", (19904, 20030), False, 'from utilities_QS import multi_idx, create_df, best_hyper, change_labels\n'), ((22410, 22605), 'DJSFunctions.ankle_DJS', 'ankle_DJS', (['Fukuchi_age_gen'], {'features': "['Ankle Dorsi/Plantarflexion ', 'Vertical Force',\n 'Ankle Dorsi/Plantarflexion', 'Ankle']", 'exp_name': '"""Fukuchi Overground and gender variation analysis"""'}), "(Fukuchi_age_gen, features=['Ankle Dorsi/Plantarflexion ',\n 'Vertical Force', 'Ankle Dorsi/Plantarflexion', 'Ankle'], exp_name=\n 'Fukuchi Overground and gender variation analysis')\n", (22419, 22605), False, 'from DJSFunctions import extract_preprocess_data, ankle_DJS\n'), ((25987, 26036), 'utilities_QS.create_df', 'create_df', 
(['Fukuchi_mean_gender', 'Fukuchi_sd_gender'], {}), '(Fukuchi_mean_gender, Fukuchi_sd_gender)\n', (25996, 26036), False, 'from utilities_QS import multi_idx, create_df, best_hyper, change_labels\n'), ((26374, 26554), 'DJSFunctions.ankle_DJS', 'ankle_DJS', (['Fukuchi_gender'], {'features': "['Ankle Dorsi/Plantarflexion ', 'Vertical Force',\n 'Ankle Dorsi/Plantarflexion', 'Ankle']", 'exp_name': '"""Fukuchi Gender Comparison analysis"""'}), "(Fukuchi_gender, features=['Ankle Dorsi/Plantarflexion ',\n 'Vertical Force', 'Ankle Dorsi/Plantarflexion', 'Ankle'], exp_name=\n 'Fukuchi Gender Comparison analysis')\n", (26383, 26554), False, 'from DJSFunctions import extract_preprocess_data, ankle_DJS\n'), ((26866, 26998), 'utilities_QS.best_hyper', 'best_hyper', (['all_dfs_gender'], {'save': '"""Fukuchi/best_params_gender.csv"""', 'smooth_radius': 'smooth_', 'cluster_radius': 'cluster_', 'verbose': '(False)'}), "(all_dfs_gender, save='Fukuchi/best_params_gender.csv',\n smooth_radius=smooth_, cluster_radius=cluster_, verbose=False)\n", (26876, 26998), False, 'from utilities_QS import multi_idx, create_df, best_hyper, change_labels\n'), ((76527, 76605), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[kruskal_etiquete_group, ['Fast', 'Free', 'Slow']]"], {}), "([kruskal_etiquete_group, ['Fast', 'Free', 'Slow']])\n", (76553, 76605), True, 'import pandas as pd\n'), ((2588, 2602), 'numpy.gradient', 'np.gradient', (['x'], {}), '(x)\n', (2599, 2602), True, 'import numpy as np\n'), ((12481, 12594), 'plot_dynamics.plot_ankle_DJS', 'plot_ankle_DJS', ([], {'SD': '(True)', 'save': '(True)', 'plt_style': '"""bmh"""', 'sep': '(False)', 'alpha': '(1.5)', 'fig_size': '[3.0, 2.5]', 'params': 'params'}), "(SD=True, save=True, plt_style='bmh', sep=False, alpha=1.5,\n fig_size=[3.0, 2.5], params=params)\n", (12495, 12594), False, 'from plot_dynamics import plot_ankle_DJS\n'), ((16963, 17076), 'plot_dynamics.plot_ankle_DJS', 'plot_ankle_DJS', ([], {'SD': '(True)', 'save': 
'(True)', 'plt_style': '"""bmh"""', 'sep': '(False)', 'alpha': '(1.5)', 'fig_size': '[3.0, 2.5]', 'params': 'params'}), "(SD=True, save=True, plt_style='bmh', sep=False, alpha=1.5,\n fig_size=[3.0, 2.5], params=params)\n", (16977, 17076), False, 'from plot_dynamics import plot_ankle_DJS\n'), ((20814, 20927), 'plot_dynamics.plot_ankle_DJS', 'plot_ankle_DJS', ([], {'SD': '(True)', 'save': '(True)', 'plt_style': '"""bmh"""', 'sep': '(False)', 'alpha': '(1.5)', 'fig_size': '[3.0, 2.5]', 'params': 'params'}), "(SD=True, save=True, plt_style='bmh', sep=False, alpha=1.5,\n fig_size=[3.0, 2.5], params=params)\n", (20828, 20927), False, 'from plot_dynamics import plot_ankle_DJS\n'), ((23978, 24091), 'plot_dynamics.plot_ankle_DJS', 'plot_ankle_DJS', ([], {'SD': '(True)', 'save': '(True)', 'plt_style': '"""bmh"""', 'sep': '(False)', 'alpha': '(1.5)', 'fig_size': '[3.0, 2.5]', 'params': 'params'}), "(SD=True, save=True, plt_style='bmh', sep=False, alpha=1.5,\n fig_size=[3.0, 2.5], params=params)\n", (23992, 24091), False, 'from plot_dynamics import plot_ankle_DJS\n'), ((27854, 27967), 'plot_dynamics.plot_ankle_DJS', 'plot_ankle_DJS', ([], {'SD': '(True)', 'save': '(True)', 'plt_style': '"""bmh"""', 'sep': '(False)', 'alpha': '(1.5)', 'fig_size': '[3.0, 2.5]', 'params': 'params'}), "(SD=True, save=True, plt_style='bmh', sep=False, alpha=1.5,\n fig_size=[3.0, 2.5], params=params)\n", (27868, 27967), False, 'from plot_dynamics import plot_ankle_DJS\n'), ((29176, 29204), 'numpy.arange', 'np.arange', (['(-0.25)', '(2.25)', '(0.25)'], {}), '(-0.25, 2.25, 0.25)\n', (29185, 29204), True, 'import numpy as np\n'), ((32197, 32258), 'pandas.read_csv', 'pd.read_csv', (['"""Fukuchi/best_df_turn_all.csv"""'], {'index_col': '[0, 1]'}), "('Fukuchi/best_df_turn_all.csv', index_col=[0, 1])\n", (32208, 32258), True, 'import pandas as pd\n'), ((32287, 32346), 'pandas.read_csv', 'pd.read_csv', (['"""Fukuchi/total_work_ind.csv"""'], {'index_col': '[0, 1]'}), "('Fukuchi/total_work_ind.csv', 
index_col=[0, 1])\n", (32298, 32346), True, 'import pandas as pd\n'), ((32369, 32431), 'pandas.read_csv', 'pd.read_csv', (['"""Fukuchi/regression_ind.csv"""'], {'index_col': '[0, 1, 2]'}), "('Fukuchi/regression_ind.csv', index_col=[0, 1, 2])\n", (32380, 32431), True, 'import pandas as pd\n'), ((32449, 32502), 'pandas.read_csv', 'pd.read_csv', (['"""Fukuchi/work_ind.csv"""'], {'index_col': '[0, 1]'}), "('Fukuchi/work_ind.csv', index_col=[0, 1])\n", (32460, 32502), True, 'import pandas as pd\n'), ((37189, 37236), 'researchpy.summary_cont', 'rp.summary_cont', (['reg_info_ind'], {'decimals': 'decimal'}), '(reg_info_ind, decimals=decimal)\n', (37204, 37236), True, 'import researchpy as rp\n'), ((39117, 39174), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[['R2', 'MSE'], stiff_labels]"], {}), "([['R2', 'MSE'], stiff_labels])\n", (39143, 39174), True, 'import pandas as pd\n'), ((39807, 39865), 'pandas.concat', 'pd.concat', (['[metrics_wobad_mean, metrics_wobad_std]'], {'axis': '(1)'}), '([metrics_wobad_mean, metrics_wobad_std], axis=1)\n', (39816, 39865), True, 'import pandas as pd\n'), ((40505, 40553), 'pandas.concat', 'pd.concat', (['[meta_info_anova, results_df]'], {'axis': '(1)'}), '([meta_info_anova, results_df], axis=1)\n', (40514, 40553), True, 'import pandas as pd\n'), ((40879, 40980), 'DJSFunctions.ankle_DJS', 'ankle_DJS', (['Fukuchi_df_export'], {'dir_loc': '"""Fukuchi"""', 'exp_name': '"""Adults and Elderle in Over and Tread"""'}), "(Fukuchi_df_export, dir_loc='Fukuchi', exp_name=\n 'Adults and Elderle in Over and Tread')\n", (40888, 40980), False, 'from DJSFunctions import extract_preprocess_data, ankle_DJS\n'), ((44981, 45048), 'researchpy.summary_cont', 'rp.summary_cont', (['meta_info_anova.loc[:, dep_vars]'], {'decimals': 'decimal'}), '(meta_info_anova.loc[:, dep_vars], decimals=decimal)\n', (44996, 45048), True, 'import researchpy as rp\n'), ((45491, 45525), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': 
'(8, 8)'}), '(2, 2, figsize=(8, 8))\n', (45503, 45525), True, 'import matplotlib.pyplot as plt\n'), ((47606, 47673), 'researchpy.summary_cont', 'rp.summary_cont', (['meta_info_anova.loc[:, dep_vars]'], {'decimals': 'decimal'}), '(meta_info_anova.loc[:, dep_vars], decimals=decimal)\n', (47621, 47673), True, 'import researchpy as rp\n'), ((50027, 50061), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(8, 8)'}), '(2, 2, figsize=(8, 8))\n', (50039, 50061), True, 'import matplotlib.pyplot as plt\n'), ((51214, 51260), 'pandas.concat', 'pd.concat', (['[overground, treadmill_adj]'], {'axis': '(0)'}), '([overground, treadmill_adj], axis=0)\n', (51223, 51260), True, 'import pandas as pd\n'), ((52650, 52709), 'researchpy.summary_cont', 'rp.summary_cont', (['mode_df.loc[:, dep_vars]'], {'decimals': 'decimal'}), '(mode_df.loc[:, dep_vars], decimals=decimal)\n', (52665, 52709), True, 'import researchpy as rp\n'), ((54270, 54371), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[['Slow', 'Free', 'Fast'], ['Olds', 'Adults'], ['stats', 'p value']]"], {}), "([['Slow', 'Free', 'Fast'], ['Olds', 'Adults'], [\n 'stats', 'p value']])\n", (54296, 54371), True, 'import pandas as pd\n'), ((59443, 59478), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(4)'], {'figsize': '(16, 8)'}), '(2, 4, figsize=(16, 8))\n', (59455, 59478), True, 'import matplotlib.pyplot as plt\n'), ((64036, 64072), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'figsize': '(12, 12)'}), '(3, 3, figsize=(12, 12))\n', (64048, 64072), True, 'import matplotlib.pyplot as plt\n'), ((65959, 66079), 'pandas.concat', 'pd.concat', (['[summary_adults_m, summary_old_m, summary_Fem_m, summary_Male_m,\n summary_over, summary_tread_adj]'], {'axis': '(1)'}), '([summary_adults_m, summary_old_m, summary_Fem_m, summary_Male_m,\n summary_over, summary_tread_adj], axis=1)\n', (65968, 66079), True, 'import pandas as pd\n'), ((66458, 66556), 
'utilities_QS.change_labels', 'change_labels', (['summary_concat', "['Mean', 'SD', '95% CI min', '95% CI max']"], {'index': '(True)', 'level': '(1)'}), "(summary_concat, ['Mean', 'SD', '95% CI min', '95% CI max'],\n index=True, level=1)\n", (66471, 66556), False, 'from utilities_QS import multi_idx, create_df, best_hyper, change_labels\n'), ((67638, 67920), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[['Young adult (A) (age $< 31$ years old)',\n 'Elder adult (E) (age $\\\\ge 54$ years old', 'Female (F)', 'Male (M)',\n 'Overground (O)', 'Treadmill (T)'], ['Slow (S) ($v^*\\\\le 0.34$)',\n 'Free (C) ($0.37 < v^* \\\\le 0.48$)', 'Fast (F) ($v^*>0.48$)']]"], {}), "([['Young adult (A) (age $< 31$ years old)',\n 'Elder adult (E) (age $\\\\ge 54$ years old', 'Female (F)', 'Male (M)',\n 'Overground (O)', 'Treadmill (T)'], ['Slow (S) ($v^*\\\\le 0.34$)',\n 'Free (C) ($0.37 < v^* \\\\le 0.48$)', 'Fast (F) ($v^*>0.48$)']])\n", (67664, 67920), True, 'import pandas as pd\n'), ((74390, 74426), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'figsize': '(12, 12)'}), '(3, 3, figsize=(12, 12))\n', (74402, 74426), True, 'import matplotlib.pyplot as plt\n'), ((9135, 9165), 'matplotlib.colors.TABLEAU_COLORS.items', 'mcolors.TABLEAU_COLORS.items', ([], {}), '()\n', (9163, 9165), True, 'import matplotlib.colors as mcolors\n'), ((13054, 13093), 'pandas.DataFrame', 'pd.DataFrame', (['DJS_instances.reg_info_df'], {}), '(DJS_instances.reg_info_df)\n', (13066, 13093), True, 'import pandas as pd\n'), ((13117, 13150), 'pandas.DataFrame', 'pd.DataFrame', (['DJS_instances.areas'], {}), '(DJS_instances.areas)\n', (13129, 13150), True, 'import pandas as pd\n'), ((13192, 13244), 'pandas.concat', 'pd.concat', (['[reg_info_ins, DJS_instances.reg_info_df]'], {}), '([reg_info_ins, DJS_instances.reg_info_df])\n', (13201, 13244), True, 'import pandas as pd\n'), ((13268, 13310), 'pandas.concat', 'pd.concat', (['[work_ins, DJS_instances.areas]'], {}), '([work_ins, 
DJS_instances.areas])\n', (13277, 13310), True, 'import pandas as pd\n'), ((21389, 21422), 'pandas.DataFrame', 'pd.DataFrame', (['DJS_mod.reg_info_df'], {}), '(DJS_mod.reg_info_df)\n', (21401, 21422), True, 'import pandas as pd\n'), ((21447, 21474), 'pandas.DataFrame', 'pd.DataFrame', (['DJS_mod.areas'], {}), '(DJS_mod.areas)\n', (21459, 21474), True, 'import pandas as pd\n'), ((21517, 21564), 'pandas.concat', 'pd.concat', (['[reg_info_mode, DJS_mod.reg_info_df]'], {}), '([reg_info_mode, DJS_mod.reg_info_df])\n', (21526, 21564), True, 'import pandas as pd\n'), ((21589, 21626), 'pandas.concat', 'pd.concat', (['[work_mode, DJS_mod.areas]'], {}), '([work_mode, DJS_mod.areas])\n', (21598, 21626), True, 'import pandas as pd\n'), ((24541, 24578), 'pandas.DataFrame', 'pd.DataFrame', (['DJS_age_gen.reg_info_df'], {}), '(DJS_age_gen.reg_info_df)\n', (24553, 24578), True, 'import pandas as pd\n'), ((24602, 24633), 'pandas.DataFrame', 'pd.DataFrame', (['DJS_age_gen.areas'], {}), '(DJS_age_gen.areas)\n', (24614, 24633), True, 'import pandas as pd\n'), ((24675, 24725), 'pandas.concat', 'pd.concat', (['[reg_info_gen, DJS_age_gen.reg_info_df]'], {}), '([reg_info_gen, DJS_age_gen.reg_info_df])\n', (24684, 24725), True, 'import pandas as pd\n'), ((24749, 24789), 'pandas.concat', 'pd.concat', (['[work_gen, DJS_age_gen.areas]'], {}), '([work_gen, DJS_age_gen.areas])\n', (24758, 24789), True, 'import pandas as pd\n'), ((28429, 28465), 'pandas.DataFrame', 'pd.DataFrame', (['DJS_gender.reg_info_df'], {}), '(DJS_gender.reg_info_df)\n', (28441, 28465), True, 'import pandas as pd\n'), ((28489, 28519), 'pandas.DataFrame', 'pd.DataFrame', (['DJS_gender.areas'], {}), '(DJS_gender.areas)\n', (28501, 28519), True, 'import pandas as pd\n'), ((28561, 28610), 'pandas.concat', 'pd.concat', (['[reg_info_gen, DJS_gender.reg_info_df]'], {}), '([reg_info_gen, DJS_gender.reg_info_df])\n', (28570, 28610), True, 'import pandas as pd\n'), ((28634, 28673), 'pandas.concat', 'pd.concat', (['[work_gen, 
DJS_gender.areas]'], {}), '([work_gen, DJS_gender.areas])\n', (28643, 28673), True, 'import pandas as pd\n'), ((29447, 29658), 'DJSFunctions.ankle_DJS', 'ankle_DJS', (['Fukuchi_df_nan.loc[:, idx[:, lev1]]'], {'features': "['Ankle Dorsi/Plantarflexion ', 'Vertical Force',\n 'Ankle Dorsi/Plantarflexion', 'Ankle']", 'exp_name': '"""Fukuchi individuals Comparison analysis"""'}), "(Fukuchi_df_nan.loc[:, idx[:, lev1]], features=[\n 'Ankle Dorsi/Plantarflexion ', 'Vertical Force',\n 'Ankle Dorsi/Plantarflexion', 'Ankle'], exp_name=\n 'Fukuchi individuals Comparison analysis')\n", (29456, 29658), False, 'from DJSFunctions import extract_preprocess_data, ankle_DJS\n'), ((30783, 30906), 'plot_dynamics.plot_ankle_DJS', 'plot_ankle_DJS', ([], {'SD': '(False)', 'save': '(True)', 'plt_style': '"""bmh"""', 'sep': '[6, 7]', 'alpha': '(5.0)', 'fig_size': '[7 * 2, 6 * 2]', 'params': 'params_ind'}), "(SD=False, save=True, plt_style='bmh', sep=[6, 7], alpha=5.0,\n fig_size=[7 * 2, 6 * 2], params=params_ind)\n", (30797, 30906), False, 'from plot_dynamics import plot_ankle_DJS\n'), ((35020, 35229), 'DJSFunctions.ankle_DJS', 'ankle_DJS', (['Fukuchi_df_T.loc[:, idx[lev0, :]]'], {'features': "['Ankle Dorsi/Plantarflexion ', 'Vertical Force',\n 'Ankle Dorsi/Plantarflexion', 'Ankle']", 'exp_name': '"""Fukuchi individuals Comparison analysis"""'}), "(Fukuchi_df_T.loc[:, idx[lev0, :]], features=[\n 'Ankle Dorsi/Plantarflexion ', 'Vertical Force',\n 'Ankle Dorsi/Plantarflexion', 'Ankle'], exp_name=\n 'Fukuchi individuals Comparison analysis')\n", (35029, 35229), False, 'from DJSFunctions import extract_preprocess_data, ankle_DJS\n'), ((36135, 36250), 'plot_dynamics.plot_ankle_DJS', 'plot_ankle_DJS', ([], {'SD': '(False)', 'save': '(True)', 'plt_style': '"""bmh"""', 'sep': 'sep_', 'alpha': 'alpha_', 'fig_size': 'fig_s', 'params': 'params_ind'}), "(SD=False, save=True, plt_style='bmh', sep=sep_, alpha=alpha_,\n fig_size=fig_s, params=params_ind)\n", (36149, 36250), False, 'from plot_dynamics 
import plot_ankle_DJS\n'), ((45591, 45607), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (45599, 45607), True, 'import numpy as np\n'), ((45657, 45671), 'numpy.ravel', 'np.ravel', (['axes'], {}), '(axes)\n', (45665, 45671), True, 'import numpy as np\n'), ((45686, 45755), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""Gender"""', 'y': 'deps_gen[num]', 'data': 'meta_info_anova', 'ax': 'ax'}), "(x='Gender', y=deps_gen[num], data=meta_info_anova, ax=ax)\n", (45697, 45755), True, 'import seaborn as sns\n'), ((48983, 49050), 'statsmodels.stats.multicomp.MultiComparison', 'MultiComparison', (['meta_info_anova[item]', "meta_info_anova['AgeGroup']"], {}), "(meta_info_anova[item], meta_info_anova['AgeGroup'])\n", (48998, 49050), False, 'from statsmodels.stats.multicomp import MultiComparison\n'), ((50125, 50141), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (50133, 50141), True, 'import numpy as np\n'), ((50189, 50203), 'numpy.ravel', 'np.ravel', (['axes'], {}), '(axes)\n', (50197, 50203), True, 'import numpy as np\n'), ((50218, 50289), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""AgeGroup"""', 'y': 'deps_gen[num]', 'data': 'meta_info_anova', 'ax': 'ax'}), "(x='AgeGroup', y=deps_gen[num], data=meta_info_anova, ax=ax)\n", (50229, 50289), True, 'import seaborn as sns\n'), ((58417, 58464), 'statsmodels.stats.multicomp.MultiComparison', 'MultiComparison', (['mode_df[item]', "mode_df['mode']"], {}), "(mode_df[item], mode_df['mode'])\n", (58432, 58464), False, 'from statsmodels.stats.multicomp import MultiComparison\n'), ((59545, 59561), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (59553, 59561), True, 'import numpy as np\n'), ((59610, 59624), 'numpy.ravel', 'np.ravel', (['axes'], {}), '(axes)\n', (59618, 59624), True, 'import numpy as np\n'), ((59639, 59699), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""mode"""', 'y': 'deps_mode[num]', 'data': 'mode_df', 'ax': 'ax'}), "(x='mode', y=deps_mode[num], data=mode_df, ax=ax)\n", 
(59650, 59699), True, 'import seaborn as sns\n'), ((60167, 60198), 'scipy.stats.shapiro', 'stats.shapiro', (['overground[item]'], {}), '(overground[item])\n', (60180, 60198), True, 'import scipy.stats as stats\n'), ((60251, 60285), 'scipy.stats.shapiro', 'stats.shapiro', (['treadmill_adj[item]'], {}), '(treadmill_adj[item])\n', (60264, 60285), True, 'import scipy.stats as stats\n'), ((64219, 64235), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (64227, 64235), True, 'import numpy as np\n'), ((64287, 64301), 'numpy.ravel', 'np.ravel', (['axes'], {}), '(axes)\n', (64295, 64301), True, 'import numpy as np\n'), ((64316, 64429), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""mode"""', 'y': 'deps_mod_[num]', 'hue': '"""speed"""', 'data': 'mode_df', 'ax': 'ax', 'hue_order': "['Slow', 'Free', 'Fast']"}), "(x='mode', y=deps_mod_[num], hue='speed', data=mode_df, ax=ax,\n hue_order=['Slow', 'Free', 'Fast'])\n", (64327, 64429), True, 'import seaborn as sns\n'), ((65205, 65229), 'scipy.stats.shapiro', 'stats.shapiro', (['Fem[item]'], {}), '(Fem[item])\n', (65218, 65229), True, 'import scipy.stats as stats\n'), ((65282, 65307), 'scipy.stats.shapiro', 'stats.shapiro', (['Male[item]'], {}), '(Male[item])\n', (65295, 65307), True, 'import scipy.stats as stats\n'), ((74541, 74555), 'numpy.ravel', 'np.ravel', (['axes'], {}), '(axes)\n', (74549, 74555), True, 'import numpy as np\n'), ((74570, 74685), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""Gender"""', 'y': 'deps_mod_[num]', 'hue': '"""speed"""', 'data': 'mode_df', 'ax': 'ax', 'hue_order': "['Slow', 'Free', 'Fast']"}), "(x='Gender', y=deps_mod_[num], hue='speed', data=mode_df, ax=ax,\n hue_order=['Slow', 'Free', 'Fast'])\n", (74581, 74685), True, 'import seaborn as sns\n'), ((75563, 75597), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '[8, 8]'}), '(1, 1, figsize=[8, 8])\n', (75575, 75597), True, 'import matplotlib.pyplot as plt\n'), ((75614, 75648), 'scikit_posthocs.sign_plot', 
'sp.sign_plot', (['info'], {}), '(info, **heatmap_args)\n', (75626, 75648), True, 'import scikit_posthocs as sp\n'), ((75980, 76000), 'scikit_posthocs.sign_table', 'sp.sign_table', (['dunn_'], {}), '(dunn_)\n', (75993, 76000), True, 'import scikit_posthocs as sp\n'), ((31363, 31396), 'pandas.DataFrame', 'pd.DataFrame', (['DJS_ind.reg_info_df'], {}), '(DJS_ind.reg_info_df)\n', (31375, 31396), True, 'import pandas as pd\n'), ((31424, 31451), 'pandas.DataFrame', 'pd.DataFrame', (['DJS_ind.areas'], {}), '(DJS_ind.areas)\n', (31436, 31451), True, 'import pandas as pd\n'), ((31603, 31653), 'pandas.concat', 'pd.concat', (['[df_turn_ind_all, best_df_turn]'], {'axis': '(0)'}), '([df_turn_ind_all, best_df_turn], axis=0)\n', (31612, 31653), True, 'import pandas as pd\n'), ((31693, 31748), 'pandas.concat', 'pd.concat', (['[total_work_ind_all, total_work_ind]'], {'axis': '(0)'}), '([total_work_ind_all, total_work_ind], axis=0)\n', (31702, 31748), True, 'import pandas as pd\n'), ((31782, 31828), 'pandas.concat', 'pd.concat', (['[reg_info_ind, DJS_ind.reg_info_df]'], {}), '([reg_info_ind, DJS_ind.reg_info_df])\n', (31791, 31828), True, 'import pandas as pd\n'), ((31856, 31892), 'pandas.concat', 'pd.concat', (['[work_ind, DJS_ind.areas]'], {}), '([work_ind, DJS_ind.areas])\n', (31865, 31892), True, 'import pandas as pd\n'), ((43844, 43881), 'scipy.stats.bartlett', 'stats.bartlett', (['Fem[item]', 'Male[item]'], {}), '(Fem[item], Male[item])\n', (43858, 43881), True, 'import scipy.stats as stats\n'), ((44565, 44638), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['Fem[item]', 'Male[item]'], {'equal_var': '(var_gender[item] > 0.05)'}), '(Fem[item], Male[item], equal_var=var_gender[item] > 0.05)\n', (44580, 44638), True, 'import scipy.stats as stats\n'), ((46545, 46584), 'scipy.stats.bartlett', 'stats.bartlett', (['adults[item]', 'old[item]'], {}), '(adults[item], old[item])\n', (46559, 46584), True, 'import scipy.stats as stats\n'), ((47178, 47251), 'scipy.stats.ttest_ind', 
'stats.ttest_ind', (['adults[item]', 'old[item]'], {'equal_var': '(var_ages[item] > 0.05)'}), '(adults[item], old[item], equal_var=var_ages[item] > 0.05)\n', (47193, 47251), True, 'import scipy.stats as stats\n'), ((51436, 51489), 'scipy.stats.bartlett', 'stats.bartlett', (['overground[item]', 'treadmill_adj[item]'], {}), '(overground[item], treadmill_adj[item])\n', (51450, 51489), True, 'import scipy.stats as stats\n'), ((52191, 52283), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['overground[item]', 'treadmill_adj[item]'], {'equal_var': '(var_mode[item] > 0.05)'}), '(overground[item], treadmill_adj[item], equal_var=var_mode[\n item] > 0.05)\n', (52206, 52283), True, 'import scipy.stats as stats\n'), ((53212, 53256), 'scipy.stats.bartlett', 'stats.bartlett', (['adults_o[item]', 'olds_o[item]'], {}), '(adults_o[item], olds_o[item])\n', (53226, 53256), True, 'import scipy.stats as stats\n'), ((53347, 53432), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['adults_o[item]', 'olds_o[item]'], {'equal_var': '(var_ages_m[item] > 0.05)'}), '(adults_o[item], olds_o[item], equal_var=var_ages_m[item] > 0.05\n )\n', (53362, 53432), True, 'import scipy.stats as stats\n'), ((54625, 54682), 'scipy.stats.mstats.kruskal', 'kruskal', (['olds_slow[item].values', 'adults_slow[item].values'], {}), '(olds_slow[item].values, adults_slow[item].values)\n', (54632, 54682), False, 'from scipy.stats.mstats import kruskal\n'), ((54806, 54863), 'scipy.stats.mstats.kruskal', 'kruskal', (['olds_free[item].values', 'adults_free[item].values'], {}), '(olds_free[item].values, adults_free[item].values)\n', (54813, 54863), False, 'from scipy.stats.mstats import kruskal\n'), ((55020, 55077), 'scipy.stats.mstats.kruskal', 'kruskal', (['olds_fast[item].values', 'adults_fast[item].values'], {}), '(olds_fast[item].values, adults_fast[item].values)\n', (55027, 55077), False, 'from scipy.stats.mstats import kruskal\n'), ((55647, 55720), 'scikit_posthocs.posthoc_dunn', 'sp.posthoc_dunn', (['olds_o'], 
{'val_col': 'item', 'group_col': '"""speed"""', 'p_adjust': '"""holm"""'}), "(olds_o, val_col=item, group_col='speed', p_adjust='holm')\n", (55662, 55720), True, 'import scikit_posthocs as sp\n'), ((56463, 56538), 'scikit_posthocs.posthoc_dunn', 'sp.posthoc_dunn', (['adults_o'], {'val_col': 'item', 'group_col': '"""speed"""', 'p_adjust': '"""holm"""'}), "(adults_o, val_col=item, group_col='speed', p_adjust='holm')\n", (56478, 56538), True, 'import scikit_posthocs as sp\n'), ((60881, 60937), 'scipy.stats.bartlett', 'stats.bartlett', (['O_slow[item]', 'O_free[item]', 'O_fast[item]'], {}), '(O_slow[item], O_free[item], O_fast[item])\n', (60895, 60937), True, 'import scipy.stats as stats\n'), ((61032, 61122), 'scipy.stats.bartlett', 'stats.bartlett', (['T_vslow[item]', 'T_slow[item]', 'T_free[item]', 'T_fast[item]', 'T_vfast[item]'], {}), '(T_vslow[item], T_slow[item], T_free[item], T_fast[item],\n T_vfast[item])\n', (61046, 61122), True, 'import scipy.stats as stats\n'), ((61654, 61724), 'scipy.stats.mstats.kruskal', 'kruskal', (['O_slow[item].values', 'O_free[item].values', 'O_fast[item].values'], {}), '(O_slow[item].values, O_free[item].values, O_fast[item].values)\n', (61661, 61724), False, 'from scipy.stats.mstats import kruskal\n'), ((61829, 61947), 'scipy.stats.mstats.kruskal', 'kruskal', (['T_vslow[item].values', 'T_slow[item].values', 'T_free[item].values', 'T_fast[item].values', 'T_vfast[item].values'], {}), '(T_vslow[item].values, T_slow[item].values, T_free[item].values,\n T_fast[item].values, T_vfast[item].values)\n', (61836, 61947), False, 'from scipy.stats.mstats import kruskal\n'), ((62094, 62164), 'scipy.stats.mstats.kruskal', 'kruskal', (['T_slow[item].values', 'T_free[item].values', 'T_fast[item].values'], {}), '(T_slow[item].values, T_free[item].values, T_fast[item].values)\n', (62101, 62164), False, 'from scipy.stats.mstats import kruskal\n'), ((62304, 62353), 'scipy.stats.mstats.kruskal', 'kruskal', (['O_slow[item].values', 'T_slow[item].values'], 
{}), '(O_slow[item].values, T_slow[item].values)\n', (62311, 62353), False, 'from scipy.stats.mstats import kruskal\n'), ((62519, 62568), 'scipy.stats.mstats.kruskal', 'kruskal', (['O_free[item].values', 'T_free[item].values'], {}), '(O_free[item].values, T_free[item].values)\n', (62526, 62568), False, 'from scipy.stats.mstats import kruskal\n'), ((62736, 62785), 'scipy.stats.mstats.kruskal', 'kruskal', (['O_fast[item].values', 'T_fast[item].values'], {}), '(O_fast[item].values, T_fast[item].values)\n', (62743, 62785), False, 'from scipy.stats.mstats import kruskal\n'), ((63221, 63298), 'scikit_posthocs.posthoc_dunn', 'sp.posthoc_dunn', (['overground'], {'val_col': 'item', 'group_col': '"""speed"""', 'p_adjust': '"""holm"""'}), "(overground, val_col=item, group_col='speed', p_adjust='holm')\n", (63236, 63298), True, 'import scikit_posthocs as sp\n'), ((63592, 63677), 'scikit_posthocs.posthoc_dunn', 'sp.posthoc_dunn', (['treadmill_adj'], {'val_col': 'item', 'group_col': '"""speed"""', 'p_adjust': '"""holm"""'}), "(treadmill_adj, val_col=item, group_col='speed', p_adjust='holm'\n )\n", (63607, 63677), True, 'import scikit_posthocs as sp\n'), ((69193, 69249), 'scipy.stats.bartlett', 'stats.bartlett', (['F_slow[item]', 'F_free[item]', 'F_fast[item]'], {}), '(F_slow[item], F_free[item], F_fast[item])\n', (69207, 69249), True, 'import scipy.stats as stats\n'), ((69406, 69462), 'scipy.stats.bartlett', 'stats.bartlett', (['M_slow[item]', 'M_free[item]', 'M_fast[item]'], {}), '(M_slow[item], M_free[item], M_fast[item])\n', (69420, 69462), True, 'import scipy.stats as stats\n'), ((70051, 70121), 'scipy.stats.mstats.kruskal', 'kruskal', (['F_slow[item].values', 'F_free[item].values', 'F_fast[item].values'], {}), '(F_slow[item].values, F_free[item].values, F_fast[item].values)\n', (70058, 70121), False, 'from scipy.stats.mstats import kruskal\n'), ((70282, 70352), 'scipy.stats.mstats.kruskal', 'kruskal', (['M_slow[item].values', 'M_free[item].values', 'M_fast[item].values'], 
{}), '(M_slow[item].values, M_free[item].values, M_fast[item].values)\n', (70289, 70352), False, 'from scipy.stats.mstats import kruskal\n'), ((70541, 70678), 'scipy.stats.mstats.kruskal', 'kruskal', (['M_slow[item].values', 'M_free[item].values', 'M_fast[item].values', 'F_slow[item].values', 'F_free[item].values', 'F_fast[item].values'], {}), '(M_slow[item].values, M_free[item].values, M_fast[item].values,\n F_slow[item].values, F_free[item].values, F_fast[item].values)\n', (70548, 70678), False, 'from scipy.stats.mstats import kruskal\n'), ((70948, 70997), 'scipy.stats.mstats.kruskal', 'kruskal', (['F_slow[item].values', 'M_slow[item].values'], {}), '(F_slow[item].values, M_slow[item].values)\n', (70955, 70997), False, 'from scipy.stats.mstats import kruskal\n'), ((71170, 71219), 'scipy.stats.mstats.kruskal', 'kruskal', (['F_free[item].values', 'M_free[item].values'], {}), '(F_free[item].values, M_free[item].values)\n', (71177, 71219), False, 'from scipy.stats.mstats import kruskal\n'), ((71362, 71411), 'scipy.stats.mstats.kruskal', 'kruskal', (['F_fast[item].values', 'M_fast[item].values'], {}), '(F_fast[item].values, M_fast[item].values)\n', (71369, 71411), False, 'from scipy.stats.mstats import kruskal\n'), ((73619, 73689), 'scikit_posthocs.posthoc_dunn', 'sp.posthoc_dunn', (['Fem'], {'val_col': 'item', 'group_col': '"""speed"""', 'p_adjust': '"""holm"""'}), "(Fem, val_col=item, group_col='speed', p_adjust='holm')\n", (73634, 73689), True, 'import scikit_posthocs as sp\n'), ((73991, 74062), 'scikit_posthocs.posthoc_dunn', 'sp.posthoc_dunn', (['Male'], {'val_col': 'item', 'group_col': '"""speed"""', 'p_adjust': '"""holm"""'}), "(Male, val_col=item, group_col='speed', p_adjust='holm')\n", (74006, 74062), True, 'import scikit_posthocs as sp\n'), ((66756, 66780), 'pandas.DataFrame', 'pd.DataFrame', (['trials_num'], {}), '(trials_num)\n', (66768, 66780), True, 'import pandas as pd\n'), ((48442, 48486), 'statsmodels.api.stats.anova_lm', 'sm.stats.anova_lm', 
(['results_ages[item]'], {'typ': '(2)'}), '(results_ages[item], typ=2)\n', (48459, 48486), True, 'import statsmodels.api as sm\n'), ((54119, 54149), 'scipy.stats.shapiro', 'stats.shapiro', (['data_ages[item]'], {}), '(data_ages[item])\n', (54132, 54149), True, 'import scipy.stats as stats\n'), ((57876, 57920), 'statsmodels.api.stats.anova_lm', 'sm.stats.anova_lm', (['results_mode[item]'], {'typ': '(2)'}), '(results_mode[item], typ=2)\n', (57893, 57920), True, 'import statsmodels.api as sm\n')] |
from os.path import join as pjoin
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
from settings import BASE_DIR
class WalkersOstrichEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
xml_path = pjoin(BASE_DIR, "environments", "assets", "WalkersOstrich.xml")
mujoco_env.MujocoEnv.__init__(self, xml_path, 5)
utils.EzPickle.__init__(self)
def step(self, a):
posbefore = self.sim.data.qpos[0]
self.do_simulation(a, self.frame_skip)
posafter, height, ang = self.sim.data.qpos[0:3]
alive_bonus = 1.0
reward = ((posafter - posbefore) / self.dt)
reward += alive_bonus
reward -= 1e-3 * np.square(a).sum()
done = not (
0.8 < height < 2.0 and
-1.0 < ang < 1.0 and
self.sim.data.site_xpos[0, 2] > 1.1
)
ob = self._get_obs()
return ob, reward, done, {}
def _get_obs(self):
qpos = self.sim.data.qpos
qvel = self.sim.data.qvel
return np.concatenate([qpos[1:], np.clip(qvel, -10, 10)])
def reset_model(self):
self.set_state(
self.init_qpos + self.np_random.uniform(low=-0.005, high=0.005, size=self.model.nq),
self.init_qvel + self.np_random.uniform(low=-0.005, high=0.005, size=self.model.nv)
)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.trackbodyid = 2
self.viewer.cam.distance = self.model.stat.extent * 0.5
self.viewer.cam.lookat[2] += 0.8
self.viewer.cam.elevation = -20
class WalkersHorseEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
xml_path = pjoin(BASE_DIR, "environments", "assets", "WalkersHorse.xml")
mujoco_env.MujocoEnv.__init__(self, xml_path, 5)
utils.EzPickle.__init__(self)
def step(self, action):
xposbefore = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
xposafter = self.sim.data.qpos[0]
ob = self._get_obs()
reward_ctrl = - 0.1 * np.square(action).sum()
reward_run = (xposafter - xposbefore) / self.dt
reward = reward_ctrl + reward_run
alive_bonus = 1
reward += alive_bonus
s = self.state_vector()
done = not (
np.isfinite(s).all() and (np.abs(s[2:]) < 100).all() and
self.sim.data.site_xpos[0, 2] > 0.7 and
self.sim.data.site_xpos[1, 2] > 0.7
)
return ob, reward, done, dict(reward_run=reward_run, reward_ctrl=reward_ctrl)
def _get_obs(self):
qpos = self.sim.data.qpos
qvel = self.sim.data.qvel
return np.concatenate([qpos[1:], np.clip(qvel, -10, 10)])
def reset_model(self):
self.set_state(
self.init_qpos + self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq),
self.init_qvel + self.np_random.randn(self.model.nv) * 0.1
)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.distance = self.model.stat.extent * 0.5
class WalkersKangarooEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
xml_path = pjoin(BASE_DIR, "environments", "assets", "WalkersKangaroo.xml")
mujoco_env.MujocoEnv.__init__(self, xml_path, 5)
utils.EzPickle.__init__(self)
def step(self, a):
posbefore = self.sim.data.qpos[0]
self.do_simulation(a, self.frame_skip)
posafter, height, ang = self.sim.data.qpos[0:3]
alive_bonus = 1.0
reward = ((posafter - posbefore) / self.dt) / 2.0
reward += alive_bonus
reward -= 1e-3 * np.square(a).sum()
done = not (
0.8 < height < 2.0 and -1.0 < ang < 1.0 and
0.8 < self.sim.data.site_xpos[0, 2] < 1.6
)
ob = self._get_obs()
return ob, reward, done, {}
def _get_obs(self):
qpos = self.sim.data.qpos
qvel = self.sim.data.qvel
return np.concatenate([qpos[1:], np.clip(qvel, -10, 10)])
def reset_model(self):
self.set_state(
self.init_qpos + self.np_random.uniform(low=-0.005, high=0.005, size=self.model.nq),
self.init_qvel + self.np_random.uniform(low=-0.005, high=0.005, size=self.model.nv)
)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.trackbodyid = 2
self.viewer.cam.distance = self.model.stat.extent * 0.5
self.viewer.cam.lookat[2] += 0.8
self.viewer.cam.elevation = -20
| [
"numpy.abs",
"gym.envs.mujoco.mujoco_env.MujocoEnv.__init__",
"numpy.square",
"numpy.isfinite",
"numpy.clip",
"os.path.join",
"gym.utils.EzPickle.__init__"
] | [((254, 317), 'os.path.join', 'pjoin', (['BASE_DIR', '"""environments"""', '"""assets"""', '"""WalkersOstrich.xml"""'], {}), "(BASE_DIR, 'environments', 'assets', 'WalkersOstrich.xml')\n", (259, 317), True, 'from os.path import join as pjoin\n'), ((327, 375), 'gym.envs.mujoco.mujoco_env.MujocoEnv.__init__', 'mujoco_env.MujocoEnv.__init__', (['self', 'xml_path', '(5)'], {}), '(self, xml_path, 5)\n', (356, 375), False, 'from gym.envs.mujoco import mujoco_env\n'), ((384, 413), 'gym.utils.EzPickle.__init__', 'utils.EzPickle.__init__', (['self'], {}), '(self)\n', (407, 413), False, 'from gym import utils\n'), ((1724, 1785), 'os.path.join', 'pjoin', (['BASE_DIR', '"""environments"""', '"""assets"""', '"""WalkersHorse.xml"""'], {}), "(BASE_DIR, 'environments', 'assets', 'WalkersHorse.xml')\n", (1729, 1785), True, 'from os.path import join as pjoin\n'), ((1795, 1843), 'gym.envs.mujoco.mujoco_env.MujocoEnv.__init__', 'mujoco_env.MujocoEnv.__init__', (['self', 'xml_path', '(5)'], {}), '(self, xml_path, 5)\n', (1824, 1843), False, 'from gym.envs.mujoco import mujoco_env\n'), ((1852, 1881), 'gym.utils.EzPickle.__init__', 'utils.EzPickle.__init__', (['self'], {}), '(self)\n', (1875, 1881), False, 'from gym import utils\n'), ((3231, 3295), 'os.path.join', 'pjoin', (['BASE_DIR', '"""environments"""', '"""assets"""', '"""WalkersKangaroo.xml"""'], {}), "(BASE_DIR, 'environments', 'assets', 'WalkersKangaroo.xml')\n", (3236, 3295), True, 'from os.path import join as pjoin\n'), ((3305, 3353), 'gym.envs.mujoco.mujoco_env.MujocoEnv.__init__', 'mujoco_env.MujocoEnv.__init__', (['self', 'xml_path', '(5)'], {}), '(self, xml_path, 5)\n', (3334, 3353), False, 'from gym.envs.mujoco import mujoco_env\n'), ((3362, 3391), 'gym.utils.EzPickle.__init__', 'utils.EzPickle.__init__', (['self'], {}), '(self)\n', (3385, 3391), False, 'from gym import utils\n'), ((1093, 1115), 'numpy.clip', 'np.clip', (['qvel', '(-10)', '(10)'], {}), '(qvel, -10, 10)\n', (1100, 1115), True, 'import numpy as np\n'), 
((2747, 2769), 'numpy.clip', 'np.clip', (['qvel', '(-10)', '(10)'], {}), '(qvel, -10, 10)\n', (2754, 2769), True, 'import numpy as np\n'), ((4067, 4089), 'numpy.clip', 'np.clip', (['qvel', '(-10)', '(10)'], {}), '(qvel, -10, 10)\n', (4074, 4089), True, 'import numpy as np\n'), ((716, 728), 'numpy.square', 'np.square', (['a'], {}), '(a)\n', (725, 728), True, 'import numpy as np\n'), ((2107, 2124), 'numpy.square', 'np.square', (['action'], {}), '(action)\n', (2116, 2124), True, 'import numpy as np\n'), ((3700, 3712), 'numpy.square', 'np.square', (['a'], {}), '(a)\n', (3709, 3712), True, 'import numpy as np\n'), ((2352, 2366), 'numpy.isfinite', 'np.isfinite', (['s'], {}), '(s)\n', (2363, 2366), True, 'import numpy as np\n'), ((2378, 2391), 'numpy.abs', 'np.abs', (['s[2:]'], {}), '(s[2:])\n', (2384, 2391), True, 'import numpy as np\n')] |
'''
Reaction Diffusion : Gray-Scott model
References:
----------
Complex Patterns in a Simple System
<NAME>, Science 261, 5118, 189-192, 1993.
Encode movie
------------
ffmpeg -r 30 -i "tmp-%03d.png" -c:v libx264 -crf 23 -pix_fmt yuv420p bacteria.mp4
'''
import numpy as np
import matplotlib.pyplot as plt

# Grid size and diffusion constant.
n, k = 100, 4
# Field with a one-cell padding border (rows: n+2, cols: 2n+2).
Z = np.zeros((n + 2, 2 * n + 2))
dt = 0.05

plt.ion()
size = 3 * np.array(Z.shape)
dpi = 72.0
figsize = size[1] / float(dpi), size[0] / float(dpi)
fig = plt.figure(figsize=figsize, dpi=dpi, facecolor="white")
fig.add_axes([0.0, 0.0, 1.0, 1.0], frameon=False)
im = plt.imshow(Z, interpolation='bicubic', cmap=plt.cm.hot)
plt.xticks([]), plt.yticks([])

for i in range(50000):
    # Five-point Laplacian of the interior cells (explicit Euler step).
    L = (Z[0:-2, 1:-1] +
         Z[1:-1, 0:-2] - 4 * Z[1:-1, 1:-1] + Z[1:-1, 2:] +
         Z[2:, 1:-1])
    Z[1:-1, 1:-1] += k * L * dt

    # Clamp a source (+1) and a sink (-1) strip on either side of column n.
    # BUG FIX: integer division is required here -- the original used n/2,
    # which is a float in Python 3 and raises TypeError as a slice index.
    Z[n // 2 - 20:n // 2 + 20, n - 5:n - 1] = 1
    Z[n // 2 - 20:n // 2 + 20, n + 1:n + 5] = -1

    # Redraw only every 30th step to keep the animation affordable.
    if i % 30 == 0:
        im.set_data(Z)
        im.set_clim(vmin=Z.min(), vmax=Z.max())
        plt.draw()
        # To make movie
        # plt.savefig("./tmp/tmp-%03d.png" % (i/10) ,dpi=dpi)

plt.ioff()
# plt.savefig("../figures/zebra.png",dpi=dpi)
# plt.savefig("../figures/bacteria.png",dpi=dpi)
plt.savefig("../figures/diffusion.png", dpi=dpi)
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"numpy.zeros",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.savefig"
] | [((328, 356), 'numpy.zeros', 'np.zeros', (['(n + 2, 2 * n + 2)'], {}), '((n + 2, 2 * n + 2))\n', (336, 356), True, 'import numpy as np\n'), ((361, 370), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (368, 370), True, 'import matplotlib.pyplot as plt\n'), ((463, 518), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'dpi': 'dpi', 'facecolor': '"""white"""'}), "(figsize=figsize, dpi=dpi, facecolor='white')\n", (473, 518), True, 'import matplotlib.pyplot as plt\n'), ((574, 629), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Z'], {'interpolation': '"""bicubic"""', 'cmap': 'plt.cm.hot'}), "(Z, interpolation='bicubic', cmap=plt.cm.hot)\n", (584, 629), True, 'import matplotlib.pyplot as plt\n'), ((1118, 1128), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (1126, 1128), True, 'import matplotlib.pyplot as plt\n'), ((1224, 1272), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../figures/diffusion.png"""'], {'dpi': 'dpi'}), "('../figures/diffusion.png', dpi=dpi)\n", (1235, 1272), True, 'import matplotlib.pyplot as plt\n'), ((1272, 1282), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1280, 1282), True, 'import matplotlib.pyplot as plt\n'), ((381, 398), 'numpy.array', 'np.array', (['Z.shape'], {}), '(Z.shape)\n', (389, 398), True, 'import numpy as np\n'), ((630, 644), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (640, 644), True, 'import matplotlib.pyplot as plt\n'), ((646, 660), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (656, 660), True, 'import matplotlib.pyplot as plt\n'), ((1020, 1030), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1028, 1030), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import sys, os
from matplotlib import pyplot as plt
from pyDOE import lhs
import torch
from torch.utils.data import TensorDataset, DataLoader
from torch.nn import Linear
import torch.nn as nn
import torch.nn.functional as F
import gpytorch
from gpytorch.means import ConstantMean
from gpytorch.kernels import RBFKernel, ScaleKernel, LinearKernel
from gpytorch.variational import VariationalStrategy, CholeskyVariationalDistribution
from gpytorch.distributions import MultivariateNormal, MultitaskMultivariateNormal
from gpytorch.models import AbstractVariationalGP, GP
from gpytorch.mlls import VariationalELBO, AddedLossTerm
from gpytorch.likelihoods import GaussianLikelihood, BernoulliLikelihood, SoftmaxLikelihood
from gpytorch.models.deep_gps import AbstractDeepGPLayer, AbstractDeepGP, DeepLikelihood
from gpytorch.lazy import BlockDiagLazyTensor, lazify
from scipy.special import erf, expit
from pyTrajectoryUtils.pyTrajectoryUtils.utils import *
# Values required for approximating the logistic sigmoid by
# error functions. coefs are obtained via:
# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
# b = logistic(x)
# A = (erf(np.dot(x, self.lambdas)) + 1) / 2
# coefs = lstsq(A, b)[0]
# Approximate \int log(z) * N(z | f_star, var_f_star)
# Approximation is due to Williams & Barber, "Bayesian Classification
# with Gaussian Processes", Appendix A: Approximate the logistic
# sigmoid by a linear combination of 5 error functions.
# For information on how this integral can be computed see
# blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
# Abscissae and least-squares coefficients for approximating the logistic
# sigmoid by a linear combination of five error functions (see the
# derivation recipe in the comment block above).  Column vectors so they
# broadcast against batched inputs.
LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array([-1854.8214151, 3516.89893646, 221.29346712,
                  128.12323805, -2010.49422654])[:, np.newaxis]
class MFDeepGPLayer(AbstractDeepGPLayer):
    """One fidelity level of a multi-fidelity deep GP.

    A variational GP layer whose input may carry the previous fidelity's
    output in its *leading* ``prev_dims`` columns.  When ``prev_dims > 0``
    the covariance is the composite
    ``k_input(x) + k_corr(x) * (k_prev(y_prev) + k_linear(y_prev))``,
    coupling the current inputs with the lower-fidelity prediction.

    NOTE(review): tensors are moved with ``.cuda()`` in ``covar``/``forward``,
    so CUDA availability is assumed.
    """

    def __init__(self, input_dims, output_dims, prev_dims=0, num_inducing=512, inducing_points=None, prev_layer=None):
        """Build the variational strategy and the kernels for this layer.

        Parameters
        ----------
        input_dims : int
            Number of raw input columns.
        output_dims : int or None
            Number of batched GP outputs (None -> unbatched).
        prev_dims : int
            Number of leading columns carrying the previous fidelity's
            output; 0 disables the coupling kernels.
        num_inducing : int
            Number of inducing points per output.
        inducing_points : torch.Tensor, optional
            Initial inducing locations; drawn from N(0, 1) when omitted.
        prev_layer : MFDeepGPLayer, optional
            Previous-fidelity layer (stored; only used in a commented-out
            alternative covariance below).
        """
        self.prev_dims = prev_dims
        # The layer actually operates on inputs + previous-fidelity columns.
        input_all_dims = input_dims + prev_dims
        # TODO
        if inducing_points is None:
            if output_dims is None:
                inducing_points = torch.randn(num_inducing, input_all_dims)
            else:
                inducing_points = torch.randn(output_dims, num_inducing, input_all_dims)
        variational_distribution = CholeskyVariationalDistribution(
            num_inducing_points=num_inducing,
            batch_shape=torch.Size([output_dims]) if output_dims is not None else torch.Size([])
        )
        variational_strategy = VariationalStrategy(
            self,
            inducing_points,
            variational_distribution,
            learn_inducing_locations=True
        )
        super(MFDeepGPLayer, self).__init__(variational_strategy, input_all_dims, output_dims)
        self.mean_module = ConstantMean(batch_size=output_dims)
        # Base kernel on the raw input columns only (ARD over input_dims).
        self.covar_module = ScaleKernel(
            RBFKernel(batch_size=output_dims, ard_num_dims=input_dims),
            batch_size=output_dims, ard_num_dims=None
        )
        self.prev_layer = prev_layer
        if prev_dims > 0:
            # Correlation kernel on the inputs, multiplying the kernels on
            # the previous fidelity's output (RBF + linear).
            self.covar_module_corr = ScaleKernel(
                RBFKernel(batch_size=output_dims, ard_num_dims=input_dims),
                batch_size=output_dims, ard_num_dims=None
            )
            self.covar_module_prev = ScaleKernel(
                RBFKernel(batch_size=output_dims, ard_num_dims=None),
                batch_size=output_dims, ard_num_dims=None
            )
            self.covar_module_linear = ScaleKernel(
                LinearKernel(batch_size=output_dims, ard_num_dims=None)
            )

    def covar(self, x):
        """Composite covariance over the concatenated [prev | input] columns."""
        # Columns [prev_dims, input_dims) are the raw inputs; the leading
        # [0, prev_dims) columns hold the previous fidelity's output.
        x_input = torch.index_select(x, -1, torch.arange(self.prev_dims,self.input_dims).long().cuda())
        x_prev = torch.index_select(x, -1, torch.arange(self.prev_dims).long().cuda())
        covar_x = self.covar_module(x_input)
        if self.prev_dims > 0:
            k_corr = self.covar_module_corr(x_input)
            k_prev = self.covar_module_prev(x_prev)
            # k_prev = self.prev_layer.covar(x_input)
            k_linear = self.covar_module_linear(x_prev)
            covar_x += k_corr*(k_prev + k_linear)
            # covar_x = k_corr*(k_prev)
        return covar_x

    def forward(self, x):
        """Return the layer's Gaussian over f(x); mean uses inputs only.

        Structure follows the emukit multi-fidelity deep GP example:
        https://github.com/amzn/emukit/blob/master/emukit/examples/multi_fidelity_dgp/multi_fidelity_deep_gp.py
        """
        x_input = torch.index_select(x, -1, torch.arange(self.prev_dims,self.input_dims).long().cuda())
        mean_x = self.mean_module(x_input) # self.linear_layer(x).squeeze(-1)
        covar_x = self.covar(x)
        return MultivariateNormal(mean_x, covar_x)

    def __call__(self, x, *other_inputs, **kwargs):
        """
        Overriding __call__ isn't strictly necessary, but it lets us add concatenation based skip connections
        easily. For example, hidden_layer2(hidden_layer1_outputs, inputs) will pass the concatenation of the first
        hidden layer's outputs and the input data to hidden_layer2.

        NOTE(review): **kwargs (e.g. ``eval=...`` passed by MFDeepGPC.forward)
        are accepted here but not forwarded to ``super().__call__``.
        """
        if len(other_inputs):
            if isinstance(x, gpytorch.distributions.MultitaskMultivariateNormal):
                # Sample the previous layer's output and broadcast the raw
                # inputs over the sample dimension before concatenating.
                x = x.rsample()
                processed_inputs = [
                    inp.unsqueeze(0).expand(x.shape[0], *inp.shape)
                    for inp in other_inputs
                ]
            else:
                processed_inputs = [
                    inp for inp in other_inputs
                ]
            x = torch.cat([x] + processed_inputs, dim=-1)
        return super().__call__(x, are_samples=bool(len(other_inputs)))
class MFDeepGPC(AbstractDeepGP):
    """Multi-fidelity deep GP classifier.

    Stacks one ``MFDeepGPLayer`` per fidelity level (``len(train_x)``
    levels) and wraps a Bernoulli likelihood in a ``DeepLikelihood``.
    ``train_x``/``train_y`` are sequences indexed by fidelity.

    NOTE(review): inducing points and random permutations are created with
    ``.cuda()`` -- CUDA availability is assumed throughout.
    """

    def __init__(self, train_x, train_y, num_inducing=512, input_uc=0):
        """
        Parameters
        ----------
        train_x : sequence of tensors, one per fidelity level.
        train_y : sequence of label tensors, one per fidelity level.
        num_inducing : int
            Inducing points per layer.
        input_uc : int
            ``prev_dims`` of the lowest-fidelity layer (extra leading
            input columns consumed by that layer's coupling kernels).
        """
        super().__init__()
        num_fidelity = len(train_x)
        train_x_shape = train_x[0].shape
        # Generate Inducing points - TODO check higher fidelity inducing points
        # Lowest fidelity: a random subset of its training inputs.  Higher
        # fidelities: previous-fidelity inputs concatenated with their labels
        # (matching the [prev-output | input] layout the layers expect).
        train_z = []
        i_z = torch.randperm(train_x[0].size(0)).cuda()[:num_inducing]
        z_low = train_x[0][i_z, :]
        setattr(self, 'train_z_' + str(0), z_low)
        train_z.append(z_low)
        for i in range(1,num_fidelity):
            i_z_low = torch.randperm(train_x[i-1].size(0)).cuda()[:num_inducing]
            z_high = torch.cat([train_x[i-1][i_z_low, :], train_y[i-1][i_z_low].unsqueeze(-1)], axis=1).unsqueeze(0)
            setattr(self, 'train_z_' + str(i), z_high)
            train_z.append(z_high)
        # Generate Multifidelity layers
        # (also stored via setattr so gpytorch's Module machinery sees them).
        self.layers = []
        layer = MFDeepGPLayer(
            input_dims=train_x_shape[-1],
            output_dims=1,
            prev_dims=input_uc,
            num_inducing=num_inducing,
            inducing_points=train_z[0]
        )
        setattr(self, 'layer_' + str(0), layer)
        self.layers.append(layer)
        for i in range(1,num_fidelity):
            layer = MFDeepGPLayer(
                input_dims=train_x_shape[-1],
                output_dims=1,
                prev_dims=1,
                num_inducing=num_inducing,
                inducing_points=train_z[i],
                prev_layer=self.layers[i-1]
            )
            setattr(self, 'layer_' + str(i), layer)
            self.layers.append(layer)
        self.likelihood = DeepLikelihood(BernoulliLikelihood())

    def forward(self, inputs, fidelity=2, eval=False):
        """Propagate ``inputs`` through the first ``fidelity`` layers.

        Each higher layer receives the previous layer's (sampled) output
        concatenated with the raw inputs.  NOTE(review): the ``eval`` kwarg
        is handed to each layer, but MFDeepGPLayer.__call__ does not forward
        its **kwargs, so it is effectively ignored there.
        """
        val = self.layers[0](inputs, eval=eval)
        for layer in self.layers[1:fidelity]:
            val = layer(val, inputs, eval=eval)
        val = MultivariateNormal(val.mean.squeeze(-1), val.lazy_covariance_matrix)
        return val

    def predict(self, x, fidelity=2):
        """Hard labels at ``x``: mean Bernoulli probability >= 0.5.

        Averages the per-sample Gaussians in the leading batch dimension
        of ``forward``'s output before applying the likelihood.
        """
        with gpytorch.settings.fast_computations(log_prob=False, solves=False), torch.no_grad():
            preds = self(x, fidelity=fidelity, eval=True)
            val = MultivariateNormal(preds.mean[0], preds.lazy_covariance_matrix[0])
            for i in range(1,preds.mean.shape[0]):
                val += MultivariateNormal(preds.mean[i], preds.lazy_covariance_matrix[i])
            val /= (preds.mean.shape[0])
            return self.likelihood.base_likelihood(val).mean.ge(0.5).cpu().numpy()

    def predict_proba(self, x, fidelity=2, return_std=False):
        """Latent mean/std and class probabilities at ``x``.

        Returns ``(mean, std)`` when ``return_std`` is True, otherwise
        ``(mean, std, probs)`` with ``probs`` of shape (n, 2).
        NOTE(review): the return arity is asymmetric by design of the
        original code -- callers should be aware.
        """
        with gpytorch.settings.fast_computations(log_prob=False, solves=False), torch.no_grad():
            preds = self(x, fidelity=fidelity, eval=True)
            val = MultivariateNormal(preds.mean[0], preds.lazy_covariance_matrix[0])
            for i in range(1,preds.mean.shape[0]):
                val += MultivariateNormal(preds.mean[i], preds.lazy_covariance_matrix[i])
            val /= (preds.mean.shape[0])
            pred_means = val.mean.cpu().numpy()
            pred_vars = val.variance.cpu().numpy()
            var_f_star = pred_vars
            f_star_min = pred_means
            bern = self.likelihood.base_likelihood(val)
            pi_star = bern.probs.cpu().numpy()
            if return_std:
                return f_star_min, np.sqrt(var_f_star)
            else:
                return f_star_min, np.sqrt(var_f_star), np.vstack((1 - pi_star, pi_star)).T

    def predict_proba_MF(self, x, fidelity=1, C_L=1., C_H=10., beta=0.05, return_std=False, return_all=False):
        """Lower-confidence-bound prediction: shifts the latent mean by
        ``-beta * std`` before applying the likelihood (pessimistic
        probabilities), alongside the unshifted quantities.

        NOTE(review): ``C_L`` and ``C_H`` are accepted but never used in
        this body -- presumably cost parameters handled by the caller.
        """
        with gpytorch.settings.fast_computations(log_prob=False, solves=False), torch.no_grad():
            preds = self(x, fidelity=fidelity, eval=True)
            val = MultivariateNormal(preds.mean[0], preds.lazy_covariance_matrix[0])
            for i in range(1,preds.mean.shape[0]):
                val += MultivariateNormal(preds.mean[i], preds.lazy_covariance_matrix[i])
            val /= (preds.mean.shape[0])
            pred_means = val.mean.cpu().numpy()
            pred_vars = val.variance.cpu().numpy()
            var_f_star = pred_vars
            # Pessimistic (LCB) latent mean and the plain latent mean.
            f_star_min = pred_means - beta*np.sqrt(pred_vars)
            f_star_min_i = pred_means
            val_mf = MultivariateNormal(val.mean-beta*torch.sqrt(val.variance), val.lazy_covariance_matrix)
            bern = self.likelihood.base_likelihood(val_mf)
            pi_star = bern.probs.cpu().numpy()
            bern_i = self.likelihood.base_likelihood(val)
            pi_star_i = bern_i.probs.cpu().numpy()
            if return_all:
                if return_std:
                    return f_star_min, np.sqrt(var_f_star), f_star_min_i, np.sqrt(var_f_star), np.vstack((1 - pi_star_i, pi_star_i)).T
                else:
                    return np.vstack((1 - pi_star, pi_star)).T, f_star_min_i, np.sqrt(var_f_star), np.vstack((1 - pi_star_i, pi_star_i)).T
            else:
                if return_std:
                    return f_star_min, np.sqrt(var_f_star)
                else:
                    return np.vstack((1 - pi_star, pi_star)).T
| [
"gpytorch.variational.VariationalStrategy",
"gpytorch.distributions.MultivariateNormal",
"gpytorch.settings.fast_computations",
"torch.sqrt",
"gpytorch.kernels.RBFKernel",
"torch.cat",
"torch.randn",
"numpy.array",
"torch.arange",
"gpytorch.means.ConstantMean",
"gpytorch.kernels.LinearKernel",
... | [((1638, 1677), 'numpy.array', 'np.array', (['[0.41, 0.4, 0.37, 0.44, 0.39]'], {}), '([0.41, 0.4, 0.37, 0.44, 0.39])\n', (1646, 1677), True, 'import numpy as np\n'), ((1701, 1790), 'numpy.array', 'np.array', (['[-1854.8214151, 3516.89893646, 221.29346712, 128.12323805, -2010.49422654]'], {}), '([-1854.8214151, 3516.89893646, 221.29346712, 128.12323805, -\n 2010.49422654])\n', (1709, 1790), True, 'import numpy as np\n'), ((2605, 2708), 'gpytorch.variational.VariationalStrategy', 'VariationalStrategy', (['self', 'inducing_points', 'variational_distribution'], {'learn_inducing_locations': '(True)'}), '(self, inducing_points, variational_distribution,\n learn_inducing_locations=True)\n', (2624, 2708), False, 'from gpytorch.variational import VariationalStrategy, CholeskyVariationalDistribution\n'), ((2895, 2931), 'gpytorch.means.ConstantMean', 'ConstantMean', ([], {'batch_size': 'output_dims'}), '(batch_size=output_dims)\n', (2907, 2931), False, 'from gpytorch.means import ConstantMean\n'), ((4729, 4764), 'gpytorch.distributions.MultivariateNormal', 'MultivariateNormal', (['mean_x', 'covar_x'], {}), '(mean_x, covar_x)\n', (4747, 4764), False, 'from gpytorch.distributions import MultivariateNormal, MultitaskMultivariateNormal\n'), ((7963, 8029), 'gpytorch.distributions.MultivariateNormal', 'MultivariateNormal', (['preds.mean[0]', 'preds.lazy_covariance_matrix[0]'], {}), '(preds.mean[0], preds.lazy_covariance_matrix[0])\n', (7981, 8029), False, 'from gpytorch.distributions import MultivariateNormal, MultitaskMultivariateNormal\n'), ((8525, 8591), 'gpytorch.distributions.MultivariateNormal', 'MultivariateNormal', (['preds.mean[0]', 'preds.lazy_covariance_matrix[0]'], {}), '(preds.mean[0], preds.lazy_covariance_matrix[0])\n', (8543, 8591), False, 'from gpytorch.distributions import MultivariateNormal, MultitaskMultivariateNormal\n'), ((9509, 9575), 'gpytorch.distributions.MultivariateNormal', 'MultivariateNormal', (['preds.mean[0]', 
'preds.lazy_covariance_matrix[0]'], {}), '(preds.mean[0], preds.lazy_covariance_matrix[0])\n', (9527, 9575), False, 'from gpytorch.distributions import MultivariateNormal, MultitaskMultivariateNormal\n'), ((2985, 3043), 'gpytorch.kernels.RBFKernel', 'RBFKernel', ([], {'batch_size': 'output_dims', 'ard_num_dims': 'input_dims'}), '(batch_size=output_dims, ard_num_dims=input_dims)\n', (2994, 3043), False, 'from gpytorch.kernels import RBFKernel, ScaleKernel, LinearKernel\n'), ((5597, 5638), 'torch.cat', 'torch.cat', (['([x] + processed_inputs)'], {'dim': '(-1)'}), '([x] + processed_inputs, dim=-1)\n', (5606, 5638), False, 'import torch\n'), ((7415, 7436), 'gpytorch.likelihoods.BernoulliLikelihood', 'BernoulliLikelihood', ([], {}), '()\n', (7434, 7436), False, 'from gpytorch.likelihoods import GaussianLikelihood, BernoulliLikelihood, SoftmaxLikelihood\n'), ((7798, 7863), 'gpytorch.settings.fast_computations', 'gpytorch.settings.fast_computations', ([], {'log_prob': '(False)', 'solves': '(False)'}), '(log_prob=False, solves=False)\n', (7833, 7863), False, 'import gpytorch\n'), ((7865, 7880), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7878, 7880), False, 'import torch\n'), ((8096, 8162), 'gpytorch.distributions.MultivariateNormal', 'MultivariateNormal', (['preds.mean[i]', 'preds.lazy_covariance_matrix[i]'], {}), '(preds.mean[i], preds.lazy_covariance_matrix[i])\n', (8114, 8162), False, 'from gpytorch.distributions import MultivariateNormal, MultitaskMultivariateNormal\n'), ((8368, 8433), 'gpytorch.settings.fast_computations', 'gpytorch.settings.fast_computations', ([], {'log_prob': '(False)', 'solves': '(False)'}), '(log_prob=False, solves=False)\n', (8403, 8433), False, 'import gpytorch\n'), ((8435, 8450), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8448, 8450), False, 'import torch\n'), ((8658, 8724), 'gpytorch.distributions.MultivariateNormal', 'MultivariateNormal', (['preds.mean[i]', 'preds.lazy_covariance_matrix[i]'], {}), '(preds.mean[i], 
preds.lazy_covariance_matrix[i])\n', (8676, 8724), False, 'from gpytorch.distributions import MultivariateNormal, MultitaskMultivariateNormal\n'), ((9340, 9405), 'gpytorch.settings.fast_computations', 'gpytorch.settings.fast_computations', ([], {'log_prob': '(False)', 'solves': '(False)'}), '(log_prob=False, solves=False)\n', (9375, 9405), False, 'import gpytorch\n'), ((9407, 9422), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9420, 9422), False, 'import torch\n'), ((9642, 9708), 'gpytorch.distributions.MultivariateNormal', 'MultivariateNormal', (['preds.mean[i]', 'preds.lazy_covariance_matrix[i]'], {}), '(preds.mean[i], preds.lazy_covariance_matrix[i])\n', (9660, 9708), False, 'from gpytorch.distributions import MultivariateNormal, MultitaskMultivariateNormal\n'), ((2194, 2235), 'torch.randn', 'torch.randn', (['num_inducing', 'input_all_dims'], {}), '(num_inducing, input_all_dims)\n', (2205, 2235), False, 'import torch\n'), ((2288, 2342), 'torch.randn', 'torch.randn', (['output_dims', 'num_inducing', 'input_all_dims'], {}), '(output_dims, num_inducing, input_all_dims)\n', (2299, 2342), False, 'import torch\n'), ((3247, 3305), 'gpytorch.kernels.RBFKernel', 'RBFKernel', ([], {'batch_size': 'output_dims', 'ard_num_dims': 'input_dims'}), '(batch_size=output_dims, ard_num_dims=input_dims)\n', (3256, 3305), False, 'from gpytorch.kernels import RBFKernel, ScaleKernel, LinearKernel\n'), ((3445, 3497), 'gpytorch.kernels.RBFKernel', 'RBFKernel', ([], {'batch_size': 'output_dims', 'ard_num_dims': 'None'}), '(batch_size=output_dims, ard_num_dims=None)\n', (3454, 3497), False, 'from gpytorch.kernels import RBFKernel, ScaleKernel, LinearKernel\n'), ((3639, 3694), 'gpytorch.kernels.LinearKernel', 'LinearKernel', ([], {'batch_size': 'output_dims', 'ard_num_dims': 'None'}), '(batch_size=output_dims, ard_num_dims=None)\n', (3651, 3694), False, 'from gpytorch.kernels import RBFKernel, ScaleKernel, LinearKernel\n'), ((9085, 9104), 'numpy.sqrt', 'np.sqrt', (['var_f_star'], 
{}), '(var_f_star)\n', (9092, 9104), True, 'import numpy as np\n'), ((9150, 9169), 'numpy.sqrt', 'np.sqrt', (['var_f_star'], {}), '(var_f_star)\n', (9157, 9169), True, 'import numpy as np\n'), ((9909, 9927), 'numpy.sqrt', 'np.sqrt', (['pred_vars'], {}), '(pred_vars)\n', (9916, 9927), True, 'import numpy as np\n'), ((2490, 2515), 'torch.Size', 'torch.Size', (['[output_dims]'], {}), '([output_dims])\n', (2500, 2515), False, 'import torch\n'), ((2548, 2562), 'torch.Size', 'torch.Size', (['[]'], {}), '([])\n', (2558, 2562), False, 'import torch\n'), ((9171, 9204), 'numpy.vstack', 'np.vstack', (['(1 - pi_star, pi_star)'], {}), '((1 - pi_star, pi_star))\n', (9180, 9204), True, 'import numpy as np\n'), ((10021, 10045), 'torch.sqrt', 'torch.sqrt', (['val.variance'], {}), '(val.variance)\n', (10031, 10045), False, 'import torch\n'), ((10377, 10396), 'numpy.sqrt', 'np.sqrt', (['var_f_star'], {}), '(var_f_star)\n', (10384, 10396), True, 'import numpy as np\n'), ((10412, 10431), 'numpy.sqrt', 'np.sqrt', (['var_f_star'], {}), '(var_f_star)\n', (10419, 10431), True, 'import numpy as np\n'), ((10565, 10584), 'numpy.sqrt', 'np.sqrt', (['var_f_star'], {}), '(var_f_star)\n', (10572, 10584), True, 'import numpy as np\n'), ((10702, 10721), 'numpy.sqrt', 'np.sqrt', (['var_f_star'], {}), '(var_f_star)\n', (10709, 10721), True, 'import numpy as np\n'), ((10763, 10796), 'numpy.vstack', 'np.vstack', (['(1 - pi_star, pi_star)'], {}), '((1 - pi_star, pi_star))\n', (10772, 10796), True, 'import numpy as np\n'), ((10433, 10470), 'numpy.vstack', 'np.vstack', (['(1 - pi_star_i, pi_star_i)'], {}), '((1 - pi_star_i, pi_star_i))\n', (10442, 10470), True, 'import numpy as np\n'), ((10514, 10547), 'numpy.vstack', 'np.vstack', (['(1 - pi_star, pi_star)'], {}), '((1 - pi_star, pi_star))\n', (10523, 10547), True, 'import numpy as np\n'), ((10586, 10623), 'numpy.vstack', 'np.vstack', (['(1 - pi_star_i, pi_star_i)'], {}), '((1 - pi_star_i, pi_star_i))\n', (10595, 10623), True, 'import numpy as np\n'), 
((3782, 3827), 'torch.arange', 'torch.arange', (['self.prev_dims', 'self.input_dims'], {}), '(self.prev_dims, self.input_dims)\n', (3794, 3827), False, 'import torch\n'), ((3885, 3913), 'torch.arange', 'torch.arange', (['self.prev_dims'], {}), '(self.prev_dims)\n', (3897, 3913), False, 'import torch\n'), ((4531, 4576), 'torch.arange', 'torch.arange', (['self.prev_dims', 'self.input_dims'], {}), '(self.prev_dims, self.input_dims)\n', (4543, 4576), False, 'import torch\n')] |
__author__ = "<NAME>"
"""
This script contains the implementation of submatrices calculation method as used
in Clifford & Clifford version B of the algorithm. C&C algorithm uses Laplace
expansion for the permanents in order to compute the set of probabilities in each
step of the algorithm. Instead of computing each permanent separately, we can
compute them all in one run, which is vastly more efficient.
Instead of using Ryser's formula or Glynn's formula, we base our approach on
Chin-Huh's formula, which takes into account possible bunching in the input states
(or rather can be interpreted as that).
"""
import abc
from numpy import complex128, ndarray, int64, array, nonzero, zeros, ones
import operator
from functools import reduce
from typing import List, Optional
class BSSubmatricesPermanentCalculatorInterface(abc.ABC):
    """
    This is the interface class for submatrices permanents calculator. For now
    BSCCCHSubmatricesPermanentCalculator is the only class of that kind, but it's
    possible that there will be more (based e.g. on Glynn's formula, which grants
    numerical stability).

    If the above happens, place the interface class in the separate file.
    """

    @abc.abstractmethod
    def compute_permanents(self) -> List[complex128]:
        """Compute the permanents of the relevant submatrices of the stored matrix."""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def matrix(self) -> ndarray:
        """The matrix whose submatrices' permanents are computed."""
        raise NotImplementedError

    @matrix.setter
    @abc.abstractmethod
    def matrix(self, matrix: ndarray) -> None:
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def input_state(self) -> ndarray:
        """Input state of the experiment (mode occupation numbers)."""
        raise NotImplementedError

    @input_state.setter
    @abc.abstractmethod
    def input_state(self, input_state: ndarray) -> None:
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def output_state(self) -> ndarray:
        """Output state of the experiment (mode occupation numbers)."""
        raise NotImplementedError

    @output_state.setter
    @abc.abstractmethod
    def output_state(self, output_state: ndarray) -> None:
        raise NotImplementedError
class BSSubmatricesPermanentCalculatorBase(
    BSSubmatricesPermanentCalculatorInterface, abc.ABC
):
    """
    Base class for BSSubmatricesPermanentCalculator classes. It takes care of some
    boilerplate code.

    Again, it should be put into separate file were the
    BSCCCHSubmatricesPermanentCalculator cease to be the only submatrices
    permanent calculator.
    """

    def __init__(
        self,
        matrix: ndarray,
        input_state: Optional[ndarray] = None,
        output_state: Optional[ndarray] = None,
    ) -> None:
        """Store the matrix and the (possibly empty) input/output states.

        CONSISTENCY FIX: assign through the property setters so the int64
        coercion they apply also holds for constructor arguments.  The
        original stored the raw objects, so the states' dtype depended on
        whether they came in via __init__ or via the setters.
        """
        if output_state is None:
            output_state = array([], dtype=int64)
        if input_state is None:
            input_state = array([], dtype=int64)
        self.matrix = matrix
        self.input_state = input_state
        self.output_state = output_state

    @property
    def matrix(self) -> ndarray:
        """The matrix whose submatrices' permanents will be computed."""
        return self._matrix

    @matrix.setter
    def matrix(self, matrix: ndarray) -> None:
        self._matrix = matrix

    @property
    def input_state(self) -> ndarray:
        """Input state (mode occupation numbers), stored as int64."""
        return self._input_state

    @input_state.setter
    def input_state(self, input_state: ndarray) -> None:
        self._input_state = array(input_state, dtype=int64)

    @property
    def output_state(self) -> ndarray:
        """Output state (mode occupation numbers), stored as int64."""
        return self._output_state

    @output_state.setter
    def output_state(self, output_state: ndarray) -> None:
        self._output_state = array(output_state, dtype=int64)
class BSCCCHSubmatricesPermanentCalculator(BSSubmatricesPermanentCalculatorBase):
    """
    The name stands for Boson Sampling Clifford & Clifford Chin-Huh submatrices
    permanent calculator, as it uses Clifford & Clifford approach to compute
    permanents of submatrices to compute sub-distribution of Boson Sampling problem
    instance. The starting point in our case is Chin-Huh permanent calculator
    iterated in Guan Codes induced order.
    """

    def __init__(
        self,
        matrix: ndarray,
        input_state: Optional[ndarray] = None,
        output_state: Optional[ndarray] = None,
    ) -> None:
        """Initialise the iteration state shared between the helper methods."""
        # Working state of the Guan-code iteration; these are (re)set by
        # compute_permanents and read by _add_permanent_addends.
        self.sums: dict = dict()
        self.permanents: List[complex128] = []
        self.multiplier: int = 1
        self.binomials_product: int = 1
        self.v_vector: ndarray = array(0)
        self.considered_columns_indices = array(0)
        super().__init__(matrix, input_state, output_state)

    def compute_permanents(self) -> List[complex128]:
        """Compute all submatrix permanents in a single Guan-code sweep.

        Returns one complex permanent per input mode.  The v-vector is
        iterated in Guan-code order so that each step changes exactly one
        coordinate, allowing the cached ``sums`` and ``binomials_product``
        to be updated incrementally (Chin-Huh style).
        """
        # TODO TR: This method is huge and complicated. It would be smart to break
        # it down into smaller ones.
        self.permanents = [complex128(0) for _ in range(len(self.input_state))]
        # Required for Guan Code iteration
        self.v_vector = zeros(len(self._input_state), dtype=int)  # g
        code_update_information = ones(len(self._input_state), dtype=int)  # u
        position_limits = list(self._input_state)  # n
        self.sums = dict()
        self.binomials_product = 1
        # Only columns with non-zero output occupation contribute factors.
        self.considered_columns_indices = nonzero(self._output_state)[0]
        self.multiplier = 1
        # Initialization (0-th step).
        for i in self.considered_columns_indices:
            self.sums[i] = 0
            for j in range(len(self._input_state)):
                self.sums[i] += self._input_state[j] * self._matrix[i][j]
        self._add_permanent_addends()
        # Rest of the steps.
        while self.v_vector[-1] <= position_limits[-1]:
            # UPDATE R VECTOR
            index_to_update = 0  # i
            updated_value_at_index = self.v_vector[0] + code_update_information[0]  # k
            while (
                updated_value_at_index > position_limits[index_to_update]
                or updated_value_at_index < 0
            ):
                # Flip this coordinate's direction and carry to the next one.
                code_update_information[index_to_update] = -code_update_information[
                    index_to_update
                ]
                index_to_update += 1
                if index_to_update == len(self.v_vector):
                    # Guan-code enumeration exhausted: apply the final
                    # 2^(N-1) normalisation and return.
                    for _ in range(int(sum(self.input_state)) - 1):
                        for i in range(len(self.permanents)):
                            self.permanents[i] /= 2
                    return self.permanents
                updated_value_at_index = (
                    self.v_vector[index_to_update]
                    + code_update_information[index_to_update]
                )
            last_value_at_index = self.v_vector[index_to_update]
            self.v_vector[index_to_update] = updated_value_at_index
            # END UPDATE
            # START PERMANENT UPDATE
            self.multiplier = -self.multiplier
            # Sums update
            for i in self.sums:
                self.sums[i] -= (
                    2
                    * (self.v_vector[index_to_update] - last_value_at_index)
                    * self.matrix[i][index_to_update]
                )
            # Binoms update
            if self.v_vector[index_to_update] > last_value_at_index:
                self.binomials_product *= (
                    self._input_state[index_to_update] - last_value_at_index
                ) / self.v_vector[index_to_update]
            else:
                self.binomials_product *= last_value_at_index / (
                    self._input_state[index_to_update] - self.v_vector[index_to_update]
                )
            self._add_permanent_addends()
        # Same 2^(N-1) normalisation on the fall-through exit path.
        for _ in range(int(sum(self._input_state)) - 1):
            for i in range(len(self.permanents)):
                self.permanents[i] /= 2
        return self.permanents

    def _add_permanent_addends(self) -> None:
        """Accumulate the current v-vector's addend into each permanent.

        For every input mode ``i`` whose occupation can still be reduced at
        the current v-vector, add the Chin-Huh term for the submatrix with
        one particle removed from mode ``i``, using the cached ``sums`` and
        ``binomials_product`` adjusted for that removal.
        """
        # For each occupied mode
        for i in range(len(self.input_state)):
            if self.input_state[i] == 0 or self.input_state[i] == self.v_vector[i]:
                continue
            # Compute update the sums
            updated_sums = {}
            for j in self.considered_columns_indices:
                updated_sums[j] = self.sums[j] - self.matrix[j][i]
            # Compute update binomial product
            updated_binom = self.binomials_product / (
                self.input_state[i] / (self.input_state[i] - self.v_vector[i])
            )
            addend = (
                self.multiplier
                * updated_binom
                * reduce(
                    operator.mul,
                    [
                        pow(updated_sums[j], self._output_state[j])
                        for j in self.considered_columns_indices
                    ],
                    1,
                )
            )
            self.permanents[i] += addend
| [
"numpy.complex128",
"numpy.array",
"numpy.nonzero"
] | [((3377, 3408), 'numpy.array', 'array', (['input_state'], {'dtype': 'int64'}), '(input_state, dtype=int64)\n', (3382, 3408), False, 'from numpy import complex128, ndarray, int64, array, nonzero, zeros, ones\n'), ((3611, 3643), 'numpy.array', 'array', (['output_state'], {'dtype': 'int64'}), '(output_state, dtype=int64)\n', (3616, 3643), False, 'from numpy import complex128, ndarray, int64, array, nonzero, zeros, ones\n'), ((4481, 4489), 'numpy.array', 'array', (['(0)'], {}), '(0)\n', (4486, 4489), False, 'from numpy import complex128, ndarray, int64, array, nonzero, zeros, ones\n'), ((4532, 4540), 'numpy.array', 'array', (['(0)'], {}), '(0)\n', (4537, 4540), False, 'from numpy import complex128, ndarray, int64, array, nonzero, zeros, ones\n'), ((2792, 2814), 'numpy.array', 'array', (['[]'], {'dtype': 'int64'}), '([], dtype=int64)\n', (2797, 2814), False, 'from numpy import complex128, ndarray, int64, array, nonzero, zeros, ones\n'), ((2873, 2895), 'numpy.array', 'array', (['[]'], {'dtype': 'int64'}), '([], dtype=int64)\n', (2878, 2895), False, 'from numpy import complex128, ndarray, int64, array, nonzero, zeros, ones\n'), ((4817, 4830), 'numpy.complex128', 'complex128', (['(0)'], {}), '(0)\n', (4827, 4830), False, 'from numpy import complex128, ndarray, int64, array, nonzero, zeros, ones\n'), ((5223, 5250), 'numpy.nonzero', 'nonzero', (['self._output_state'], {}), '(self._output_state)\n', (5230, 5250), False, 'from numpy import complex128, ndarray, int64, array, nonzero, zeros, ones\n')] |
import numpy as np
class BinarizationGNN:
"""
Graph Neural Network for binary graph classification.
parameters
----------
feature_dim : int, optional (default = 8)
Dimension of the feature vectors to each graph node.
learning_rate : float, optional (default = 0.0001)
Learning rate.
eps : float, optional (default = 0.001)
Used when calculating numerical gradient.
optimizer : 'SGD' | 'momentum', optional (default = 'momentum')
Optimizing algorithm. Possible values:
- 'SGD'
Stochastic Gradient Descent.
- 'momentum'
Momentum SGD.
momentum : float, optional (default = 0.9)
Used when optimizer == 'momentum'.
batch_size : int, optional (default = 10)
Batch size.
epoch : int, optional (defalult = 10)
Epoch.
aggregate_step : int, optional (default = 2)
Aggregation step T.
aggregate_feature : np.ndarray(feature_dim,) or None (default = None)
Initial feature vector when aggregating.
If None, default is np.array([1, 0, 0, 0, ....]).
aggregate_weight : np.ndarray(feature_dim, feature_dim) or None (default = None)
Initial weight W in aggregation.
If None, default is created by using aggregate_weight_param.
aggregate_weight_param : dict (key:: 'mu', 'sigma') (default = {'mu': 0, 'sigma': 0.4})
This parameter is used when aggregate_weight is None.
Initial weight W is initialized with a normal distribution with mean 'mu' and standard deviation 'sigma'.
aggregate_activate_func : 'sigmoid' | 'relu' | 'swish' (default = 'relu')
An Activation function when aggregating.
feature_vect_each_weight : np.ndarray(feature_dim) or None (default = None)
Initial weight A when calculating the weighted sum of feature vectors.
feature_vect_add_weight : float (default = 0)
Initial weight b when calculating the score of feature vectors.
"""
def __init__(self,
feature_dim: int=8,
learning_rate: float=0.0001,
eps: float=0.001,
optimizer :str='momentum',
momentum: float=0.9,
batch_size: int=10,
epoch: int=10,
aggregate_step: int=2,
aggregate_feature: np.ndarray=None,
aggregate_weight: np.ndarray=None,
aggregate_weight_param: dict={'mu': 0, 'sigma': 0.4},
aggregate_activate_func: str='relu',
feature_vect_each_weight: np.ndarray=None,
feature_vect_each_weight_param: dict={'mu': 0, 'sigma': 0.4},
feature_vect_add_weight: float=0):
self.feature_dim = feature_dim
self.learning_rate = learning_rate
self.eps = eps
self.optimizer = optimizer
self.momentum = momentum
self.batch_size = batch_size
self.epoch = epoch
self.aggregate_step = aggregate_step
if aggregate_feature is not None:
self.aggregate_feature = np.copy(aggregate_feature)
else:
self.aggregate_feature = np.zeros(feature_dim, dtype=np.float32)
self.aggregate_feature[0] = 1.
self.aggregate_weight_param = aggregate_weight_param
if aggregate_weight is not None:
self.aggregate_weight = np.copy(aggregate_weight)
else:
self.aggregate_weight = (np.random.randn(self.feature_dim, self.feature_dim) * self.aggregate_weight_param['sigma'] + self.aggregate_weight_param['mu']).astype(np.float32)
self.aggregate_activate_func = aggregate_activate_func
self.feature_vect_each_weight_param = feature_vect_each_weight_param
if feature_vect_add_weight:
self.feature_vect_each_weight = np.copy(feature_vect_each_weight)
else:
self.feature_vect_each_weight = (np.random.randn(self.feature_dim) * self.feature_vect_each_weight_param['sigma'] + self.feature_vect_each_weight_param['mu']).astype(np.float32)
self.feature_vect_add_weight = feature_vect_add_weight
self.learning_params = [self.aggregate_weight, self.feature_vect_each_weight, self.feature_vect_add_weight]
self.aggregate_weight_d = 0
self.feature_vect_each_weight_d = 0
self.feature_vect_add_weight_d = 0
self.learning_params_d = [self.aggregate_weight_d, self.feature_vect_each_weight_d, self.feature_vect_add_weight_d]
def _aggregate(self, graph: np.ndarray, learning_params: list=None) -> np.ndarray:
"""
Aggregate some steps and return head out.
parameters
----------
graph : np.ndarray
Single graph.
learning_params : list, optional (default = None)
Learing parameters.
If None, defalut is self.learning_params.
returns
----------
head_out : np.ndarray(feature_dim)
The head out of the aggregation.
"""
graph = np.copy(graph).astype(np.float32)
if not learning_params:
learning_params = self.learning_params
weight = np.copy(learning_params[0])
if self.aggregate_activate_func == 'sigmoid':
f = lambda X: (np.tanh(X / 2.) + 1.) / 2.
if self.aggregate_activate_func == 'relu':
f = lambda X: np.maximum(X, 0)
if self.aggregate_activate_func == 'swish':
f = lambda X: X * (np.tanh(X / 2.) + 1.) / 2.
n = graph.shape[0]
X = np.copy(self.aggregate_feature)
X = np.tile(X, (n, 1))
for _ in range(self.aggregate_step):
A = np.dot(graph, X)
X = np.dot(A, weight)
X = f(X)
# HEADOUT
head_out = np.sum(X, axis=0)
return head_out
def _rawscore_one(self, graph: np.ndarray, learning_params: list=None) -> float:
"""
Calcurate score after aggregating step.
parameters
----------
graph : np.ndarray
Single graph.
learning_params : list, optional (default = None)
Learing parameters.
If None, defalut is self.learning_params.
returns
----------
s : float
The value of the score.
"""
if not learning_params:
learning_params = self.learning_params
h = self._aggregate(graph, learning_params)
feature_vect_each_weight, feature_vect_add_weight = learning_params[1:]
s = np.dot(feature_vect_each_weight, h) + feature_vect_add_weight
return s
def _predict_one(self, graph: np.ndarray) -> bool:
"""
Predict a label of a single graph.
parameters
----------
graph : np.ndarray
Single graph.
returns
----------
s > 0 : bool
The predicted label.
"""
s = self._rawscore_one(graph)
return s > 0
def _loss_one(self, graph: np.ndarray, label: bool, learning_params: list=None) -> float:
"""
Calculate the loss score of a single graph.
parameters
----------
graph : np.ndarray
Single graph.
label : bool
The correct answer label of the graph.
learning_params : list, optional (default = None)
Learing parameters.
If None, defalut is self.learning_params.
returns
----------
loss : float
The value of the loss.
"""
label = float(label)
s = self._rawscore_one(graph, learning_params)
if -100 < s < 100:
loss = label * np.log(1 + np.exp(-s)) + (1 - label) * np.log(1 + np.exp(s))
elif s < 0:
loss = label * s + (1 - label) * np.log(1 + np.exp(s))
else:
loss = label * np.log(1 + np.exp(-s)) + (1 - label) * s
return loss
def loss(self, graphs: np.ndarray, labels: list) -> float:
"""
Calculate the avarage loss score of a single graph.
parameters
----------
graphs : np.ndarray
Correction of graphs.
labels : list
The list of correct answer labels of the graphs.
learning_params : list, optional (default = None)
Learing parameters.
If None, defalut is self.learning_params.
returns
----------
loss : float
The value of the loss.
"""
loss = 0
n = len(labels)
for graph, label in zip(graphs, labels):
loss += self._loss_one(graph, label) / n
return loss
def _gradient_one(self, graph: np.ndarray, label: bool) -> list:
"""
Calculate the gradient of the loss score of a single graph.
parameters
----------
graph : np.ndarray
Single graph.
label : bool
The correct answer label of the graph.
learning_params : list, optional (default = None)
Learing parameters.
If None, defalut is self.learning_params.
returns
----------
g_aggregate_weight : np.ndarray
Gradient of the aggregate_weight parameters.
g_feature_vect_each_weight : np.ndarray
Gradient of the feature_vect_each_weight parameters.
g_feature_vect_add_weight : float
Gradient of the feature_vect_add_weight parameters.
"""
loss = self._loss_one(graph, label)
aggregate_weight, feature_vect_each_weight, feature_vect_add_weight = self.learning_params
d1, d2 = aggregate_weight.shape
g_aggregate_weight = np.zeros_like(aggregate_weight)
for i in range(d1):
for j in range(d2):
plus = np.copy(aggregate_weight)
plus[i, j] += self.eps
learning_params = [plus, feature_vect_each_weight, feature_vect_add_weight]
lossplus = self._loss_one(graph, label, learning_params)
diff = (lossplus - loss) / self.eps
g_aggregate_weight[i, j] = diff
d, = feature_vect_each_weight.shape
g_feature_vect_each_weight = np.zeros_like(feature_vect_each_weight)
for i in range(d):
plus = np.copy(feature_vect_each_weight)
plus[i] += self.eps
learning_params = [aggregate_weight, plus, feature_vect_add_weight]
lossplus = self._loss_one(graph, label, learning_params)
diff = (lossplus - loss) / self.eps
g_feature_vect_each_weight[i] = diff
plus = feature_vect_add_weight + self.eps
learning_params = [aggregate_weight, feature_vect_each_weight, plus]
lossplus = self._loss_one(graph, label, learning_params)
diff = (lossplus - loss) / self.eps
g_feature_vect_add_weight = diff
return g_aggregate_weight, g_feature_vect_each_weight, g_feature_vect_add_weight
    def _optimize(self, graphs: np.ndarray, labels: list):
        """
        Average the per-sample gradients over one mini-batch and take one
        optimizer step on the learnable parameters, in place.

        parameters
        ----------
        graphs : np.ndarray
            Collection of graphs (the mini-batch).
        labels : list
            The list of correct answer labels of the graphs.
        """
        n = graphs.shape[0]
        delta_aggregate = 0
        delta_feature_vect_each = 0
        delta_feature_vect_add = 0
        # Accumulate the batch-mean gradient one sample at a time.
        for graph, label in zip(graphs, labels):
            g_aggregate_weight, g_feature_vect_each_weight, g_feature_vect_add_weight = self._gradient_one(graph, label)
            delta_aggregate += g_aggregate_weight / n
            delta_feature_vect_each += g_feature_vect_each_weight / n
            delta_feature_vect_add += g_feature_vect_add_weight / n
        # NOTE(review): any optimizer value other than 'SGD' / 'momentum'
        # silently performs no update at all -- consider raising instead.
        if self.optimizer == 'SGD':
            self.aggregate_weight_d = -self.learning_rate * delta_aggregate
            self.feature_vect_each_weight_d = -self.learning_rate * delta_feature_vect_each
            self.feature_vect_add_weight_d = -self.learning_rate * delta_feature_vect_add
            # `+=` mutates the ndarrays in place, so self.learning_params
            # (which aliases them) stays current for the array entries.
            self.aggregate_weight += self.aggregate_weight_d
            self.feature_vect_each_weight += self.feature_vect_each_weight_d
            self.feature_vect_add_weight += self.feature_vect_add_weight_d
        if self.optimizer == 'momentum':
            # Classic momentum: velocity = -lr * grad + momentum * velocity.
            self.aggregate_weight_d = -self.learning_rate * delta_aggregate + self.momentum * self.aggregate_weight_d
            self.feature_vect_each_weight_d = -self.learning_rate * delta_feature_vect_each + self.momentum * self.feature_vect_each_weight_d
            self.feature_vect_add_weight_d = -self.learning_rate * delta_feature_vect_add + self.momentum * self.feature_vect_add_weight_d
            self.aggregate_weight += self.aggregate_weight_d
            self.feature_vect_each_weight += self.feature_vect_each_weight_d
            self.feature_vect_add_weight += self.feature_vect_add_weight_d
        # Rebuild both lists: scalar entries (add_weight and the *_d values)
        # are rebound rather than mutated, so the old lists are stale.
        self.learning_params = [self.aggregate_weight, self.feature_vect_each_weight, self.feature_vect_add_weight]
        self.learning_params_d = [self.aggregate_weight_d, self.feature_vect_each_weight_d, self.feature_vect_add_weight_d]
def fit(self, graphs: np.ndarray, labels: list):
"""
Fit the data.
IF YOU CALL THIS METHOD TWO OR MORE TIMES, YOU CAN FIT ADDITIONAL EPOCH.
parameters
----------
graphs : np.ndarray
Correction of graphs.
labels : list
The list of correct answer labels of the graphs.
"""
num = graphs.shape[0]
for _ in range(self.epoch):
shuffle_idx = np.random.permutation(np.arange(num))
shuffle_graphs = graphs[shuffle_idx]
shuffle_labels = np.array(labels)[shuffle_idx].tolist()
for i in range(num // self.batch_size):
batch_graphs = shuffle_graphs[i*self.batch_size:(i+1)*self.batch_size]
batch_labels = shuffle_labels[i*self.batch_size:(i+1)*self.batch_size]
self._optimize(batch_graphs, batch_labels)
def predict(self, graphs: np.ndarray) -> list:
"""
Predict the labels of the given graphs.
parameters
----------
graphs : np.ndarray
Correction of graphs.
returns
----------
labels : list
List of the labels.
"""
labels = list()
for graph in graphs:
labels.append(self._predict_one(graph))
return labels
def predict_prob(self, graphs: np.ndarray, labels: list) -> float:
"""
Accuracy of the predict.
parameters
----------
graphs : np.ndarray
Correction of graphs.
labels : list
The list of correct answer labels of the graphs.
returns
----------
prob : float
Correct answer rate.
"""
predict_labels = self.predict(graphs)
n = len(labels)
prob = sum([l == p for l, p in zip(labels, predict_labels)]) / n
return prob
| [
"numpy.zeros_like",
"numpy.sum",
"numpy.maximum",
"numpy.copy",
"numpy.tanh",
"numpy.random.randn",
"numpy.zeros",
"numpy.arange",
"numpy.tile",
"numpy.array",
"numpy.exp",
"numpy.dot"
] | [((5229, 5256), 'numpy.copy', 'np.copy', (['learning_params[0]'], {}), '(learning_params[0])\n', (5236, 5256), True, 'import numpy as np\n'), ((5621, 5652), 'numpy.copy', 'np.copy', (['self.aggregate_feature'], {}), '(self.aggregate_feature)\n', (5628, 5652), True, 'import numpy as np\n'), ((5665, 5683), 'numpy.tile', 'np.tile', (['X', '(n, 1)'], {}), '(X, (n, 1))\n', (5672, 5683), True, 'import numpy as np\n'), ((5854, 5871), 'numpy.sum', 'np.sum', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (5860, 5871), True, 'import numpy as np\n'), ((9818, 9849), 'numpy.zeros_like', 'np.zeros_like', (['aggregate_weight'], {}), '(aggregate_weight)\n', (9831, 9849), True, 'import numpy as np\n'), ((10345, 10384), 'numpy.zeros_like', 'np.zeros_like', (['feature_vect_each_weight'], {}), '(feature_vect_each_weight)\n', (10358, 10384), True, 'import numpy as np\n'), ((3136, 3162), 'numpy.copy', 'np.copy', (['aggregate_feature'], {}), '(aggregate_feature)\n', (3143, 3162), True, 'import numpy as np\n'), ((3214, 3253), 'numpy.zeros', 'np.zeros', (['feature_dim'], {'dtype': 'np.float32'}), '(feature_dim, dtype=np.float32)\n', (3222, 3253), True, 'import numpy as np\n'), ((3436, 3461), 'numpy.copy', 'np.copy', (['aggregate_weight'], {}), '(aggregate_weight)\n', (3443, 3461), True, 'import numpy as np\n'), ((3880, 3913), 'numpy.copy', 'np.copy', (['feature_vect_each_weight'], {}), '(feature_vect_each_weight)\n', (3887, 3913), True, 'import numpy as np\n'), ((5745, 5761), 'numpy.dot', 'np.dot', (['graph', 'X'], {}), '(graph, X)\n', (5751, 5761), True, 'import numpy as np\n'), ((5778, 5795), 'numpy.dot', 'np.dot', (['A', 'weight'], {}), '(A, weight)\n', (5784, 5795), True, 'import numpy as np\n'), ((6630, 6665), 'numpy.dot', 'np.dot', (['feature_vect_each_weight', 'h'], {}), '(feature_vect_each_weight, h)\n', (6636, 6665), True, 'import numpy as np\n'), ((10431, 10464), 'numpy.copy', 'np.copy', (['feature_vect_each_weight'], {}), '(feature_vect_each_weight)\n', (10438, 10464), True, 
'import numpy as np\n'), ((5095, 5109), 'numpy.copy', 'np.copy', (['graph'], {}), '(graph)\n', (5102, 5109), True, 'import numpy as np\n'), ((5452, 5468), 'numpy.maximum', 'np.maximum', (['X', '(0)'], {}), '(X, 0)\n', (5462, 5468), True, 'import numpy as np\n'), ((9933, 9958), 'numpy.copy', 'np.copy', (['aggregate_weight'], {}), '(aggregate_weight)\n', (9940, 9958), True, 'import numpy as np\n'), ((13867, 13881), 'numpy.arange', 'np.arange', (['num'], {}), '(num)\n', (13876, 13881), True, 'import numpy as np\n'), ((5347, 5363), 'numpy.tanh', 'np.tanh', (['(X / 2.0)'], {}), '(X / 2.0)\n', (5354, 5363), True, 'import numpy as np\n'), ((13961, 13977), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (13969, 13977), True, 'import numpy as np\n'), ((3513, 3564), 'numpy.random.randn', 'np.random.randn', (['self.feature_dim', 'self.feature_dim'], {}), '(self.feature_dim, self.feature_dim)\n', (3528, 3564), True, 'import numpy as np\n'), ((3973, 4006), 'numpy.random.randn', 'np.random.randn', (['self.feature_dim'], {}), '(self.feature_dim)\n', (3988, 4006), True, 'import numpy as np\n'), ((5552, 5568), 'numpy.tanh', 'np.tanh', (['(X / 2.0)'], {}), '(X / 2.0)\n', (5559, 5568), True, 'import numpy as np\n'), ((7805, 7815), 'numpy.exp', 'np.exp', (['(-s)'], {}), '(-s)\n', (7811, 7815), True, 'import numpy as np\n'), ((7844, 7853), 'numpy.exp', 'np.exp', (['s'], {}), '(s)\n', (7850, 7853), True, 'import numpy as np\n'), ((7931, 7940), 'numpy.exp', 'np.exp', (['s'], {}), '(s)\n', (7937, 7940), True, 'import numpy as np\n'), ((7994, 8004), 'numpy.exp', 'np.exp', (['(-s)'], {}), '(-s)\n', (8000, 8004), True, 'import numpy as np\n')] |
import os
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from utils.joint_transforms import Compose, JointResize, RandomHorizontallyFlip, RandomRotate
from utils.misc import construct_print
# Per-channel RGB mean / std on the [0, 1] scale.
# NOTE(review): unused in this chunk -- the transforms below hard-code the
# ImageNet statistics instead; confirm whether these are still needed.
mean_rgb = np.array([0.447, 0.407, 0.386])
std_rgb = np.array([0.244, 0.250, 0.253])
def _get_ext(path_list):
ext_list = list(set([os.path.splitext(p)[1] for p in path_list]))
if len(ext_list) != 1:
if '.png' in ext_list:
ext = '.png'
elif '.jpg' in ext_list:
ext = '.jpg'
elif '.bmp' in ext_list:
ext = '.bmp'
else:
raise NotImplementedError
construct_print(f"数据文件夹中包含多种扩展名,这里仅使用{ext}")
elif len(ext_list) == 1:
ext = ext_list[0]
else:
raise NotImplementedError
return ext
def _make_unlabeled_dataset(root, split):
    """
    Build (image_path, depth_path) pairs for an unlabeled <split>
    (directories <split>_images and <split>_depth under *root*; no masks).
    File extensions are detected per directory via _get_ext.
    """
    img_dir = os.path.join(root, split + '_images')
    depth_dir = os.path.join(root, split + '_depth')
    img_files = os.listdir(img_dir)
    depth_files = os.listdir(depth_dir)
    img_ext = _get_ext(img_files)
    depth_ext = _get_ext(depth_files)
    # Pair every image stem with the same-named depth file.
    stems = [os.path.splitext(f)[0] for f in img_files if f.endswith(img_ext)]
    return [(os.path.join(img_dir, stem + img_ext),
             os.path.join(depth_dir, stem + depth_ext))
            for stem in stems]
def _make_dataset(root, split):
    """
    Build (image_path, depth_path, mask_path) triples for a labeled <split>.

    Stems are taken from the masks directory, so only samples that actually
    have a ground-truth mask are included.
    """
    img_dir = os.path.join(root, split + '_images')
    depth_dir = os.path.join(root, split + '_depth')
    mask_dir = os.path.join(root, split + '_masks')
    img_files = os.listdir(img_dir)
    depth_files = os.listdir(depth_dir)
    mask_files = os.listdir(mask_dir)
    img_ext = _get_ext(img_files)
    depth_ext = _get_ext(depth_files)
    mask_ext = _get_ext(mask_files)
    stems = [os.path.splitext(f)[0] for f in mask_files if f.endswith(mask_ext)]
    triples = []
    for stem in stems:
        triples.append((os.path.join(img_dir, stem + img_ext),
                        os.path.join(depth_dir, stem + depth_ext),
                        os.path.join(mask_dir, stem + mask_ext)))
    return triples
def _make_fdp_dataset(root):
    """
    Build (image_path, depth_path, mask_path) triples for the FDP layout,
    i.e. fixed subfolders RGB/, depth/ and GT/ directly under *root*.
    Stems come from GT/, so only samples with ground truth are kept.
    """
    img_dir = os.path.join(root, 'RGB')
    depth_dir = os.path.join(root, 'depth')
    mask_dir = os.path.join(root, 'GT')
    img_files = os.listdir(img_dir)
    depth_files = os.listdir(depth_dir)
    mask_files = os.listdir(mask_dir)
    img_ext = _get_ext(img_files)
    depth_ext = _get_ext(depth_files)
    mask_ext = _get_ext(mask_files)
    stems = [os.path.splitext(f)[0] for f in mask_files if f.endswith(mask_ext)]
    return [(os.path.join(img_dir, stem + img_ext),
             os.path.join(depth_dir, stem + depth_ext),
             os.path.join(mask_dir, stem + mask_ext))
            for stem in stems]
def _read_list_from_file(list_filepath):
img_list = []
with open(list_filepath, mode='r', encoding='utf-8') as openedfile:
line = openedfile.readline()
while line:
img_list.append(line.split()[0])
line = openedfile.readline()
return img_list
def _make_test_dataset_from_list(list_filepath, prefix=('.jpg', '.png')):
    """
    Build (image_path, mask_path) pairs from a list file of bare sample
    paths: each entry <dir>/<stem> maps to <dir>/test_images/<stem><prefix[0]>
    and <dir>/test_masks/<stem><prefix[1]>.
    """
    pairs = []
    for img_path in _read_list_from_file(list_filepath):
        parent = os.path.dirname(img_path)
        stem = os.path.basename(img_path)
        pairs.append((os.path.join(parent, 'test_images', stem + prefix[0]),
                      os.path.join(parent, 'test_masks', stem + prefix[1])))
    return pairs
class TestImageFolder(Dataset):
    """
    Test-time dataset yielding (img, depth, mask_path, img_name).
    Images are resized and ImageNet-normalized; depth maps are min-max
    rescaled to [0, 255] before ToTensor maps them into [0, 1].
    """
    def __init__(self, root, in_size, prefix):
        # *root* is either a dataset directory or a list file of sample paths.
        if os.path.isdir(root):
            construct_print(f"{root} is an image folder, we will test on it.")
            self.imgs = _make_dataset(root, split = 'test')
        elif os.path.isfile(root):
            construct_print(f"{root} is a list of images, we will use these paths to read the "
                            f"corresponding image")
            # NOTE(review): _make_test_dataset_from_list returns (img, mask)
            # 2-tuples, but __getitem__ below unpacks 3 values -- this branch
            # looks broken; confirm before using list-file input.
            self.imgs = _make_test_dataset_from_list(root, prefix=prefix)
        else:
            raise NotImplementedError
        self.test_img_trainsform = transforms.Compose([
            # A tuple forces the exact (h, w) output; a single int would
            # instead scale the shorter side to that value.
            transforms.Resize((in_size, in_size)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        self.test_depth_trainsform = transforms.Compose([
            transforms.Resize((in_size, in_size)),
            transforms.ToTensor()
        ])
    def __getitem__(self, index):
        img_path, depth_path, mask_path = self.imgs[index]
        img = Image.open(img_path).convert('RGB')
        depth = Image.open(depth_path).convert('L')
        depth = np.asarray(depth)
        # Min-max rescale depth to [0, 255]; the epsilon guards a flat map.
        depth = (depth - np.min(depth)) / (np.max(depth) - np.min(depth) + 1.0e-6) * 255.0
        depth = Image.fromarray(depth.astype(np.uint8)) # ToTensor maps 255 -> [0, 1] automatically
        img_name = (img_path.split(os.sep)[-1]).split('.')[0]  # file stem
        img = self.test_img_trainsform(img).float()
        depth = self.test_depth_trainsform(depth).float()
        # depth = (depth - torch.min(depth)) / (torch.max(depth) - torch.min(depth) + torch.tensor(1.0e-6)) * torch.tensor(255.0)
        return img, depth, mask_path, img_name
    def __len__(self):
        return len(self.imgs)
class TestFDPImageFolder(Dataset):
    """
    Test-time dataset for the FDP layout (RGB/, depth/, GT/ subfolders),
    yielding (img, depth, mask_path, img_name).  Unlike TestImageFolder,
    depth is NOT min-max rescaled here (the rescale line is disabled).
    """
    def __init__(self, root, in_size, prefix):
        if os.path.isdir(root):
            construct_print(f"{root} is an image folder, we will test on it.")
            self.imgs = _make_fdp_dataset(root)
        elif os.path.isfile(root):
            raise NotImplementedError
        else:
            raise NotImplementedError
        self.test_img_trainsform = transforms.Compose([
            # A tuple forces the exact (h, w) output size.
            transforms.Resize((in_size, in_size)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        self.test_depth_trainsform = transforms.Compose([
            transforms.Resize((in_size, in_size)),
            transforms.ToTensor()
        ])
    def __getitem__(self, index):
        img_path, depth_path, mask_path = self.imgs[index]
        img = Image.open(img_path).convert('RGB')
        depth = Image.open(depth_path).convert('L')
        depth = np.asarray(depth)
        # depth = (depth - np.min(depth)) / (np.max(depth) - np.min(depth) + 1.0e-6) * 255.0
        depth = Image.fromarray(depth.astype(np.uint8)) # ToTensor maps 255 -> [0, 1] automatically
        img_name = (img_path.split(os.sep)[-1]).split('.')[0]  # file stem
        img = self.test_img_trainsform(img).float()
        depth = self.test_depth_trainsform(depth).float()
        return img, depth, mask_path, img_name
    def __len__(self):
        return len(self.imgs)
class TestUnlabeledImageFolder(Dataset):
    """
    Test-time dataset over unlabeled (image, depth) pairs, yielding
    (img, depth, img_name) -- there is no mask.
    """
    def __init__(self, root, in_size, prefix):
        if os.path.isdir(root):
            construct_print(f"{root} is an image folder, we will test on it.")
            self.imgs = _make_unlabeled_dataset(root, split = 'test')
        elif os.path.isfile(root):
            raise NotImplementedError
        else:
            raise NotImplementedError
        self.test_img_trainsform = transforms.Compose([
            transforms.Resize((in_size, in_size)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        self.test_depth_trainsform = transforms.Compose([
            transforms.Resize((in_size, in_size)),
            transforms.ToTensor()
        ])
    def __getitem__(self, index):
        img_path, depth_path= self.imgs[index]
        img = Image.open(img_path).convert('RGB')
        depth = Image.open(depth_path).convert('L')
        img_name = (img_path.split(os.sep)[-1]).split('.')[0]  # file stem
        img = self.test_img_trainsform(img).float()
        depth = self.test_depth_trainsform(depth).float()
        # Min-max rescale the depth tensor to [0, 255] (done in torch here,
        # after ToTensor, rather than on the PIL image as in TestImageFolder).
        depth = (depth - torch.min(depth)) / (torch.max(depth) - torch.min(depth) + torch.tensor(1.0e-6)) * torch.tensor(255.0)
        return img, depth, img_name
    def __len__(self):
        return len(self.imgs)
class TestWithRotationImageFolder(Dataset):
    """
    Test-time dataset that rotates each sample by an angle drawn from
    *rotations* and yields (img, depth, mask_path, rotate_label, img_name),
    where rotate_label is the index of the applied rotation.
    NOTE(review): the rotation is sampled with np.random.randint, so test
    outputs are nondeterministic across runs -- confirm this is intended.
    """
    def __init__(self, root, in_size, prefix, rotations = (0, 90, 180, 270)):
        self.rotations = rotations
        self.times = len(rotations)
        if os.path.isdir(root):
            construct_print(f"{root} is an image folder, we will test on it.")
            self.imgs = _make_dataset(root, split = 'test')
        elif os.path.isfile(root):
            raise NotImplementedError
        else:
            raise NotImplementedError
        self.test_img_trainsform = transforms.Compose([
            # A tuple forces the exact (h, w) output size.
            transforms.Resize((in_size, in_size)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        self.test_depth_trainsform = transforms.Compose([
            transforms.Resize((in_size, in_size)),
            transforms.ToTensor()
        ])
    def __getitem__(self, index):
        rotate_index = np.random.randint(self.times)
        img_path, depth_path, mask_path = self.imgs[index]
        img_name = (img_path.split(os.sep)[-1]).split('.')[0]  # file stem
        img = Image.open(img_path).convert('RGB')
        depth = Image.open(depth_path).convert('L')
        depth = np.asarray(depth)
        # Min-max rescale depth to [0, 255] before re-wrapping as a PIL image.
        depth = (depth - np.min(depth)) / (np.max(depth) - np.min(depth) + 1.0e-6) * 255.0
        depth = Image.fromarray(depth.astype(np.uint8)) # ToTensor maps 255 -> [0, 1] automatically
        # Apply the same rotation to both modalities.
        img = img.rotate(self.rotations[rotate_index])
        depth = depth.rotate(self.rotations[rotate_index])
        img = self.test_img_trainsform(img).float()
        depth = self.test_depth_trainsform(depth).float()
        # depth = (depth - torch.min(depth)) / (torch.max(depth) - torch.min(depth) + torch.tensor(1.0e-6)) * torch.tensor(255.0)
        rotate_label = torch.tensor(rotate_index).long()
        return img, depth, mask_path, rotate_label, img_name
    def __len__(self):
        return len(self.imgs)
class TestWithRotationFDPImageFolder(Dataset):
    """
    FDP-layout test dataset with deterministic rotation: sample *index* is
    rotated by rotations[index % len(rotations)].  Yields
    (img, depth, mask_path, rotate_label, img_name, img_path).
    """
    def __init__(self, root, in_size, prefix, rotations = (0, 90, 180, 270)):
        self.rotations = rotations
        self.times = len(rotations)
        if os.path.isdir(root):
            construct_print(f"{root} is an image folder, we will test on it.")
            self.imgs = _make_fdp_dataset(root)
        elif os.path.isfile(root):
            raise NotImplementedError
        else:
            raise NotImplementedError
        self.test_img_trainsform = transforms.Compose([
            # A tuple forces the exact (h, w) output size.
            transforms.Resize((in_size, in_size)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        self.test_depth_trainsform = transforms.Compose([
            transforms.Resize((in_size, in_size)),
            transforms.ToTensor()
        ])
    def __getitem__(self, index):
        # Deterministic, index-based rotation (unlike the random variant above).
        rotate_index = index % self.times
        # rotate_index = np.random.randint(self.times)
        img_path, depth_path, mask_path = self.imgs[index]
        img_name = (img_path.split(os.sep)[-1]).split('.')[0]  # file stem
        img = Image.open(img_path).convert('RGB')
        depth = Image.open(depth_path).convert('L')
        depth = np.asarray(depth)
        # Min-max rescale depth to [0, 255] before re-wrapping as a PIL image.
        depth = (depth - np.min(depth)) / (np.max(depth) - np.min(depth) + 1.0e-6) * 255.0
        depth = Image.fromarray(depth.astype(np.uint8)) # ToTensor maps 255 -> [0, 1] automatically
        # Apply the same rotation to both modalities.
        img = img.rotate(self.rotations[rotate_index])
        depth = depth.rotate(self.rotations[rotate_index])
        img = self.test_img_trainsform(img).float()
        depth = self.test_depth_trainsform(depth).float()
        # depth = (depth - torch.min(depth)) / (torch.max(depth) - torch.min(depth) + torch.tensor(1.0e-6)) * torch.tensor(255.0)
        rotate_label = torch.tensor(rotate_index).long()
        return img, depth, mask_path, rotate_label, img_name, img_path
    def __len__(self):
        return len(self.imgs)
def _make_train_dataset_from_list(list_filepath, prefix=('.jpg', '.png')):
    """
    Build (image_path, mask_path) pairs from a list file of bare sample
    paths: each entry <dir>/<stem> maps to <dir>/train_images/<stem><prefix[0]>
    and <dir>/train_masks/<stem><prefix[1]>.
    """
    pairs = []
    for img_path in _read_list_from_file(list_filepath):
        parent = os.path.dirname(img_path)
        stem = os.path.basename(img_path)
        pairs.append((os.path.join(parent, 'train_images', stem + prefix[0]),
                      os.path.join(parent, 'train_masks', stem + prefix[1])))
    return pairs
class TrainImageFolder(Dataset):
    """
    Fully-supervised training dataset yielding (img, depth, mask, img_name)
    with joint resize / flip / rotate augmentation.
    NOTE(review): despite the generic name, the directory branch uses the
    FDP layout (_make_fdp_dataset) -- confirm that is intended.
    """
    def __init__(self, root, in_size, prefix, use_bigt=False):
        # use_bigt: binarize the mask at 0.5 after augmentation.
        self.use_bigt = use_bigt
        if os.path.isdir(root):
            construct_print(f"{root} is an image folder, we will train on it.")
            self.imgs = _make_fdp_dataset(root)
        elif os.path.isfile(root):
            construct_print(f"{root} is a list of images, we will use these paths to read the "
                            f"corresponding image")
            self.imgs = _make_train_dataset_from_list(root, prefix=prefix)
        else:
            raise NotImplementedError
        # Geometric transforms applied jointly so img/depth/mask stay aligned.
        self.train_joint_transform = Compose([
            JointResize(in_size),
            RandomHorizontallyFlip(),
            RandomRotate(10)
        ])
        self.train_img_transform = transforms.Compose([
            transforms.ColorJitter(0.1, 0.1, 0.1),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406],
                                 [0.229, 0.224, 0.225]) # operates on a Tensor
        ])
        self.train_depth_transform = transforms.ToTensor()
        self.train_mask_transform = transforms.ToTensor()
    def __getitem__(self, index):
        img_path, depth_path, mask_path = self.imgs[index]
        img = Image.open(img_path)
        depth = Image.open(depth_path)
        mask = Image.open(mask_path)
        # Normalize modes: RGB for the image, single channel for depth/mask.
        if len(img.split()) != 3:
            img = img.convert('RGB')
        if len(depth.split()) == 3:
            depth = depth.convert('L')
        if len(mask.split()) == 3:
            mask = mask.convert('L')
        img, depth, mask = self.train_joint_transform(img, depth, mask)
        mask = self.train_mask_transform(mask).float()
        img = self.train_img_transform(img).float()
        depth = self.train_depth_transform(depth).float()
        ###########################################
        # depth 255 normalized already(NJUD + NLPR)
        # depth = (depth - torch.min(depth)) / (torch.max(depth) - torch.min(depth) + torch.tensor(1.0e-6))
        ###########################################
        if self.use_bigt:
            mask = mask.ge(0.5).float() # binarize
        img_name = (img_path.split(os.sep)[-1]).split('.')[0]  # file stem
        return img, depth, mask, img_name
    def __len__(self):
        return len(self.imgs)
class TrainMTImageFolder(Dataset):
    """
    Mean-Teacher training dataset: labeled (image, depth, mask) triples
    followed by unlabeled (image, depth) pairs that receive an all-zero
    dummy mask.  Index range [0, len(imgs)) is labeled; the remainder maps
    into unlabeled_imgs.
    """
    def __init__(self, root, unlabeled_root, in_size, prefix, use_bigt=False):
        self.in_size = in_size
        # use_bigt: binarize the labeled mask at 0.5 after augmentation.
        self.use_bigt = use_bigt
        if os.path.isdir(root):
            construct_print(f"{root} is an image folder, we will train on it.")
            self.imgs = _make_dataset(root, split = 'train')
        elif os.path.isfile(root):
            construct_print(f"{root} is a list of images, we will use these paths to read the "
                            f"corresponding image")
            self.imgs = _make_train_dataset_from_list(root, prefix=prefix)
        else:
            raise NotImplementedError
        if os.path.isdir(unlabeled_root):
            construct_print(f"{unlabeled_root} is an image folder, we will conduct MT on it.")
            self.unlabeled_imgs = _make_unlabeled_dataset(unlabeled_root, split = 'train')
        elif os.path.isfile(unlabeled_root):
            raise NotImplementedError
        else:
            raise NotImplementedError
        # Geometric transforms applied jointly so modalities stay aligned.
        self.train_joint_transform = Compose([
            JointResize(in_size),
            RandomHorizontallyFlip(),
            RandomRotate(10)
        ])
        self.train_img_transform = transforms.Compose([
            transforms.ColorJitter(0.1, 0.1, 0.1),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406],
                                 [0.229, 0.224, 0.225]) # operates on a Tensor
        ])
        self.train_depth_transform = transforms.ToTensor()
        self.train_mask_transform = transforms.ToTensor()
    def __getitem__(self, index):
        if index < len(self.imgs):
            # ---- labeled sample ----
            img_path, depth_path, mask_path = self.imgs[index]
            img = Image.open(img_path)
            depth = Image.open(depth_path)
            mask = Image.open(mask_path)
            if len(img.split()) != 3:
                img = img.convert('RGB')
            if len(depth.split()) == 3:
                depth = depth.convert('L')
            if len(mask.split()) == 3:
                mask = mask.convert('L')
            img, depth, mask = self.train_joint_transform(img, depth, mask)
            mask = self.train_mask_transform(mask).float()
            img = self.train_img_transform(img).float()
            depth = self.train_depth_transform(depth).float()
            # depth = (depth - torch.min(depth)) / (torch.max(depth) - torch.min(depth) + torch.tensor(1.0e-6))
            if self.use_bigt:
                mask = mask.ge(0.5).float() # binarize
            img_name = (img_path.split(os.sep)[-1]).split('.')[0]
            return img, depth, mask, img_name
        else:
            # ---- unlabeled sample (no mask on disk; dummy zero mask) ----
            index -= len(self.imgs)
            unlabeled_img_path, unlabeled_depth_path = self.unlabeled_imgs[index]
            unlabeled_img = Image.open(unlabeled_img_path)
            unlabeled_depth = Image.open(unlabeled_depth_path)
            if len(unlabeled_img.split()) != 3:
                unlabeled_img = unlabeled_img.convert('RGB')
            if len(unlabeled_depth.split()) == 3:
                unlabeled_depth = unlabeled_depth.convert('L')
            unlabeled_img, unlabeled_depth = self.train_joint_transform(unlabeled_img, unlabeled_depth)
            unlabeled_img = self.train_img_transform(unlabeled_img).float()
            unlabeled_depth = self.train_depth_transform(unlabeled_depth).float()
            # Min-max rescale the unlabeled depth tensor to [0, 255].
            unlabeled_depth = (unlabeled_depth - torch.min(unlabeled_depth)) / (torch.max(unlabeled_depth) - torch.min(unlabeled_depth) + torch.tensor(1.0e-6)) * torch.tensor(255.0)
            unlabeled_mask = torch.zeros((1, self.in_size, self.in_size)).float()
            unlabeled_img_name = (unlabeled_img_path.split(os.sep)[-1]).split('.')[0]
            return unlabeled_img, unlabeled_depth, unlabeled_mask, unlabeled_img_name
    def __len__(self):
        return len(self.imgs) + len(self.unlabeled_imgs)
    def get_primary_secondary_indices(self):
        """Return (labeled_indices, unlabeled_indices) into this dataset."""
        # BUG FIX: the second arange used len(self.unlabeled_imgs) as its upper
        # bound, yielding an empty (or truncated) range whenever
        # len(unlabeled) <= len(imgs).  The unlabeled block occupies
        # [len(imgs), len(imgs) + len(unlabeled)).
        return np.arange(len(self.imgs)), np.arange(len(self.imgs), len(self.imgs) + len(self.unlabeled_imgs))
class TrainSSImageFolder(Dataset):
    """
    Self-supervised (rotation pretext) training dataset: labeled FDP
    (image, depth, mask) triples followed by unlabeled (image, depth)
    pairs with an all-zero dummy mask.  Each sample is rotated by a
    randomly chosen angle from *rotations*, and the rotation index is
    returned as an auxiliary classification label.
    """
    def __init__(self, root, unlabeled_root, in_size, prefix, is_labeled_rotation,
                 use_bigt=False, rotations = (0, 90, 180, 270)):
        self.in_size = in_size
        # Whether labeled samples are also rotated (unlabeled ones always are).
        self.is_labeled_rotation = is_labeled_rotation
        self.use_bigt = use_bigt
        self.rotations = rotations
        self.times = len(rotations)
        if os.path.isdir(root):
            construct_print(f"{root} is an image folder, we will train on it.")
            self.imgs = _make_fdp_dataset(root)
        elif os.path.isfile(root):
            construct_print(f"{root} is a list of images, we will use these paths to read the "
                            f"corresponding image")
            self.imgs = _make_train_dataset_from_list(root, prefix=prefix)
        else:
            raise NotImplementedError
        if os.path.isdir(unlabeled_root):
            construct_print(f"{unlabeled_root} is an image folder, we will conduct SS (also SSMT) on it.")
            self.unlabeled_imgs = _make_unlabeled_dataset(unlabeled_root, split = 'train')
        elif os.path.isfile(unlabeled_root):
            raise NotImplementedError
        else:
            raise NotImplementedError
        # Resize only: random flips/rotations here would corrupt the rotation
        # pretext labels assigned in __getitem__.
        self.train_joint_transform = JointResize(in_size)
        self.train_img_transform = transforms.Compose([
            transforms.ColorJitter(0.1, 0.1, 0.1),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406],
                                 [0.229, 0.224, 0.225]) # operates on a Tensor
        ])
        self.train_depth_transform = transforms.ToTensor()
        self.train_mask_transform = transforms.ToTensor()
    def __getitem__(self, index):
        main_index = index
        rotate_index = np.random.randint(self.times)  # pretext rotation label
        if main_index < len(self.imgs):
            # ---- labeled sample ----
            img_path, depth_path, mask_path = self.imgs[main_index]
            img = Image.open(img_path)
            depth = Image.open(depth_path)
            mask = Image.open(mask_path)
            if len(img.split()) != 3:
                img = img.convert('RGB')
            if len(depth.split()) == 3:
                depth = depth.convert('L')
            if len(mask.split()) == 3:
                mask = mask.convert('L')
            img, depth, mask = self.train_joint_transform(img, depth, mask)
            if self.is_labeled_rotation:
                # Rotate all modalities identically so they stay aligned.
                img = img.rotate(self.rotations[rotate_index])
                depth = depth.rotate(self.rotations[rotate_index])
                mask = mask.rotate(self.rotations[rotate_index])
            mask = self.train_mask_transform(mask).float()
            img = self.train_img_transform(img).float()
            depth = self.train_depth_transform(depth).float()
            if self.use_bigt:
                mask = mask.ge(0.5).float() # binarize
            rotate_label = torch.tensor(rotate_index).long()
            img_name = (img_path.split(os.sep)[-1]).split('.')[0]
            return img, depth, mask, rotate_label, img_name
        else:
            # ---- unlabeled sample (no mask on disk; dummy zero mask) ----
            main_index -= len(self.imgs)
            unlabeled_img_path, unlabeled_depth_path = self.unlabeled_imgs[main_index]
            unlabeled_img = Image.open(unlabeled_img_path)
            unlabeled_depth = Image.open(unlabeled_depth_path)
            if len(unlabeled_img.split()) != 3:
                unlabeled_img = unlabeled_img.convert('RGB')
            if len(unlabeled_depth.split()) == 3:
                unlabeled_depth = unlabeled_depth.convert('L')
            unlabeled_img, unlabeled_depth = self.train_joint_transform(unlabeled_img, unlabeled_depth)
            unlabeled_img = unlabeled_img.rotate(self.rotations[rotate_index])
            unlabeled_depth = unlabeled_depth.rotate(self.rotations[rotate_index])
            unlabeled_img = self.train_img_transform(unlabeled_img).float()
            unlabeled_depth = self.train_depth_transform(unlabeled_depth).float()
            # Min-max rescale the unlabeled depth tensor to [0, 255].
            unlabeled_depth = (unlabeled_depth - torch.min(unlabeled_depth)) / (torch.max(unlabeled_depth) - torch.min(unlabeled_depth) + torch.tensor(1.0e-6)) * torch.tensor(255.0)
            unlabeled_mask = torch.zeros((1, self.in_size, self.in_size)).float() # dummy
            unlabeled_rotate_label = torch.tensor(rotate_index).long()
            unlabeled_img_name = (unlabeled_img_path.split(os.sep)[-1]).split('.')[0]
            return unlabeled_img, unlabeled_depth, unlabeled_mask, unlabeled_rotate_label, unlabeled_img_name
    def __len__(self):
        return (len(self.imgs) + len(self.unlabeled_imgs))
    def get_primary_secondary_indices(self):
        """Return (labeled_indices, unlabeled_indices) into this dataset."""
        # BUG FIX: the second arange used len(self.unlabeled_imgs) as its upper
        # bound, yielding an empty (or truncated) range whenever
        # len(unlabeled) <= len(imgs).  The unlabeled block occupies
        # [len(imgs), len(imgs) + len(unlabeled)).
        return np.arange(len(self.imgs)), np.arange(len(self.imgs), len(self.imgs) + len(self.unlabeled_imgs))
| [
"os.path.isfile",
"numpy.random.randint",
"torchvision.transforms.Normalize",
"os.path.join",
"os.path.dirname",
"utils.joint_transforms.RandomHorizontallyFlip",
"numpy.max",
"torch.zeros",
"os.path.basename",
"numpy.asarray",
"numpy.min",
"torch.max",
"os.listdir",
"torch.min",
"torchvi... | [((283, 314), 'numpy.array', 'np.array', (['[0.447, 0.407, 0.386]'], {}), '([0.447, 0.407, 0.386])\n', (291, 314), True, 'import numpy as np\n'), ((325, 355), 'numpy.array', 'np.array', (['[0.244, 0.25, 0.253]'], {}), '([0.244, 0.25, 0.253])\n', (333, 355), True, 'import numpy as np\n'), ((929, 966), 'os.path.join', 'os.path.join', (['root', "(split + '_images')"], {}), "(root, split + '_images')\n", (941, 966), False, 'import os\n'), ((984, 1020), 'os.path.join', 'os.path.join', (['root', "(split + '_depth')"], {}), "(root, split + '_depth')\n", (996, 1020), False, 'import os\n'), ((1041, 1061), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (1051, 1061), False, 'import os\n'), ((1079, 1101), 'os.listdir', 'os.listdir', (['depth_path'], {}), '(depth_path)\n', (1089, 1101), False, 'import os\n'), ((1499, 1536), 'os.path.join', 'os.path.join', (['root', "(split + '_images')"], {}), "(root, split + '_images')\n", (1511, 1536), False, 'import os\n'), ((1554, 1590), 'os.path.join', 'os.path.join', (['root', "(split + '_depth')"], {}), "(root, split + '_depth')\n", (1566, 1590), False, 'import os\n'), ((1607, 1643), 'os.path.join', 'os.path.join', (['root', "(split + '_masks')"], {}), "(root, split + '_masks')\n", (1619, 1643), False, 'import os\n'), ((1664, 1684), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (1674, 1684), False, 'import os\n'), ((1702, 1724), 'os.listdir', 'os.listdir', (['depth_path'], {}), '(depth_path)\n', (1712, 1724), False, 'import os\n'), ((1741, 1762), 'os.listdir', 'os.listdir', (['mask_path'], {}), '(mask_path)\n', (1751, 1762), False, 'import os\n'), ((2236, 2261), 'os.path.join', 'os.path.join', (['root', '"""RGB"""'], {}), "(root, 'RGB')\n", (2248, 2261), False, 'import os\n'), ((2279, 2306), 'os.path.join', 'os.path.join', (['root', '"""depth"""'], {}), "(root, 'depth')\n", (2291, 2306), False, 'import os\n'), ((2323, 2347), 'os.path.join', 'os.path.join', (['root', '"""GT"""'], {}), 
"(root, 'GT')\n", (2335, 2347), False, 'import os\n'), ((2368, 2388), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (2378, 2388), False, 'import os\n'), ((2406, 2428), 'os.listdir', 'os.listdir', (['depth_path'], {}), '(depth_path)\n', (2416, 2428), False, 'import os\n'), ((2445, 2466), 'os.listdir', 'os.listdir', (['mask_path'], {}), '(mask_path)\n', (2455, 2466), False, 'import os\n'), ((712, 756), 'utils.misc.construct_print', 'construct_print', (['f"""数据文件夹中包含多种扩展名,这里仅使用{ext}"""'], {}), "(f'数据文件夹中包含多种扩展名,这里仅使用{ext}')\n", (727, 756), False, 'from utils.misc import construct_print\n'), ((3744, 3763), 'os.path.isdir', 'os.path.isdir', (['root'], {}), '(root)\n', (3757, 3763), False, 'import os\n'), ((4879, 4896), 'numpy.asarray', 'np.asarray', (['depth'], {}), '(depth)\n', (4889, 4896), True, 'import numpy as np\n'), ((5589, 5608), 'os.path.isdir', 'os.path.isdir', (['root'], {}), '(root)\n', (5602, 5608), False, 'import os\n'), ((6529, 6546), 'numpy.asarray', 'np.asarray', (['depth'], {}), '(depth)\n', (6539, 6546), True, 'import numpy as np\n'), ((7121, 7140), 'os.path.isdir', 'os.path.isdir', (['root'], {}), '(root)\n', (7134, 7140), False, 'import os\n'), ((8602, 8621), 'os.path.isdir', 'os.path.isdir', (['root'], {}), '(root)\n', (8615, 8621), False, 'import os\n'), ((9390, 9419), 'numpy.random.randint', 'np.random.randint', (['self.times'], {}), '(self.times)\n', (9407, 9419), True, 'import numpy as np\n'), ((9661, 9678), 'numpy.asarray', 'np.asarray', (['depth'], {}), '(depth)\n', (9671, 9678), True, 'import numpy as np\n'), ((10618, 10637), 'os.path.isdir', 'os.path.isdir', (['root'], {}), '(root)\n', (10631, 10637), False, 'import os\n'), ((11710, 11727), 'numpy.asarray', 'np.asarray', (['depth'], {}), '(depth)\n', (11720, 11727), True, 'import numpy as np\n'), ((13151, 13170), 'os.path.isdir', 'os.path.isdir', (['root'], {}), '(root)\n', (13164, 13170), False, 'import os\n'), ((14086, 14107), 'torchvision.transforms.ToTensor', 
'transforms.ToTensor', ([], {}), '()\n', (14105, 14107), False, 'from torchvision import transforms\n'), ((14144, 14165), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (14163, 14165), False, 'from torchvision import transforms\n'), ((14287, 14307), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (14297, 14307), False, 'from PIL import Image\n'), ((14324, 14346), 'PIL.Image.open', 'Image.open', (['depth_path'], {}), '(depth_path)\n', (14334, 14346), False, 'from PIL import Image\n'), ((14362, 14383), 'PIL.Image.open', 'Image.open', (['mask_path'], {}), '(mask_path)\n', (14372, 14383), False, 'from PIL import Image\n'), ((15543, 15562), 'os.path.isdir', 'os.path.isdir', (['root'], {}), '(root)\n', (15556, 15562), False, 'import os\n'), ((16026, 16055), 'os.path.isdir', 'os.path.isdir', (['unlabeled_root'], {}), '(unlabeled_root)\n', (16039, 16055), False, 'import os\n'), ((16854, 16875), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (16873, 16875), False, 'from torchvision import transforms\n'), ((16912, 16933), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (16931, 16933), False, 'from torchvision import transforms\n'), ((19926, 19945), 'os.path.isdir', 'os.path.isdir', (['root'], {}), '(root)\n', (19939, 19945), False, 'import os\n'), ((20396, 20425), 'os.path.isdir', 'os.path.isdir', (['unlabeled_root'], {}), '(unlabeled_root)\n', (20409, 20425), False, 'import os\n'), ((20798, 20818), 'utils.joint_transforms.JointResize', 'JointResize', (['in_size'], {}), '(in_size)\n', (20809, 20818), False, 'from utils.joint_transforms import Compose, JointResize, RandomHorizontallyFlip, RandomRotate\n'), ((21136, 21157), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (21155, 21157), False, 'from torchvision import transforms\n'), ((21194, 21215), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (21213, 21215), 
False, 'from torchvision import transforms\n'), ((21382, 21411), 'numpy.random.randint', 'np.random.randint', (['self.times'], {}), '(self.times)\n', (21399, 21411), True, 'import numpy as np\n'), ((1236, 1255), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (1252, 1255), False, 'import os\n'), ((1315, 1357), 'os.path.join', 'os.path.join', (['img_path', '(img_name + img_ext)'], {}), '(img_path, img_name + img_ext)\n', (1327, 1357), False, 'import os\n'), ((1375, 1421), 'os.path.join', 'os.path.join', (['depth_path', '(img_name + depth_ext)'], {}), '(depth_path, img_name + depth_ext)\n', (1387, 1421), False, 'import os\n'), ((1894, 1913), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (1910, 1913), False, 'import os\n'), ((1974, 2016), 'os.path.join', 'os.path.join', (['img_path', '(img_name + img_ext)'], {}), '(img_path, img_name + img_ext)\n', (1986, 2016), False, 'import os\n'), ((2031, 2077), 'os.path.join', 'os.path.join', (['depth_path', '(img_name + depth_ext)'], {}), '(depth_path, img_name + depth_ext)\n', (2043, 2077), False, 'import os\n'), ((2092, 2136), 'os.path.join', 'os.path.join', (['mask_path', '(img_name + mask_ext)'], {}), '(mask_path, img_name + mask_ext)\n', (2104, 2136), False, 'import os\n'), ((2598, 2617), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (2614, 2617), False, 'import os\n'), ((2678, 2720), 'os.path.join', 'os.path.join', (['img_path', '(img_name + img_ext)'], {}), '(img_path, img_name + img_ext)\n', (2690, 2720), False, 'import os\n'), ((2735, 2781), 'os.path.join', 'os.path.join', (['depth_path', '(img_name + depth_ext)'], {}), '(depth_path, img_name + depth_ext)\n', (2747, 2781), False, 'import os\n'), ((2796, 2840), 'os.path.join', 'os.path.join', (['mask_path', '(img_name + mask_ext)'], {}), '(mask_path, img_name + mask_ext)\n', (2808, 2840), False, 'import os\n'), ((3777, 3843), 'utils.misc.construct_print', 'construct_print', (['f"""{root} is an image folder, we will test on 
it."""'], {}), "(f'{root} is an image folder, we will test on it.')\n", (3792, 3843), False, 'from utils.misc import construct_print\n'), ((3917, 3937), 'os.path.isfile', 'os.path.isfile', (['root'], {}), '(root)\n', (3931, 3937), False, 'import os\n'), ((5622, 5688), 'utils.misc.construct_print', 'construct_print', (['f"""{root} is an image folder, we will test on it."""'], {}), "(f'{root} is an image folder, we will test on it.')\n", (5637, 5688), False, 'from utils.misc import construct_print\n'), ((5750, 5770), 'os.path.isfile', 'os.path.isfile', (['root'], {}), '(root)\n', (5764, 5770), False, 'import os\n'), ((7154, 7220), 'utils.misc.construct_print', 'construct_print', (['f"""{root} is an image folder, we will test on it."""'], {}), "(f'{root} is an image folder, we will test on it.')\n", (7169, 7220), False, 'from utils.misc import construct_print\n'), ((7304, 7324), 'os.path.isfile', 'os.path.isfile', (['root'], {}), '(root)\n', (7318, 7324), False, 'import os\n'), ((8283, 8302), 'torch.tensor', 'torch.tensor', (['(255.0)'], {}), '(255.0)\n', (8295, 8302), False, 'import torch\n'), ((8635, 8701), 'utils.misc.construct_print', 'construct_print', (['f"""{root} is an image folder, we will test on it."""'], {}), "(f'{root} is an image folder, we will test on it.')\n", (8650, 8701), False, 'from utils.misc import construct_print\n'), ((8775, 8795), 'os.path.isfile', 'os.path.isfile', (['root'], {}), '(root)\n', (8789, 8795), False, 'import os\n'), ((10651, 10717), 'utils.misc.construct_print', 'construct_print', (['f"""{root} is an image folder, we will test on it."""'], {}), "(f'{root} is an image folder, we will test on it.')\n", (10666, 10717), False, 'from utils.misc import construct_print\n'), ((10779, 10799), 'os.path.isfile', 'os.path.isfile', (['root'], {}), '(root)\n', (10793, 10799), False, 'import os\n'), ((13184, 13251), 'utils.misc.construct_print', 'construct_print', (['f"""{root} is an image folder, we will train on it."""'], {}), "(f'{root} is 
an image folder, we will train on it.')\n", (13199, 13251), False, 'from utils.misc import construct_print\n'), ((13313, 13333), 'os.path.isfile', 'os.path.isfile', (['root'], {}), '(root)\n', (13327, 13333), False, 'import os\n'), ((15576, 15643), 'utils.misc.construct_print', 'construct_print', (['f"""{root} is an image folder, we will train on it."""'], {}), "(f'{root} is an image folder, we will train on it.')\n", (15591, 15643), False, 'from utils.misc import construct_print\n'), ((15718, 15738), 'os.path.isfile', 'os.path.isfile', (['root'], {}), '(root)\n', (15732, 15738), False, 'import os\n'), ((16069, 16156), 'utils.misc.construct_print', 'construct_print', (['f"""{unlabeled_root} is an image folder, we will conduct MT on it."""'], {}), "(\n f'{unlabeled_root} is an image folder, we will conduct MT on it.')\n", (16084, 16156), False, 'from utils.misc import construct_print\n'), ((16256, 16286), 'os.path.isfile', 'os.path.isfile', (['unlabeled_root'], {}), '(unlabeled_root)\n', (16270, 16286), False, 'import os\n'), ((17102, 17122), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (17112, 17122), False, 'from PIL import Image\n'), ((17143, 17165), 'PIL.Image.open', 'Image.open', (['depth_path'], {}), '(depth_path)\n', (17153, 17165), False, 'from PIL import Image\n'), ((17185, 17206), 'PIL.Image.open', 'Image.open', (['mask_path'], {}), '(mask_path)\n', (17195, 17206), False, 'from PIL import Image\n'), ((18236, 18266), 'PIL.Image.open', 'Image.open', (['unlabeled_img_path'], {}), '(unlabeled_img_path)\n', (18246, 18266), False, 'from PIL import Image\n'), ((18297, 18329), 'PIL.Image.open', 'Image.open', (['unlabeled_depth_path'], {}), '(unlabeled_depth_path)\n', (18307, 18329), False, 'from PIL import Image\n'), ((19959, 20026), 'utils.misc.construct_print', 'construct_print', (['f"""{root} is an image folder, we will train on it."""'], {}), "(f'{root} is an image folder, we will train on it.')\n", (19974, 20026), False, 'from 
utils.misc import construct_print\n'), ((20088, 20108), 'os.path.isfile', 'os.path.isfile', (['root'], {}), '(root)\n', (20102, 20108), False, 'import os\n'), ((20439, 20543), 'utils.misc.construct_print', 'construct_print', (['f"""{unlabeled_root} is an image folder, we will conduct SS (also SSMT) on it."""'], {}), "(\n f'{unlabeled_root} is an image folder, we will conduct SS (also SSMT) on it.'\n )\n", (20454, 20543), False, 'from utils.misc import construct_print\n'), ((20638, 20668), 'os.path.isfile', 'os.path.isfile', (['unlabeled_root'], {}), '(unlabeled_root)\n', (20652, 20668), False, 'import os\n'), ((21551, 21571), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (21561, 21571), False, 'from PIL import Image\n'), ((21592, 21614), 'PIL.Image.open', 'Image.open', (['depth_path'], {}), '(depth_path)\n', (21602, 21614), False, 'from PIL import Image\n'), ((21634, 21655), 'PIL.Image.open', 'Image.open', (['mask_path'], {}), '(mask_path)\n', (21644, 21655), False, 'from PIL import Image\n'), ((23164, 23194), 'PIL.Image.open', 'Image.open', (['unlabeled_img_path'], {}), '(unlabeled_img_path)\n', (23174, 23194), False, 'from PIL import Image\n'), ((23225, 23257), 'PIL.Image.open', 'Image.open', (['unlabeled_depth_path'], {}), '(unlabeled_depth_path)\n', (23235, 23257), False, 'from PIL import Image\n'), ((3951, 4064), 'utils.misc.construct_print', 'construct_print', (['f"""{root} is a list of images, we will use these paths to read the corresponding image"""'], {}), "(\n f'{root} is a list of images, we will use these paths to read the corresponding image'\n )\n", (3966, 4064), False, 'from utils.misc import construct_print\n'), ((4341, 4378), 'torchvision.transforms.Resize', 'transforms.Resize', (['(in_size, in_size)'], {}), '((in_size, in_size))\n', (4358, 4378), False, 'from torchvision import transforms\n'), ((4392, 4413), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4411, 4413), False, 'from torchvision 
import transforms\n'), ((4427, 4493), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (4447, 4493), False, 'from torchvision import transforms\n'), ((4575, 4612), 'torchvision.transforms.Resize', 'transforms.Resize', (['(in_size, in_size)'], {}), '((in_size, in_size))\n', (4592, 4612), False, 'from torchvision import transforms\n'), ((4626, 4647), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4645, 4647), False, 'from torchvision import transforms\n'), ((4775, 4795), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (4785, 4795), False, 'from PIL import Image\n'), ((4827, 4849), 'PIL.Image.open', 'Image.open', (['depth_path'], {}), '(depth_path)\n', (4837, 4849), False, 'from PIL import Image\n'), ((5990, 6027), 'torchvision.transforms.Resize', 'transforms.Resize', (['(in_size, in_size)'], {}), '((in_size, in_size))\n', (6007, 6027), False, 'from torchvision import transforms\n'), ((6041, 6062), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6060, 6062), False, 'from torchvision import transforms\n'), ((6076, 6142), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (6096, 6142), False, 'from torchvision import transforms\n'), ((6224, 6261), 'torchvision.transforms.Resize', 'transforms.Resize', (['(in_size, in_size)'], {}), '((in_size, in_size))\n', (6241, 6261), False, 'from torchvision import transforms\n'), ((6275, 6296), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6294, 6296), False, 'from torchvision import transforms\n'), ((6425, 6445), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (6435, 6445), False, 'from PIL import Image\n'), ((6477, 6499), 'PIL.Image.open', 'Image.open', 
(['depth_path'], {}), '(depth_path)\n', (6487, 6499), False, 'from PIL import Image\n'), ((7484, 7521), 'torchvision.transforms.Resize', 'transforms.Resize', (['(in_size, in_size)'], {}), '((in_size, in_size))\n', (7501, 7521), False, 'from torchvision import transforms\n'), ((7535, 7556), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7554, 7556), False, 'from torchvision import transforms\n'), ((7570, 7636), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (7590, 7636), False, 'from torchvision import transforms\n'), ((7718, 7755), 'torchvision.transforms.Resize', 'transforms.Resize', (['(in_size, in_size)'], {}), '((in_size, in_size))\n', (7735, 7755), False, 'from torchvision import transforms\n'), ((7769, 7790), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7788, 7790), False, 'from torchvision import transforms\n'), ((7906, 7926), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (7916, 7926), False, 'from PIL import Image\n'), ((7958, 7980), 'PIL.Image.open', 'Image.open', (['depth_path'], {}), '(depth_path)\n', (7968, 7980), False, 'from PIL import Image\n'), ((9015, 9052), 'torchvision.transforms.Resize', 'transforms.Resize', (['(in_size, in_size)'], {}), '((in_size, in_size))\n', (9032, 9052), False, 'from torchvision import transforms\n'), ((9066, 9087), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (9085, 9087), False, 'from torchvision import transforms\n'), ((9101, 9167), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (9121, 9167), False, 'from torchvision import transforms\n'), ((9249, 9286), 'torchvision.transforms.Resize', 'transforms.Resize', (['(in_size, in_size)'], {}), '((in_size, in_size))\n', 
(9266, 9286), False, 'from torchvision import transforms\n'), ((9300, 9321), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (9319, 9321), False, 'from torchvision import transforms\n'), ((9557, 9577), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (9567, 9577), False, 'from PIL import Image\n'), ((9609, 9631), 'PIL.Image.open', 'Image.open', (['depth_path'], {}), '(depth_path)\n', (9619, 9631), False, 'from PIL import Image\n'), ((10256, 10282), 'torch.tensor', 'torch.tensor', (['rotate_index'], {}), '(rotate_index)\n', (10268, 10282), False, 'import torch\n'), ((11019, 11056), 'torchvision.transforms.Resize', 'transforms.Resize', (['(in_size, in_size)'], {}), '((in_size, in_size))\n', (11036, 11056), False, 'from torchvision import transforms\n'), ((11070, 11091), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (11089, 11091), False, 'from torchvision import transforms\n'), ((11105, 11171), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (11125, 11171), False, 'from torchvision import transforms\n'), ((11253, 11290), 'torchvision.transforms.Resize', 'transforms.Resize', (['(in_size, in_size)'], {}), '((in_size, in_size))\n', (11270, 11290), False, 'from torchvision import transforms\n'), ((11304, 11325), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (11323, 11325), False, 'from torchvision import transforms\n'), ((11606, 11626), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (11616, 11626), False, 'from PIL import Image\n'), ((11658, 11680), 'PIL.Image.open', 'Image.open', (['depth_path'], {}), '(depth_path)\n', (11668, 11680), False, 'from PIL import Image\n'), ((12297, 12323), 'torch.tensor', 'torch.tensor', (['rotate_index'], {}), '(rotate_index)\n', (12309, 12323), False, 'import torch\n'), ((13347, 13460), 
'utils.misc.construct_print', 'construct_print', (['f"""{root} is a list of images, we will use these paths to read the corresponding image"""'], {}), "(\n f'{root} is a list of images, we will use these paths to read the corresponding image'\n )\n", (13362, 13460), False, 'from utils.misc import construct_print\n'), ((13670, 13690), 'utils.joint_transforms.JointResize', 'JointResize', (['in_size'], {}), '(in_size)\n', (13681, 13690), False, 'from utils.joint_transforms import Compose, JointResize, RandomHorizontallyFlip, RandomRotate\n'), ((13704, 13728), 'utils.joint_transforms.RandomHorizontallyFlip', 'RandomHorizontallyFlip', ([], {}), '()\n', (13726, 13728), False, 'from utils.joint_transforms import Compose, JointResize, RandomHorizontallyFlip, RandomRotate\n'), ((13742, 13758), 'utils.joint_transforms.RandomRotate', 'RandomRotate', (['(10)'], {}), '(10)\n', (13754, 13758), False, 'from utils.joint_transforms import Compose, JointResize, RandomHorizontallyFlip, RandomRotate\n'), ((13838, 13875), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['(0.1)', '(0.1)', '(0.1)'], {}), '(0.1, 0.1, 0.1)\n', (13860, 13875), False, 'from torchvision import transforms\n'), ((13889, 13910), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (13908, 13910), False, 'from torchvision import transforms\n'), ((13924, 13990), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (13944, 13990), False, 'from torchvision import transforms\n'), ((15752, 15865), 'utils.misc.construct_print', 'construct_print', (['f"""{root} is a list of images, we will use these paths to read the corresponding image"""'], {}), "(\n f'{root} is a list of images, we will use these paths to read the corresponding image'\n )\n", (15767, 15865), False, 'from utils.misc import construct_print\n'), ((16438, 16458), 'utils.joint_transforms.JointResize', 
'JointResize', (['in_size'], {}), '(in_size)\n', (16449, 16458), False, 'from utils.joint_transforms import Compose, JointResize, RandomHorizontallyFlip, RandomRotate\n'), ((16472, 16496), 'utils.joint_transforms.RandomHorizontallyFlip', 'RandomHorizontallyFlip', ([], {}), '()\n', (16494, 16496), False, 'from utils.joint_transforms import Compose, JointResize, RandomHorizontallyFlip, RandomRotate\n'), ((16510, 16526), 'utils.joint_transforms.RandomRotate', 'RandomRotate', (['(10)'], {}), '(10)\n', (16522, 16526), False, 'from utils.joint_transforms import Compose, JointResize, RandomHorizontallyFlip, RandomRotate\n'), ((16606, 16643), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['(0.1)', '(0.1)', '(0.1)'], {}), '(0.1, 0.1, 0.1)\n', (16628, 16643), False, 'from torchvision import transforms\n'), ((16657, 16678), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (16676, 16678), False, 'from torchvision import transforms\n'), ((16692, 16758), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (16712, 16758), False, 'from torchvision import transforms\n'), ((19002, 19021), 'torch.tensor', 'torch.tensor', (['(255.0)'], {}), '(255.0)\n', (19014, 19021), False, 'import torch\n'), ((20122, 20235), 'utils.misc.construct_print', 'construct_print', (['f"""{root} is a list of images, we will use these paths to read the corresponding image"""'], {}), "(\n f'{root} is a list of images, we will use these paths to read the corresponding image'\n )\n", (20137, 20235), False, 'from utils.misc import construct_print\n'), ((20888, 20925), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['(0.1)', '(0.1)', '(0.1)'], {}), '(0.1, 0.1, 0.1)\n', (20910, 20925), False, 'from torchvision import transforms\n'), ((20939, 20960), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (20958, 
20960), False, 'from torchvision import transforms\n'), ((20974, 21040), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (20994, 21040), False, 'from torchvision import transforms\n'), ((24093, 24112), 'torch.tensor', 'torch.tensor', (['(255.0)'], {}), '(255.0)\n', (24105, 24112), False, 'import torch\n'), ((408, 427), 'os.path.splitext', 'os.path.splitext', (['p'], {}), '(p)\n', (424, 427), False, 'import os\n'), ((3356, 3381), 'os.path.dirname', 'os.path.dirname', (['img_path'], {}), '(img_path)\n', (3371, 3381), False, 'import os\n'), ((3425, 3451), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (3441, 3451), False, 'import os\n'), ((3505, 3530), 'os.path.dirname', 'os.path.dirname', (['img_path'], {}), '(img_path)\n', (3520, 3530), False, 'import os\n'), ((3573, 3599), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (3589, 3599), False, 'import os\n'), ((4922, 4935), 'numpy.min', 'np.min', (['depth'], {}), '(depth)\n', (4928, 4935), True, 'import numpy as np\n'), ((8200, 8216), 'torch.min', 'torch.min', (['depth'], {}), '(depth)\n', (8209, 8216), False, 'import torch\n'), ((8259, 8278), 'torch.tensor', 'torch.tensor', (['(1e-06)'], {}), '(1e-06)\n', (8271, 8278), False, 'import torch\n'), ((9704, 9717), 'numpy.min', 'np.min', (['depth'], {}), '(depth)\n', (9710, 9717), True, 'import numpy as np\n'), ((11753, 11766), 'numpy.min', 'np.min', (['depth'], {}), '(depth)\n', (11759, 11766), True, 'import numpy as np\n'), ((12712, 12737), 'os.path.dirname', 'os.path.dirname', (['img_path'], {}), '(img_path)\n', (12727, 12737), False, 'import os\n'), ((12782, 12808), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (12798, 12808), False, 'import os\n'), ((12862, 12887), 'os.path.dirname', 'os.path.dirname', (['img_path'], {}), '(img_path)\n', (12877, 12887), False, 
'import os\n'), ((12931, 12957), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (12947, 12957), False, 'import os\n'), ((19051, 19095), 'torch.zeros', 'torch.zeros', (['(1, self.in_size, self.in_size)'], {}), '((1, self.in_size, self.in_size))\n', (19062, 19095), False, 'import torch\n'), ((22778, 22804), 'torch.tensor', 'torch.tensor', (['rotate_index'], {}), '(rotate_index)\n', (22790, 22804), False, 'import torch\n'), ((24155, 24199), 'torch.zeros', 'torch.zeros', (['(1, self.in_size, self.in_size)'], {}), '((1, self.in_size, self.in_size))\n', (24166, 24199), False, 'import torch\n'), ((24413, 24439), 'torch.tensor', 'torch.tensor', (['rotate_index'], {}), '(rotate_index)\n', (24425, 24439), False, 'import torch\n'), ((4940, 4953), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (4946, 4953), True, 'import numpy as np\n'), ((4956, 4969), 'numpy.min', 'np.min', (['depth'], {}), '(depth)\n', (4962, 4969), True, 'import numpy as np\n'), ((8221, 8237), 'torch.max', 'torch.max', (['depth'], {}), '(depth)\n', (8230, 8237), False, 'import torch\n'), ((8240, 8256), 'torch.min', 'torch.min', (['depth'], {}), '(depth)\n', (8249, 8256), False, 'import torch\n'), ((9722, 9735), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (9728, 9735), True, 'import numpy as np\n'), ((9738, 9751), 'numpy.min', 'np.min', (['depth'], {}), '(depth)\n', (9744, 9751), True, 'import numpy as np\n'), ((11771, 11784), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (11777, 11784), True, 'import numpy as np\n'), ((11787, 11800), 'numpy.min', 'np.min', (['depth'], {}), '(depth)\n', (11793, 11800), True, 'import numpy as np\n'), ((18889, 18915), 'torch.min', 'torch.min', (['unlabeled_depth'], {}), '(unlabeled_depth)\n', (18898, 18915), False, 'import torch\n'), ((18978, 18997), 'torch.tensor', 'torch.tensor', (['(1e-06)'], {}), '(1e-06)\n', (18990, 18997), False, 'import torch\n'), ((23980, 24006), 'torch.min', 'torch.min', (['unlabeled_depth'], {}), 
'(unlabeled_depth)\n', (23989, 24006), False, 'import torch\n'), ((24069, 24088), 'torch.tensor', 'torch.tensor', (['(1e-06)'], {}), '(1e-06)\n', (24081, 24088), False, 'import torch\n'), ((18920, 18946), 'torch.max', 'torch.max', (['unlabeled_depth'], {}), '(unlabeled_depth)\n', (18929, 18946), False, 'import torch\n'), ((18949, 18975), 'torch.min', 'torch.min', (['unlabeled_depth'], {}), '(unlabeled_depth)\n', (18958, 18975), False, 'import torch\n'), ((24011, 24037), 'torch.max', 'torch.max', (['unlabeled_depth'], {}), '(unlabeled_depth)\n', (24020, 24037), False, 'import torch\n'), ((24040, 24066), 'torch.min', 'torch.min', (['unlabeled_depth'], {}), '(unlabeled_depth)\n', (24049, 24066), False, 'import torch\n')] |
#!/usr/bin/env python
import os
import argparse
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
#import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
# Display names for charged residue types: maps the 3-letter residue code to
# the same code with its charge sign appended (e.g. "LYS" -> "LYS+").
# network() uses this to label influencer nodes; codes absent from this map
# are shown unchanged.
translation = {"NTR": "NTR+",
               "LYS": "LYS+",
               "ARG": "ARG+",
               "GLU": "GLU-",
               "HEM": "HEM+",
               "HEC": "HEC+",
               "ASP": "ASP-",
               "CTR": "CTR-",
               "HIS": "HIS+",
               "TYR": "TYR-"}
def network(df, cutoff):
    """Build and display the residue pKa influence graph.

    Draws a directed edge from each "influencer" residue (the rows below the
    "TOTAL" row of the mfe_all table) to each influenced residue (the table
    columns after the first two) whenever the absolute pairwise interaction
    exceeds *cutoff*.

    Parameters
    ----------
    df : pandas.DataFrame
        mfe_all output table; must contain a "Terms" column with a "TOTAL"
        row separating per-residue terms from influencer rows.
    cutoff : float
        Minimum absolute pairwise interaction required to draw an edge.
    """
    column_names = df.keys()
    residues = list(column_names[2:])
    row_names = df["Terms"]
    # Influencer rows start immediately after the "TOTAL" row.
    i_start = np.where(row_names == "TOTAL")[0][0] + 1
    influencers = list(row_names[i_start:])
    # Re-index by term name WITHOUT mutating the caller's DataFrame
    # (the original used inplace=True, silently altering the argument).
    df = df.set_index('Terms')
    G = nx.DiGraph()
    for influencer in influencers:
        # Charged residues get their +/- suffix for display (see `translation`).
        if influencer[:3] in translation:
            influencer_name = translation[influencer[:3]] + influencer[3:]
        else:
            influencer_name = influencer
        for residue in residues:
            weight = abs(df.at[influencer, residue])
            if weight > cutoff:
                G.add_edge(influencer_name, residue, weight=weight)
    plt.figure(figsize=(12, 8))
    pos = graphviz_layout(G)
    nx.draw(G, pos=pos, with_labels=True)
    plt.show()
    return
if __name__ == "__main__":
    # Get the command arguments
    helpmsg = "Plot the residue pKa influence graph. A residue's pKa is influenced by other residues, especially the charged ones. This kind of influence is plotted by an weighted arrow. This program requires mfe_all.py output."
    parser = argparse.ArgumentParser(description=helpmsg)
    parser.add_argument("-c", metavar="pw_cutoff", default=0.1, help="Cutoff pairwise interaction, default is 0.1.", type=float)
    parser.add_argument("mfe_all", metavar="mfe_all.xlsx", default="mfe_all.xlsx", help="mfe output file", nargs="?")
    args = parser.parse_args()

    pathname = args.mfe_all
    _, file_extension = os.path.splitext(pathname)
    if file_extension == ".csv":
        mfe_data = pd.read_csv(pathname)
    elif file_extension == ".xlsx":
        mfe_data = pd.read_excel(pathname)
    elif file_extension in (".htm", ".html"):
        # read_html returns a LIST of DataFrames; the original passed that
        # list straight to network() and crashed.  Use the first table.
        mfe_data = pd.read_html(pathname)[0]
    else:
        # Abort instead of falling through and calling network() with a
        # non-DataFrame (the original continued with an empty string).
        print("Can not interpret data file %s. Only xlsx, csv, and html files are acceppted." % pathname)
        raise SystemExit(1)

    network(mfe_data, args.c)
| [
"pandas.read_html",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"pandas.read_csv",
"pandas.read_excel",
"matplotlib.pyplot.figure",
"numpy.where",
"networkx.draw",
"os.path.splitext",
"networkx.drawing.nx_agraph.graphviz_layout",
"networkx.DiGraph"
] | [((788, 800), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (798, 800), True, 'import networkx as nx\n'), ((1200, 1227), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (1210, 1227), True, 'import matplotlib.pyplot as plt\n'), ((1237, 1255), 'networkx.drawing.nx_agraph.graphviz_layout', 'graphviz_layout', (['G'], {}), '(G)\n', (1252, 1255), False, 'from networkx.drawing.nx_agraph import graphviz_layout\n'), ((1260, 1297), 'networkx.draw', 'nx.draw', (['G'], {'pos': 'pos', 'with_labels': '(True)'}), '(G, pos=pos, with_labels=True)\n', (1267, 1297), True, 'import networkx as nx\n'), ((1302, 1312), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1310, 1312), True, 'import matplotlib.pyplot as plt\n'), ((1628, 1672), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'helpmsg'}), '(description=helpmsg)\n', (1651, 1672), False, 'import argparse\n'), ((2011, 2037), 'os.path.splitext', 'os.path.splitext', (['pathname'], {}), '(pathname)\n', (2027, 2037), False, 'import os\n'), ((2109, 2130), 'pandas.read_csv', 'pd.read_csv', (['pathname'], {}), '(pathname)\n', (2120, 2130), True, 'import pandas as pd\n'), ((2186, 2209), 'pandas.read_excel', 'pd.read_excel', (['pathname'], {}), '(pathname)\n', (2199, 2209), True, 'import pandas as pd\n'), ((653, 683), 'numpy.where', 'np.where', (["(row_names == 'TOTAL')"], {}), "(row_names == 'TOTAL')\n", (661, 683), True, 'import numpy as np\n'), ((2293, 2315), 'pandas.read_html', 'pd.read_html', (['pathname'], {}), '(pathname)\n', (2305, 2315), True, 'import pandas as pd\n')] |
import os
import time
import random
import argparse
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import esm
# For DDP
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from transformers import AdamW, get_linear_schedule_with_warmup
from sklearn.metrics import confusion_matrix
from dataset import PPI_Dataset
from model import BaseClsModel
# Command-line interface for the distributed PPI evaluation script.
parser = argparse.ArgumentParser(description='PPI pretrain model method')
parser.add_argument('--test_path', default='./data_ppi/LenA400_LenB400/test_data.csv', help='data path')
parser.add_argument('--resume', default="experiments/Cls_wograd/0.ckpt", help='path to load your model')
parser.add_argument('--outdir', default='experiments/Cls_wograd', help='folder to save output')
parser.add_argument('--batchsize', default=2,type=int, help='batchsize')
# for ddp
# --local_rank is injected by torch.distributed.launch; -1 means "not launched via DDP".
parser.add_argument("--local_rank", default=-1, type=int)
args = parser.parse_args()
local_rank = args.local_rank
# ddp init
# Bind this process to its GPU before creating any CUDA tensors, then join the NCCL group.
torch.cuda.set_device(local_rank)
dist.init_process_group(backend='nccl')
def set_seed(seed_value=42):
    """Seed every RNG in use (python `random`, numpy, torch CPU and all CUDA
    devices) with the same value so runs are reproducible."""
    seeders = (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed_all,  # no-op on CPU-only builds
    )
    for seed_fn in seeders:
        seed_fn(seed_value)
set_seed()
# Build the evaluation dataset and a DDP-aware loader: the DistributedSampler
# gives each rank a disjoint shard of the test set.
dataset = PPI_Dataset(args.test_path)
test_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
test_dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batchsize, \
    num_workers=0, sampler=test_sampler)
# Model
model = BaseClsModel().to(local_rank)
ckpt_path = args.resume
# Pretrain_model
# Frozen ESM-1b encoder used only for feature extraction (no gradients).
pretrain_model, alphabet = esm.pretrained.esm1b_t33_650M_UR50S()
batch_converter = alphabet.get_batch_converter()
pretrain_model = pretrain_model.to(local_rank)
for param in pretrain_model.parameters():
    param.requires_grad = False
# load model
# Only rank 0 loads the checkpoint; DDP construction below broadcasts rank-0
# parameters to all other ranks, so every process ends up with the same weights.
if dist.get_rank() == 0 and ckpt_path is not None:
    model.load_state_dict(torch.load(ckpt_path))
model = DDP(model, device_ids=[local_rank], output_device=local_rank,find_unused_parameters=True)
# Specify loss function
loss_fn = nn.CrossEntropyLoss().to(local_rank)
def get_embeddings(pretrain_model, batch_tokens):
    """
    Run the pretrained encoder and average-pool its layer-33 token embeddings.

    :param pretrain_model: ESM-style model whose output dict contains
        {"representations": {33: tensor of shape [n, maxlen, 1280]}}
    :param batch_tokens: batch of token ids accepted by pretrain_model
    :return: tensor of shape [n, 1280] — mean over the sequence dimension
    """
    outputs = pretrain_model(batch_tokens, repr_layers=[33], return_contacts=True)
    per_token = outputs["representations"][33]
    return per_token.mean(dim=1)
def cal_confusion_matrix(y_true, y_pred):
    """
    Compute the normalized binary confusion matrix.

    Only entries whose true label is exactly 0 or 1 and whose predicted label
    is exactly 0 or 1 are counted (matching the original behavior of ignoring
    any other values).

    :param y_true: sequence of ground-truth labels (0/1)
    :param y_pred: sequence of predicted labels (0/1), same length as y_true
    :return: dict with keys 'TP', 'FP', 'TN', 'FN', each a fraction of len(y_true)
    :raises ValueError: if the inputs are empty or have different lengths
    """
    # Robustness fixes: the original raised ZeroDivisionError on empty input
    # and IndexError on mismatched lengths; fail early with a clear message.
    if len(y_true) != len(y_pred):
        raise ValueError('y_true and y_pred must have the same length')
    if len(y_true) == 0:
        raise ValueError('cannot compute a confusion matrix from empty inputs')

    counts = {'TP': 0, 'FP': 0, 'TN': 0, 'FN': 0}
    # Single pass with zip instead of index-based access; the four conditions
    # are mutually exclusive, so an elif chain is equivalent to the original
    # four independent ifs.
    for truth, pred in zip(y_true, y_pred):
        if truth == 1 and pred == 1:
            counts['TP'] += 1
        elif truth == 0 and pred == 1:
            counts['FP'] += 1
        elif truth == 0 and pred == 0:
            counts['TN'] += 1
        elif truth == 1 and pred == 0:
            counts['FN'] += 1

    n = len(y_true)
    return {k: v / n for k, v in counts.items()}
def evaluate(model, val_dataloader, loss_fn):
    """Evaluate the classifier on this rank's shard of val_dataloader and
    print the normalized confusion matrix.

    For each batch: tokenize the two protein sequences, embed them with the
    frozen pretrained encoder, build the [u, v, |u-v|] feature vector, score
    it with `model`, and accumulate predictions vs. labels.

    Relies on module globals: local_rank, batch_converter, pretrain_model.
    NOTE(review): each DDP rank prints metrics for its own shard only; results
    are not reduced across ranks.
    """
    model.eval()
    # Tracking variables
    samples_num = 0
    val_loss = []
    y_true = []
    y_pred = []
    for idx, input_data in tqdm(enumerate(val_dataloader)):
        prot_a = input_data['prot_a']
        prot_b = input_data['prot_b']
        labels = input_data['label'].to(local_rank)
        # Pair each protein id with its sequence, as expected by the ESM batch converter.
        data_1 = [(prot_a[i], input_data['seq_a'][i]) for i in range(len(prot_a))]
        data_2 = [(prot_b[i], input_data['seq_b'][i]) for i in range(len(prot_b))]
        _, _, batch_tokens1 = batch_converter(data_1)
        _, _, batch_tokens2 = batch_converter(data_2)
        batch_tokens1, batch_tokens2 = batch_tokens1.to(local_rank), batch_tokens2.to(local_rank)
        with torch.no_grad():
            u = get_embeddings(pretrain_model, batch_tokens1)
            v = get_embeddings(pretrain_model, batch_tokens2)
            # Symmetric pair representation: both embeddings plus their absolute difference.
            features = torch.cat([u, v, torch.abs(u - v)], dim=1)
            logits = model(features)
        # Compute loss
        loss = loss_fn(logits, labels)
        val_loss.append(loss.item())
        preds = torch.argmax(logits, dim=1).flatten()
        samples_num += len(preds)
        y_true.extend(list(labels.cpu().numpy()))
        y_pred.extend(list(preds.cpu().numpy()))
    res = cal_confusion_matrix(y_true,y_pred)
    print(res)
evaluate(model,test_dataloader,loss_fn)
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.argmax",
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.distributed.get_rank",
"torch.nn.parallel.DistributedDataParallel",
"torch.load",
"torch.utils.data.distributed.DistributedSampler",
"random.seed",
"torch.cuda.set_device",
"m... | [((444, 508), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PPI pretrain model method"""'}), "(description='PPI pretrain model method')\n", (467, 508), False, 'import argparse\n'), ((1033, 1066), 'torch.cuda.set_device', 'torch.cuda.set_device', (['local_rank'], {}), '(local_rank)\n', (1054, 1066), False, 'import torch\n'), ((1068, 1107), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (1091, 1107), True, 'import torch.distributed as dist\n'), ((1358, 1385), 'dataset.PPI_Dataset', 'PPI_Dataset', (['args.test_path'], {}), '(args.test_path)\n', (1369, 1385), False, 'from dataset import PPI_Dataset\n'), ((1402, 1458), 'torch.utils.data.distributed.DistributedSampler', 'torch.utils.data.distributed.DistributedSampler', (['dataset'], {}), '(dataset)\n', (1449, 1458), False, 'import torch\n'), ((1478, 1583), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'args.batchsize', 'num_workers': '(0)', 'sampler': 'test_sampler'}), '(dataset, batch_size=args.batchsize, num_workers\n =0, sampler=test_sampler)\n', (1505, 1583), False, 'import torch\n'), ((1749, 1786), 'esm.pretrained.esm1b_t33_650M_UR50S', 'esm.pretrained.esm1b_t33_650M_UR50S', ([], {}), '()\n', (1784, 1786), False, 'import esm\n'), ((2090, 2184), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['model'], {'device_ids': '[local_rank]', 'output_device': 'local_rank', 'find_unused_parameters': '(True)'}), '(model, device_ids=[local_rank], output_device=local_rank,\n find_unused_parameters=True)\n', (2093, 2184), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((1198, 1221), 'random.seed', 'random.seed', (['seed_value'], {}), '(seed_value)\n', (1209, 1221), False, 'import random\n'), ((1227, 1253), 'numpy.random.seed', 'np.random.seed', (['seed_value'], {}), '(seed_value)\n', (1241, 1253), True, 'import numpy as np\n'), ((1259, 
1288), 'torch.manual_seed', 'torch.manual_seed', (['seed_value'], {}), '(seed_value)\n', (1276, 1288), False, 'import torch\n'), ((1294, 1332), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed_value'], {}), '(seed_value)\n', (1320, 1332), False, 'import torch\n'), ((1648, 1662), 'model.BaseClsModel', 'BaseClsModel', ([], {}), '()\n', (1660, 1662), False, 'from model import BaseClsModel\n'), ((1981, 1996), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1994, 1996), True, 'import torch.distributed as dist\n'), ((2056, 2077), 'torch.load', 'torch.load', (['ckpt_path'], {}), '(ckpt_path)\n', (2066, 2077), False, 'import torch\n'), ((2218, 2239), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2237, 2239), True, 'import torch.nn as nn\n'), ((3964, 3979), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3977, 3979), False, 'import torch\n'), ((4333, 4360), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (4345, 4360), False, 'import torch\n'), ((4148, 4164), 'torch.abs', 'torch.abs', (['(u - v)'], {}), '(u - v)\n', (4157, 4164), False, 'import torch\n')] |
#from asyncio.windows_events import NULL
from audioop import add
from re import X
from turtle import title
from urllib import response
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
import datetime
pio.templates.default = "simple_white"
def check_conditions(X: pd.DataFrame) -> pd.DataFrame:
    """
    Drop rows with physically impossible house attributes.

    Removes rows where 'floors', 'sqft_lot' or 'bedrooms' are non-positive,
    or where 'sqft_living' is negative (a living area of exactly 0 is kept,
    matching the original filter).

    :param X: raw house-prices DataFrame
    :return: filtered copy of X (index of kept rows preserved)
    """
    # BUG FIX: the original annotation was `-> X`, which resolved to the
    # `re.X` regex flag imported at module level — corrected to pd.DataFrame.
    # The four sequential drops are combined into one boolean mask.
    invalid = (
        (X['floors'] <= 0)
        | (X['sqft_lot'] <= 0)
        | (X['bedrooms'] <= 0)
        | (X['sqft_living'] < 0)
    )
    return X.drop(X.index[invalid])
def load_data(filename: str):
    """
    Load the house-prices CSV and split it into features and target.

    Rows with missing values are dropped, then invalid rows are removed via
    check_conditions. Identifier/location columns and the target column are
    excluded from the design matrix.

    Parameters
    ----------
    filename: str
        Path to house prices dataset

    Returns
    -------
    Tuple of (design-matrix DataFrame, price Series)
    """
    frame = pd.read_csv(filename).dropna()
    frame = check_conditions(frame)
    prices = frame['price']
    features = frame.drop(columns=['id', 'date', 'price', 'lat', 'long', 'zipcode'])
    return (features, prices)
def feature_evaluation(X: pd.DataFrame, y: pd.Series, output_path: str = ".") -> NoReturn:
    """
    Create scatter plot between each feature and the response.
        - Plot title specifies feature name
        - Plot title specifies Pearson Correlation between feature and response
        - Plot saved under given folder with file name including feature name

    Parameters
    ----------
    X : DataFrame of shape (n_samples, n_features)
        Design matrix of regression problem

    y : array-like of shape (n_samples, )
        Response vector to evaluate against

    output_path: str (default ".")
        Path to folder in which plots are saved
    """
    for title in X.columns:
        # BUG FIX: the previous computation divided the *sample* covariance
        # (np.cov, ddof=1) by *population* standard deviations (np.std,
        # ddof=0), biasing the coefficient by a factor of n/(n-1).
        # np.corrcoef applies a consistent normalization.
        pears_corr = np.corrcoef(X[title], y)[0, 1]
        # NOTE(review): output_path is concatenated directly with the feature
        # name, so callers must pass a path ending in a separator.
        go.Figure([go.Scatter(x=X[title], y=y, mode='markers')],
            layout=go.Layout(title= f"The Conecction {title} - House Prices, Pearson Correlation {pears_corr}",
                    xaxis_title=f"{title}",
                    yaxis_title="house prices")).write_image(output_path+ title +".png")
if __name__ == '__main__':
    np.random.seed(0)
    # Question 1 - Load and preprocessing of housing prices dataset
    (df,y) = load_data('/home/ronyzerkavod/IML.HUJI/datasets/house_prices.csv')

    # Question 2 - Feature evaluation with respect to response
    feature_evaluation(df,y,'/home/ronyzerkavod/IML.HUJI/exercises/figures/')

    # Question 3 - Split samples into training- and testing sets.
    train_proportion = 0.75
    train_x,train_y,test_x,test_y = split_train_test(df,y,train_proportion)

    # Question 4 - Fit model over increasing percentages of the overall training data
    # For every percentage p in 10%, 11%, ..., 100%, repeat the following 10 times:
    #   1) Sample p% of the overall training data
    #   2) Fit linear model (including intercept) over sampled set
    #   3) Test fitted model over test set
    #   4) Store average and variance of loss over test set
    # Then plot average loss as function of training size with error ribbon of size (mean-2*std, mean+2*std)
    # 91 integer percentages: 10, 11, ..., 100.
    ms = np.linspace(10, 100, 91).astype(int)
    #train_x_size =len(train_x)
    all_mean = []
    all_std = []
    for i in ms:
        losses = []
        curr_perc = i/100
        # NOTE(review): the inner loop reuses the name `i`, shadowing the
        # outer percentage variable. Harmless here because curr_perc is
        # computed before the inner loop, but worth renaming.
        for i in range(10):
            # Draw a fresh random subsample of the training set each repetition.
            new_train_x = train_x.sample(frac = curr_perc)
            new_train_y = train_y.loc[new_train_x.index]
            new_train_y= np.array(new_train_y)
            new_train_x= np.array(new_train_x)
            linear_reg = LinearRegression()
            linear_reg._fit(new_train_x,new_train_y)
            curr_loss = linear_reg._loss(test_x.to_numpy(),test_y.to_numpy())
            losses.append(curr_loss)
        losses = np.array(losses)
        all_mean.append(losses.mean())
        all_std.append(losses.std())
    all_mean= np.array(all_mean)
    all_std = np.array(all_std)
    # Mean-loss curve with a +/- 2*std ribbon.
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=ms, y=all_mean, mode="markers+lines", name="Mean Loss", line=dict(dash="dash"), marker=dict(color="green", opacity=.7)))
    fig.add_trace(go.Scatter(x=ms, y=all_mean-2*all_std, fill=None, mode="lines", line=dict(color="lightgrey"), showlegend=False))
    fig.add_trace(go.Scatter(x=ms, y=all_mean+2*all_std, fill='tonexty', mode="lines", line=dict(color="lightgrey"), showlegend=False))
    fig.update_layout(title = "the Loss and variance of samples trained on",
        xaxis_title="the percentages taken of the train_set",
        yaxis_title="the MSE Losses")
    fig.show()
| [
"plotly.graph_objects.Scatter",
"numpy.random.seed",
"pandas.read_csv",
"plotly.graph_objects.Figure",
"numpy.std",
"IMLearn.utils.split_train_test",
"numpy.array",
"numpy.linspace",
"plotly.graph_objects.Layout",
"re.X.drop",
"numpy.cov",
"IMLearn.learners.regressors.LinearRegression"
] | [((497, 530), 're.X.drop', 'X.drop', (["X.index[X['floors'] <= 0]"], {}), "(X.index[X['floors'] <= 0])\n", (503, 530), False, 'from re import X\n'), ((539, 574), 're.X.drop', 'X.drop', (["X.index[X['sqft_lot'] <= 0]"], {}), "(X.index[X['sqft_lot'] <= 0])\n", (545, 574), False, 'from re import X\n'), ((583, 618), 're.X.drop', 'X.drop', (["X.index[X['bedrooms'] <= 0]"], {}), "(X.index[X['bedrooms'] <= 0])\n", (589, 618), False, 'from re import X\n'), ((627, 664), 're.X.drop', 'X.drop', (["X.index[X['sqft_living'] < 0]"], {}), "(X.index[X['sqft_living'] < 0])\n", (633, 664), False, 'from re import X\n'), ((1008, 1029), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (1019, 1029), True, 'import pandas as pd\n'), ((2387, 2404), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2401, 2404), True, 'import numpy as np\n'), ((2825, 2866), 'IMLearn.utils.split_train_test', 'split_train_test', (['df', 'y', 'train_proportion'], {}), '(df, y, train_proportion)\n', (2841, 2866), False, 'from IMLearn.utils import split_train_test\n'), ((4115, 4133), 'numpy.array', 'np.array', (['all_mean'], {}), '(all_mean)\n', (4123, 4133), True, 'import numpy as np\n'), ((4148, 4165), 'numpy.array', 'np.array', (['all_std'], {}), '(all_std)\n', (4156, 4165), True, 'import numpy as np\n'), ((4176, 4187), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (4185, 4187), True, 'import plotly.graph_objects as go\n'), ((4008, 4024), 'numpy.array', 'np.array', (['losses'], {}), '(losses)\n', (4016, 4024), True, 'import numpy as np\n'), ((3374, 3398), 'numpy.linspace', 'np.linspace', (['(10)', '(100)', '(91)'], {}), '(10, 100, 91)\n', (3385, 3398), True, 'import numpy as np\n'), ((3710, 3731), 'numpy.array', 'np.array', (['new_train_y'], {}), '(new_train_y)\n', (3718, 3731), True, 'import numpy as np\n'), ((3757, 3778), 'numpy.array', 'np.array', (['new_train_x'], {}), '(new_train_x)\n', (3765, 3778), True, 'import numpy as np\n'), ((3804, 3822), 
'IMLearn.learners.regressors.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3820, 3822), False, 'from IMLearn.learners.regressors import LinearRegression\n'), ((2019, 2035), 'numpy.std', 'np.std', (['X[title]'], {}), '(X[title])\n', (2025, 2035), True, 'import numpy as np\n'), ((2036, 2045), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (2042, 2045), True, 'import numpy as np\n'), ((1993, 2012), 'numpy.cov', 'np.cov', (['X[title]', 'y'], {}), '(X[title], y)\n', (1999, 2012), True, 'import numpy as np\n'), ((2066, 2109), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'X[title]', 'y': 'y', 'mode': '"""markers"""'}), "(x=X[title], y=y, mode='markers')\n", (2076, 2109), True, 'import plotly.graph_objects as go\n'), ((2129, 2281), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'title': 'f"""The Conecction {title} - House Prices, Pearson Correlation {pears_corr}"""', 'xaxis_title': 'f"""{title}"""', 'yaxis_title': '"""house prices"""'}), "(title=\n f'The Conecction {title} - House Prices, Pearson Correlation {pears_corr}',\n xaxis_title=f'{title}', yaxis_title='house prices')\n", (2138, 2281), True, 'import plotly.graph_objects as go\n')] |
import os
import argparse
import torch
from torchvision import transforms
from torch.utils.data import DataLoader
from dataset import EfficientdetDataset
from utils import Resizer, Normalizer, collater, iou, area
from utils import colors
import cv2
import shutil
from efficientdet.efficientdet import EfficientDet
from config import get_args_efficientdet
from tqdm import tqdm
import numpy as np
# Evaluation switches: writePIC dumps annotated prediction images, calIOU
# accumulates per-image IoU scores, calPR accumulates precision/recall
# counters, calAREA (int or None) keeps only the N largest predicted boxes.
writePIC = False
calIOU = False
calPR = True
calAREA = None
# Maps each fine-grained label index to a coarse category id — presumably a
# class-grouping table for the PR computation below; verify against the
# label file referenced in __main__.
C = np.array([0, 1, 2, 3, 3, 2, 3, 2, 3, 3, 4, 0, 2, 2, 2, 2, 0, 1, 0, 2, 3, 3, 2])
def test(opt):
    """Evaluate an EfficientDet checkpoint on the validation split.

    Depending on the module-level flags: computes per-image IoU scores
    (calIOU), precision/recall counters at IoU > 0.5 (calPR), and optionally
    writes prediction/ground-truth visualizations to opt.prediction_dir
    (writePIC). Metrics are printed, not returned.
    """
    opt.resume = True
    test_set = EfficientdetDataset(opt.data_path, mode='validation', transform=transforms.Compose([Normalizer(), Resizer()]), imgORvdo=opt.imgORvdo)
    opt.num_classes = test_set.num_classes
    opt.vocab_size = test_set.vocab_size
    opt.batch_size = 8
    test_params = {"batch_size": opt.batch_size,
                   "shuffle": False,
                   "drop_last": False,
                   "collate_fn": collater,
                   "num_workers": opt.workers}
    test_generator = DataLoader(test_set, **test_params)
    print(opt.network+'_'+opt.imgORvdo)
    model = EfficientDet(opt)
    model.load_state_dict(torch.load(os.path.join(opt.saved_path, opt.network+'_'+opt.imgORvdo+'.pth')))
    model.cuda()
    # model.set_is_training(False)
    model.eval()
    # Recreate the output directory from scratch when dumping images.
    if writePIC:
        if os.path.isdir(opt.prediction_dir):
            shutil.rmtree(opt.prediction_dir)
        os.makedirs(opt.prediction_dir)
    progress_bar = tqdm(test_generator)
    progress_bar.set_description_str(' Evaluating')
    # Metric accumulators: *_ins counters track the instance flag (column 5
    # of the annotation / column 10 of the prediction row).
    IoU_scores = []
    N_TP = 0
    N_P = 0
    N_GT = 0
    N_TP_iou = 0
    N_GT_ins = 0
    N_P_ins = 0
    N_TP_ins = 0
    for i, data in enumerate(progress_bar):
        scale = data['scale']
        with torch.no_grad():
            output_list = model([data['img'].cuda().float(), data['text'].cuda()])
            # output_list = model(data['img'].cuda().float())
        for j, output in enumerate(output_list):
            imgPath = test_set.getImagePath(i*opt.batch_size+j)
            scores, labels, instances, all_labels, boxes = output
            # scores, labels, all_labels, boxes = output
            # print(instances)
            # Drop padded annotation rows (label -1).
            annot = data['annot'][j]
            annot = annot[annot[:, 4]!=-1]
            # print(labels, torch.argsort(-all_labels, dim=1))
            top5_label = torch.argsort(-all_labels, dim=1)[:, :5]
            # Prediction row layout: [score, top5 labels (1:6), box (6:10), instance (10)].
            cat = torch.cat([scores.view(-1, 1), top5_label.float(), boxes, instances], dim=1).cpu()
            # cat = torch.cat([scores.view(-1, 1), top5_label.float(), boxes], dim=1).cpu()
            cat = cat[cat[:, 0]>=opt.cls_threshold]
            if calAREA is not None:
                # Keep only the calAREA largest boxes by area.
                areas = area(cat[:, 6:10])
                area_arg = np.argsort(-areas)
                cat = cat[area_arg[:calAREA]]
            # print(scores.size(), labels.size(), boxes.size(), annot.size())
            if calIOU:
                # Degenerate cases: no predictions and/or no ground truth.
                if boxes.shape[0] == 0:
                    if annot.size(0) == 0:
                        IoU_scores.append(1.0)
                    else:
                        IoU_scores.append(0.0)
                    continue
                if annot.size(0) == 0:
                    IoU_scores.append(0.0)
                else:
                    # Per ground-truth class: IoU of all predictions whose
                    # top-5 labels contain that class against its GT boxes.
                    classes = set(annot[:, 4].tolist())
                    iou_score = []
                    for c in classes:
                        box = []
                        for item in cat:
                            if c in item[1:6]:
                                box.append(item[6:10])
                        if len(box) == 0:
                            iou_score.append(0.0)
                            continue
                        box = torch.stack(box, dim=0)
                        tgt = annot[annot[:, 4]==c][:, :4]
                        iou_s = iou(box, tgt)
                        iou_score.append(iou_s.cpu().numpy())
                    # Penalize predictions whose top-5 labels share no class with GT.
                    classes_pre = cat[:, 1:6].tolist()
                    for c in classes_pre:
                        if len(set(c) & set(classes)) == 0:
                            iou_score.append(0)
                    IoU_scores.append(sum(iou_score)/len(iou_score))
                    # print(IoU_scores)
            if calPR:
                N_P += cat.size(0)
                N_GT += annot.size(0)
                N_GT_ins += int((annot[:, 5] == 1).sum())
                # if len(cat) == 1:
                #     N_P_ins += 1
                #     N_TP_ins += 1
                # else:
                N_P_ins += int((cat[:, 10] == 1).sum())
                # print(cat[:, 10], annot[:, 5])
                # print(N_GT_ins)
                # Count a true positive when a prediction/GT pair overlaps
                # with IoU > 0.5 and their coarse categories (via C) match.
                for pre in cat:
                    for gt in annot:
                        s = iou(pre[6:10].unsqueeze(0), gt[:4].unsqueeze(0))
                        if s > 0.5:
                            N_TP_iou += 1
                            if C[int(gt[4])] in C[list(map(int, pre[1:3]))]:
                                N_TP += 1
                            # if len(cat) != 1:
                            if gt[5] == pre[10] and gt[5] == 1:
                                N_TP_ins += 1
            if writePIC:
                annot_labels = annot[:, 4].clone()
                annot_instance = annot[:, 5].clone()
                # Undo the Resizer scaling so boxes line up with the original image.
                annot /= scale[j]
                boxes /= scale[j]
                output_image = cv2.imread(os.path.join(opt.data_path, imgPath))
                # print(annot, os.path.join(opt.data_path, imgPath))
                for box_id in range(boxes.shape[0]):
                    pred_prob = float(scores[box_id])
                    # Scores are sorted, so stop at the first one below threshold.
                    if pred_prob < opt.cls_threshold:
                        break
                    # pred_label = int(top5_label[box_id][0])
                    pred_label = int(instances[box_id, 0])
                    xmin, ymin, xmax, ymax = boxes[box_id, :]
                    color = colors[pred_label]
                    cv2.rectangle(output_image, (xmin, ymin), (xmax, ymax), color, 2)
                    text_size = cv2.getTextSize('p: {}'.format(pred_label) + ' : %.2f' % pred_prob, cv2.FONT_HERSHEY_PLAIN, 3, 2)[0]
                    cv2.rectangle(output_image, (xmin, ymin), (xmin + text_size[0] + 3, ymin + text_size[1] + 4), color, 1)
                    cv2.putText(
                        output_image, 'p: {}'.format(pred_label) + ' : %.2f' % pred_prob,
                        (xmin, ymin + text_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1,
                        color, 2)
                for box_id in range(annot.size(0)):
                    # true_label = int(annot_labels[box_id])
                    true_label = annot_instance[box_id]
                    xmin, ymin, xmax, ymax = annot[box_id, :4]
                    cv2.rectangle(output_image, (xmin, ymin), (xmax, ymax), (255, 0, 0), 2)
                    text_size = cv2.getTextSize('g: {}'.format(true_label), cv2.FONT_HERSHEY_PLAIN, 3, 2)[0]
                    cv2.rectangle(output_image, (xmin, ymax), (xmin + text_size[0] + 3, ymax - text_size[1] + 4), (255, 0, 0), 1)
                    cv2.putText(
                        output_image, 'g: {}'.format(true_label),
                        (xmin, ymax - text_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1,
                        (255, 0, 0), 2)
                cv2.imwrite("{}/{}_prediction.jpg".format(opt.prediction_dir, imgPath.split('/')[-1][:-4]), output_image)
    if calIOU:
        # NOTE(review): the format string below is missing '{}' — this always
        # prints 'IoU is ' without the value.
        print('IoU is '.format(sum(IoU_scores)/len(IoU_scores)))
    if calPR:
        print('*'*100)
        print('bbox 识别率:')
        print('N_P: {}\tN_GT: {}\tN_TP_iou: {}\tN_TP: {}'.format(N_P, N_GT, N_TP_iou, N_TP))
        print('精确率: {}\t召回率: {}\t分类准确率: {}'.format(N_TP/N_P, N_TP/N_GT, N_TP/N_TP_iou))
        print('*'*100)
        print('instance 识别率:')
        print('N_P_ins: {}\tN_GT_ins: {}\tN_TP_ins: {}'.format(N_P_ins, N_GT_ins, N_TP_ins))
        print('精确率: {}\t召回率: {}'.format(N_TP_ins/N_P_ins, N_TP_ins/N_GT_ins))
        print('*'*100)
if __name__ == "__main__":
opt = get_args_efficientdet()
test(opt)
# import json
# with open('data/label.json', 'r') as f:
# d = json.load(f)
# print(d['index2label'])
# C = [0, 1, 2, 3, 3, 2, 3, 2, 3, 3, 4, 0, 2, 2, 2, 2, 0, 1, 0, 2, 3, 3, 2]
# print(len(C))
| [
"config.get_args_efficientdet",
"utils.iou",
"tqdm.tqdm",
"os.makedirs",
"torch.utils.data.DataLoader",
"torch.stack",
"os.path.isdir",
"torch.argsort",
"utils.Resizer",
"numpy.argsort",
"utils.area",
"numpy.array",
"utils.Normalizer",
"cv2.rectangle",
"shutil.rmtree",
"torch.no_grad",... | [((462, 541), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 3, 2, 3, 2, 3, 3, 4, 0, 2, 2, 2, 2, 0, 1, 0, 2, 3, 3, 2]'], {}), '([0, 1, 2, 3, 3, 2, 3, 2, 3, 3, 4, 0, 2, 2, 2, 2, 0, 1, 0, 2, 3, 3, 2])\n', (470, 541), True, 'import numpy as np\n'), ((1072, 1107), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {}), '(test_set, **test_params)\n', (1082, 1107), False, 'from torch.utils.data import DataLoader\n'), ((1165, 1182), 'efficientdet.efficientdet.EfficientDet', 'EfficientDet', (['opt'], {}), '(opt)\n', (1177, 1182), False, 'from efficientdet.efficientdet import EfficientDet\n'), ((1531, 1551), 'tqdm.tqdm', 'tqdm', (['test_generator'], {}), '(test_generator)\n', (1535, 1551), False, 'from tqdm import tqdm\n'), ((8106, 8129), 'config.get_args_efficientdet', 'get_args_efficientdet', ([], {}), '()\n', (8127, 8129), False, 'from config import get_args_efficientdet\n'), ((1386, 1419), 'os.path.isdir', 'os.path.isdir', (['opt.prediction_dir'], {}), '(opt.prediction_dir)\n', (1399, 1419), False, 'import os\n'), ((1475, 1506), 'os.makedirs', 'os.makedirs', (['opt.prediction_dir'], {}), '(opt.prediction_dir)\n', (1486, 1506), False, 'import os\n'), ((1220, 1291), 'os.path.join', 'os.path.join', (['opt.saved_path', "(opt.network + '_' + opt.imgORvdo + '.pth')"], {}), "(opt.saved_path, opt.network + '_' + opt.imgORvdo + '.pth')\n", (1232, 1291), False, 'import os\n'), ((1433, 1466), 'shutil.rmtree', 'shutil.rmtree', (['opt.prediction_dir'], {}), '(opt.prediction_dir)\n', (1446, 1466), False, 'import shutil\n'), ((1816, 1831), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1829, 1831), False, 'import torch\n'), ((2422, 2455), 'torch.argsort', 'torch.argsort', (['(-all_labels)'], {'dim': '(1)'}), '(-all_labels, dim=1)\n', (2435, 2455), False, 'import torch\n'), ((2781, 2799), 'utils.area', 'area', (['cat[:, 6:10]'], {}), '(cat[:, 6:10])\n', (2785, 2799), False, 'from utils import Resizer, Normalizer, collater, iou, area\n'), ((2827, 2845), 
'numpy.argsort', 'np.argsort', (['(-areas)'], {}), '(-areas)\n', (2837, 2845), True, 'import numpy as np\n'), ((679, 691), 'utils.Normalizer', 'Normalizer', ([], {}), '()\n', (689, 691), False, 'from utils import Resizer, Normalizer, collater, iou, area\n'), ((693, 702), 'utils.Resizer', 'Resizer', ([], {}), '()\n', (700, 702), False, 'from utils import Resizer, Normalizer, collater, iou, area\n'), ((5469, 5505), 'os.path.join', 'os.path.join', (['opt.data_path', 'imgPath'], {}), '(opt.data_path, imgPath)\n', (5481, 5505), False, 'import os\n'), ((6017, 6082), 'cv2.rectangle', 'cv2.rectangle', (['output_image', '(xmin, ymin)', '(xmax, ymax)', 'color', '(2)'], {}), '(output_image, (xmin, ymin), (xmax, ymax), color, 2)\n', (6030, 6082), False, 'import cv2\n'), ((6237, 6344), 'cv2.rectangle', 'cv2.rectangle', (['output_image', '(xmin, ymin)', '(xmin + text_size[0] + 3, ymin + text_size[1] + 4)', 'color', '(1)'], {}), '(output_image, (xmin, ymin), (xmin + text_size[0] + 3, ymin +\n text_size[1] + 4), color, 1)\n', (6250, 6344), False, 'import cv2\n'), ((6834, 6905), 'cv2.rectangle', 'cv2.rectangle', (['output_image', '(xmin, ymin)', '(xmax, ymax)', '(255, 0, 0)', '(2)'], {}), '(output_image, (xmin, ymin), (xmax, ymax), (255, 0, 0), 2)\n', (6847, 6905), False, 'import cv2\n'), ((7035, 7148), 'cv2.rectangle', 'cv2.rectangle', (['output_image', '(xmin, ymax)', '(xmin + text_size[0] + 3, ymax - text_size[1] + 4)', '(255, 0, 0)', '(1)'], {}), '(output_image, (xmin, ymax), (xmin + text_size[0] + 3, ymax -\n text_size[1] + 4), (255, 0, 0), 1)\n', (7048, 7148), False, 'import cv2\n'), ((3794, 3817), 'torch.stack', 'torch.stack', (['box'], {'dim': '(0)'}), '(box, dim=0)\n', (3805, 3817), False, 'import torch\n'), ((3909, 3922), 'utils.iou', 'iou', (['box', 'tgt'], {}), '(box, tgt)\n', (3912, 3922), False, 'from utils import Resizer, Normalizer, collater, iou, area\n')] |
"""
Functions to generate learning curves.
Records performance (error or score) vs training set size.
TODO: move utils.calc_scores to a more local function.
"""
import os
import sys
from pathlib import Path
from collections import OrderedDict
import sklearn
import numpy as np
import pandas as pd
import matplotlib
# matplotlib.use('TkAgg')
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_validate
from sklearn.model_selection import train_test_split
from sklearn.model_selection import ShuffleSplit, KFold
from sklearn.model_selection import GroupShuffleSplit, GroupKFold
from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold
from pandas.api.types import is_string_dtype
from sklearn.preprocessing import LabelEncoder
from sklearn.externals import joblib
import keras
from keras.models import load_model
from keras.callbacks import ModelCheckpoint, CSVLogger, ReduceLROnPlateau, EarlyStopping, TensorBoard
from keras.utils import plot_model
from scipy import optimize
# Utils
import ml_models
class LearningCurve():
"""
Train estimator using multiple train set sizes and generate learning curves for multiple metrics.
The CV splitter splits the input dataset into cv_folds data subsets.
Examples:
cv = sklearn.model_selection.KFold(n_splits=5, shuffle=False, random_state=0)
lrn_curve.my_learning_curve(X=xdata, Y=ydata, mltype='reg', cv=cv, n_shards=5)
"""
def __init__(self,
X, Y,
cv=5,
cv_lists=None,
shard_frac=[],
n_shards: int=5,
shard_step_scale: str='log2',
args=None,
logger=None,
outdir='./'):
"""
Args:
X : array-like (pd.DataFrame or np.ndarray)
Y : array-like (pd.DataFrame or np.ndarray)
cv : (optional) number of cv folds (int) or sklearn cv splitter --> scikit-learn.org/stable/glossary.html#term-cv-splitter
cv_lists : tuple of 2 dicts, cv_lists[0] and cv_lists[1], that contain the tr and vl folds, respectively
shard_frac : list of relative numbers of training samples that are used to generate learning curves
e.g., shard_frac=[0.1, 0.2, 0.4, 0.7, 1.0].
If this arg is not provided, then the training shards are generated from n_shards and shard_step_scale.
n_shards : number of dataset splits in the learning curve (used if shard_frac is None)
shard_step_scale : if n_shards is provided, this will generate a list of training set sizes with steps
specified by this arg. Available values: 'linear', 'log2', 'log10', 'log'.
e.g., if n_shards=5 and shard_step_scale='linear', then it generates ...
args : command line args
"""
self.X = pd.DataFrame(X).values
self.Y = pd.DataFrame(Y).values
self.cv = cv
self.cv_lists = cv_lists
self.n_shards = n_shards
self.shard_step_scale = shard_step_scale
self.shard_frac = shard_frac
self.args = args
self.logger = logger
self.outdir = Path(outdir)
self.create_fold_dcts()
self.create_tr_shards_list()
def create_fold_dcts(self):
""" Returns a tuple of two dicts (tr_dct, vl_dct) that contain the splits of all the folds. """
tr_dct = {}
vl_dct = {}
# Use lists passed as input arg
if self.cv_lists is not None:
tr_id = self.cv_lists[0]
vl_id = self.cv_lists[1]
assert tr_id.shape[1] == vl_id.shape[1], 'tr and vl must have the same number of folds.'
self.cv_folds = tr_id.shape[1]
# Calc the split ratio if cv=1
if self.cv_folds == 1:
self.vl_size = vl_id.shape[0]/(vl_id.shape[0] + tr_id.shape[0])
for fold in range(tr_id.shape[1]):
tr_dct[fold] = tr_id.iloc[:, fold].dropna().values.astype(int).tolist()
vl_dct[fold] = vl_id.iloc[:, fold].dropna().values.astype(int).tolist()
# Generate folds on the fly if no pre-defined folds were passed
else:
if isinstance(self.cv, int):
self.cv_folds = self.cv
self.cv = KFold(n_splits=self.cv_folds, shuffle=False, random_state=self.random_state)
else:
# cv is sklearn splitter
self.cv_folds = cv.get_n_splits()
if cv_folds == 1:
self.vl_size = cv.test_size
# Create sklearn splitter
if self.mltype == 'cls':
if self.Y.ndim > 1 and self.Y.shape[1] > 1:
splitter = self.cv.split(self.X, np.argmax(self.Y, axis=1))
else:
splitter = self.cv.split(self.X, self.Y)
# Generate the splits
for fold, (tr_vec, vl_vec) in enumerate(splitter):
tr_dct[fold] = tr_vec
vl_dct[fold] = vl_vec
self.tr_dct = tr_dct
self.vl_dct = vl_dct
    def create_tr_shards_list(self):
        """ Generate the list of training shard sizes (self.tr_shards).

        NOTE(review): the shard sizes derived from self.shard_frac below are
        immediately overwritten by the 'New (fixed) spacing' section, which
        produces powers of 2 up to self.max_samples plus self.max_samples
        itself — the fraction-based computation is currently dead code.
        """
        if len(self.shard_frac)==0:
            # if any( [self.shard_step_scale.lower()==s for s in ['lin', 'linear']] ):
            scale = self.shard_step_scale.lower()
            if scale == 'linear':
                self.shard_frac = np.linspace(0.1, 1.0, self.n_shards)
            else:
                # NOTE(review): `base` is only assigned for 'log2'/'log10';
                # any other scale raises NameError at np.logspace below.
                if scale == 'log2':
                    base = 2
                elif scale == 'log10':
                    base = 10
                # In np.logspace the sequence starts at base ** start
                # self.shard_frac = np.logspace(start=0.0, stop=1.0, num=self.n_shards, endpoint=True, base=base)/base
                # shard_frac_small = list(np.logspace(start=0.0, stop=1.0, num=2*self.n_shards, endpoint=True, base=base)/(self.X.shape[0]/10))
                # Extra small fractions covering 10 samples up to 10% of the data.
                shard_frac_low_range = list(np.linspace(start=10, stop=int(0.1*self.X.shape[0]), num=2*self.n_shards, endpoint=False)/self.X.shape[0])
                shard_frac = list(np.logspace(start=0.0, stop=1.0, num=self.n_shards, endpoint=True, base=base)/base)
                shard_frac.extend(shard_frac_low_range)
                self.shard_frac = np.array( sorted(list(set(shard_frac))) )
            if self.logger: self.logger.info(f'Shard step spacing: {self.shard_step_scale}.')

        # Fraction-based shard sizes (superseded below — see docstring note).
        if self.cv_folds == 1:
            self.tr_shards = [int(n) for n in (1-self.vl_size) * self.X.shape[0] * self.shard_frac if n>0]
        else:
            self.tr_shards = [int(n) for n in (self.cv_folds-1)/self.cv_folds * self.X.shape[0] * self.shard_frac if n>0]

        # --------------------------------------------
        # New (fixed) spacing
        # Maximum number of training samples available per fold.
        if self.cv_folds == 1:
            self.max_samples = int((1-self.vl_size) * self.X.shape[0])
        else:
            self.max_samples = int((self.cv_folds-1)/self.cv_folds * self.X.shape[0])
        # TODO need to add self.max_samples to the training vector
        # Powers of 2 (2, 4, 8, ...), truncated at max_samples, then
        # max_samples appended as the final shard.
        v = 2 ** np.array(np.arange(30))[1:]
        idx = np.argmin( np.abs( v - self.max_samples ) )
        if v[idx] > self.max_samples: idx -= 1
        v = list(v[:idx+1])
        v.append(self.max_samples)
        self.tr_shards = v
        # --------------------------------------------

        # --------------------------------------------
        # Temporary to finish the run
        # self.tr_shards = [131072, 228003]
        # self.tr_shards = [65536, 131072, 228003]
        # --------------------------------------------
        if self.logger is not None: self.logger.info('Train shards: {}\n'.format(self.tr_shards))
def trn_learning_curve(self,
        framework: str='lightgbm',
        mltype: str='reg',
        model_name: str='lgb_reg',  # TODO! this is redundent
        init_kwargs: dict=None,
        fit_kwargs: dict=None,
        clr_keras_kwargs: dict=None,
        metrics: list=['r2', 'neg_mean_absolute_error', 'neg_median_absolute_error', 'neg_mean_squared_error'],
        n_jobs: int=4,
        random_state: int=None,
        plot=True):
    """Generate learning-curve scores by training on progressively larger
    training shards (self.tr_shards) within each CV fold.

    Args:
        framework : ml framework (keras, lightgbm, or sklearn)
        mltype : type of ml problem (reg or cls)
        init_kwargs : dict of parameters that initialize the estimator
        fit_kwargs : dict of parameters to the estimator's fit() method
        clr_keras_kwargs : cyclical-learning-rate kwargs for the keras callback
        metrics : allow to pass a string of metrics TODO!
        plot : if True, dump learning-curve plots into self.outdir

    Returns:
        scores_df : table of train and val scores for every
            (fold, train size, metric) combination.
    """
    self.framework = framework
    self.mltype = mltype
    self.model_name = model_name
    # Normalize None -> {} instead of using mutable default arguments.
    self.init_kwargs = {} if init_kwargs is None else init_kwargs
    self.fit_kwargs = {} if fit_kwargs is None else fit_kwargs
    self.clr_keras_kwargs = {} if clr_keras_kwargs is None else clr_keras_kwargs
    self.metrics = metrics
    self.n_jobs = n_jobs
    self.random_state = random_state

    # Nested loop over CV folds (outer) and train-set sizes (inner)
    tr_scores_all = []  # list of dicts, one per (fold, shard), train set
    vl_scores_all = []  # list of dicts, one per (fold, shard), val set

    # CV loop
    for fold, (tr_k, vl_k) in enumerate(zip(self.tr_dct.keys(), self.vl_dct.keys())):
        if self.logger is not None:
            self.logger.info(f'Fold {fold+1}/{self.cv_folds}')
        tr_id = self.tr_dct[tr_k]
        vl_id = self.vl_dct[vl_k]

        # Samples from this dataset are randomly sampled for training
        xtr = self.X[tr_id, :]
        ytr = self.Y[tr_id, :]
        # A fixed set of validation samples for the current CV split
        xvl = self.X[vl_id, :]
        yvl = np.squeeze(self.Y[vl_id, :])

        # Shards loop (iterate across the dataset sizes and train)
        idx = np.arange(len(xtr))
        for i, tr_sz in enumerate(self.tr_shards):
            # For each shard: train model, save best model, calc tr/vl scores
            if self.logger:
                self.logger.info(f'\tTrain size: {tr_sz} ({i+1}/{len(self.tr_shards)})')

            # Sequentially get a subset of samples (the input dataset X must be shuffled)
            xtr_sub = xtr[idx[:tr_sz], :]
            ytr_sub = np.squeeze(ytr[idx[:tr_sz], :])

            # Get the estimator
            estimator = ml_models.get_model(self.model_name, init_kwargs=self.init_kwargs)
            model = estimator.model

            # A small slice of the val set is used for early stopping.
            self.eval_frac = 0.1
            eval_samples = int(self.eval_frac * xvl.shape[0])
            eval_set = (xvl[:eval_samples, :], yvl[:eval_samples])

            if self.framework == 'lightgbm':
                model, trn_outdir = self.trn_lgbm_model(model=model, xtr_sub=xtr_sub, ytr_sub=ytr_sub, fold=fold, tr_sz=tr_sz, eval_set=eval_set)
            elif self.framework == 'keras':
                model, trn_outdir = self.trn_keras_model(model=model, xtr_sub=xtr_sub, ytr_sub=ytr_sub, fold=fold, tr_sz=tr_sz, eval_set=eval_set)
            elif self.framework == 'pytorch':
                # BUGFIX: was a silent `pass` that later used an unbound/stale
                # trn_outdir and scored an untrained model.
                raise NotImplementedError('pytorch training is not implemented yet.')
            else:
                raise ValueError(f'framework {self.framework} is not supported.')

            # Calc preds and scores TODO: dump preds
            # ... training set
            y_pred, y_true = calc_preds(model, x=xtr_sub, y=ytr_sub, mltype=self.mltype)
            tr_scores = calc_scores(y_true=y_true, y_pred=y_pred, mltype=self.mltype, metrics=None)
            # ... val set
            y_pred, y_true = calc_preds(model, x=xvl, y=yvl, mltype=self.mltype)
            vl_scores = calc_scores(y_true=y_true, y_pred=y_pred, mltype=self.mltype, metrics=None)
            del estimator, model

            # Add metadata
            tr_scores['tr_set'] = True
            tr_scores['fold'] = 'fold' + str(fold)
            tr_scores['tr_size'] = tr_sz
            vl_scores['tr_set'] = False
            vl_scores['fold'] = 'fold' + str(fold)
            vl_scores['tr_size'] = tr_sz

            # Append scores (dicts)
            tr_scores_all.append(tr_scores)
            vl_scores_all.append(vl_scores)

            # Dump intermediate (per-shard) scores
            scores_tmp = pd.concat([scores_to_df(tr_scores_all), scores_to_df(vl_scores_all)], axis=0)
            scores_tmp.to_csv(trn_outdir / 'tmp_scores.csv', index=False)
            del trn_outdir, scores_tmp  # BUGFIX: was `del trn_outdir, tmp_scores` (NameError)

        # Dump intermediate results (useful if the run terminates before it ends)
        scores_all_df_tmp = pd.concat([scores_to_df(tr_scores_all), scores_to_df(vl_scores_all)], axis=0)
        scores_all_df_tmp.to_csv(self.outdir / ('_lrn_crv_scores_cv' + str(fold+1) + '.csv'), index=False)

    # Scores to df
    tr_scores_df = scores_to_df(tr_scores_all)
    vl_scores_df = scores_to_df(vl_scores_all)
    scores_df = pd.concat([tr_scores_df, vl_scores_df], axis=0)

    # Dump final results
    tr_scores_df.to_csv(self.outdir / 'tr_lrn_crv_scores.csv', index=False)
    vl_scores_df.to_csv(self.outdir / 'vl_lrn_crv_scores.csv', index=False)
    scores_df.to_csv(self.outdir / 'lrn_crv_scores.csv', index=False)

    # Plot learning curves
    if plot:
        plot_lrn_crv_all_metrics(scores_df, outdir=self.outdir)
    return scores_df
def trn_keras_model(self, model, xtr_sub, ytr_sub, fold, tr_sz, eval_set=None):
    """Train a keras model on one training shard and save its artifacts.

    Writes the architecture diagram, training history, and metric plots into a
    per-shard output dir, then reloads and returns the checkpointed best model.
    """
    keras.utils.plot_model(model, to_file=self.outdir/'nn_model.png')
    # Per-shard output dir, e.g. cv1_sz200
    trn_outdir = self.outdir / f'cv{fold + 1}_sz{tr_sz}'
    os.makedirs(trn_outdir, exist_ok=False)
    # Keras callbacks (checkpoint/logging/LR schedule), optionally with CLR
    callbacks = define_keras_callbacks(trn_outdir)
    if self.clr_keras_kwargs['mode'] is not None:
        callbacks.append(ml_models.clr_keras_callback(**self.clr_keras_kwargs))
    # Fit params
    fit_kwargs = self.fit_kwargs
    fit_kwargs.update(validation_data=eval_set, callbacks=callbacks)
    # Train model and persist history/plots
    history = model.fit(xtr_sub, ytr_sub, **fit_kwargs)
    ml_models.save_krs_history(history, outdir=trn_outdir)
    ml_models.plot_prfrm_metrics(history, title=f'Train size: {tr_sz}', skp_ep=20, add_lr=True, outdir=trn_outdir)
    # Load the best model (https://github.com/keras-team/keras/issues/5916)
    best_model = keras.models.load_model(str(trn_outdir/'model_best.h5'))
    return best_model, trn_outdir
def trn_lgbm_model(self, model, xtr_sub, ytr_sub, fold, tr_sz, eval_set=None):
    """Train and save a LightGBM model for one (fold, train size) combination.

    Returns the fitted model and the per-shard output directory the pickled
    model was written to.
    """
    # Per-shard output dir, e.g. cv1_sz200
    trn_outdir = self.outdir / f'cv{fold + 1}_sz{tr_sz}'
    os.makedirs(trn_outdir, exist_ok=True)
    # Early stopping against the provided eval subset
    fit_kwargs = self.fit_kwargs
    fit_kwargs.update(eval_set=eval_set, early_stopping_rounds=10)
    # Train and save model
    model.fit(xtr_sub, ytr_sub, **fit_kwargs)
    joblib.dump(model, filename=trn_outdir / ('model.' + self.model_name + '.pkl'))
    return model, trn_outdir
def define_keras_callbacks(outdir):
    """Build the standard keras callback list for one training run.

    Checkpoints the best model, logs metrics to CSV, stops early on a stalled
    val_loss, and decays the learning rate on plateaus.
    """
    return [
        ModelCheckpoint(str(outdir/'model_best.h5'), verbose=0, save_weights_only=False, save_best_only=True),
        CSVLogger(outdir/'training.log'),
        # early stopping patience was reduced from 100 to 60 epochs
        EarlyStopping(monitor='val_loss', patience=60, verbose=1),
        ReduceLROnPlateau(monitor='val_loss', factor=0.75, patience=20, verbose=1, mode='auto',
                          min_delta=0.0001, cooldown=3, min_lr=0.000000001),
    ]
def plot_lrn_crv_all_metrics(df, outdir:Path, figsize=(7,5), xtick_scale='linear', ytick_scale='linear'):
    """Take the full table of scores across folds and train set sizes and save
    one learning-curve plot per metric into `outdir`.

    Args:
        df : contains train and val scores for cv folds; one row per
            (metric, tr_size, tr_set) with one column per fold, e.g.
            metric | tr_set | tr_size | fold0 | fold1 | ...
        outdir : dir to save plots
    """
    shard_sizes = sorted(df['tr_size'].unique())
    for metric_name in df['metric'].unique():
        sub = df[df['metric'] == metric_name].reset_index(drop=True)
        sub.sort_values('tr_size', inplace=True)
        # Keep only the per-fold score columns, split by train/val rows
        fold_cols = [c for c in sub.columns if 'fold' in c]
        tr_vals = sub.loc[sub['tr_set'] == True, fold_cols].values
        vl_vals = sub.loc[sub['tr_set'] == False, fold_cols].values
        # rslt mirrors sklearn's learning_curve() output layout
        rslt = [shard_sizes,
                tr_vals if tr_vals.shape[0] > 0 else None,
                vl_vals if vl_vals.shape[0] > 0 else None]
        plot_lrn_crv(rslt=rslt, metric_name=metric_name, figsize=figsize,
                     xtick_scale=xtick_scale, ytick_scale=ytick_scale,
                     title='Learning curve',
                     path=outdir / ('lrn_crv_' + metric_name + '.png'))
def scale_ticks_params(tick_scale='linear'):
    """Map a tick-scale name to (base, label) for learning-curve plots.

    Args:
        tick_scale : one of 'linear', 'log2', 'log10'
    Returns:
        (base, label_scale) : the log base (None for linear) and a
            human-readable scale label for the axis.
    Raises:
        ValueError : if tick_scale is not a supported value.
    """
    if tick_scale == 'linear':
        return None, 'Linear scale'
    if tick_scale == 'log2':
        return 2, 'Log2 scale'
    if tick_scale == 'log10':
        return 10, 'Log10 scale'
    raise ValueError('The specified tick scale is not supported.')
def plot_lrn_crv(rslt:list, metric_name:str='score',
         xtick_scale:str='log2', ytick_scale:str='log2',
         xlim:list=None, ylim:list=None, title:str=None, path:Path=None,
         figsize=(7,5), ax=None):
    """Plot a learning curve (score vs train set size), mean +/- std over folds.

    Args:
        rslt : output from sklearn.model_selection.learning_curve()
            rslt[0] : 1-D array (n_ticks, ) -> vector of train set sizes
            rslt[1] : 2-D array (n_ticks, n_cv_folds) -> tr scores (or None)
            rslt[2] : 2-D array (n_ticks, n_cv_folds) -> vl scores (or None)
        path : if given, the figure is saved to this path.
    Returns:
        ax : the matplotlib axes the curves were drawn on.
    """
    tr_shards = rslt[0]
    tr_scores = rslt[1]
    vl_scores = rslt[2]

    def plot_single_crv(tr_shards, scores, ax, phase, color=None):
        # Mean curve with a +/- 1 std band across CV folds.
        scores_mean = np.mean(scores, axis=1)
        scores_std = np.std(scores, axis=1)
        ax.plot(tr_shards, scores_mean, '.-', color=color, label=f'{phase} score')
        ax.fill_between(tr_shards, scores_mean - scores_std, scores_mean + scores_std, alpha=0.1, color=color)

    # Plot learning curves
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    if tr_scores is not None:
        plot_single_crv(tr_shards, scores=tr_scores, ax=ax, color='b', phase='Train')
    if vl_scores is not None:
        plot_single_crv(tr_shards, scores=vl_scores, ax=ax, color='g', phase='Val')

    # Set axes scale and labels
    basex, xlabel_scale = scale_ticks_params(tick_scale=xtick_scale)
    basey, ylabel_scale = scale_ticks_params(tick_scale=ytick_scale)
    ax.set_xlabel(f'Train Dataset Size ({xlabel_scale})')
    if 'log' in xlabel_scale.lower(): ax.set_xscale('log', basex=basex)
    ylbl = ' '.join(s.capitalize() for s in metric_name.split('_'))
    ax.set_ylabel(f'{ylbl} ({ylabel_scale})')
    if 'log' in ylabel_scale.lower(): ax.set_yscale('log', basey=basey)

    # Other settings
    if ylim is not None: ax.set_ylim(ylim)
    if xlim is not None: ax.set_xlim(xlim)  # BUGFIX: was ax.set_ylim(xlim)
    if title is None: title = 'Learning curve'
    ax.set_title(title)
    ax.legend(loc='best', frameon=True)
    ax.grid(True)
    plt.tight_layout()

    # Save fig
    if path is not None: plt.savefig(path, bbox_inches='tight')
    return ax
def power_law_func(x, alpha, beta, gamma):
    """Power-law curve alpha * x**beta + gamma.

    See docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.power.html
    """
    return gamma + alpha * np.power(x, beta)
def power_law_func_(x, alpha, beta, gamma1, gamma2):
    """Power-law curve alpha * x**beta + gamma1 + gamma2 (split offset variant).

    See docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.power.html
    """
    return gamma1 + gamma2 + alpha * np.power(x, beta)
def fit_power_law(x, y, p0:list=[30, -0.3, 0.06]):
    """Fit learning curve data (train set size vs metric) to a power law.

    TODO: How should we fit the data across multiple folds? This can
    be addressed using Bayesian methods (look at Bayesian linear regression).
    The uncertainty of parameters indicates the consistency across folds.

    Returns:
        dict with fitted 'alpha', 'beta', 'gamma' values.
    """
    prms, _ = optimize.curve_fit(power_law_func, x, y, p0=p0)
    return dict(zip(('alpha', 'beta', 'gamma'), prms))
def fit_power_law_(x, y, p0:list=[30, -0.3, 0.06, 0.12]):
    """Fit learning curve data (train set size vs metric) to the split-offset
    power law (power_law_func_).

    Returns:
        dict with fitted 'alpha', 'beta', 'gamma1', 'gamma2' values.
    """
    prms, _ = optimize.curve_fit(power_law_func_, x, y, p0=p0)
    return dict(zip(('alpha', 'beta', 'gamma1', 'gamma2'), prms))
def plot_lrn_crv_power_law(x, y, plot_fit:bool=True, metric_name:str='score',
                   xtick_scale:str='log2', ytick_scale:str='log2',
                   xlim:list=None, ylim:list=None, title:str=None, figsize=(7,5)):
    """Plot learning-curve data and (optionally) its power-law fit.

    Args:
        x : train set sizes (flattened)
        y : metric values (flattened)
        plot_fit : if True, overlay the fitted power-law curve
    Returns:
        (fig, ax, power_law_params) : figure, axes, and the fitted parameters.
    """
    x = x.ravel()
    y = y.ravel()
    fontsize = 13
    fig, ax = plt.subplots(figsize=figsize)
    ax.plot(x, y, '.-', color=None, label='data');

    # Fit power-law
    power_law_params = fit_power_law(x, y)
    yfit = power_law_func(x, **power_law_params)
    if plot_fit: ax.plot(x, yfit, '--', color=None, label='fit');

    basex, xlabel_scale = scale_ticks_params(tick_scale=xtick_scale)
    basey, ylabel_scale = scale_ticks_params(tick_scale=ytick_scale)
    ax.set_xlabel(f'Training Dataset Size ({xlabel_scale})', fontsize=fontsize)
    if 'log' in xlabel_scale.lower(): ax.set_xscale('log', basex=basex)
    ylabel = ' '.join(s.capitalize() for s in metric_name.split('_'))
    ax.set_ylabel(f'{ylabel} ({ylabel_scale})', fontsize=fontsize)
    if 'log' in ylabel_scale.lower(): ax.set_yscale('log', basey=basey)

    # Add the fitted equation as text on the plot
    # matplotlib.org/3.1.1/gallery/text_labels_and_annotations/usetex_demo.html
    eq = r"$\varepsilon(m) = \alpha m^{\beta}$" + rf"; $\alpha$={power_law_params['alpha']:.2f}, $\beta$={power_law_params['beta']:.2f}"
    # Place the text near the lower-left region of the data range
    xloc = x.min() + 0.1*(x.max() - x.min())
    yloc = y.min() + 0.9*(y.max() - y.min())
    ax.text(xloc, yloc, eq,
            {'color': 'black', 'fontsize': fontsize, 'ha': 'left', 'va': 'center',
             'bbox': {'boxstyle':'round', 'fc':'white', 'ec':'black', 'pad':0.2}})

    if ylim is not None: ax.set_ylim(ylim)
    if xlim is not None: ax.set_xlim(xlim)  # BUGFIX: was ax.set_ylim(xlim)
    if title is None: title = 'Learning curve (power-law)'
    ax.set_title(title)
    ax.legend(loc='best', frameon=True, fontsize=fontsize)
    ax.grid(True)
    return fig, ax, power_law_params
# Define custom metric to calc auroc from regression
# scikit-learn.org/stable/modules/model_evaluation.html#scoring
def reg_auroc(y_true, y_pred, th=0.5):
    """Compute area under the ROC for regression outputs.

    Both targets and predictions are binarized at threshold `th`
    (value < th maps to class 1) before computing the AUROC.
    """
    true_cls = np.where(y_true < th, 1, 0)
    pred_cls = np.where(y_pred < th, 1, 0)
    return sklearn.metrics.roc_auc_score(true_cls, pred_cls)
def reg_auroc_score():
    """Wrap reg_auroc as a sklearn scorer object (higher is better)."""
    return sklearn.metrics.make_scorer(score_func=reg_auroc, greater_is_better=True)
def calc_preds(model, x, y, mltype):
    """Compute predictions and aligned ground truth for a fitted model.

    Args:
        model : fitted estimator with predict() (reg) or predict_proba() (cls)
        x : input features
        y : ground-truth labels/targets; for 'cls', a 2-D one-hot array is
            collapsed to class indices so it matches y_pred
        mltype : 'reg' or 'cls'
    Returns:
        (y_pred, y_true)
    """
    if mltype == 'cls':
        # Class prediction = argmax of per-class probabilities
        y_pred = np.argmax(model.predict_proba(x), axis=1)
        if y.ndim > 1 and y.shape[1] > 1:
            # One-hot labels: collapse to class indices to match y_pred.
            # BUGFIX: was np.argmax(ydata, axis=1) with undefined `ydata`.
            y_true = np.argmax(y, axis=1)
        else:
            y_true = y
    elif mltype == 'reg':
        y_pred = model.predict(x)
        y_true = y
    return y_pred, y_true
def calc_scores(y_true, y_pred, mltype, metrics=None):
    """Create a dict of performance scores for the given problem type.

    Args:
        mltype : 'cls' (auroc, micro-f1, balanced accuracy) or
                 'reg' (r2, MAE, median AE, MSE)
        metrics : TODO allow to pass a string of metrics (currently unused)
    Returns:
        dict mapping score name -> value.
    """
    scores = {}
    if mltype == 'cls':
        scores['auroc'] = sklearn.metrics.roc_auc_score(y_true, y_pred)
        scores['f1_score'] = sklearn.metrics.f1_score(y_true, y_pred, average='micro')
        scores['acc_blnc'] = sklearn.metrics.balanced_accuracy_score(y_true, y_pred)
    elif mltype == 'reg':
        reg_metrics = (('r2', sklearn.metrics.r2_score),
                       ('mean_absolute_error', sklearn.metrics.mean_absolute_error),
                       ('median_absolute_error', sklearn.metrics.median_absolute_error),
                       ('mean_squared_error', sklearn.metrics.mean_squared_error))
        for name, fn in reg_metrics:
            scores[name] = fn(y_true=y_true, y_pred=y_pred)
    return scores
def scores_to_df(scores_all):
    """Reshape a list of per-(fold, shard) score dicts into a wide table.

    Each input dict carries 'fold', 'tr_size', 'tr_set' keys plus one key
    per metric. Output: one row per (metric, tr_size, tr_set), one column
    per fold.
    """
    long_df = pd.DataFrame(scores_all).melt(id_vars=['fold', 'tr_size', 'tr_set'])
    long_df = long_df.rename(columns={'variable': 'metric'})
    wide = long_df.pivot_table(index=['metric', 'tr_size', 'tr_set'],
                               columns=['fold'], values='value')
    wide = wide.reset_index(drop=False)
    wide.columns.name = None
    return wide
| [
"ml_models.save_krs_history",
"sklearn.externals.joblib.dump",
"numpy.abs",
"numpy.argmax",
"numpy.logspace",
"sklearn.metrics.r2_score",
"sklearn.metrics.mean_absolute_error",
"pathlib.Path",
"numpy.mean",
"sklearn.metrics.f1_score",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"pandas... | [((344, 365), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (358, 365), False, 'import matplotlib\n'), ((16633, 16667), 'keras.callbacks.CSVLogger', 'CSVLogger', (["(outdir / 'training.log')"], {}), "(outdir / 'training.log')\n", (16642, 16667), False, 'from keras.callbacks import ModelCheckpoint, CSVLogger, ReduceLROnPlateau, EarlyStopping, TensorBoard\n'), ((16682, 16817), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.75)', 'patience': '(20)', 'verbose': '(1)', 'mode': '"""auto"""', 'min_delta': '(0.0001)', 'cooldown': '(3)', 'min_lr': '(1e-09)'}), "(monitor='val_loss', factor=0.75, patience=20, verbose=1,\n mode='auto', min_delta=0.0001, cooldown=3, min_lr=1e-09)\n", (16699, 16817), False, 'from keras.callbacks import ModelCheckpoint, CSVLogger, ReduceLROnPlateau, EarlyStopping, TensorBoard\n'), ((16949, 17006), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(60)', 'verbose': '(1)'}), "(monitor='val_loss', patience=60, verbose=1)\n", (16962, 17006), False, 'from keras.callbacks import ModelCheckpoint, CSVLogger, ReduceLROnPlateau, EarlyStopping, TensorBoard\n'), ((22166, 22184), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22182, 22184), True, 'import matplotlib.pyplot as plt\n'), ((23036, 23083), 'scipy.optimize.curve_fit', 'optimize.curve_fit', (['power_law_func', 'x', 'y'], {'p0': 'p0'}), '(power_law_func, x, y, p0=p0)\n', (23054, 23083), False, 'from scipy import optimize\n'), ((23367, 23415), 'scipy.optimize.curve_fit', 'optimize.curve_fit', (['power_law_func_', 'x', 'y'], {'p0': 'p0'}), '(power_law_func_, x, y, p0=p0)\n', (23385, 23415), False, 'from scipy import optimize\n'), ((23906, 23935), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (23918, 23935), True, 'import matplotlib.pyplot as plt\n'), ((26710, 26737), 'numpy.where', 
'np.where', (['(y_true < th)', '(1)', '(0)'], {}), '(y_true < th, 1, 0)\n', (26718, 26737), True, 'import numpy as np\n'), ((26752, 26779), 'numpy.where', 'np.where', (['(y_pred < th)', '(1)', '(0)'], {}), '(y_pred < th, 1, 0)\n', (26760, 26779), True, 'import numpy as np\n'), ((26802, 26848), 'sklearn.metrics.roc_auc_score', 'sklearn.metrics.roc_auc_score', (['y_true', 'y_score'], {}), '(y_true, y_score)\n', (26831, 26848), False, 'import sklearn\n'), ((26912, 26985), 'sklearn.metrics.make_scorer', 'sklearn.metrics.make_scorer', ([], {'score_func': 'reg_auroc', 'greater_is_better': '(True)'}), '(score_func=reg_auroc, greater_is_better=True)\n', (26939, 26985), False, 'import sklearn\n'), ((28928, 28952), 'pandas.DataFrame', 'pd.DataFrame', (['scores_all'], {}), '(scores_all)\n', (28940, 28952), True, 'import pandas as pd\n'), ((3190, 3202), 'pathlib.Path', 'Path', (['outdir'], {}), '(outdir)\n', (3194, 3202), False, 'from pathlib import Path\n'), ((13579, 13626), 'pandas.concat', 'pd.concat', (['[tr_scores_df, vl_scores_df]'], {'axis': '(0)'}), '([tr_scores_df, vl_scores_df], axis=0)\n', (13588, 13626), True, 'import pandas as pd\n'), ((14166, 14233), 'keras.utils.plot_model', 'keras.utils.plot_model', (['model'], {'to_file': "(self.outdir / 'nn_model.png')"}), "(model, to_file=self.outdir / 'nn_model.png')\n", (14188, 14233), False, 'import keras\n'), ((14342, 14381), 'os.makedirs', 'os.makedirs', (['trn_outdir'], {'exist_ok': '(False)'}), '(trn_outdir, exist_ok=False)\n', (14353, 14381), False, 'import os\n'), ((14982, 15036), 'ml_models.save_krs_history', 'ml_models.save_krs_history', (['history'], {'outdir': 'trn_outdir'}), '(history, outdir=trn_outdir)\n', (15008, 15036), False, 'import ml_models\n'), ((15045, 15160), 'ml_models.plot_prfrm_metrics', 'ml_models.plot_prfrm_metrics', (['history'], {'title': 'f"""Train size: {tr_sz}"""', 'skp_ep': '(20)', 'add_lr': '(True)', 'outdir': 'trn_outdir'}), "(history, title=f'Train size: {tr_sz}', skp_ep=\n 20, 
add_lr=True, outdir=trn_outdir)\n", (15073, 15160), False, 'import ml_models\n'), ((15756, 15794), 'os.makedirs', 'os.makedirs', (['trn_outdir'], {'exist_ok': '(True)'}), '(trn_outdir, exist_ok=True)\n', (15767, 15794), False, 'import os\n'), ((16339, 16418), 'sklearn.externals.joblib.dump', 'joblib.dump', (['model'], {'filename': "(trn_outdir / ('model.' + self.model_name + '.pkl'))"}), "(model, filename=trn_outdir / ('model.' + self.model_name + '.pkl'))\n", (16350, 16418), False, 'from sklearn.externals import joblib\n'), ((20831, 20854), 'numpy.mean', 'np.mean', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (20838, 20854), True, 'import numpy as np\n'), ((20877, 20899), 'numpy.std', 'np.std', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (20883, 20899), True, 'import numpy as np\n'), ((21160, 21189), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (21172, 21189), True, 'import matplotlib.pyplot as plt\n'), ((22226, 22264), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'bbox_inches': '"""tight"""'}), "(path, bbox_inches='tight')\n", (22237, 22264), True, 'import matplotlib.pyplot as plt\n'), ((27747, 27792), 'sklearn.metrics.roc_auc_score', 'sklearn.metrics.roc_auc_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (27776, 27792), False, 'import sklearn\n'), ((27822, 27879), 'sklearn.metrics.f1_score', 'sklearn.metrics.f1_score', (['y_true', 'y_pred'], {'average': '"""micro"""'}), "(y_true, y_pred, average='micro')\n", (27846, 27879), False, 'import sklearn\n'), ((27909, 27964), 'sklearn.metrics.balanced_accuracy_score', 'sklearn.metrics.balanced_accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (27948, 27964), False, 'import sklearn\n'), ((2877, 2892), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (2889, 2892), True, 'import pandas as pd\n'), ((2917, 2932), 'pandas.DataFrame', 'pd.DataFrame', (['Y'], {}), '(Y)\n', (2929, 2932), True, 'import pandas as 
pd\n'), ((7205, 7233), 'numpy.abs', 'np.abs', (['(v - self.max_samples)'], {}), '(v - self.max_samples)\n', (7211, 7233), True, 'import numpy as np\n'), ((9702, 9730), 'numpy.squeeze', 'np.squeeze', (['self.Y[vl_id, :]'], {}), '(self.Y[vl_id, :])\n', (9712, 9730), True, 'import numpy as np\n'), ((22424, 22441), 'numpy.power', 'np.power', (['x', 'beta'], {}), '(x, beta)\n', (22432, 22441), True, 'import numpy as np\n'), ((27194, 27219), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (27203, 27219), True, 'import numpy as np\n'), ((27241, 27265), 'numpy.argmax', 'np.argmax', (['ydata'], {'axis': '(1)'}), '(ydata, axis=1)\n', (27250, 27265), True, 'import numpy as np\n'), ((27345, 27370), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (27354, 27370), True, 'import numpy as np\n'), ((28015, 28069), 'sklearn.metrics.r2_score', 'sklearn.metrics.r2_score', ([], {'y_true': 'y_true', 'y_pred': 'y_pred'}), '(y_true=y_true, y_pred=y_pred)\n', (28039, 28069), False, 'import sklearn\n'), ((28110, 28175), 'sklearn.metrics.mean_absolute_error', 'sklearn.metrics.mean_absolute_error', ([], {'y_true': 'y_true', 'y_pred': 'y_pred'}), '(y_true=y_true, y_pred=y_pred)\n', (28145, 28175), False, 'import sklearn\n'), ((28218, 28285), 'sklearn.metrics.median_absolute_error', 'sklearn.metrics.median_absolute_error', ([], {'y_true': 'y_true', 'y_pred': 'y_pred'}), '(y_true=y_true, y_pred=y_pred)\n', (28255, 28285), False, 'import sklearn\n'), ((28325, 28389), 'sklearn.metrics.mean_squared_error', 'sklearn.metrics.mean_squared_error', ([], {'y_true': 'y_true', 'y_pred': 'y_pred'}), '(y_true=y_true, y_pred=y_pred)\n', (28359, 28389), False, 'import sklearn\n'), ((4333, 4409), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'self.cv_folds', 'shuffle': '(False)', 'random_state': 'self.random_state'}), '(n_splits=self.cv_folds, shuffle=False, random_state=self.random_state)\n', (4338, 4409), False, 'from 
sklearn.model_selection import ShuffleSplit, KFold\n'), ((5471, 5507), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1.0)', 'self.n_shards'], {}), '(0.1, 1.0, self.n_shards)\n', (5482, 5507), True, 'import numpy as np\n'), ((10369, 10400), 'numpy.squeeze', 'np.squeeze', (['ytr[idx[:tr_sz], :]'], {}), '(ytr[idx[:tr_sz], :])\n', (10379, 10400), True, 'import numpy as np\n'), ((10478, 10544), 'ml_models.get_model', 'ml_models.get_model', (['self.model_name'], {'init_kwargs': 'self.init_kwargs'}), '(self.model_name, init_kwargs=self.init_kwargs)\n', (10497, 10544), False, 'import ml_models\n'), ((14619, 14672), 'ml_models.clr_keras_callback', 'ml_models.clr_keras_callback', ([], {}), '(**self.clr_keras_kwargs)\n', (14647, 14672), False, 'import ml_models\n'), ((22613, 22630), 'numpy.power', 'np.power', (['x', 'beta'], {}), '(x, beta)\n', (22621, 22630), True, 'import numpy as np\n'), ((7161, 7174), 'numpy.arange', 'np.arange', (['(30)'], {}), '(30)\n', (7170, 7174), True, 'import numpy as np\n'), ((4785, 4810), 'numpy.argmax', 'np.argmax', (['self.Y'], {'axis': '(1)'}), '(self.Y, axis=1)\n', (4794, 4810), True, 'import numpy as np\n'), ((6179, 6256), 'numpy.logspace', 'np.logspace', ([], {'start': '(0.0)', 'stop': '(1.0)', 'num': 'self.n_shards', 'endpoint': '(True)', 'base': 'base'}), '(start=0.0, stop=1.0, num=self.n_shards, endpoint=True, base=base)\n', (6190, 6256), True, 'import numpy as np\n')] |
import torch
import numpy as np
import argparse
from scipy.stats import laplace
from pathlib import Path
import sys
file = Path(__file__). resolve()
package_root_directory = file.parents [1]
sys.path.append(str(package_root_directory))
from Model.model import Model
from scipy.cluster.hierarchy import dendrogram as plot_dendrogam
import itertools
import matplotlib.pyplot as plt
import matplotlib as mpl
# Use the Computer Modern font for mathtext in all figures.
mpl.rcParams['mathtext.fontset'] = 'cm'
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
cm = plt.get_cmap('Set1')
cm2 = plt.get_cmap('Set2')

# Command-line options: which training job/epoch to load and the architecture
# hyperparameters needed to rebuild the model before loading its weights.
# NOTE(review): these architecture defaults must match the values used during
# training, otherwise load_state_dict below will fail — confirm against the
# training script.
parser = argparse.ArgumentParser('Plot network')
parser.add_argument('--job_id', type=int)
parser.add_argument('--epoch', type=int)
parser.add_argument('--gamma_size', type=int, default = 25)
parser.add_argument('--z_size', type=int, default = 20)
parser.add_argument('--decoder_size', type=int, default = 65)
parser.add_argument('--Nflows', type=int, default = 3)
parser.add_argument('--flow_hidden', type=int, default = 24)
parser.add_argument('--f_nn_size', type=int, default = 12)
parser.add_argument('--W_prior_scale', type=float, default = 0.1)
args = parser.parse_args()

# Labels for the N = 29 network variables (health deficits / biomarkers),
# used to annotate heatmap rows and columns below.
deficits = np.array(['Gait speed', 'Dom Grip strength', 'Non-dom grip str', 'ADL score','IADL score', 'Chair rises','Leg raise','Full tandem stance', 'Self-rated health', 'Eyesight','Hearing', 'Walking ability', 'Diastolic blood pressure', 'Systolic blood pressure', 'Pulse', 'Triglycerides','C-reactive protein','HDL cholesterol','LDL cholesterol','Glucose','IGF-1','Hemoglobin','Fibrinogen','Ferritin', 'Total cholesterol', r'White blood cell count', 'MCH', 'Glycated hemoglobin', 'Vitamin-D'])
N = 29

# Rebuild the model architecture on CPU so the trained parameters can be
# loaded into it below.
model = Model('cpu', N, args.gamma_size, args.z_size, args.decoder_size, args.Nflows, args.flow_hidden, args.f_nn_size, 0, 0, 0.5)
# Load the trained weights for the requested job/epoch onto CPU.
model.load_state_dict(torch.load('../Parameters/train%d_Model_DJIN_epoch%d.params'%(args.job_id, args.epoch),map_location='cpu'))

# Laplace posterior parameters (location and scale) of the N x N network
# weights; the diagonal is zeroed out (no self-connections).
mean = model.mean.detach().numpy()*(np.ones((N,N)) - np.eye(N))
scale = model.logscale.exp().detach().numpy()*(np.ones((N,N)) - np.eye(N))

# Flatten the off-diagonal entries for scatter plotting.
mean_list = mean[~np.eye(mean.shape[0],dtype=bool)]
scale_list = scale[~np.eye(scale.shape[0],dtype=bool)]

# A connection counts as "robust" when the 99% interval of its Laplace
# posterior excludes zero; robust_network[i,j] is set to 0 otherwise.
robust_network = np.ones(mean.shape)
network = np.ones(mean.shape)*mean
for i in range(N):
    for j in range(N):
        if i!=j:
            posterior = laplace(mean[i,j], scale[i,j])
            interval = posterior.interval(0.99)
            if (interval[0] < 0 and interval[1] > 0):
                robust_network[i,j] = 0
robust_list = robust_network[~np.eye(robust_network.shape[0],dtype=bool)]

# Scatter of posterior mean weight vs the posterior mass lying on the same
# side of zero as the mean.
fig,ax = plt.subplots(figsize=(6,4))
# NOTE(review): `pd` here is an array of posterior proportions, not pandas —
# shadowing risk if pandas is ever imported as pd in this script.
pd = np.zeros(mean_list.shape)
for i, (m, s) in enumerate(zip(mean_list, scale_list)):
    if m > 0:
        dist = posterior = laplace(m, s)
        pd[i] = posterior.sf(0)   # P(w > 0) for positive means
    else:
        dist = posterior = laplace(m, s)
        pd[i] = posterior.cdf(0)  # P(w < 0) for non-positive means
# Robust connections are drawn larger and in black.
size = 10 + 10*robust_list
color = ['grey' if r==0 else 'black' for r in robust_list]
cax = ax.scatter(pd, mean_list,
    c = color, s=size, edgecolors='white', linewidths=0.05)
# Vertical reference line at 0.995 (the one-sided mass matching a 99%
# two-sided interval).
plt.plot([0.995,0.995], [np.min(mean_list), np.max(mean_list)],color='r', linestyle = '--')
ax.set_ylabel(r'Posterior mean weight', fontsize = 14)
ax.set_xlabel(r'Proportion of posterior in direction of mean', fontsize = 14)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.tight_layout()
plt.savefig('../Plots/Posterior_network_uncertainty_job_id%d_epoch%d.pdf'%(args.job_id, args.epoch))
# Heatmap of the robust network weights, with non-robust / near-zero entries
# masked out (NaN) and variables reordered so related measures sit together.
fig, ax = plt.subplots(figsize=(7,7))
import seaborn as sns
sns.set(style="white")
cmap = sns.color_palette("RdBu_r", 100)
network = np.ones(mean.shape)*mean
network_scale = np.ones(mean.shape)*scale
# Manual display ordering of the 29 variables — presumably grouping related
# deficits/biomarkers for readability; confirm against the deficits list.
order = np.array([28, 8, 9, 10, 11, 3, 4, 7, 6, 5, 0, 2, 1, 16, 27, 19, 21, 26, 23, 24, 18, 17, 15, 13, 12, 14, 22, 20, 25])
for i in range(N):
    for j in range(N):
        if i!=j:
            posterior = laplace(mean[i,j], scale[i,j])
            interval = posterior.interval(0.99)
            # Mask connections whose 99% posterior interval includes zero,
            # or whose magnitude is negligible (< 1e-4).
            if (interval[0] < 0 and interval[1] > 0) or np.abs(network[i,j]) < 0.0001:
                network[i,j] = np.nan
                network_scale[i,j] = np.nan
# Persist the masked weight matrix for downstream analysis.
np.save('../Analysis_Data/network_weights_job_id%d_epoch%d.npy'%(args.job_id, args.epoch), network)
network = network[order][:,order]
# Horizontal colorbar below the heatmap; color range is symmetric about zero.
cbar_ax = fig.add_axes([0.31, 0.09, 0.59, 0.02])
sns.heatmap(network, ax=ax, xticklabels=deficits[order], yticklabels=deficits[order],
        square=True, mask = np.eye(N) > 0, cmap=cmap, vmax=np.nanmax(np.abs(network)),vmin=-1*np.nanmax(np.abs(network)), cbar_kws={'label': r'Mean connection weight', 'orientation':'horizontal'}, cbar_ax = cbar_ax)
cbar_ax.yaxis.label.set_size(9)
ax.tick_params(labelsize=9)
#ax.text(-0.05, 1.05, 'b', horizontalalignment='left', verticalalignment='center',transform=ax.transAxes, color='k',fontsize = 16, zorder=1000000,fontweight='bold')
fig.tight_layout()
plt.subplots_adjust(bottom=0.35)
fig.savefig('../Plots/Posterior_network_job_id%d_epoch%d.pdf'%(args.job_id, args.epoch))
| [
"numpy.abs",
"argparse.ArgumentParser",
"numpy.ones",
"pathlib.Path",
"matplotlib.pyplot.tight_layout",
"Model.model.Model",
"torch.load",
"numpy.max",
"seaborn.set",
"matplotlib.pyplot.subplots",
"numpy.save",
"matplotlib.pyplot.get_cmap",
"numpy.min",
"matplotlib.pyplot.subplots_adjust",... | [((517, 537), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Set1"""'], {}), "('Set1')\n", (529, 537), True, 'import matplotlib.pyplot as plt\n'), ((544, 564), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Set2"""'], {}), "('Set2')\n", (556, 564), True, 'import matplotlib.pyplot as plt\n'), ((575, 614), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Plot network"""'], {}), "('Plot network')\n", (598, 614), False, 'import argparse\n'), ((1156, 1680), 'numpy.array', 'np.array', (["['Gait speed', 'Dom Grip strength', 'Non-dom grip str', 'ADL score',\n 'IADL score', 'Chair rises', 'Leg raise', 'Full tandem stance',\n 'Self-rated health', 'Eyesight', 'Hearing', 'Walking ability',\n 'Diastolic blood pressure', 'Systolic blood pressure', 'Pulse',\n 'Triglycerides', 'C-reactive protein', 'HDL cholesterol',\n 'LDL cholesterol', 'Glucose', 'IGF-1', 'Hemoglobin', 'Fibrinogen',\n 'Ferritin', 'Total cholesterol', 'White blood cell count', 'MCH',\n 'Glycated hemoglobin', 'Vitamin-D']"], {}), "(['Gait speed', 'Dom Grip strength', 'Non-dom grip str',\n 'ADL score', 'IADL score', 'Chair rises', 'Leg raise',\n 'Full tandem stance', 'Self-rated health', 'Eyesight', 'Hearing',\n 'Walking ability', 'Diastolic blood pressure',\n 'Systolic blood pressure', 'Pulse', 'Triglycerides',\n 'C-reactive protein', 'HDL cholesterol', 'LDL cholesterol', 'Glucose',\n 'IGF-1', 'Hemoglobin', 'Fibrinogen', 'Ferritin', 'Total cholesterol',\n 'White blood cell count', 'MCH', 'Glycated hemoglobin', 'Vitamin-D'])\n", (1164, 1680), True, 'import numpy as np\n'), ((1659, 1786), 'Model.model.Model', 'Model', (['"""cpu"""', 'N', 'args.gamma_size', 'args.z_size', 'args.decoder_size', 'args.Nflows', 'args.flow_hidden', 'args.f_nn_size', '(0)', '(0)', '(0.5)'], {}), "('cpu', N, args.gamma_size, args.z_size, args.decoder_size, args.\n Nflows, args.flow_hidden, args.f_nn_size, 0, 0, 0.5)\n", (1664, 1786), False, 'from Model.model import Model\n'), 
((2178, 2197), 'numpy.ones', 'np.ones', (['mean.shape'], {}), '(mean.shape)\n', (2185, 2197), True, 'import numpy as np\n'), ((2575, 2603), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (2587, 2603), True, 'import matplotlib.pyplot as plt\n'), ((2610, 2635), 'numpy.zeros', 'np.zeros', (['mean_list.shape'], {}), '(mean_list.shape)\n', (2618, 2635), True, 'import numpy as np\n'), ((3362, 3380), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3378, 3380), True, 'import matplotlib.pyplot as plt\n'), ((3381, 3487), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../Plots/Posterior_network_uncertainty_job_id%d_epoch%d.pdf' % (args.\n job_id, args.epoch))"], {}), "('../Plots/Posterior_network_uncertainty_job_id%d_epoch%d.pdf' %\n (args.job_id, args.epoch))\n", (3392, 3487), True, 'import matplotlib.pyplot as plt\n'), ((3498, 3526), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (3510, 3526), True, 'import matplotlib.pyplot as plt\n'), ((3549, 3571), 'seaborn.set', 'sns.set', ([], {'style': '"""white"""'}), "(style='white')\n", (3556, 3571), True, 'import seaborn as sns\n'), ((3580, 3612), 'seaborn.color_palette', 'sns.color_palette', (['"""RdBu_r"""', '(100)'], {}), "('RdBu_r', 100)\n", (3597, 3612), True, 'import seaborn as sns\n'), ((3700, 3820), 'numpy.array', 'np.array', (['[28, 8, 9, 10, 11, 3, 4, 7, 6, 5, 0, 2, 1, 16, 27, 19, 21, 26, 23, 24, 18, \n 17, 15, 13, 12, 14, 22, 20, 25]'], {}), '([28, 8, 9, 10, 11, 3, 4, 7, 6, 5, 0, 2, 1, 16, 27, 19, 21, 26, 23,\n 24, 18, 17, 15, 13, 12, 14, 22, 20, 25])\n', (3708, 3820), True, 'import numpy as np\n'), ((4151, 4257), 'numpy.save', 'np.save', (["('../Analysis_Data/network_weights_job_id%d_epoch%d.npy' % (args.job_id,\n args.epoch))", 'network'], {}), "('../Analysis_Data/network_weights_job_id%d_epoch%d.npy' % (args.\n job_id, args.epoch), network)\n", (4158, 4257), True, 'import numpy as np\n'), 
((4924, 4956), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.35)'}), '(bottom=0.35)\n', (4943, 4956), True, 'import matplotlib.pyplot as plt\n'), ((1804, 1917), 'torch.load', 'torch.load', (["('../Parameters/train%d_Model_DJIN_epoch%d.params' % (args.job_id, args.epoch))"], {'map_location': '"""cpu"""'}), "('../Parameters/train%d_Model_DJIN_epoch%d.params' % (args.job_id,\n args.epoch), map_location='cpu')\n", (1814, 1917), False, 'import torch\n'), ((2208, 2227), 'numpy.ones', 'np.ones', (['mean.shape'], {}), '(mean.shape)\n', (2215, 2227), True, 'import numpy as np\n'), ((3624, 3643), 'numpy.ones', 'np.ones', (['mean.shape'], {}), '(mean.shape)\n', (3631, 3643), True, 'import numpy as np\n'), ((3665, 3684), 'numpy.ones', 'np.ones', (['mean.shape'], {}), '(mean.shape)\n', (3672, 3684), True, 'import numpy as np\n'), ((124, 138), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (128, 138), False, 'from pathlib import Path\n'), ((1949, 1964), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (1956, 1964), True, 'import numpy as np\n'), ((1966, 1975), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (1972, 1975), True, 'import numpy as np\n'), ((2024, 2039), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (2031, 2039), True, 'import numpy as np\n'), ((2041, 2050), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (2047, 2050), True, 'import numpy as np\n'), ((2072, 2105), 'numpy.eye', 'np.eye', (['mean.shape[0]'], {'dtype': 'bool'}), '(mean.shape[0], dtype=bool)\n', (2078, 2105), True, 'import numpy as np\n'), ((2126, 2160), 'numpy.eye', 'np.eye', (['scale.shape[0]'], {'dtype': 'bool'}), '(scale.shape[0], dtype=bool)\n', (2132, 2160), True, 'import numpy as np\n'), ((2521, 2564), 'numpy.eye', 'np.eye', (['robust_network.shape[0]'], {'dtype': 'bool'}), '(robust_network.shape[0], dtype=bool)\n', (2527, 2564), True, 'import numpy as np\n'), ((2733, 2746), 'scipy.stats.laplace', 'laplace', (['m', 's'], {}), '(m, s)\n', 
(2740, 2746), False, 'from scipy.stats import laplace\n'), ((2816, 2829), 'scipy.stats.laplace', 'laplace', (['m', 's'], {}), '(m, s)\n', (2823, 2829), False, 'from scipy.stats import laplace\n'), ((3085, 3102), 'numpy.min', 'np.min', (['mean_list'], {}), '(mean_list)\n', (3091, 3102), True, 'import numpy as np\n'), ((3104, 3121), 'numpy.max', 'np.max', (['mean_list'], {}), '(mean_list)\n', (3110, 3121), True, 'import numpy as np\n'), ((2317, 2349), 'scipy.stats.laplace', 'laplace', (['mean[i, j]', 'scale[i, j]'], {}), '(mean[i, j], scale[i, j])\n', (2324, 2349), False, 'from scipy.stats import laplace\n'), ((3902, 3934), 'scipy.stats.laplace', 'laplace', (['mean[i, j]', 'scale[i, j]'], {}), '(mean[i, j], scale[i, j])\n', (3909, 3934), False, 'from scipy.stats import laplace\n'), ((4489, 4498), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (4495, 4498), True, 'import numpy as np\n'), ((4530, 4545), 'numpy.abs', 'np.abs', (['network'], {}), '(network)\n', (4536, 4545), True, 'import numpy as np\n'), ((4565, 4580), 'numpy.abs', 'np.abs', (['network'], {}), '(network)\n', (4571, 4580), True, 'import numpy as np\n'), ((4037, 4058), 'numpy.abs', 'np.abs', (['network[i, j]'], {}), '(network[i, j])\n', (4043, 4058), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
def cart2pol(x, y):
    """Convert Cartesian coordinates (x, y) to polar (rho, phi)."""
    radius = np.sqrt(x ** 2 + y ** 2)
    angle = np.arctan2(y, x)
    return (radius, angle)
def pol2cart(rho, phi):
    """Convert polar coordinates (rho, phi) to Cartesian (x, y)."""
    return (rho * np.cos(phi), rho * np.sin(phi))
def sum2(x, y):
    """Element-wise sum of two equal-length sequences, as a tuple."""
    return tuple(sum(pair) for pair in zip(x, y))
def sum3(x, y, z):
    """Element-wise sum of three equal-length sequences, as a tuple."""
    return tuple(sum(triple) for triple in zip(x, y, z))
def mag(x, y):
    """Euclidean norm of the 2-D vector (x, y)."""
    return np.sqrt(x * x + y * y)
def acc(x, y):
    """Componentwise acceleration at (x, y) under an inverse-square
    central force of strength 0.5: a = -0.5 * r / |r|**3.

    Fix: the original evaluated ``mag(x, y)`` twice (one sqrt per
    component); the cubed distance is now computed once and shared.
    """
    r3 = mag(x, y) ** 3  # |r|**3, identical for both components
    return (-.5 * x / r3, -.5 * y / r3)
def mag_array(r):
    """Norm of a 2-component vector r = (r[0], r[1]); element-wise on arrays."""
    return np.sqrt(r[0] * r[0] + r[1] * r[1])
def acc_array(r):
    """Central-force acceleration -0.5 * r / |r|**3 for a 2-component array r."""
    cubed_distance = mag_array(r) ** 3
    return -.5 * r / cubed_distance
def mag_multi(r1, r2):
    """Euclidean distance between the 2-component points r1 and r2."""
    dx = r2[0] - r1[0]
    dy = r2[1] - r1[1]
    return np.sqrt(dx * dx + dy * dy)
def acc_multi(r, r1, r2):
    """Net acceleration on a body at r from two attractors at r1 and r2,
    each pulling with an inverse-square force of strength 0.5."""
    pull1 = -.5 * (r - r1) / mag_multi(r1, r) ** 3
    pull2 = -.5 * (r - r2) / mag_multi(r2, r) ** 3
    return pull1 + pull2
def int_q_array(r, v, dt):
    """One explicit-Euler position step: r <- r + dt * v."""
    return r + dt * v
def int_v_array(r, r1, r2, v, t):
    """One explicit-Euler velocity step under the two-attractor field acc_multi."""
    return v + t * acc_multi(r, r1, r2)
def v_magnetic_calc(r, v, B, dt):
    """Rotate velocity v about the field direction B/|B| by angle theta.

    Uses the rotation form v' = v + sin(theta)(u x v) + (1-cos(theta)) u x (u x v)
    with u = B/|B|.  The parameter ``r`` is accepted but not used.

    NOTE(review): theta is taken from only the z-component of B
    (theta = B[2]*dt).  That equals |B|*dt only when B points along z —
    confirm whether the general case should use np.linalg.norm(B)*dt.
    """
    theta = B[2]*dt
    # Unit vector along B: the rotation axis.
    B_unit = B/np.linalg.norm(B)
    return(v+np.sin(theta)*np.cross(B_unit,v)+(1-np.cos(theta))*np.cross(B_unit,np.cross(B_unit,v)))
def v_damped(r, v, w_0, dt):
    """Velocity update for a damped/harmonic step — currently a no-op.

    NOTE(review): the expression on the next line is computed but its
    result is discarded, so the function returns ``v`` unchanged and
    ``r`` is never used.  It was probably meant to be an assignment
    (e.g. ``v = v - w_0**2*r*dt`` for a restoring force) — confirm the
    intended update against the integrator before fixing.
    """
    v - w_0**2*dt
    return(v)
def plotting(x, y):
    """Plot the scaled energy-ratio curve y against x and display it."""
    figure, axis = plt.subplots()
    axis.set_ylabel('(E(t)/E_0-1)/t^4')
    axis.set_xlabel('Time/Period')
    axis.set_title("Energy Ratio - Forest Ruth")
    axis.plot(x, y)
    # Legend labels kept as in the original comparison plot.
    axis.legend(('Runge-Kutta', 'Forest Ruth'), loc='upper right')
    plt.show()
def plot_polar(r, theta):
    """Plot the trajectory (theta, r) on a polar axes titled "Forest Ruth".

    Fixes over the original:
    * the arguments ``r`` and ``theta`` were ignored in favour of
      undefined globals (y_val, x_val, theta_comp, r_comp), which raised
      NameError;
    * the polar subplot was immediately overwritten by a rectangular
      ``add_subplot(111)``, so ``set_rmax``/``set_rticks``/
      ``set_rlabel_position`` would have raised AttributeError.
    """
    fig1, ax3 = plt.subplots(subplot_kw={'projection': 'polar'})
    ax3.plot(theta, r)
    ax3.set_rmax(.5)
    ax3.set_rticks([3, 6, 9, 12])  # less radial ticks
    ax3.set_rlabel_position(-22.5)  # get radial labels away from plotted line
    ax3.grid(True)
    ax3.set_title("Forest Ruth", va='bottom')
    plt.show()
"matplotlib.pyplot.show",
"numpy.arctan2",
"numpy.cross",
"numpy.sin",
"numpy.linalg.norm",
"numpy.cos",
"matplotlib.pyplot.subplots",
"numpy.sqrt"
] | [((82, 106), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (89, 106), True, 'import numpy as np\n'), ((113, 129), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (123, 129), True, 'import numpy as np\n'), ((381, 405), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (388, 405), True, 'import numpy as np\n'), ((492, 522), 'numpy.sqrt', 'np.sqrt', (['(r[0] ** 2 + r[1] ** 2)'], {}), '(r[0] ** 2 + r[1] ** 2)\n', (499, 522), True, 'import numpy as np\n'), ((600, 652), 'numpy.sqrt', 'np.sqrt', (['((r2[0] - r1[0]) ** 2 + (r2[1] - r1[1]) ** 2)'], {}), '((r2[0] - r1[0]) ** 2 + (r2[1] - r1[1]) ** 2)\n', (607, 652), True, 'import numpy as np\n'), ((1140, 1154), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1152, 1154), True, 'import matplotlib.pyplot as plt\n'), ((1346, 1356), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1354, 1356), True, 'import matplotlib.pyplot as plt\n'), ((1397, 1411), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1409, 1411), True, 'import matplotlib.pyplot as plt\n'), ((1795, 1805), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1803, 1805), True, 'import matplotlib.pyplot as plt\n'), ((190, 201), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (196, 201), True, 'import numpy as np\n'), ((216, 227), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (222, 227), True, 'import numpy as np\n'), ((925, 942), 'numpy.linalg.norm', 'np.linalg.norm', (['B'], {}), '(B)\n', (939, 942), True, 'import numpy as np\n'), ((953, 966), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (959, 966), True, 'import numpy as np\n'), ((967, 986), 'numpy.cross', 'np.cross', (['B_unit', 'v'], {}), '(B_unit, v)\n', (975, 986), True, 'import numpy as np\n'), ((989, 1002), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (995, 1002), True, 'import numpy as np\n'), ((1020, 1039), 'numpy.cross', 'np.cross', (['B_unit', 'v'], {}), '(B_unit, 
v)\n', (1028, 1039), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 17:12:47 2020
@author: <NAME>
code to run the quadcopter model autonomously on a track using trained CNN model
"""
#import essential libraries
import sim
import sys
#import os
#import matplotlib.pyplot as plt
import cv2
import numpy as np
import time
from keras.models import load_model
# Load the trained CNN model with weights.
PATH = '/home/anish/anaconda_py3_copelia'
QuadNet = load_model(PATH + '/Model/Quad_Net_Wt.h5')
time.sleep(0.5)
# Just in case, close all previously opened connections to CoppeliaSim.
sim.simxFinish(-1)
# Connect to CoppeliaSim's remote API server.
clientID = sim.simxStart('127.0.0.1', 19999, True, True, 5000, 5)
# Verify that a connection is established with CoppeliaSim.
if clientID != -1:
    print('Connected to remote API server')
else:
    print("Not connected to remote API server")
    sys.exit("Could not connect")
# Object handle used to steer the quadcopter.
err_code, target_handle = sim.simxGetObjectHandle(clientID, 'Quadricopter_target', sim.simx_opmode_blocking)
# Empty argument containers for the server-side LUA function call.
inputInts = []      # table of ints
inputFloats = []    # table of floats
inputStrings = []   # table of strings
inputBuffer = ''    # string buffer
# Yaw increment (rad) per control step for each predicted class:
# 0 = steer left, 1 = steer right, any other class = fly straight.
# (Replaces three near-identical if/elif/else branches.)
YAW_DELTA = {0: 0.02618, 1: -0.0349}
FORWARD_STEP = 0.018  # forward displacement per control step
while True:
    # Grab a camera frame via the 'getImage' LUA function on the server.
    res, retTable1, retTable2, retTable3, retString = sim.simxCallScriptFunction(
        clientID, 'Vision_sensor', sim.sim_scripttype_childscript,
        'getImage', inputInts, inputFloats, inputStrings, inputBuffer, sim.simx_opmode_blocking)
    if res != sim.simx_return_ok:
        # Fix: the original fell through on a failed read and then used an
        # undefined (first iteration) or stale `image`; skip this frame.
        continue
    image = retString
    resolution = retTable1
    # Image processing: signed -> unsigned ints (0-255), reshape to
    # resolution x 3 channels, flip vertically, downscale to the model's
    # 128x128 input, and add a batch dimension.
    image = np.array(image, dtype=np.uint8)
    image.resize([resolution[0], resolution[1], 3])
    image = np.flip(image, 0)
    image = cv2.resize(image, (int(256/2), int(256/2)))
    image = image[None, :, :, :]
    # Use QuadNet to predict the motion class for this frame.
    y_pred = QuadNet.predict(image)
    cls = np.squeeze(np.argmax(y_pred, axis=1))
    # Current pose of the quad target, expressed in its own body frame.
    err_code, target_orien_body = sim.simxGetObjectOrientation(clientID, target_handle, target_handle, sim.simx_opmode_blocking)
    err_code, target_pos_body = sim.simxGetObjectPosition(clientID, target_handle, target_handle, sim.simx_opmode_blocking)
    # Motion control: always advance forward, steer by the predicted class.
    target_pos_body[0] = target_pos_body[0] + FORWARD_STEP
    target_orien_body[2] = target_orien_body[2] + YAW_DELTA.get(int(cls), 0.0)
    err_code = sim.simxSetObjectOrientation(clientID, target_handle, target_handle, target_orien_body, sim.simx_opmode_oneshot)
    err_code = sim.simxSetObjectPosition(clientID, target_handle, target_handle, target_pos_body, sim.simx_opmode_oneshot)
    #time.sleep(0.025)
| [
"keras.models.load_model",
"sim.simxCallScriptFunction",
"numpy.flip",
"sim.simxSetObjectOrientation",
"numpy.argmax",
"time.sleep",
"sim.simxGetObjectOrientation",
"numpy.array",
"sim.simxGetObjectHandle",
"sim.simxGetObjectPosition",
"sim.simxStart",
"sim.simxFinish",
"numpy.squeeze",
"s... | [((432, 474), 'keras.models.load_model', 'load_model', (["(PATH + '/Model/Quad_Net_Wt.h5')"], {}), "(PATH + '/Model/Quad_Net_Wt.h5')\n", (442, 474), False, 'from keras.models import load_model\n'), ((475, 490), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (485, 490), False, 'import time\n'), ((552, 570), 'sim.simxFinish', 'sim.simxFinish', (['(-1)'], {}), '(-1)\n', (566, 570), False, 'import sim\n'), ((604, 658), 'sim.simxStart', 'sim.simxStart', (['"""127.0.0.1"""', '(19999)', '(True)', '(True)', '(5000)', '(5)'], {}), "('127.0.0.1', 19999, True, True, 5000, 5)\n", (617, 658), False, 'import sim\n'), ((929, 1016), 'sim.simxGetObjectHandle', 'sim.simxGetObjectHandle', (['clientID', '"""Quadricopter_target"""', 'sim.simx_opmode_blocking'], {}), "(clientID, 'Quadricopter_target', sim.\n simx_opmode_blocking)\n", (952, 1016), False, 'import sim\n'), ((831, 860), 'sys.exit', 'sys.exit', (['"""Could not connect"""'], {}), "('Could not connect')\n", (839, 860), False, 'import sys\n'), ((1433, 1616), 'sim.simxCallScriptFunction', 'sim.simxCallScriptFunction', (['clientID', '"""Vision_sensor"""', 'sim.sim_scripttype_childscript', '"""getImage"""', 'inputInts', 'inputFloats', 'inputStrings', 'inputBuffer', 'sim.simx_opmode_blocking'], {}), "(clientID, 'Vision_sensor', sim.\n sim_scripttype_childscript, 'getImage', inputInts, inputFloats,\n inputStrings, inputBuffer, sim.simx_opmode_blocking)\n", (1459, 1616), False, 'import sim\n'), ((1754, 1785), 'numpy.array', 'np.array', (['image'], {'dtype': 'np.uint8'}), '(image, dtype=np.uint8)\n', (1762, 1785), True, 'import numpy as np\n'), ((1989, 2006), 'numpy.flip', 'np.flip', (['image', '(0)'], {}), '(image, 0)\n', (1996, 2006), True, 'import numpy as np\n'), ((2269, 2294), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (2278, 2294), True, 'import numpy as np\n'), ((2304, 2324), 'numpy.squeeze', 'np.squeeze', (['cls_pred'], {}), '(cls_pred)\n', (2314, 2324), True, 'import 
numpy as np\n'), ((2426, 2525), 'sim.simxGetObjectOrientation', 'sim.simxGetObjectOrientation', (['clientID', 'target_handle', 'target_handle', 'sim.simx_opmode_blocking'], {}), '(clientID, target_handle, target_handle, sim.\n simx_opmode_blocking)\n', (2454, 2525), False, 'import sim\n'), ((2553, 2649), 'sim.simxGetObjectPosition', 'sim.simxGetObjectPosition', (['clientID', 'target_handle', 'target_handle', 'sim.simx_opmode_blocking'], {}), '(clientID, target_handle, target_handle, sim.\n simx_opmode_blocking)\n', (2578, 2649), False, 'import sim\n'), ((2939, 3055), 'sim.simxSetObjectOrientation', 'sim.simxSetObjectOrientation', (['clientID', 'target_handle', 'target_handle', 'target_orien_body', 'sim.simx_opmode_oneshot'], {}), '(clientID, target_handle, target_handle,\n target_orien_body, sim.simx_opmode_oneshot)\n', (2967, 3055), False, 'import sim\n'), ((3071, 3182), 'sim.simxSetObjectPosition', 'sim.simxSetObjectPosition', (['clientID', 'target_handle', 'target_handle', 'target_pos_body', 'sim.simx_opmode_oneshot'], {}), '(clientID, target_handle, target_handle,\n target_pos_body, sim.simx_opmode_oneshot)\n', (3096, 3182), False, 'import sim\n'), ((3374, 3490), 'sim.simxSetObjectOrientation', 'sim.simxSetObjectOrientation', (['clientID', 'target_handle', 'target_handle', 'target_orien_body', 'sim.simx_opmode_oneshot'], {}), '(clientID, target_handle, target_handle,\n target_orien_body, sim.simx_opmode_oneshot)\n', (3402, 3490), False, 'import sim\n'), ((3506, 3617), 'sim.simxSetObjectPosition', 'sim.simxSetObjectPosition', (['clientID', 'target_handle', 'target_handle', 'target_pos_body', 'sim.simx_opmode_oneshot'], {}), '(clientID, target_handle, target_handle,\n target_pos_body, sim.simx_opmode_oneshot)\n', (3531, 3617), False, 'import sim\n'), ((3799, 3915), 'sim.simxSetObjectOrientation', 'sim.simxSetObjectOrientation', (['clientID', 'target_handle', 'target_handle', 'target_orien_body', 'sim.simx_opmode_oneshot'], {}), '(clientID, target_handle, 
target_handle,\n target_orien_body, sim.simx_opmode_oneshot)\n', (3827, 3915), False, 'import sim\n'), ((3931, 4042), 'sim.simxSetObjectPosition', 'sim.simxSetObjectPosition', (['clientID', 'target_handle', 'target_handle', 'target_pos_body', 'sim.simx_opmode_oneshot'], {}), '(clientID, target_handle, target_handle,\n target_pos_body, sim.simx_opmode_oneshot)\n', (3956, 4042), False, 'import sim\n')] |
# Author: <NAME> (http://falexwolf.de)
# T. Callies
"""Rank genes according to differential expression.
"""
import numpy as np
import pandas as pd
from math import sqrt, floor
from scipy.sparse import issparse
from .. import utils
from .. import settings
from .. import logging as logg
from ..preprocessing import simple
def rank_genes_groups(
        adata,
        group_by,
        use_raw=True,
        groups='all',
        reference='rest',
        n_genes=100,
        compute_distribution=False,
        only_positive=True,
        copy=False,
        test_type='t-test_overestim_var',
        correction_factors=None):
    """Rank genes according to differential expression [Wolf17]_.

    Rank genes by differential expression. By default, a t-test-like ranking is
    used, in which means are normalized with variances.

    Fixes over the original:
    * ``test_type is 't-test_correction_factors'`` compared string identity,
      which is not guaranteed for equal strings, so the correction-factor
      validation could be skipped silently; now uses ``==``.
    * The deprecation warning's concatenated parts were missing separating
      spaces and misspelled "distribution".

    Parameters
    ----------
    adata : :class:`~scanpy.api.AnnData`
        Annotated data matrix.
    group_by : `str`
        The key of the sample grouping to consider.
    use_raw : `bool`, optional (default: `True`)
        Use `raw` attribute of `adata` if present.
    groups : `str`, `list`, optional (default: `'all'`)
        Subset of groups, e.g. `['g1', 'g2', 'g3']`, to which comparison shall
        be restricted. If not passed, a ranking will be generated for all
        groups.
    reference : `str`, optional (default: `'rest'`)
        If `'rest'`, compare each group to the union of the rest of the group. If
        a group identifier, compare with respect to this group.
    n_genes : `int`, optional (default: 100)
        The number of genes that appear in the returned tables.
    test_type : {'t-test_overestim_var', 't-test', 'wilcoxon', 't-test_double_overestim_var',
        't-test_correction_factors'}, optional (default: 't-test_overestim_var')
        If 't-test', use t-test to calculate test statistics. If 'wilcoxon', use
        Wilcoxon-Rank-Sum to calculate test statistic. If
        't-test_overestim_var', overestimate variance.
        't-test_double_overestim_var', additionally, underestimate variance of the rest
        't-test_correction_factors', define correction factors manually
    only_positive : bool, optional (default: `True`)
        Only consider positive differences.
    correction_factors: [a,b], optional (default: None)
        Only for the test-type 't-test_correction_factors'. Then, a determines correction factor for group variance,
        b determines correction factor for variance of the comparison group

    Returns
    -------
    rank_genes_groups_gene_scores : structured `np.ndarray` (adata.uns)
        Structured array to be indexed by group id of shape storing the zscore
        for each gene for each group.
    rank_genes_groups_gene_names : structured `np.ndarray` (adata.uns)
        Structured array to be indexed by group id for storing the gene names.
    """
    logg.info('rank differentially expressed genes', r=True)
    adata = adata.copy() if copy else adata
    utils.sanitize_anndata(adata)
    if compute_distribution:
        logg.warn('`compute_distribution` is deprecated, as it requires storing '
                  'a shifted and rescaled distribution for each gene. '
                  'You can now run `sc.pl.rank_genes_groups_violin` without it, '
                  'which will show the original distribution of the gene.')
    # for clarity, rename variable
    groups_order = groups
    if isinstance(groups_order, list) and isinstance(groups_order[0], int):
        groups_order = [str(n) for n in groups_order]
    if reference != 'rest' and reference not in set(groups_order):
        groups_order += [reference]
    if (reference != 'rest'
        and reference not in set(adata.obs[group_by].cat.categories)):
        raise ValueError('reference = {} needs to be one of group_by = {}.'
                         .format(reference,
                                 adata.obs[group_by].cat.categories.tolist()))
    groups_order, groups_masks = utils.select_groups(
        adata, groups_order, group_by)
    adata.uns['rank_genes_groups_params'] = np.array(
        (group_by, reference, test_type, use_raw),
        dtype=[('group_by', 'U50'), ('reference', 'U50'), ('test_type', 'U50'), ('use_raw', np.bool_)])
    # adata_comp mocks an AnnData object if use_raw is True
    # otherwise it's just the AnnData object
    adata_comp = adata
    if adata.raw is not None and use_raw:
        adata_comp = adata.raw
    X = adata_comp.X

    # for clarity, rename variable
    n_genes_user = n_genes
    # make sure indices are not OoB in case there are less genes than n_genes
    if n_genes_user > X.shape[1]:
        n_genes_user = X.shape[1]
    # in the following, n_genes is simply another name for the total number of genes
    n_genes = X.shape[1]

    rankings_gene_zscores = []
    rankings_gene_names = []
    n_groups = groups_masks.shape[0]
    # per-group observation counts
    ns = np.zeros(n_groups, dtype=int)
    for imask, mask in enumerate(groups_masks):
        ns[imask] = np.where(mask)[0].size
    logg.info('    consider \'{}\':'.format(group_by), groups_order,
              'with sample numbers', ns)
    if reference != 'rest':
        ireference = np.where(groups_order == reference)[0][0]
    reference_indices = np.arange(adata_comp.n_vars, dtype=int)

    avail_tests = {'t-test', 't-test_overestim_var', 'wilcoxon', 't-test_double_overestim_var',
                   't-test_correction_factors'}
    if test_type not in avail_tests:
        raise ValueError('test_type should be one of {}.'
                         '"t-test_overestim_var" is being used as default.'
                         .format(avail_tests))

    # FIX: was `test_type is 't-test_correction_factors'`; identity comparison
    # with a string literal could be False for equal strings, silently
    # skipping this validation.
    if test_type == 't-test_correction_factors':
        if correction_factors is None:
            raise ValueError('For this test type, you need to enter correction factors manually.')
        if len(correction_factors) != 2:
            raise ValueError('We need exactly 2 correction factors, accessible via correction_factors[i], i=0,1')
        if correction_factors[0]<0 or correction_factors[1]<0:
            raise ValueError('Correction factors need to be positive numbers!')

    if test_type in {'t-test', 't-test_overestim_var', 't-test_double_overestim_var',
                     't-test_correction_factors'}:
        # loop over all masks and compute means, variances and sample numbers
        means = np.zeros((n_groups, n_genes))
        vars = np.zeros((n_groups, n_genes))
        for imask, mask in enumerate(groups_masks):
            means[imask], vars[imask] = simple._get_mean_var(X[mask])
        # test each either against the union of all other groups or against a
        # specific group
        for igroup in range(n_groups):
            if reference == 'rest':
                mask_rest = ~groups_masks[igroup]
            else:
                if igroup == ireference: continue
                else: mask_rest = groups_masks[ireference]
            mean_rest, var_rest = simple._get_mean_var(X[mask_rest])
            # effective sample sizes implement the variance over/underestimation
            if test_type == 't-test':
                ns_rest = np.where(mask_rest)[0].size
            elif test_type == 't-test_correction_factors':
                # The tendency is as follows: For the comparison group (rest), overesimate variance --> smaller ns_rest
                ns_rest = np.where(mask_rest)[0].size/correction_factors[1]
            else:  # hack for overestimating the variance
                ns_rest = ns[igroup]
            if test_type in {'t-test', 't-test_overestim_var'}:
                ns_group = ns[igroup]
            elif test_type == 't-test_correction_factors':
                # We underestimate group variance by increasing denominator, i.e. ns_group
                ns_group = ns[igroup]*correction_factors[0]
            else:
                # We do the opposite of t-test_overestim_var
                ns_group = np.where(mask_rest)[0].size
            denominator = np.sqrt(vars[igroup]/ns_group + var_rest/ns_rest)
            # avoid division by zero: NaN out zero denominators, then zero the scores
            denominator[np.flatnonzero(denominator == 0)] = np.nan
            zscores = (means[igroup] - mean_rest) / denominator
            zscores[np.isnan(zscores)] = 0
            zscores = zscores if only_positive else np.abs(zscores)
            # top-n_genes_user gene indices, sorted by descending z-score
            partition = np.argpartition(zscores, -n_genes_user)[-n_genes_user:]
            partial_indices = np.argsort(zscores[partition])[::-1]
            global_indices = reference_indices[partition][partial_indices]
            rankings_gene_zscores.append(zscores[global_indices])
            rankings_gene_names.append(adata_comp.var_names[global_indices])
            if compute_distribution:
                mask = groups_masks[igroup]
                for gene_counter in range(n_genes_user):
                    gene_idx = global_indices[gene_counter]
                    X_col = X[mask, gene_idx]
                    if issparse(X): X_col = X_col.toarray()[:, 0]
                    identifier = _build_identifier(group_by, groups_order[igroup],
                                                   gene_counter, adata_comp.var_names[gene_idx])
                    full_col = np.empty(adata.n_obs)
                    full_col[:] = np.nan
                    full_col[mask] = (X_col - mean_rest[gene_idx]) / denominator[gene_idx]
                    adata.obs[identifier] = full_col

    elif test_type == 'wilcoxon':
        # Wilcoxon-rank-sum test is usually more powerful in detecting marker genes
        # Limit maximal RAM that is required by the calculation. Currently set fixed to roughly 100 MByte
        CONST_MAX_SIZE = 10000000
        ns_rest = np.zeros(n_groups, dtype=int)
        # initialize space for z-scores
        zscores = np.zeros(n_genes)
        # First loop: Loop over all genes
        if reference != 'rest':
            for imask, mask in enumerate(groups_masks):
                if imask == ireference: continue
                else: mask_rest = groups_masks[ireference]
                ns_rest[imask] = np.where(mask_rest)[0].size
                if ns_rest[imask] <= 25 or ns[imask] <= 25:
                    logg.hint('Few observations in a group for '
                              'normal approximation (<=25). Lower test accuracy.')
                n_active = ns[imask]
                m_active = ns_rest[imask]
                # Now calculate gene expression ranking in chunkes:
                chunk = []
                # Calculate chunk frames
                n_genes_max_chunk = floor(CONST_MAX_SIZE / (n_active + m_active))
                if n_genes_max_chunk < n_genes - 1:
                    chunk_index = n_genes_max_chunk
                    while chunk_index < n_genes - 1:
                        chunk.append(chunk_index)
                        chunk_index = chunk_index + n_genes_max_chunk
                    chunk.append(n_genes - 1)
                else:
                    chunk.append(n_genes - 1)
                left = 0
                # Calculate rank sums for each chunk for the current mask
                # NOTE(review): slices below are exclusive of `right` while
                # `left` restarts at right + 1, so gene column `right` of each
                # chunk is never ranked — verify this is intended.
                for chunk_index, right in enumerate(chunk):
                    # Check if issparse is true: AnnData objects are currently sparse.csr or ndarray.
                    if issparse(X):
                        df1 = pd.DataFrame(data=X[mask, left:right].todense())
                        df2 = pd.DataFrame(data=X[mask_rest, left:right].todense(),
                                           index=np.arange(start=n_active, stop=n_active + m_active))
                    else:
                        df1 = pd.DataFrame(data=X[mask, left:right])
                        df2 = pd.DataFrame(data=X[mask_rest, left:right],
                                           index=np.arange(start=n_active, stop=n_active + m_active))
                    df1 = df1.append(df2)
                    ranks = df1.rank()
                    # sum up adjusted_ranks to calculate W_m,n
                    # NOTE(review): `.loc` is label-inclusive, so row label
                    # n_active (the first "rest" observation) is included in
                    # the group's rank sum — verify this is intended.
                    zscores[left:right] = np.sum(ranks.loc[0:n_active, :])
                    left = right + 1
                # normal approximation of the rank-sum statistic
                zscores = (zscores - (n_active * (n_active + m_active + 1) / 2)) / sqrt(
                    (n_active * m_active * (n_active + m_active + 1) / 12))
                zscores = zscores if only_positive else np.abs(zscores)
                zscores[np.isnan(zscores)] = 0
                partition = np.argpartition(zscores, -n_genes_user)[-n_genes_user:]
                partial_indices = np.argsort(zscores[partition])[::-1]
                global_indices = reference_indices[partition][partial_indices]
                rankings_gene_zscores.append(zscores[global_indices])
                rankings_gene_names.append(adata_comp.var_names[global_indices])
                if compute_distribution:
                    # Add calculation of means, var: (Unnecessary for wilcoxon if compute distribution=False)
                    mean, vars = simple._get_mean_var(X[mask])
                    mean_rest, var_rest = simple._get_mean_var(X[mask_rest])
                    denominator = np.sqrt(vars / ns[imask] + var_rest / ns_rest[imask])
                    denominator[np.flatnonzero(denominator == 0)] = np.nan
                    for gene_counter in range(n_genes_user):
                        gene_idx = global_indices[gene_counter]
                        X_col = X[mask, gene_idx]
                        if issparse(X): X_col = X_col.toarray()[:, 0]
                        identifier = _build_identifier(group_by, groups_order[imask],
                                                       gene_counter, adata_comp.var_names[gene_idx])
                        full_col = np.empty(adata.n_obs)
                        full_col[:] = np.nan
                        full_col[mask] = (X_col - mean_rest[gene_idx]) / denominator[gene_idx]
                        adata.obs[identifier] = full_col

        # If no reference group exists, ranking needs only to be done once (full mask)
        else:
            zscores = np.zeros((n_groups, n_genes))
            chunk = []
            n_cells = X.shape[0]
            n_genes_max_chunk = floor(CONST_MAX_SIZE / n_cells)
            if n_genes_max_chunk < n_genes - 1:
                chunk_index = n_genes_max_chunk
                while chunk_index < n_genes - 1:
                    chunk.append(chunk_index)
                    chunk_index = chunk_index + n_genes_max_chunk
                chunk.append(n_genes - 1)
            else:
                chunk.append(n_genes - 1)
            left = 0
            # NOTE(review): as above, column `right` of each chunk is skipped
            # by the exclusive slice plus `left = right + 1` — verify intended.
            for chunk_index, right in enumerate(chunk):
                # Check if issparse is true
                if issparse(X):
                    df1 = pd.DataFrame(data=X[:, left:right].todense())
                else:
                    df1 = pd.DataFrame(data=X[:, left:right])
                ranks = df1.rank()
                # sum up adjusted_ranks to calculate W_m,n
                for imask, mask in enumerate(groups_masks):
                    zscores[imask, left:right] = np.sum(ranks.loc[mask, :])
                left = right + 1
            for imask, mask in enumerate(groups_masks):
                zscores[imask, :] = (zscores[imask, :] - (ns[imask] * (n_cells + 1) / 2)) / sqrt(
                    (ns[imask] * (n_cells - ns[imask]) * (n_cells + 1) / 12))
                zscores = zscores if only_positive else np.abs(zscores)
                zscores[np.isnan(zscores)] = 0
                partition = np.argpartition(zscores[imask, :], -n_genes_user)[-n_genes_user:]
                partial_indices = np.argsort(zscores[imask, partition])[::-1]
                global_indices = reference_indices[partition][partial_indices]
                rankings_gene_zscores.append(zscores[imask, global_indices])
                rankings_gene_names.append(adata_comp.var_names[global_indices])
                if compute_distribution:
                    mean, vars = simple._get_mean_var(X[mask])
                    mean_rest, var_rest = simple._get_mean_var(X[~mask])
                    denominator = np.sqrt(vars / ns[imask] + var_rest / (n_cells-ns[imask]))
                    denominator[np.flatnonzero(denominator == 0)] = np.nan
                    for gene_counter in range(n_genes_user):
                        gene_idx = global_indices[gene_counter]
                        X_col = X[mask, gene_idx]
                        if issparse(X): X_col = X_col.toarray()[:, 0]
                        identifier = _build_identifier(group_by, groups_order[imask],
                                                       gene_counter, adata_comp.var_names[gene_idx])
                        full_col = np.empty(adata.n_obs)
                        full_col[:] = np.nan
                        full_col[mask] = (X_col - mean_rest[gene_idx]) / denominator[gene_idx]
                        adata.obs[identifier] = full_col

    groups_order_save = [str(g) for g in groups_order]
    if reference != 'rest':
        groups_order_save = [g for g in groups_order if g != reference]
    adata.uns['rank_genes_groups_gene_scores'] = np.rec.fromarrays(
        [n for n in rankings_gene_zscores],
        dtype=[(rn, 'float32') for rn in groups_order_save])
    adata.uns['rank_genes_groups_gene_names'] = np.rec.fromarrays(
        [n for n in rankings_gene_names],
        dtype=[(rn, 'U50') for rn in groups_order_save])
    logg.info('    finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
    logg.hint('added\n'
              '    \'rank_genes_groups_gene_names\', np.recarray to be indexed by group ids (adata.uns)\n'
              '    \'rank_genes_groups_gene_scores\', np.recarray to be indexed by group ids (adata.uns)')
    return adata if copy else None
def _build_identifier(group_by, name, gene_counter, gene_name):
return 'rank_genes_{}_{}_{}_{}'.format(
group_by, name, gene_counter, gene_name)
| [
"pandas.DataFrame",
"numpy.abs",
"numpy.sum",
"math.sqrt",
"scipy.sparse.issparse",
"numpy.flatnonzero",
"numpy.empty",
"numpy.zeros",
"math.floor",
"numpy.isnan",
"numpy.rec.fromarrays",
"numpy.argsort",
"numpy.argpartition",
"numpy.where",
"numpy.array",
"numpy.arange",
"numpy.sqrt... | [((4116, 4267), 'numpy.array', 'np.array', (['(group_by, reference, test_type, use_raw)'], {'dtype': "[('group_by', 'U50'), ('reference', 'U50'), ('test_type', 'U50'), (\n 'use_raw', np.bool_)]"}), "((group_by, reference, test_type, use_raw), dtype=[('group_by',\n 'U50'), ('reference', 'U50'), ('test_type', 'U50'), ('use_raw', np.bool_)])\n", (4124, 4267), True, 'import numpy as np\n'), ((4930, 4959), 'numpy.zeros', 'np.zeros', (['n_groups'], {'dtype': 'int'}), '(n_groups, dtype=int)\n', (4938, 4959), True, 'import numpy as np\n'), ((5276, 5315), 'numpy.arange', 'np.arange', (['adata_comp.n_vars'], {'dtype': 'int'}), '(adata_comp.n_vars, dtype=int)\n', (5285, 5315), True, 'import numpy as np\n'), ((16989, 17100), 'numpy.rec.fromarrays', 'np.rec.fromarrays', (['[n for n in rankings_gene_zscores]'], {'dtype': "[(rn, 'float32') for rn in groups_order_save]"}), "([n for n in rankings_gene_zscores], dtype=[(rn, 'float32'\n ) for rn in groups_order_save])\n", (17006, 17100), True, 'import numpy as np\n'), ((17161, 17265), 'numpy.rec.fromarrays', 'np.rec.fromarrays', (['[n for n in rankings_gene_names]'], {'dtype': "[(rn, 'U50') for rn in groups_order_save]"}), "([n for n in rankings_gene_names], dtype=[(rn, 'U50') for\n rn in groups_order_save])\n", (17178, 17265), True, 'import numpy as np\n'), ((6395, 6424), 'numpy.zeros', 'np.zeros', (['(n_groups, n_genes)'], {}), '((n_groups, n_genes))\n', (6403, 6424), True, 'import numpy as np\n'), ((6440, 6469), 'numpy.zeros', 'np.zeros', (['(n_groups, n_genes)'], {}), '((n_groups, n_genes))\n', (6448, 6469), True, 'import numpy as np\n'), ((7927, 7980), 'numpy.sqrt', 'np.sqrt', (['(vars[igroup] / ns_group + var_rest / ns_rest)'], {}), '(vars[igroup] / ns_group + var_rest / ns_rest)\n', (7934, 7980), True, 'import numpy as np\n'), ((9588, 9617), 'numpy.zeros', 'np.zeros', (['n_groups'], {'dtype': 'int'}), '(n_groups, dtype=int)\n', (9596, 9617), True, 'import numpy as np\n'), ((9676, 9693), 'numpy.zeros', 
'np.zeros', (['n_genes'], {}), '(n_genes)\n', (9684, 9693), True, 'import numpy as np\n'), ((5028, 5042), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (5036, 5042), True, 'import numpy as np\n'), ((5210, 5245), 'numpy.where', 'np.where', (['(groups_order == reference)'], {}), '(groups_order == reference)\n', (5218, 5245), True, 'import numpy as np\n'), ((8001, 8033), 'numpy.flatnonzero', 'np.flatnonzero', (['(denominator == 0)'], {}), '(denominator == 0)\n', (8015, 8033), True, 'import numpy as np\n'), ((8128, 8145), 'numpy.isnan', 'np.isnan', (['zscores'], {}), '(zscores)\n', (8136, 8145), True, 'import numpy as np\n'), ((8203, 8218), 'numpy.abs', 'np.abs', (['zscores'], {}), '(zscores)\n', (8209, 8218), True, 'import numpy as np\n'), ((8243, 8282), 'numpy.argpartition', 'np.argpartition', (['zscores', '(-n_genes_user)'], {}), '(zscores, -n_genes_user)\n', (8258, 8282), True, 'import numpy as np\n'), ((8329, 8359), 'numpy.argsort', 'np.argsort', (['zscores[partition]'], {}), '(zscores[partition])\n', (8339, 8359), True, 'import numpy as np\n'), ((13911, 13940), 'numpy.zeros', 'np.zeros', (['(n_groups, n_genes)'], {}), '((n_groups, n_genes))\n', (13919, 13940), True, 'import numpy as np\n'), ((14029, 14060), 'math.floor', 'floor', (['(CONST_MAX_SIZE / n_cells)'], {}), '(CONST_MAX_SIZE / n_cells)\n', (14034, 14060), False, 'from math import sqrt, floor\n'), ((8851, 8862), 'scipy.sparse.issparse', 'issparse', (['X'], {}), '(X)\n', (8859, 8862), False, 'from scipy.sparse import issparse\n'), ((9105, 9126), 'numpy.empty', 'np.empty', (['adata.n_obs'], {}), '(adata.n_obs)\n', (9113, 9126), True, 'import numpy as np\n'), ((10452, 10497), 'math.floor', 'floor', (['(CONST_MAX_SIZE / (n_active + m_active))'], {}), '(CONST_MAX_SIZE / (n_active + m_active))\n', (10457, 10497), False, 'from math import sqrt, floor\n'), ((14560, 14571), 'scipy.sparse.issparse', 'issparse', (['X'], {}), '(X)\n', (14568, 14571), False, 'from scipy.sparse import issparse\n'), ((7080, 
7099), 'numpy.where', 'np.where', (['mask_rest'], {}), '(mask_rest)\n', (7088, 7099), True, 'import numpy as np\n'), ((11173, 11184), 'scipy.sparse.issparse', 'issparse', (['X'], {}), '(X)\n', (11181, 11184), False, 'from scipy.sparse import issparse\n'), ((11908, 11940), 'numpy.sum', 'np.sum', (['ranks.loc[0:n_active, :]'], {}), '(ranks.loc[0:n_active, :])\n', (11914, 11940), True, 'import numpy as np\n'), ((12061, 12119), 'math.sqrt', 'sqrt', (['(n_active * m_active * (n_active + m_active + 1) / 12)'], {}), '(n_active * m_active * (n_active + m_active + 1) / 12)\n', (12065, 12119), False, 'from math import sqrt, floor\n'), ((12199, 12214), 'numpy.abs', 'np.abs', (['zscores'], {}), '(zscores)\n', (12205, 12214), True, 'import numpy as np\n'), ((12239, 12256), 'numpy.isnan', 'np.isnan', (['zscores'], {}), '(zscores)\n', (12247, 12256), True, 'import numpy as np\n'), ((12290, 12329), 'numpy.argpartition', 'np.argpartition', (['zscores', '(-n_genes_user)'], {}), '(zscores, -n_genes_user)\n', (12305, 12329), True, 'import numpy as np\n'), ((12380, 12410), 'numpy.argsort', 'np.argsort', (['zscores[partition]'], {}), '(zscores[partition])\n', (12390, 12410), True, 'import numpy as np\n'), ((12972, 13025), 'numpy.sqrt', 'np.sqrt', (['(vars / ns[imask] + var_rest / ns_rest[imask])'], {}), '(vars / ns[imask] + var_rest / ns_rest[imask])\n', (12979, 13025), True, 'import numpy as np\n'), ((14693, 14728), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'X[:, left:right]'}), '(data=X[:, left:right])\n', (14705, 14728), True, 'import pandas as pd\n'), ((14932, 14958), 'numpy.sum', 'np.sum', (['ranks.loc[mask, :]'], {}), '(ranks.loc[mask, :])\n', (14938, 14958), True, 'import numpy as np\n'), ((15141, 15201), 'math.sqrt', 'sqrt', (['(ns[imask] * (n_cells - ns[imask]) * (n_cells + 1) / 12)'], {}), '(ns[imask] * (n_cells - ns[imask]) * (n_cells + 1) / 12)\n', (15145, 15201), False, 'from math import sqrt, floor\n'), ((15281, 15296), 'numpy.abs', 'np.abs', (['zscores'], {}), 
'(zscores)\n', (15287, 15296), True, 'import numpy as np\n'), ((15321, 15338), 'numpy.isnan', 'np.isnan', (['zscores'], {}), '(zscores)\n', (15329, 15338), True, 'import numpy as np\n'), ((15372, 15421), 'numpy.argpartition', 'np.argpartition', (['zscores[imask, :]', '(-n_genes_user)'], {}), '(zscores[imask, :], -n_genes_user)\n', (15387, 15421), True, 'import numpy as np\n'), ((15472, 15509), 'numpy.argsort', 'np.argsort', (['zscores[imask, partition]'], {}), '(zscores[imask, partition])\n', (15482, 15509), True, 'import numpy as np\n'), ((15964, 16024), 'numpy.sqrt', 'np.sqrt', (['(vars / ns[imask] + var_rest / (n_cells - ns[imask]))'], {}), '(vars / ns[imask] + var_rest / (n_cells - ns[imask]))\n', (15971, 16024), True, 'import numpy as np\n'), ((7872, 7891), 'numpy.where', 'np.where', (['mask_rest'], {}), '(mask_rest)\n', (7880, 7891), True, 'import numpy as np\n'), ((9965, 9984), 'numpy.where', 'np.where', (['mask_rest'], {}), '(mask_rest)\n', (9973, 9984), True, 'import numpy as np\n'), ((11507, 11545), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'X[mask, left:right]'}), '(data=X[mask, left:right])\n', (11519, 11545), True, 'import pandas as pd\n'), ((13058, 13090), 'numpy.flatnonzero', 'np.flatnonzero', (['(denominator == 0)'], {}), '(denominator == 0)\n', (13072, 13090), True, 'import numpy as np\n'), ((13303, 13314), 'scipy.sparse.issparse', 'issparse', (['X'], {}), '(X)\n', (13311, 13314), False, 'from scipy.sparse import issparse\n'), ((13568, 13589), 'numpy.empty', 'np.empty', (['adata.n_obs'], {}), '(adata.n_obs)\n', (13576, 13589), True, 'import numpy as np\n'), ((16055, 16087), 'numpy.flatnonzero', 'np.flatnonzero', (['(denominator == 0)'], {}), '(denominator == 0)\n', (16069, 16087), True, 'import numpy as np\n'), ((16300, 16311), 'scipy.sparse.issparse', 'issparse', (['X'], {}), '(X)\n', (16308, 16311), False, 'from scipy.sparse import issparse\n'), ((16565, 16586), 'numpy.empty', 'np.empty', (['adata.n_obs'], {}), '(adata.n_obs)\n', (16573, 
16586), True, 'import numpy as np\n'), ((7313, 7332), 'numpy.where', 'np.where', (['mask_rest'], {}), '(mask_rest)\n', (7321, 7332), True, 'import numpy as np\n'), ((11398, 11449), 'numpy.arange', 'np.arange', ([], {'start': 'n_active', 'stop': '(n_active + m_active)'}), '(start=n_active, stop=n_active + m_active)\n', (11407, 11449), True, 'import numpy as np\n'), ((11669, 11720), 'numpy.arange', 'np.arange', ([], {'start': 'n_active', 'stop': '(n_active + m_active)'}), '(start=n_active, stop=n_active + m_active)\n', (11678, 11720), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Walkthrough of NumPy universal (element-wise) and binary functions.

Created on Mon Jul 17 16:17:25 2017
@author: jorgemauricio
"""
# libraries
import numpy as np
# Universal functions (ufuncs) operate element-wise on an array
arr = np.arange(11)
# display (a bare expression only echoes in an interactive session)
arr
# square root
np.sqrt(arr)
# compute the exponential (e^x)
np.exp(arr)
# Binary functions require two arrays
# random array (standard normal distribution)
A = np.random.randn(10)
# display
A
# random array (standard normal distribution)
B = np.random.randn(10)
# display
B
# element-wise addition
np.add(A,B)
# element-wise maximum of the two arrays
np.maximum(A,B)
# | [
"numpy.maximum",
"numpy.random.randn",
"numpy.arange",
"numpy.exp",
"numpy.add",
"numpy.sqrt"
] | [((179, 192), 'numpy.arange', 'np.arange', (['(11)'], {}), '(11)\n', (188, 192), True, 'import numpy as np\n'), ((227, 239), 'numpy.sqrt', 'np.sqrt', (['arr'], {}), '(arr)\n', (234, 239), True, 'import numpy as np\n'), ((272, 283), 'numpy.exp', 'np.exp', (['arr'], {}), '(arr)\n', (278, 283), True, 'import numpy as np\n'), ((376, 395), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (391, 395), True, 'import numpy as np\n'), ((458, 477), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (473, 477), True, 'import numpy as np\n'), ((503, 515), 'numpy.add', 'np.add', (['A', 'B'], {}), '(A, B)\n', (509, 515), True, 'import numpy as np\n'), ((550, 566), 'numpy.maximum', 'np.maximum', (['A', 'B'], {}), '(A, B)\n', (560, 566), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import tensorflow as tf
from monopsr.datasets.kitti import instance_utils
class InstanceUtilsTest(tf.test.TestCase):
    """Unit tests for the projection UV map helpers in instance_utils."""

    def test_get_proj_uv_map(self):
        """Projected u/v coordinates should fall on pixel centres."""
        box_2d = np.array([0, 10, 10, 20], dtype=np.float32)
        roi_size = (10, 10)

        u_map, v_map = instance_utils.get_exp_proj_uv_map(box_2d, roi_size)

        first_u_row = u_map[0]
        first_v_col = v_map[:, 0]

        # Expected values are the middle of each pixel
        np.testing.assert_allclose(first_u_row, np.linspace(10.5, 19.5, 10))
        np.testing.assert_allclose(first_v_col, np.linspace(0.5, 9.5, 10))

    def test_tf_get_proj_uv_map(self):
        """TF implementation should match the numpy reference for a batch of boxes."""
        boxes_2d = np.array(
            [[0.0, 10.0, 10.0, 20.0],
             [5.0, 5.0, 10.0, 10.0],
             [0.0, 0.0, 100.0, 100.0]], dtype=np.float32)
        roi_size = (10, 10)

        # Numpy reference, one uv map per box
        expected_maps = [
            instance_utils.get_exp_proj_uv_map(
                box_2d, roi_size, use_pixel_centres=True)
            for box_2d in boxes_2d]

        uv_map_tensor = instance_utils.tf_get_exp_proj_uv_map(
            tf.to_float(boxes_2d), roi_size)
        with self.test_session() as sess:
            uv_map = sess.run(uv_map_tensor)

        np.testing.assert_allclose(uv_map, expected_maps)

    def test_tf_inst_xyz_map_local_to_global(self):
        """TF map transform should agree with the numpy point-wise version."""
        local_points = np.random.rand(2304, 3).astype(np.float32)
        view_angle = np.deg2rad(10.0).astype(np.float32)
        centroid = np.array([2.5, 1.5, 15.0], dtype=np.float32)

        # Numpy reference on the flat point list
        expected_global = instance_utils.inst_points_local_to_global(
            local_points, view_angle, centroid)

        # Same points arranged as a single 48x48 xyz map
        xyz_map_tensor = instance_utils.tf_inst_xyz_map_local_to_global(
            local_points.reshape(1, 48, 48, 3), map_roi_size=(48, 48),
            view_angs=np.reshape(view_angle, (-1, 1)),
            centroids=np.reshape(centroid, (-1, 3)))

        with self.test_session() as sess:
            xyz_map_out = sess.run(xyz_map_tensor)

        np.testing.assert_allclose(expected_global, xyz_map_out.reshape(2304, 3))
| [
"numpy.deg2rad",
"numpy.asarray",
"monopsr.datasets.kitti.instance_utils.get_exp_proj_uv_map",
"monopsr.datasets.kitti.instance_utils.tf_inst_xyz_map_local_to_global",
"tensorflow.to_float",
"numpy.linspace",
"numpy.reshape",
"numpy.random.rand",
"numpy.testing.assert_allclose",
"monopsr.datasets.... | [((211, 256), 'numpy.asarray', 'np.asarray', (['[0, 10, 10, 20]'], {'dtype': 'np.float32'}), '([0, 10, 10, 20], dtype=np.float32)\n', (221, 256), True, 'import numpy as np\n'), ((308, 360), 'monopsr.datasets.kitti.instance_utils.get_exp_proj_uv_map', 'instance_utils.get_exp_proj_uv_map', (['box_2d', 'roi_size'], {}), '(box_2d, roi_size)\n', (342, 360), False, 'from monopsr.datasets.kitti import instance_utils\n'), ((523, 550), 'numpy.linspace', 'np.linspace', (['(10.5)', '(19.5)', '(10)'], {}), '(10.5, 19.5, 10)\n', (534, 550), True, 'import numpy as np\n'), ((571, 596), 'numpy.linspace', 'np.linspace', (['(0.5)', '(9.5)', '(10)'], {}), '(0.5, 9.5, 10)\n', (582, 596), True, 'import numpy as np\n'), ((606, 655), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['proj_u_row', 'exp_u_row'], {}), '(proj_u_row, exp_u_row)\n', (632, 655), True, 'import numpy as np\n'), ((664, 713), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['proj_v_col', 'exp_v_row'], {}), '(proj_v_col, exp_v_row)\n', (690, 713), True, 'import numpy as np\n'), ((774, 878), 'numpy.asarray', 'np.asarray', (['[[0.0, 10.0, 10.0, 20.0], [5.0, 5.0, 10.0, 10.0], [0.0, 0.0, 100.0, 100.0]]', 'np.float32'], {}), '([[0.0, 10.0, 10.0, 20.0], [5.0, 5.0, 10.0, 10.0], [0.0, 0.0, \n 100.0, 100.0]], np.float32)\n', (784, 878), True, 'import numpy as np\n'), ((1157, 1178), 'tensorflow.to_float', 'tf.to_float', (['boxes_2d'], {}), '(boxes_2d)\n', (1168, 1178), True, 'import tensorflow as tf\n'), ((1202, 1262), 'monopsr.datasets.kitti.instance_utils.tf_get_exp_proj_uv_map', 'instance_utils.tf_get_exp_proj_uv_map', (['tf_boxes_2d', 'roi_size'], {}), '(tf_boxes_2d, roi_size)\n', (1239, 1262), False, 'from monopsr.datasets.kitti import instance_utils\n'), ((1399, 1460), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['proj_uv_map_out', 'exp_proj_uv_maps'], {}), '(proj_uv_map_out, exp_proj_uv_maps)\n', (1425, 1460), True, 'import numpy as np\n'), 
((1665, 1711), 'numpy.asarray', 'np.asarray', (['[2.5, 1.5, 15.0]'], {'dtype': 'np.float32'}), '([2.5, 1.5, 15.0], dtype=np.float32)\n', (1675, 1711), True, 'import numpy as np\n'), ((1745, 1835), 'monopsr.datasets.kitti.instance_utils.inst_points_local_to_global', 'instance_utils.inst_points_local_to_global', (['inst_points_local', 'viewing_angle', 'centroid'], {}), '(inst_points_local, viewing_angle,\n centroid)\n', (1787, 1835), False, 'from monopsr.datasets.kitti import instance_utils\n'), ((1934, 1968), 'numpy.reshape', 'np.reshape', (['viewing_angle', '(-1, 1)'], {}), '(viewing_angle, (-1, 1))\n', (1944, 1968), True, 'import numpy as np\n'), ((1992, 2021), 'numpy.reshape', 'np.reshape', (['centroid', '(-1, 3)'], {}), '(centroid, (-1, 3))\n', (2002, 2021), True, 'import numpy as np\n'), ((2055, 2193), 'monopsr.datasets.kitti.instance_utils.tf_inst_xyz_map_local_to_global', 'instance_utils.tf_inst_xyz_map_local_to_global', (['xyz_maps_local'], {'map_roi_size': '(48, 48)', 'view_angs': 'tf_view_angs', 'centroids': 'tf_centroids'}), '(xyz_maps_local, map_roi_size\n =(48, 48), view_angs=tf_view_angs, centroids=tf_centroids)\n', (2101, 2193), False, 'from monopsr.datasets.kitti import instance_utils\n'), ((2444, 2516), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['np_inst_points_global', 'tf_inst_points_global'], {}), '(np_inst_points_global, tf_inst_points_global)\n', (2470, 2516), True, 'import numpy as np\n'), ((979, 1055), 'monopsr.datasets.kitti.instance_utils.get_exp_proj_uv_map', 'instance_utils.get_exp_proj_uv_map', (['box_2d', 'roi_size'], {'use_pixel_centres': '(True)'}), '(box_2d, roi_size, use_pixel_centres=True)\n', (1013, 1055), False, 'from monopsr.datasets.kitti import instance_utils\n'), ((1543, 1566), 'numpy.random.rand', 'np.random.rand', (['(2304)', '(3)'], {}), '(2304, 3)\n', (1557, 1566), True, 'import numpy as np\n'), ((1610, 1626), 'numpy.deg2rad', 'np.deg2rad', (['(10.0)'], {}), '(10.0)\n', (1620, 1626), True, 'import 
numpy as np\n')] |
#!/usr/bin/env python3
'''
Train a GRU on the spectrogram of a wav file and synthesize a continuation.

Setup:
!pip3 install -U tensorflow-gpu keras numpy scipy
'''
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from scipy.io import wavfile
from scipy.linalg.blas import daxpy
from keras import backend as K
from keras.callbacks import EarlyStopping
from keras.layers import Dense, Embedding, GRU
from keras.models import Sequential
from keras.optimizers import RMSprop
# Load a short audio file
(breath_sr, breath_wav) = wavfile.read('11_Dialogue_Class_Rank.wav')
# Retain just one stereo track
data = breath_wav[:,0]
# GPUs to use (0-1)
gpu_count = 1
# Number of time-domain samples to convert to frequency domain in one window
window_size = 1024
# Number of time-domain samples between successive windows
slide = 256
# Frequency domain dimension
freq_dim = 2 * (1 + (window_size // 2)) # x2 because of real-imag parts
# Number of successive freq domain windows to predict the next window from
sequence_len = 25
# Dimension of GRU units
gru_dim = 1024
# Optimizer learning rate
learning_rate=0.1
# Visual sanity check of the input spectrum
specgram = plt.specgram(data, NFFT=window_size, Fs=slide)
print("Spectrum of input audio")
plt.show()
# Hanning window weights to apply to time-domain sample windows
# Normalize weights to sum to 1, for later convenience
window_weight = slide * np.hanning(window_size) / np.sum(np.hanning(window_size))
n = len(data)
# Data, sliced into a series of overlapping windows, and weighted
weighted_slices = data[np.arange(window_size)[None, :] + slide * np.arange(1 + (n - window_size) // slide)[:, None]] * window_weight
del data
# Apply the FFT to convert to a sequence of frequency-domain windows
freq_slices = np.fft.rfft(weighted_slices)
del weighted_slices
# FFT outputs (real,imag) 64-bit pairs. Flatten them to two separate 32-bit values
freq_slices_flattened = np.apply_along_axis(lambda a: a.view('(2,)float').flatten(), 1, freq_slices).astype('float32')
del freq_slices
# Select devices for training based on GPU availability
# (allow_growth avoids TF grabbing all GPU memory up front)
if gpu_count > 0:
    config = tf.ConfigProto()
    config.gpu_options.allow_growth=True
    session = tf.Session(config=config)
    K.set_session(session)
    device1 = '/gpu:0'
    if gpu_count > 1:
        device2 = '/gpu:1'
    else:
        device2 = '/gpu:0'
else:
    device1 = '/cpu:0'
    device2 = '/cpu:0'
# Model: GRU encoder over a window sequence, linear output head
model = Sequential()
with tf.device(device1):
    model.add(GRU(gru_dim,
                  input_shape=(sequence_len, freq_dim)))
with tf.device(device2):
    model.add(Dense(freq_dim, activation=None))
model.compile(optimizer=RMSprop(lr=learning_rate),
              loss='mean_absolute_error')
model.summary()
# Initialize predicted audio out with the first few input windows
predicted_freq_slices = freq_slices_flattened[:sequence_len]
# Build up batches of input windows, paired with next window as prediction target
input_freq_slices = []
next_freq_slices = []
for i in range(0, len(freq_slices_flattened) - sequence_len - 1):
    input_freq_slices.append(freq_slices_flattened[i : i + sequence_len])
    next_freq_slices.append(freq_slices_flattened[i + sequence_len])
del freq_slices_flattened
# Convert them to numpy arrays for future use
input_freq_slices = np.array(input_freq_slices)
next_freq_slices = np.array(next_freq_slices)
# Pick most (input,next) pairs as training; rest for validation
shuffled_indices = np.random.permutation(len(input_freq_slices))
training_size = int(0.95 * len(input_freq_slices))
train_indices = shuffled_indices[:training_size]
val_indices = shuffled_indices[training_size:]
input_freq_slices_train = input_freq_slices[train_indices]
input_freq_slices_val = input_freq_slices[val_indices]
next_freq_slices_train = next_freq_slices[train_indices]
next_freq_slices_val = next_freq_slices[val_indices]
# Stop training when validation loss has not improved for 10 epochs
early_stopping = EarlyStopping(patience=10, verbose=1)
model.fit(input_freq_slices_train,
          next_freq_slices_train,
          epochs=100,
          batch_size=64,
          shuffle=True,
          validation_data=(input_freq_slices_val, next_freq_slices_val),
          verbose=2,
          callbacks=[early_stopping])
# Starting with initial part of input audio, autoregressively predict next windows
for i in range(0, 1000):
    pred_next_slice = model.predict(predicted_freq_slices[None,-sequence_len:])
    predicted_freq_slices = np.append(predicted_freq_slices, pred_next_slice, axis=0)
# Convert back to (real,imag) complex representation in freq domain
predicted_freq_slices_unflattened = \
    np.reshape(predicted_freq_slices, (-1, freq_dim//2, 2)).view('complex64').reshape(-1, freq_dim//2).astype('complex128')
# Apply inverse FFT to get back time-domain windows
pred_time_slices = np.fft.irfft(predicted_freq_slices_unflattened)
# Reassemble full time domain signal by overlap-adding the windows
reassembled = np.zeros(window_size + (len(pred_time_slices) - 1) * slide)
for i in range(0, len(pred_time_slices)):
    # daxpy adds pred_time_slices[i] into reassembled starting at offset slide*i
    daxpy(pred_time_slices[i], reassembled, offy=slide * i)
# Plot some of the first generated time-domain data as a check
plot_sample_base = sequence_len * slide
plt.plot(reassembled[plot_sample_base:plot_sample_base + window_size])
plt.show()
# Scale time-domain data to have max at 32767, for 16-bit wav output
reassembled_scale = np.max(np.abs(reassembled))
reassembled = reassembled * (32767 / reassembled_scale)
print("Spectrum of output audio")
specgram = plt.specgram(reassembled, NFFT=window_size, Fs=slide)
plt.show()
# Overwrite output to out.wav
out_file = 'out.wav'
if os.path.isfile(out_file):
    os.remove(out_file)
wavfile.write(out_file, breath_sr, reassembled.astype(np.int16))
| [
"os.remove",
"numpy.fft.rfft",
"numpy.abs",
"scipy.io.wavfile.read",
"tensorflow.ConfigProto",
"os.path.isfile",
"numpy.arange",
"matplotlib.pyplot.specgram",
"numpy.fft.irfft",
"keras.layers.GRU",
"numpy.append",
"numpy.reshape",
"numpy.hanning",
"matplotlib.pyplot.show",
"keras.backend... | [((480, 522), 'scipy.io.wavfile.read', 'wavfile.read', (['"""11_Dialogue_Class_Rank.wav"""'], {}), "('11_Dialogue_Class_Rank.wav')\n", (492, 522), False, 'from scipy.io import wavfile\n'), ((1069, 1115), 'matplotlib.pyplot.specgram', 'plt.specgram', (['data'], {'NFFT': 'window_size', 'Fs': 'slide'}), '(data, NFFT=window_size, Fs=slide)\n', (1081, 1115), True, 'import matplotlib.pyplot as plt\n'), ((1149, 1159), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1157, 1159), True, 'import matplotlib.pyplot as plt\n'), ((1656, 1684), 'numpy.fft.rfft', 'np.fft.rfft', (['weighted_slices'], {}), '(weighted_slices)\n', (1667, 1684), True, 'import numpy as np\n'), ((2306, 2318), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2316, 2318), False, 'from keras.models import Sequential\n'), ((3172, 3199), 'numpy.array', 'np.array', (['input_freq_slices'], {}), '(input_freq_slices)\n', (3180, 3199), True, 'import numpy as np\n'), ((3219, 3245), 'numpy.array', 'np.array', (['next_freq_slices'], {}), '(next_freq_slices)\n', (3227, 3245), True, 'import numpy as np\n'), ((3766, 3803), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(10)', 'verbose': '(1)'}), '(patience=10, verbose=1)\n', (3779, 3803), False, 'from keras.callbacks import EarlyStopping\n'), ((4646, 4693), 'numpy.fft.irfft', 'np.fft.irfft', (['predicted_freq_slices_unflattened'], {}), '(predicted_freq_slices_unflattened)\n', (4658, 4693), True, 'import numpy as np\n'), ((5048, 5118), 'matplotlib.pyplot.plot', 'plt.plot', (['reassembled[plot_sample_base:plot_sample_base + window_size]'], {}), '(reassembled[plot_sample_base:plot_sample_base + window_size])\n', (5056, 5118), True, 'import matplotlib.pyplot as plt\n'), ((5119, 5129), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5127, 5129), True, 'import matplotlib.pyplot as plt\n'), ((5352, 5405), 'matplotlib.pyplot.specgram', 'plt.specgram', (['reassembled'], {'NFFT': 'window_size', 'Fs': 'slide'}), 
'(reassembled, NFFT=window_size, Fs=slide)\n', (5364, 5405), True, 'import matplotlib.pyplot as plt\n'), ((5406, 5416), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5414, 5416), True, 'import matplotlib.pyplot as plt\n'), ((5472, 5496), 'os.path.isfile', 'os.path.isfile', (['out_file'], {}), '(out_file)\n', (5486, 5496), False, 'import os\n'), ((2011, 2027), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2025, 2027), True, 'import tensorflow as tf\n'), ((2083, 2108), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (2093, 2108), True, 'import tensorflow as tf\n'), ((2113, 2135), 'keras.backend.set_session', 'K.set_session', (['session'], {}), '(session)\n', (2126, 2135), True, 'from keras import backend as K\n'), ((2324, 2342), 'tensorflow.device', 'tf.device', (['device1'], {}), '(device1)\n', (2333, 2342), True, 'import tensorflow as tf\n'), ((2434, 2452), 'tensorflow.device', 'tf.device', (['device2'], {}), '(device2)\n', (2443, 2452), True, 'import tensorflow as tf\n'), ((4282, 4339), 'numpy.append', 'np.append', (['predicted_freq_slices', 'pred_next_slice'], {'axis': '(0)'}), '(predicted_freq_slices, pred_next_slice, axis=0)\n', (4291, 4339), True, 'import numpy as np\n'), ((4886, 4941), 'scipy.linalg.blas.daxpy', 'daxpy', (['pred_time_slices[i]', 'reassembled'], {'offy': '(slide * i)'}), '(pred_time_slices[i], reassembled, offy=slide * i)\n', (4891, 4941), False, 'from scipy.linalg.blas import daxpy\n'), ((5229, 5248), 'numpy.abs', 'np.abs', (['reassembled'], {}), '(reassembled)\n', (5235, 5248), True, 'import numpy as np\n'), ((5502, 5521), 'os.remove', 'os.remove', (['out_file'], {}), '(out_file)\n', (5511, 5521), False, 'import os\n'), ((1304, 1327), 'numpy.hanning', 'np.hanning', (['window_size'], {}), '(window_size)\n', (1314, 1327), True, 'import numpy as np\n'), ((1337, 1360), 'numpy.hanning', 'np.hanning', (['window_size'], {}), '(window_size)\n', (1347, 1360), True, 'import numpy as 
np\n'), ((2358, 2408), 'keras.layers.GRU', 'GRU', (['gru_dim'], {'input_shape': '(sequence_len, freq_dim)'}), '(gru_dim, input_shape=(sequence_len, freq_dim))\n', (2361, 2408), False, 'from keras.layers import Dense, Embedding, GRU\n'), ((2468, 2500), 'keras.layers.Dense', 'Dense', (['freq_dim'], {'activation': 'None'}), '(freq_dim, activation=None)\n', (2473, 2500), False, 'from keras.layers import Dense, Embedding, GRU\n'), ((2527, 2552), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (2534, 2552), False, 'from keras.optimizers import RMSprop\n'), ((1454, 1476), 'numpy.arange', 'np.arange', (['window_size'], {}), '(window_size)\n', (1463, 1476), True, 'import numpy as np\n'), ((1496, 1537), 'numpy.arange', 'np.arange', (['(1 + (n - window_size) // slide)'], {}), '(1 + (n - window_size) // slide)\n', (1505, 1537), True, 'import numpy as np\n'), ((4452, 4509), 'numpy.reshape', 'np.reshape', (['predicted_freq_slices', '(-1, freq_dim // 2, 2)'], {}), '(predicted_freq_slices, (-1, freq_dim // 2, 2))\n', (4462, 4509), True, 'import numpy as np\n')] |
"""
Extract MADIS METAR QC information to the database
"""
from __future__ import print_function
import os
import sys
import datetime
import numpy as np
import pytz
from netCDF4 import chartostring
from pyiem.datatypes import temperature
from pyiem.util import get_dbconn, ncopen
def figure(val, qcval):
    """Convert a Kelvin QC departure into a Fahrenheit delta.

    Returns the string 'Null' (SQL literal) when the QC value is a
    missing-data sentinel (> 1000).
    """
    if qcval > 1000:
        return 'Null'
    base_f = temperature(val, 'K').value("F")
    departed_f = temperature(val + qcval, 'K').value("F")
    return departed_f - base_f
def figure_alti(val, qcval):
    """Scale an altimeter QC value (divide by 100).

    ``val`` is unused but kept for call-site symmetry with figure().
    Returns the string 'Null' (SQL literal) when the QC value is a
    missing-data sentinel (> 100000).
    """
    return 'Null' if qcval > 100000.0 else qcval / 100.0
def check(val):
    """Map missing-data sentinel values (> 200000) to the SQL literal 'Null'."""
    return 'Null' if val > 200000.0 else val
def main():
    """Extract MADIS METAR QC departures and update the current_qc table."""
    pgconn = get_dbconn('iem')
    icursor = pgconn.cursor()
    # Process the MADIS file from three hours ago
    # (presumably to allow the upstream feed time to arrive -- TODO confirm)
    now = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
    fn = "/mesonet/data/madis/metar/%s.nc" % (now.strftime("%Y%m%d_%H00"), )
    table = "current_qc"
    if not os.path.isfile(fn):
        # Nothing to do when the expected netCDF file is absent
        sys.exit()
    nc = ncopen(fn)
    # Decode the char-array station names into strings
    ids = chartostring(nc.variables["stationName"][:])
    nc_tmpk = nc.variables["temperature"]
    nc_dwpk = nc.variables["dewpoint"]
    nc_alti = nc.variables["altimeter"]
    # QCD arrays hold QC departures; indices 0 and 6 are read below
    # (assumed to be two distinct QC checks -- TODO confirm against MADIS docs)
    tmpkqcd = nc.variables["temperatureQCD"]
    dwpkqcd = nc.variables["dewpointQCD"]
    altiqcd = nc.variables["altimeterQCD"]
    for j in range(ids.shape[0]):
        sid = ids[j]
        # Skip malformed/short identifiers
        if len(sid) < 4:
            continue
        # Only process 'K' (CONUS) stations; sid[1:] is the stored station id
        if sid[0] == "K":
            # timeObs is seconds since the Unix epoch; make it UTC-aware
            ts = datetime.datetime(1970, 1, 1) + datetime.timedelta(
                seconds=int(nc.variables["timeObs"][j]))
            ts = ts.replace(tzinfo=pytz.utc)
            # Default every field to the SQL 'Null' literal until valid data found
            (tmpf, tmpf_qc_av, tmpf_qc_sc) = ('Null', 'Null', 'Null')
            (dwpf, dwpf_qc_av, dwpf_qc_sc) = ('Null', 'Null', 'Null')
            (alti, alti_qc_av, alti_qc_sc) = ('Null', 'Null', 'Null')
            # Temperature: require value and both QC departures unmasked
            if (not np.ma.is_masked(nc_tmpk[j]) and
                    not np.ma.is_masked(tmpkqcd[j, 0]) and
                    not np.ma.is_masked(tmpkqcd[j, 6])):
                tmpf = check(temperature(nc_tmpk[j], 'K').value('F'))
                tmpf_qc_av = figure(nc_tmpk[j], tmpkqcd[j, 0])
                tmpf_qc_sc = figure(nc_tmpk[j], tmpkqcd[j, 6])
            # Dewpoint: same masking requirements as temperature
            if (not np.ma.is_masked(nc_dwpk[j]) and
                    not np.ma.is_masked(dwpkqcd[j, 0]) and
                    not np.ma.is_masked(dwpkqcd[j, 6])):
                dwpf = check(temperature(nc_dwpk[j], 'K').value('F'))
                dwpf_qc_av = figure(nc_dwpk[j], dwpkqcd[j, 0])
                dwpf_qc_sc = figure(nc_dwpk[j], dwpkqcd[j, 6])
            # Altimeter: convert Pa to inches of mercury (0.0295298 in/hPa)
            if not np.ma.is_masked(nc_alti[j]):
                alti = check(nc_alti[j] / 100.0 * 0.0295298)
                alti_qc_av = figure_alti(alti, altiqcd[j, 0] * 0.0295298)
                alti_qc_sc = figure_alti(alti, altiqcd[j, 6] * 0.0295298)
            # NOTE(review): SQL is built by string interpolation; values come
            # from the netCDF file, but parameterized queries would be safer
            sql = """
            UPDATE %s SET tmpf = %s, tmpf_qc_av = %s,
            tmpf_qc_sc = %s, dwpf = %s, dwpf_qc_av = %s,
            dwpf_qc_sc = %s, alti = %s, alti_qc_av = %s,
            alti_qc_sc = %s, valid = '%s' WHERE
            station = '%s'
            """ % (table, tmpf,
                   tmpf_qc_av, tmpf_qc_sc, dwpf, dwpf_qc_av,
                   dwpf_qc_sc, alti, alti_qc_av, alti_qc_sc,
                   ts.strftime("%Y-%m-%d %H:%M+00"), sid[1:])
            # Replace stringified missing values with SQL Null literals
            sql = sql.replace("--", "Null").replace("nan", "Null")
            try:
                icursor.execute(sql)
            except Exception as exp:
                # Log and continue; one bad row should not abort the run
                print(exp)
                print(sql)
    nc.close()
    icursor.close()
    pgconn.commit()
    pgconn.close()
# Entry point: run the ingest only when executed directly, not on import
if __name__ == '__main__':
    main()
| [
"pyiem.util.ncopen",
"pyiem.datatypes.temperature",
"datetime.datetime",
"datetime.datetime.utcnow",
"os.path.isfile",
"datetime.timedelta",
"netCDF4.chartostring",
"numpy.ma.is_masked",
"pyiem.util.get_dbconn",
"sys.exit"
] | [((699, 716), 'pyiem.util.get_dbconn', 'get_dbconn', (['"""iem"""'], {}), "('iem')\n", (709, 716), False, 'from pyiem.util import get_dbconn, ncopen\n'), ((979, 989), 'pyiem.util.ncopen', 'ncopen', (['fn'], {}), '(fn)\n', (985, 989), False, 'from pyiem.util import get_dbconn, ncopen\n'), ((1001, 1045), 'netCDF4.chartostring', 'chartostring', (["nc.variables['stationName'][:]"], {}), "(nc.variables['stationName'][:])\n", (1013, 1045), False, 'from netCDF4 import chartostring\n'), ((758, 784), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (782, 784), False, 'import datetime\n'), ((787, 814), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(3)'}), '(hours=3)\n', (805, 814), False, 'import datetime\n'), ((930, 948), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (944, 948), False, 'import os\n'), ((958, 968), 'sys.exit', 'sys.exit', ([], {}), '()\n', (966, 968), False, 'import sys\n'), ((362, 383), 'pyiem.datatypes.temperature', 'temperature', (['val', '"""K"""'], {}), "(val, 'K')\n", (373, 383), False, 'from pyiem.datatypes import temperature\n'), ((407, 436), 'pyiem.datatypes.temperature', 'temperature', (['(val + qcval)', '"""K"""'], {}), "(val + qcval, 'K')\n", (418, 436), False, 'from pyiem.datatypes import temperature\n'), ((1442, 1471), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (1459, 1471), False, 'import datetime\n'), ((2573, 2600), 'numpy.ma.is_masked', 'np.ma.is_masked', (['nc_alti[j]'], {}), '(nc_alti[j])\n', (2588, 2600), True, 'import numpy as np\n'), ((1846, 1873), 'numpy.ma.is_masked', 'np.ma.is_masked', (['nc_tmpk[j]'], {}), '(nc_tmpk[j])\n', (1861, 1873), True, 'import numpy as np\n'), ((1902, 1932), 'numpy.ma.is_masked', 'np.ma.is_masked', (['tmpkqcd[j, 0]'], {}), '(tmpkqcd[j, 0])\n', (1917, 1932), True, 'import numpy as np\n'), ((1961, 1991), 'numpy.ma.is_masked', 'np.ma.is_masked', (['tmpkqcd[j, 6]'], {}), '(tmpkqcd[j, 6])\n', (1976, 1991), 
True, 'import numpy as np\n'), ((2210, 2237), 'numpy.ma.is_masked', 'np.ma.is_masked', (['nc_dwpk[j]'], {}), '(nc_dwpk[j])\n', (2225, 2237), True, 'import numpy as np\n'), ((2266, 2296), 'numpy.ma.is_masked', 'np.ma.is_masked', (['dwpkqcd[j, 0]'], {}), '(dwpkqcd[j, 0])\n', (2281, 2296), True, 'import numpy as np\n'), ((2325, 2355), 'numpy.ma.is_masked', 'np.ma.is_masked', (['dwpkqcd[j, 6]'], {}), '(dwpkqcd[j, 6])\n', (2340, 2355), True, 'import numpy as np\n'), ((2023, 2051), 'pyiem.datatypes.temperature', 'temperature', (['nc_tmpk[j]', '"""K"""'], {}), "(nc_tmpk[j], 'K')\n", (2034, 2051), False, 'from pyiem.datatypes import temperature\n'), ((2387, 2415), 'pyiem.datatypes.temperature', 'temperature', (['nc_dwpk[j]', '"""K"""'], {}), "(nc_dwpk[j], 'K')\n", (2398, 2415), False, 'from pyiem.datatypes import temperature\n')] |
import numpy as np
import iminuit
import matplotlib.pyplot as plt
class LogisticRegression(object):
    """
    Class for fitting, predicting and estimating errors on logistic regression.

    Quick usage:
    - instantiate: m = LogisticRegression()
    - fit: m.fit(X, y)
    - predict: m.predict(X)
    - errors: dwn, up = m.estimate_errors(X)

    Attributes:
    :param fit_intercept: whether or not to include the intercept/bias in the fit
    :param l1: L1-regularization parameter. Multiplies the sum of absolute parameters
    :param l2: L2-regularization parameter. Multiplies half the sum of squared parameters
    :param minuit: instance of the Minuit minimization class
    :param X: input features for fitting
    :param y: targets for fitting
    """
    def __init__(self, fit_intercept=True, l1=0, l2=0):
        """
        Instantiate a logistic regression
        :param fit_intercept: whether or not to include the intercept/bias in the fit
        :param l1: L1-regularization parameter. Multiplies the sum of absolute parameters
        :param l2: L2-regularization parameter. Multiplies half the sum of squared parameters
        """
        # pre-fit attributes
        self.fit_intercept = fit_intercept
        self.l1 = l1
        self.l2 = l2
        # post-fit attributes; self.minuit doubles as the "has been fitted" flag
        self.minuit = None
        # training data (set by fit() via _check_inputs)
        self.X = None
        self.y = None

    @property
    def parameters(self):
        """
        Fit parameters, a.k.a. weights
        """
        if self.minuit is None:
            raise RuntimeError("Fit before access to fit parameters")
        return self.minuit.np_values()

    @property
    def errors(self):
        """
        Errors of the fit parameters
        Square root of the diagonal of the covariance matrix
        """
        if self.minuit is None:
            raise RuntimeError("Fit before access to fit parameters")
        return np.sqrt(np.diag(self.minuit.np_matrix()))

    @property
    def cvr_mtx(self):
        """
        Covariance matrix of the fit parameters
        """
        if self.minuit is None:
            raise RuntimeError("Fit before access to fit parameters")
        return self.minuit.np_matrix()

    @staticmethod
    def logistic(x):
        """
        Calculate the logistic function, a.k.a. sigmoid, given an input
        :param x: [numpy nD array] input
        :return: [numpy nD array] logistic function of the input
        """
        return 1. / (1. + np.exp(-x))

    @staticmethod
    def negativeLogPosterior(p, X, y, l1, l2):
        """
        Calculate the negative of the log of the posterior
        distribution over parameters given targets and
        features.
        A combination of the negative log likelihood for
        classification (i.e. the log-loss), and l1 and/or l2
        regularization
        :param p: [numpy 1D array] parameter vector
        :param X: [numpy 2D array] feature matrix
        :param y: [numpy 1D array] target vector
        :param l1: [float or numpy 1D array] l1 regularization parameter
        :param l2: [float or numpy 2D array] l2 regularization parameter
        :return: negative log posterior of parameters given data
        """
        # predictions on train set with given parameters
        y_pred = LogisticRegression.logistic(X.dot(p))
        # negative log-likelihood of predictions
        # (1e-16 guards against log(0) when predictions saturate at 0 or 1)
        nll = -np.sum(y*np.log(y_pred+1e-16) + (1-y)*np.log(1-y_pred+1e-16))
        if l1 == 0 and l2 == 0:
            return nll
        # negative log-prior of parameters (Laplace prior -> l1, Gaussian prior -> l2)
        nlp = np.sum(np.abs(l1 * p)) + 0.5 * np.sum(l2 * p**2)
        return nll + nlp

    @staticmethod
    def gradientNegativeLogPosterior(p, X, y, l1, l2):
        """
        Calculate the gradient of the negative of the
        log of the posterior distribution over parameters
        given targets and features
        :param p: [numpy 1D array] parameter vector
        :param X: [numpy 2D array] feature matrix
        :param y: [numpy 1D array] target vector
        :param l1: [float or numpy 1D array] l1 regularization parameter
        :param l2: [float or numpy 2D array] l2 regularization parameter
        :return: gradient with respect to the parameters of the negative
        log posterior
        """
        # predictions on train set with given parameters
        y_pred = LogisticRegression.logistic(X.dot(p))
        # gradient negative log-likelihood
        gnll = np.sum((y_pred-y)[:,np.newaxis] * X, axis=0)
        if l1 == 0 and l2 == 0:
            return gnll
        # gradient of negative log-prior
        # NOTE: np.sign(p) is 0 at p == 0 (the l1 subgradient choice made here)
        gnlp = l1 * np.sign(p) + l2 * p
        return gnll + gnlp

    def fit(self, X, y,
            initial_parameters=None, initial_step_sizes=None,
            parameter_limits=None, parameter_fixes=None,
            print_level=0,
            max_function_calls=10000, n_splits=1):
        """
        Fit logistic regression to feature matrix X and target vector y
        If you call this method more than once, you resume a fit with
        parameters, step sizes, limits and fixes as they were at the end
        of the previous fit, for each that is given as None as an argument
        :param X: [numpy.ndarray shape (n_data, n_features,)] feature matrix
        :param y: [numpy.ndarray shape (n_data,)] target vector
        :param initial_parameters: [sequence of numbers, length n_features+1] initial parameter vector
        A single number is promoted to all parameters
        None means all zeros for a first fit, or resume from previous fit
        :param initial_step_sizes: [sequence of numbers, length n_features+1] initial minimization
        parameter step sizes. A single number is promoted to all parameters
        Usually, the choice is not important. In the worst case, iminuit will use a few more
        function evaluations to find the minimum
        None means all ones for a first fit, or resume from previous fit
        :param parameter_limits: [sequence of tuples of numbers, length n_features+1] lower and upper bounds
        for parameters. Use None within the sequence for no bound for that parameter or False for no bounds
        for all parameters, and use None to take the limits from the previous fit
        :param parameter_fixes: [sequence of booleans, length n_features+1] Whether to fix a parameter to the
        initial value
        Use False not to fix any parameters and None to take the fixes from the previous fit
        :param print_level: 0 is quiet. 1 print out fit results. 2 paranoid. 3 really paranoid
        :param max_function_calls: [integer] maximum number of function calls
        :param n_splits: [integer] split fit in to n_splits runs. Fitting stops when it found the function
        minimum to be valid or n_calls is reached
        """
        self.X, self.y = self._check_inputs(X, y)
        # (Re)create the minimizer only on the first fit or when the caller
        # overrides any of the start conditions; otherwise resume the old one.
        if self.minuit is None or\
                initial_parameters is not None or\
                initial_step_sizes is not None or\
                parameter_limits is not None or\
                parameter_fixes is not None:
            if initial_parameters is None:
                if self.minuit is not None:
                    # resume from the previous fit's parameters
                    initial_parameters = self.parameters
                else:
                    initial_parameters = [0]*self.X.shape[1]
            elif isinstance(initial_parameters, (float, int,)):
                # promote a single number to all parameters
                initial_parameters = [initial_parameters]*self.X.shape[1]
            elif hasattr(initial_parameters, "__iter__"):
                initial_parameters = np.array(initial_parameters, dtype=float)
                if initial_parameters.shape[0] != self.X.shape[1]:
                    raise ValueError("Dimensions of features X and initial parameters don't match")
            else:
                raise ValueError("Initial parameters not understood")
            # Validate/promote step sizes; None resumes previous errors or defaults to 1
            if initial_step_sizes is not None:
                if isinstance(initial_step_sizes, (float, int,)):
                    initial_step_sizes = [initial_step_sizes]*len(initial_parameters)
                elif not hasattr(initial_step_sizes, "__iter__") or isinstance(initial_step_sizes, str):
                    raise ValueError("Step sizes should be a sequence of numbers")
                elif not all([isinstance(s, (float, int,)) for s in initial_step_sizes]):
                    raise ValueError("Step sizes should be a sequence of numbers")
                elif len(initial_step_sizes) != len(initial_parameters):
                    raise ValueError("{:d} step sizes given for {:d} parameters".format(len(initial_step_sizes), len(initial_parameters)))
            elif self.minuit is not None:
                initial_step_sizes = [state['error'] for state in self.minuit.get_param_states()]
            else:
                initial_step_sizes = 1
            # Limits: False -> unbounded, None -> resume previous, else validate
            if parameter_limits == False:
                parameter_limits = None
            elif parameter_limits is not None:
                if not hasattr(parameter_limits, "__iter__") or isinstance(initial_step_sizes, str):
                    raise ValueError("Limits should be a sequence of range tuples")
                if not all([l is None or (isinstance(l,(tuple,)) and len(l)==2 and\
                        (l[0] is None or isinstance(l[0],(int,float,))) and\
                        (l[1] is None or isinstance(l[1],(int,float,)))) for l in parameter_limits]):
                    raise ValueError("A limit should be a range tuple or None")
                if len(parameter_limits) != len(initial_parameters):
                    raise ValueError("{:d} limits given for {:d} parameters".format(len(parameter_limits), len(initial_parameters)))
            elif self.minuit is not None:
                parameter_limits = [(state['lower_limit'], state['upper_limit'],) for state in self.minuit.get_param_states()]
            # Fixes: False -> nothing fixed, None -> resume previous, else validate
            if parameter_fixes == False:
                parameter_fixes = None
            elif parameter_fixes is not None:
                if not hasattr(parameter_fixes, "__iter__") or isinstance(initial_step_sizes, str):
                    raise ValueError("Fixes should be a sequence of booleans")
                if not all([isinstance(f, (bool, int, float,)) for f in parameter_fixes]):
                    raise ValueError("A fix should be True or False")
                if len(parameter_fixes) != len(initial_parameters):
                    raise ValueError("{:d} fixes given for {:d} parameters".format(len(parameter_fixes), len(initial_parameters)))
                parameter_fixes = [bool(f) for f in parameter_fixes]
            elif self.minuit is not None:
                parameter_fixes = [state['is_fixed'] for state in self.minuit.get_param_states()]
            # define function to be minimized
            fcn = lambda p: self.negativeLogPosterior(p, self.X, self.y, self.l1, self.l2)
            # define the gradient of the function to be minimized
            grd = lambda p: self.gradientNegativeLogPosterior(p, self.X, self.y, self.l1, self.l2)
            # initiate minuit minimizer
            # (legacy iminuit 1.x API; errordef=0.5 is the iminuit convention
            # for negative log-likelihood cost functions)
            self.minuit = iminuit.Minuit.from_array_func(fcn=fcn,
                    start=initial_parameters, error=initial_step_sizes,
                    limit=parameter_limits, fix=parameter_fixes,
                    throw_nan=True, print_level=print_level,
                    grad=grd, use_array_call=True, errordef=0.5, pedantic=False)
        self.minuit.print_level = print_level
        # minimize with migrad
        fmin, _ = self.minuit.migrad(ncall=max_function_calls, nsplit=n_splits, resume=True)
        # check validity of minimum
        if not fmin.is_valid:
            if not fmin.has_covariance or not fmin.has_accurate_covar or not fmin.has_posdef_covar or \
                    fmin.has_made_posdef_covar or fmin.hesse_failed:
                # It is known that migrad sometimes fails calculating the covariance matrix,
                # but succeeds on a second try
                self.minuit.set_strategy(2)
                fmin, _ = self.minuit.migrad(ncall=max_function_calls, nsplit=n_splits, resume=True)
            if not fmin.is_valid:
                raise RuntimeError("Problem encountered with minimization.\n%s" % (str(fmin)))
        # refine the error/covariance estimate at the found minimum
        self.minuit.hesse()

    def predict(self, X):
        """
        Calculate the logistic scores given features X
        :param X: [numpy 2D array] feature matrix
        :return: [numpy 1D array] logistic regression scores
        """
        X, _ = self._check_inputs(X, None)
        y_pred = LogisticRegression.logistic(X.dot(self.parameters))
        return y_pred

    def estimate_errors(self, X, nstddevs=1):
        """
        Estimate upper and lower uncertainties
        This method is based on error intervals, where every standard
        deviation interval in parameter space is the multi-dimensional
        range where the negative log-likelihood goes up by 1/2
        The lower and upper errors are the maximum and minimum amount
        respectively that the logistic function goes down or up when
        taking parameters within this interval
        :param X: [numpy 2D array] feature matrix
        :param nstddevs: [int] error contour
        :return: [numpy 1D arrays] upper and lower error estimates
        """
        X, _ = self._check_inputs(X, None)
        mid = X.dot(self.parameters)
        # per-data-point 1-sigma spread of the linear score: sqrt(u^T C u)
        delta = np.array([np.sqrt(np.abs(np.dot(u,np.dot(self.cvr_mtx, u)))) for u in X], dtype=float)
        y_pred = LogisticRegression.logistic(mid)
        upper = LogisticRegression.logistic(mid + nstddevs * delta) - y_pred
        lower = y_pred - LogisticRegression.logistic(mid - nstddevs * delta)
        return lower, upper

    def estimate_errors_sampling(self, X, n_samples=10000, return_covariance=False):
        """
        Estimate uncertainties via sampling the posterior
        Calculates the non-central variance for each input data point
        by sampling parameters from an approximate posterior (a multivariate
        normal distribution)
        :param X: [numpy 2D array] feature matrix
        :param n_samples: [int] number of samples to draw
        :param return_covariance: [boolean] return only error estitimes (False),
        or full covariance matrix (True) of the estimates
        :return: covariance matrix of error estimates if return_covariance
        is True, otherwise the upper and lower error estimates (symmetric)
        """
        X, _ = self._check_inputs(X, None)
        if not isinstance(n_samples, (int,)):
            raise ValueError("Non-integer number of samples provided")
        sampled_parameters = np.random.multivariate_normal(self.parameters, self.cvr_mtx, n_samples).T # shape (npars, nsamples,)
        fitted_parameters = np.tile(self.parameters, (n_samples, 1)).T # shape (npars, nsamples,)
        sigmoid_sampled_parameters = LogisticRegression.logistic(X.dot(sampled_parameters)) # shape (ndata, nsamples,)
        sigmoid_fitted_parameters = LogisticRegression.logistic(X.dot(fitted_parameters)) # shape (ndata, nsamples,)
        sigmoid_variation = sigmoid_sampled_parameters - sigmoid_fitted_parameters # shape (ndata, nsamples,)
        if return_covariance == True:
            covar = np.dot(sigmoid_variation, sigmoid_variation.T) / n_samples # shape (ndata, ndata,)
            return covar
        else:
            # non-central variance around the fitted-parameter prediction
            var = np.mean(np.square(sigmoid_variation), axis = 1) # shape (ndata,)
            symmetric_error = np.sqrt(np.abs(var))
            return symmetric_error

    def estimate_errors_linear(self, X, n_stddevs=1, return_covariance=False):
        """
        Estimate uncertainties via linear error propagation
        Calculates the non-central variance for each input data point
        by approximating the logistic function linearly.
        This method is fast, but may be inaccurate
        :param X: [numpy 2D array] feature matrix
        :param n_stddevs: [int] number of standard deviations to estimate gradient on
        None means take exact gradient
        :return: covariance matrix of error estimates if return_covariance
        is True, otherwise the upper and lower error estimates (symmetric)
        """
        X, _ = self._check_inputs(X, None)
        fcn = lambda p: LogisticRegression.logistic(X.dot(p))
        if isinstance(n_stddevs, (float, int,)):
            # finite-difference gradient over +-n_stddevs along each parameter's sigma
            gradients = np.array([(fcn(self.parameters + n_stddevs*u) - fcn(self.parameters - n_stddevs*u)) \
                    /(2 * np.sum(n_stddevs*u)) for u in np.diag(np.sqrt(np.diag(self.cvr_mtx)))]).T
        else:
            # exact gradient of the sigmoid: s * (1 - s) * x
            gradients = X * (fcn(self.parameters) * (1 - fcn(self.parameters)))[:, np.newaxis] # shape (ndata, npars,)
        if return_covariance == True:
            covar = np.dot(gradients, np.dot(self.cvr_mtx, gradients.T)) # shape (ndata, ndata,)
            return covar
        else:
            symmetric_error = np.sqrt(np.abs([np.dot(g, np.dot(self.cvr_mtx, g)) for g in gradients]))
            return symmetric_error

    def _check_inputs(self, X, y=None):
        """
        Check inputs for matching dimensions and convert to numpy arrays
        :param X: feature matrix
        :param y: target vector
        :return: X, y as numpy arrays
        """
        X = np.array(X)
        if X.ndim > 2:
            raise ValueError("Dimension of features X bigger than 2 not supported")
        elif X.ndim == 1:
            # promote 1D input to a single-column matrix
            X = X[:, np.newaxis]
            if self.minuit is not None:
                # a 1D input is ambiguous: it may be one feature for n data points,
                # or one data point with n features -> use the fitted parameter
                # count to disambiguate and transpose if needed
                p = self.minuit.np_values()
                if X.shape[1] + int(self.fit_intercept) == p.shape[0]:
                    pass
                elif X.shape[1] == 1 and X.shape[0] + int(self.fit_intercept) == p.shape[0]:
                    X = X.T
                else:
                    raise ValueError("Dimension of X do not match dimensions of parameters")
        if self.fit_intercept:
            # append a constant column so the intercept is an ordinary parameter
            X = np.concatenate((X, np.ones((X.shape[0], 1), dtype=float)), axis=1)
        if y is not None:
            # coerce targets to binary 0/1 labels
            y = (np.atleast_1d(y) != 0).astype(int)
            if y.ndim > 1:
                raise ValueError("Dimension of target y bigger than 1 not supported")
            if X.shape[0] != y.shape[0]:
                raise ValueError("Number of data points in features X and target y don't match")
        return X, y
| [
"iminuit.Minuit.from_array_func",
"numpy.sum",
"numpy.abs",
"numpy.log",
"numpy.square",
"numpy.ones",
"numpy.sign",
"numpy.array",
"numpy.tile",
"numpy.random.multivariate_normal",
"numpy.exp",
"numpy.dot",
"numpy.atleast_1d",
"numpy.diag"
] | [((4504, 4551), 'numpy.sum', 'np.sum', (['((y_pred - y)[:, np.newaxis] * X)'], {'axis': '(0)'}), '((y_pred - y)[:, np.newaxis] * X, axis=0)\n', (4510, 4551), True, 'import numpy as np\n'), ((17422, 17433), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (17430, 17433), True, 'import numpy as np\n'), ((11195, 11446), 'iminuit.Minuit.from_array_func', 'iminuit.Minuit.from_array_func', ([], {'fcn': 'fcn', 'start': 'initial_parameters', 'error': 'initial_step_sizes', 'limit': 'parameter_limits', 'fix': 'parameter_fixes', 'throw_nan': '(True)', 'print_level': 'print_level', 'grad': 'grd', 'use_array_call': '(True)', 'errordef': '(0.5)', 'pedantic': '(False)'}), '(fcn=fcn, start=initial_parameters, error=\n initial_step_sizes, limit=parameter_limits, fix=parameter_fixes,\n throw_nan=True, print_level=print_level, grad=grd, use_array_call=True,\n errordef=0.5, pedantic=False)\n', (11225, 11446), False, 'import iminuit\n'), ((14751, 14822), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['self.parameters', 'self.cvr_mtx', 'n_samples'], {}), '(self.parameters, self.cvr_mtx, n_samples)\n', (14780, 14822), True, 'import numpy as np\n'), ((14880, 14920), 'numpy.tile', 'np.tile', (['self.parameters', '(n_samples, 1)'], {}), '(self.parameters, (n_samples, 1))\n', (14887, 14920), True, 'import numpy as np\n'), ((2526, 2536), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (2532, 2536), True, 'import numpy as np\n'), ((3623, 3637), 'numpy.abs', 'np.abs', (['(l1 * p)'], {}), '(l1 * p)\n', (3629, 3637), True, 'import numpy as np\n'), ((3647, 3666), 'numpy.sum', 'np.sum', (['(l2 * p ** 2)'], {}), '(l2 * p ** 2)\n', (3653, 3666), True, 'import numpy as np\n'), ((4668, 4678), 'numpy.sign', 'np.sign', (['p'], {}), '(p)\n', (4675, 4678), True, 'import numpy as np\n'), ((15357, 15403), 'numpy.dot', 'np.dot', (['sigmoid_variation', 'sigmoid_variation.T'], {}), '(sigmoid_variation, sigmoid_variation.T)\n', (15363, 15403), True, 'import numpy as np\n'), ((15505, 
15533), 'numpy.square', 'np.square', (['sigmoid_variation'], {}), '(sigmoid_variation)\n', (15514, 15533), True, 'import numpy as np\n'), ((15600, 15611), 'numpy.abs', 'np.abs', (['var'], {}), '(var)\n', (15606, 15611), True, 'import numpy as np\n'), ((16923, 16956), 'numpy.dot', 'np.dot', (['self.cvr_mtx', 'gradients.T'], {}), '(self.cvr_mtx, gradients.T)\n', (16929, 16956), True, 'import numpy as np\n'), ((18054, 18091), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {'dtype': 'float'}), '((X.shape[0], 1), dtype=float)\n', (18061, 18091), True, 'import numpy as np\n'), ((3449, 3471), 'numpy.log', 'np.log', (['(y_pred + 1e-16)'], {}), '(y_pred + 1e-16)\n', (3455, 3471), True, 'import numpy as np\n'), ((3478, 3504), 'numpy.log', 'np.log', (['(1 - y_pred + 1e-16)'], {}), '(1 - y_pred + 1e-16)\n', (3484, 3504), True, 'import numpy as np\n'), ((7653, 7694), 'numpy.array', 'np.array', (['initial_parameters'], {'dtype': 'float'}), '(initial_parameters, dtype=float)\n', (7661, 7694), True, 'import numpy as np\n'), ((18146, 18162), 'numpy.atleast_1d', 'np.atleast_1d', (['y'], {}), '(y)\n', (18159, 18162), True, 'import numpy as np\n'), ((13520, 13543), 'numpy.dot', 'np.dot', (['self.cvr_mtx', 'u'], {}), '(self.cvr_mtx, u)\n', (13526, 13543), True, 'import numpy as np\n'), ((17078, 17101), 'numpy.dot', 'np.dot', (['self.cvr_mtx', 'g'], {}), '(self.cvr_mtx, g)\n', (17084, 17101), True, 'import numpy as np\n'), ((16639, 16660), 'numpy.sum', 'np.sum', (['(n_stddevs * u)'], {}), '(n_stddevs * u)\n', (16645, 16660), True, 'import numpy as np\n'), ((16685, 16706), 'numpy.diag', 'np.diag', (['self.cvr_mtx'], {}), '(self.cvr_mtx)\n', (16692, 16706), True, 'import numpy as np\n')] |
"""Run an ANDES eigenvalue analysis on the Kundur 11-bus system with TGOV
governors and export the sorted eigenvalues to CSV."""
import andes
import numpy as np
from andes.utils.paths import get_case

case_path = "11BUS_KUNDUR.raw"
dyr_path = "11BUS_KUNDUR_TGOV.dyr"

# Run the power flow plus the eigenvalue analysis routine on the case
ss = andes.run(case_path, addfile=dyr_path, routine='eig')

# Sort the complex eigenvalues and persist them for later comparison
eigs = ss.EIG.mu
eigs_sorted = np.sort_complex(eigs)
np.savetxt("eigs_tgov_andes.csv", eigs_sorted, delimiter=",")
# An additional post process was done to transform Python complex to Julia complex numbers
| [
"andes.run",
"numpy.savetxt",
"numpy.sort_complex"
] | [((125, 178), 'andes.run', 'andes.run', (['case_path'], {'addfile': 'dyr_path', 'routine': '"""eig"""'}), "(case_path, addfile=dyr_path, routine='eig')\n", (134, 178), False, 'import andes\n'), ((233, 254), 'numpy.sort_complex', 'np.sort_complex', (['eigs'], {}), '(eigs)\n', (248, 254), True, 'import numpy as np\n'), ((255, 316), 'numpy.savetxt', 'np.savetxt', (['"""eigs_tgov_andes.csv"""', 'eigs_sorted'], {'delimiter': '""","""'}), "('eigs_tgov_andes.csv', eigs_sorted, delimiter=',')\n", (265, 316), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# Copyright (C) 2015 ETH Zurich, Institute for Astronomy
# System imports
from __future__ import print_function, division, absolute_import, unicode_literals
# External modules
import numpy as np
# fastell4py imports
from fastell4py import _fastell
#axis ratio (<=1; <0 means: end)
#arat = 0.2
#q,gam,s2 = 0.2, 1, 0.001
#x1,x2, = 0.9, 0.9
#defl = np.empty((2))
#defl3 = np.empty((2))
#phi = 0
#magmx = np.empty((2,2))
#fastell.fastelldefl(x1,x2,q,gam,arat,s2,defl3)
#print('fastell Defl.= ', defl3)
#fastell.fastellmag(x1,x2,q,gam,arat,s2,defl, magmx)
#print('fastell mag routine: Defl.= ',defl)
#print('and Magmx(11,22,12) = ',magmx[0,0])
#print(magmx[0,1],magmx[1,0], magmx[1,1])
#fastell.ellipphi(x1,x2,q,gam,arat,s2,phi)
#print('slow Phi= ',phi)
def fastelldefl(x1, x2, q, gam, arat, s2):
    """
    Compute the deflection components via the fastell Fortran routine.

    Scalar coordinates use the single-point routine; sequences use the
    vectorized array routine.

    :param x1: x-coordinate, scalar or 1D sequence
    :param x2: y-coordinate, scalar or 1D sequence (same length as x1)
    :param q: model parameter passed through to the Fortran routine
    :param gam: model parameter passed through to the Fortran routine
    :param arat: axis ratio (<= 1) passed through to the Fortran routine
    :param s2: model parameter passed through to the Fortran routine
    :return: tuple (alpha1, alpha2) of the two deflection components;
        scalars for scalar input, 1D arrays for sequence input
    """
    if isinstance(x1, (int, float)):
        # the Fortran routine fills a length-2 output buffer in place
        defl = np.empty(2)
        _fastell.fastelldefl(x1, x2, q, gam, arat, s2, defl)
        return defl[0], defl[1]
    n = len(x1)
    # output buffers filled in place by the vectorized Fortran routine
    alpha1 = np.empty(n)
    alpha2 = np.empty(n)
    _fastell.fastelldefl_array(x1, x2, q, gam, arat, s2, alpha1, alpha2, n)
    return alpha1, alpha2
def fastellmag(x1, x2, q, gam, arat, s2):
    """
    Compute deflection components and magnification-matrix entries via the
    fastell Fortran routine (always the vectorized variant; scalar input is
    treated as a length-1 array).

    :param x1: x-coordinate, scalar or 1D sequence
    :param x2: y-coordinate, scalar or 1D sequence (same length as x1)
    :param q: model parameter passed through to the Fortran routine
    :param gam: model parameter passed through to the Fortran routine
    :param arat: axis ratio (<= 1) passed through to the Fortran routine
    :param s2: model parameter passed through to the Fortran routine
    :return: tuple (alpha1, alpha2, f_xx, f_yy, f_xy) of 1D arrays
    """
    n = 1 if isinstance(x1, (int, float)) else len(x1)
    # output buffers filled in place by the Fortran routine
    alpha1 = np.empty(n)
    alpha2 = np.empty(n)
    f_xx = np.empty(n)
    f_xy = np.empty(n)
    f_yy = np.empty(n)
    # NOTE: the Fortran argument order is (xx, xy, yy); the return order
    # below is (xx, yy, xy), preserved from the original implementation
    _fastell.fastellmag_array(x1, x2, q, gam, arat, s2, alpha1, alpha2,
                              f_xx, f_xy, f_yy, n)
    return alpha1, alpha2, f_xx, f_yy, f_xy
def ellipphi(x1, x2, q, gam, arat, s2):
    """
    Compute the potential phi via the (slow) ellipphi Fortran routine
    (always the vectorized variant; scalar input is treated as a
    length-1 array).

    :param x1: x-coordinate, scalar or 1D sequence
    :param x2: y-coordinate, scalar or 1D sequence (same length as x1)
    :param q: model parameter passed through to the Fortran routine
    :param gam: model parameter passed through to the Fortran routine
    :param arat: axis ratio (<= 1) passed through to the Fortran routine
    :param s2: model parameter passed through to the Fortran routine
    :return: [numpy 1D array] potential values, one per input point
    """
    n = 1 if isinstance(x1, (int, float)) else len(x1)
    # output buffer filled in place by the Fortran routine
    phi = np.empty(n)
    _fastell.ellipphi_array(x1, x2, q, gam, arat, s2, phi, n)
    return phi
"fastell4py._fastell.ellipphi_array",
"numpy.empty",
"fastell4py._fastell.fastellmag_array",
"fastell4py._fastell.fastelldefl_array",
"fastell4py._fastell.fastelldefl"
] | [((1636, 1647), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (1644, 1647), True, 'import numpy as np\n'), ((1660, 1671), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (1668, 1671), True, 'import numpy as np\n'), ((1687, 1698), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (1695, 1698), True, 'import numpy as np\n'), ((1714, 1725), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (1722, 1725), True, 'import numpy as np\n'), ((1741, 1752), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (1749, 1752), True, 'import numpy as np\n'), ((1757, 1859), 'fastell4py._fastell.fastellmag_array', '_fastell.fastellmag_array', (['x1', 'x2', 'q', 'gam', 'arat', 's2', 'defl1', 'defl2', 'magmx_xx', 'magmx_xy', 'magmx_yy', 'n'], {}), '(x1, x2, q, gam, arat, s2, defl1, defl2, magmx_xx,\n magmx_xy, magmx_yy, n)\n', (1782, 1859), False, 'from fastell4py import _fastell\n'), ((2264, 2275), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (2272, 2275), True, 'import numpy as np\n'), ((2280, 2337), 'fastell4py._fastell.ellipphi_array', '_fastell.ellipphi_array', (['x1', 'x2', 'q', 'gam', 'arat', 's2', 'phi', 'n'], {}), '(x1, x2, q, gam, arat, s2, phi, n)\n', (2303, 2337), False, 'from fastell4py import _fastell\n'), ((1014, 1025), 'numpy.empty', 'np.empty', (['(2)'], {}), '(2)\n', (1022, 1025), True, 'import numpy as np\n'), ((1036, 1088), 'fastell4py._fastell.fastelldefl', '_fastell.fastelldefl', (['x1', 'x2', 'q', 'gam', 'arat', 's2', 'defl'], {}), '(x1, x2, q, gam, arat, s2, defl)\n', (1056, 1088), False, 'from fastell4py import _fastell\n'), ((1167, 1178), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (1175, 1178), True, 'import numpy as np\n'), ((1195, 1206), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (1203, 1206), True, 'import numpy as np\n'), ((1215, 1284), 'fastell4py._fastell.fastelldefl_array', '_fastell.fastelldefl_array', (['x1', 'x2', 'q', 'gam', 'arat', 's2', 'defl1', 'defl2', 'n'], {}), '(x1, x2, q, gam, arat, s2, defl1, defl2, n)\n', (1241, 1284), 
False, 'from fastell4py import _fastell\n')] |
"""Train/evaluate a regression CNN on the cats-and-dogs data.

Loads (or builds) a Keras model, optionally trains it with best-model
checkpointing, plots the loss history, and prints per-sample predictions
for a validation batch.
"""
import os

import keras
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt

import config as cfg
from datagen import DataGenerator
from reg_cnn import get_model

np.set_printoptions(precision=2)

# Named flags replacing the original bare `if 1:` literals
LOAD_CHECKPOINT = True  # resume from cfg.checkpoint_path instead of building fresh
DO_TRAIN = True         # run the training loop and plot the loss history

path = os.path.dirname(os.path.abspath(__file__))

train_gen = DataGenerator(path='cat_dog/cats_and_dogs_filtered/train')
val_gen = DataGenerator(path='cat_dog/cats_and_dogs_filtered/validation')

if LOAD_CHECKPOINT:
    # Resume from the best model saved by a previous run
    model = tf.keras.models.load_model(cfg.checkpoint_path)
else:
    # Build and compile a fresh model
    input_shape = (128, 128, 3)
    model = get_model(input_shape)
    opt = tf.keras.optimizers.Adam(learning_rate=0.001)
    model.compile(optimizer=opt, loss='mse',
                  metrics=[tf.keras.metrics.MeanSquaredError()])
model.summary()

# Keep only the best model (by validation MSE) during training
checkpoint = keras.callbacks.ModelCheckpoint(cfg.checkpoint_path,
                                monitor='val_mean_squared_error',
                                verbose=1,
                                save_best_only=True,
                                save_weights_only=False)
callbacks = [checkpoint]

if DO_TRAIN:
    history = model.fit(train_gen, validation_data=val_gen, epochs=100,
                        callbacks=callbacks)
    # summarize history for loss
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()

#model.evaluate(val_gen)
ypred = model.predict(val_gen)  # full-set predictions (currently unused)

for rep in range(3):
    # NOTE(review): the original always fetched batch 0 here (via
    # val_gen.__getitem__(0)); behaviour kept, but `val_gen[rep]` was
    # probably intended. Inner index renamed to j: it previously shadowed
    # the outer loop variable.
    batch_x, batch_y = val_gen[0]
    pred_y = model.predict(batch_x)
    for j in range(len(pred_y)):
        print(f'actual:{batch_y[j]}, pred: {pred_y[j][0]}')
| [
"matplotlib.pyplot.title",
"os.path.abspath",
"numpy.set_printoptions",
"tensorflow.keras.models.load_model",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"reg_cnn.get_model",
"keras.callbacks.ModelCheckpoint",
"tensorflow.keras.metrics.MeanSquaredError",
"matplotlib.pyplot.legend",
"data... | [((167, 199), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (186, 199), True, 'import numpy as np\n'), ((284, 342), 'datagen.DataGenerator', 'DataGenerator', ([], {'path': '"""cat_dog/cats_and_dogs_filtered/train"""'}), "(path='cat_dog/cats_and_dogs_filtered/train')\n", (297, 342), False, 'from datagen import DataGenerator\n'), ((353, 416), 'datagen.DataGenerator', 'DataGenerator', ([], {'path': '"""cat_dog/cats_and_dogs_filtered/validation"""'}), "(path='cat_dog/cats_and_dogs_filtered/validation')\n", (366, 416), False, 'from datagen import DataGenerator\n'), ((733, 885), 'keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', (['cfg.checkpoint_path'], {'monitor': '"""val_mean_squared_error"""', 'verbose': '(1)', 'save_best_only': '(True)', 'save_weights_only': '(False)'}), "(cfg.checkpoint_path, monitor=\n 'val_mean_squared_error', verbose=1, save_best_only=True,\n save_weights_only=False)\n", (764, 885), False, 'import keras\n'), ((245, 270), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (260, 270), False, 'import os\n'), ((437, 484), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['cfg.checkpoint_path'], {}), '(cfg.checkpoint_path)\n', (463, 484), True, 'import tensorflow as tf\n'), ((531, 553), 'reg_cnn.get_model', 'get_model', (['input_shape'], {}), '(input_shape)\n', (540, 553), False, 'from reg_cnn import get_model\n'), ((564, 609), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (588, 609), True, 'import tensorflow as tf\n'), ((1035, 1068), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (1043, 1068), True, 'from matplotlib import pyplot as plt\n'), ((1073, 1110), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (1081, 1110), True, 'from 
matplotlib import pyplot as plt\n'), ((1115, 1138), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (1124, 1138), True, 'from matplotlib import pyplot as plt\n'), ((1143, 1161), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (1153, 1161), True, 'from matplotlib import pyplot as plt\n'), ((1166, 1185), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (1176, 1185), True, 'from matplotlib import pyplot as plt\n'), ((1190, 1237), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (1200, 1237), True, 'from matplotlib import pyplot as plt\n'), ((1242, 1252), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1250, 1252), True, 'from matplotlib import pyplot as plt\n'), ((665, 700), 'tensorflow.keras.metrics.MeanSquaredError', 'tf.keras.metrics.MeanSquaredError', ([], {}), '()\n', (698, 700), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
from __future__ import annotations
import os
import tempfile
import platform
from abc import ABCMeta, abstractmethod
from typing import cast, List, Optional
from pathlib import Path
from datetime import datetime
from dataclasses import dataclass, field, InitVar
from distutils.dir_util import copy_tree
from importlib.metadata import version
import numpy as np
from .cases import CaseStudy
from .copier import copy_after
from .grid import write_fm_rectangle, write_structured_rectangle
from .grid.shared import generate_grid_xy
from .types import AnyByStrDict, Num, StrOrPath
from ._docs import docstringtemplate
@docstringtemplate
@dataclass
class Template:
    """Class for creating Delft3D projects from templates

    Utilises the :mod:`.copier` and :mod:`.grid` subpackages to generate
    Delft3D models from templates and type-specific grid generation routines.
    Note that the template files are copied on initialization, therefore
    changes to the template source will not affect the object's output.

    Call a Template object with a length one :class:`.CaseStudy` object and
    a path at which to create a flexible-mesh Delft3D project. For example:

    >>> import pprint
    >>> import tempfile
    >>> from pathlib import Path
    >>> template = Template()
    >>> with tempfile.TemporaryDirectory() as tmpdirname:
    ...     template(CaseStudy(), tmpdirname)
    ...     inputdir = Path(tmpdirname) / "input"
    ...     pprint.pprint(sorted([x.name for x in inputdir.iterdir()]))
    ['Discharge.bc',
     'FlowFM.mdu',
     'FlowFM_bnd.ext',
     'FlowFM_net.nc',
     'Inlet.pli',
     'Outlet.pli',
     'WaterLevel.bc',
     'curves.trb',
     'turbines.ini']

    Note that for the ``'structured'`` model, if the turbine is located on
    a grid line then it will be shifted very slightly in order to avoid a bug
    in SNL-Delft3D-CEC.

    :param template_type: type of Delft3D project to generate. Valid options
        are:

        - ``'fm'``: create a flexible mesh model
        - ``'structured'``: create a structured mesh model

        Defaults to {template_type}
    :param template_path: optional path to the Delft3D project template
    :param exist_ok: if True, allow an existing path to be overwritten,
        defaults to {exist_ok}
    :param no_template: variables to ignore in the given
        :class:`.CaseStudy` objects when filling templates, defaults to
        ``["dx", "dy"]``

    :raises ValueError: if :attr:`~template_type` has an invalid value

    .. automethod:: __call__

    """

    # InitVar fields are consumed by __post_init__ and are not stored as
    # instance attributes
    template_type: InitVar[str] = "fm"
    template_path: InitVar[StrOrPath] = None

    #: if True, allow an existing path to be overwritten
    exist_ok: bool = False

    #: variables to ignore in the given :class:`.CaseStudy` objects when
    #: filling templates
    no_template: List[str] = field(
        default_factory=lambda: ["dx",
                                 "dy",
                                 "simulate_turbines"])

    # internal state populated in __post_init__ (excluded from init/repr);
    # _template_tmp holds the private working copy of the template files
    _template_tmp: tempfile.TemporaryDirectory = field(init=False, repr=False)
    _extras: _BaseTemplateExtras = field(init=False, repr=False)

    def __post_init__(self, template_type: str,
                      template_path: StrOrPath):

        # Map template type to its type-specific grid/data helper
        template_extras_map = {"fm": _FMTemplateExtras,
                               "structured": _StructuredTemplateExtras}

        if template_type not in template_extras_map:
            valid_types = ", ".join(template_extras_map)
            msg = f"Template type not recognised. Must be one of {valid_types}"
            raise ValueError(msg)

        self._extras = template_extras_map[template_type]()

        if template_path is None:
            template_path = _package_template_path(template_type)

        # Snapshot the template into a temporary directory so later changes
        # to the template source do not affect this object's output
        self._template_tmp = tempfile.TemporaryDirectory()
        copy_tree(str(template_path), self._template_tmp.name)

    def __call__(self, case: CaseStudy,
                       project_path: StrOrPath,
                       exist_ok: Optional[bool] = None):
        """Create a new Delft3D project from the given :class:`.CaseStudy`
        object, at the given path.

        Note that boolean values are converted to integers and Nones are
        converted to empty strings.

        :param case: :class:`.CaseStudy` object from which to build the
            project
        :param project_path: new project destination path
        :param exist_ok: if True, allow an existing path to be overwritten.
            Overrides :attr:`~exist_ok`, if given.

        :raises ValueError: if the given :class:`.CaseStudy` object is not
            length one or if :attr:`~template_path` does not exist
        :raises FileExistsError: if the project path exists, but
            :attr:`~exist_ok` is False

        """

        if len(case) != 1:
            raise ValueError("case study must have length one")

        if exist_ok is None:
            exist_ok = self.exist_ok

        # Copy templated files
        # NOTE(review): the comprehension variable `field` shadows the
        # imported dataclasses.field within this method
        data = {field: value
                for field, value in zip(case.fields, case.values)
                if field not in self.no_template}

        # Convert booleans to ints
        data = {field: int(value) if type(value) is bool else value
                for field, value in data.items()}

        # Convert None to ""
        data = {field: "" if value is None else value
                for field, value in data.items()}

        # Apply template specific updates to the data dictionary
        self._extras.data_hook(case, data)

        # Inform the type checker that we have Num for single value cases
        dx = cast(Num, case.dx)
        dy = cast(Num, case.dy)
        x0 = cast(Num, case.x0)
        x1 = cast(Num, case.x1)
        y0 = cast(Num, case.y0)
        y1 = cast(Num, case.y1)

        template_path = Path(self._template_tmp.name)
        project_path = Path(project_path)

        # copy_after fills the templates on exit; grid files are written
        # inside the context so their data can be merged in beforehand
        with copy_after(template_path,
                        project_path,
                        data=data,
                        exist_ok=exist_ok) as data:
            grid_data = self._extras.write_grid(project_path,
                                                dx,
                                                dy,
                                                x0,
                                                x1,
                                                y0,
                                                y1)
            data.update(grid_data)
class _BaseTemplateExtras(metaclass=ABCMeta):
    """Interface for template-type specific behaviour used by Template."""

    @abstractmethod
    def data_hook(self, case: CaseStudy,
                  data: AnyByStrDict):
        """Update the template substitution dictionary in place for ``case``."""
        pass # pragma: no cover

    @abstractmethod
    def write_grid(self, project_path: StrOrPath,
                   dx: Num,
                   dy: Num,
                   x0: Num,
                   x1: Num,
                   y0: Num,
                   y1: Num) -> AnyByStrDict:
        """Write the model grid for the rectangle (x0, y0)-(x1, y1) at
        spacing (dx, dy) and return any extra template data produced."""
        pass # pragma: no cover
class _FMTemplateExtras(_BaseTemplateExtras):
    """Template extras for the flexible mesh ('fm') model type."""

    def data_hook(self, case: CaseStudy,
                  data: AnyByStrDict):
        # Inject a [Turbines] section only when turbine simulation is on;
        # otherwise the placeholder expands to nothing.
        turbines_block = ""

        if case.simulate_turbines:
            turbines_block = ("\n"
                              "[Turbines]\n"
                              "TurbineFile = turbines.ini\n"
                              "CurvesFile = curves.trb")

        data["simulate_turbines"] = turbines_block

    def write_grid(self, project_path: StrOrPath,
                   dx: Num,
                   dy: Num,
                   x0: Num,
                   x1: Num,
                   y0: Num,
                   y1: Num) -> AnyByStrDict:
        """Write the FM network file and return any extra template data."""
        target = Path(project_path) / "input" / "FlowFM_net.nc"
        return write_fm_rectangle(target, dx, dy, x0, x1, y0, y1)
class _StructuredTemplateExtras(_BaseTemplateExtras):
    """Template extras for the 'structured' model type."""

    def data_hook(self, case: CaseStudy,
                  data: AnyByStrDict):
        # Record provenance (run date, package version, host OS) in the
        # generated project files.
        data["date"] = f"{datetime.today().strftime('%Y-%m-%d, %H:%M:%S')}"
        data["version"] = f"{version('SNL-Delft3D-CEC-Verify')}"
        data["os"] = f"{platform.system()}"

        if not case.simulate_turbines:
            data["simulate_turbines"] = ""
            return

        data["simulate_turbines"] = "Filtrb = #turbines.ini#"

        # Inform the type checker that we have Num for single value cases
        dx = cast(Num, case.dx)
        dy = cast(Num, case.dy)
        x0 = cast(Num, case.x0)
        x1 = cast(Num, case.x1)
        y0 = cast(Num, case.y0)
        y1 = cast(Num, case.y1)

        # If the turbine position lies on a grid line move it slightly
        # (a micrometre) to avoid a bug in SNL-Delft3D-CEC.
        xsize = x1 - x0
        ysize = y1 - y0

        x, y = generate_grid_xy(x0,
                                y0,
                                xsize,
                                ysize,
                                dx,
                                dy)

        micrometre = 1e-6

        if np.isclose(case.turb_pos_x, x).any():
            data["turb_pos_x"] += micrometre

        if np.isclose(case.turb_pos_y, y).any():
            data["turb_pos_y"] += micrometre

    def write_grid(self, project_path: StrOrPath,
                   dx: Num,
                   dy: Num,
                   x0: Num,
                   x1: Num,
                   y0: Num,
                   y1: Num) -> AnyByStrDict:
        """Write the structured grid files and return extra template data."""
        return write_structured_rectangle(project_path, dx, dy, x0, x1, y0, y1)
def _package_template_path(template_type) -> Path:
this_dir = os.path.dirname(os.path.realpath(__file__))
return Path(this_dir) / "templates" / template_type
| [
"tempfile.TemporaryDirectory",
"datetime.datetime.today",
"typing.cast",
"os.path.realpath",
"dataclasses.field",
"pathlib.Path",
"numpy.isclose",
"importlib.metadata.version",
"platform.system"
] | [((2939, 3004), 'dataclasses.field', 'field', ([], {'default_factory': "(lambda : ['dx', 'dy', 'simulate_turbines'])"}), "(default_factory=lambda : ['dx', 'dy', 'simulate_turbines'])\n", (2944, 3004), False, 'from dataclasses import dataclass, field, InitVar\n'), ((3205, 3234), 'dataclasses.field', 'field', ([], {'init': '(False)', 'repr': '(False)'}), '(init=False, repr=False)\n', (3210, 3234), False, 'from dataclasses import dataclass, field, InitVar\n'), ((3270, 3299), 'dataclasses.field', 'field', ([], {'init': '(False)', 'repr': '(False)'}), '(init=False, repr=False)\n', (3275, 3299), False, 'from dataclasses import dataclass, field, InitVar\n'), ((3994, 4023), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (4021, 4023), False, 'import tempfile\n'), ((5985, 6003), 'typing.cast', 'cast', (['Num', 'case.dx'], {}), '(Num, case.dx)\n', (5989, 6003), False, 'from typing import cast, List, Optional\n'), ((6017, 6035), 'typing.cast', 'cast', (['Num', 'case.dy'], {}), '(Num, case.dy)\n', (6021, 6035), False, 'from typing import cast, List, Optional\n'), ((6049, 6067), 'typing.cast', 'cast', (['Num', 'case.x0'], {}), '(Num, case.x0)\n', (6053, 6067), False, 'from typing import cast, List, Optional\n'), ((6081, 6099), 'typing.cast', 'cast', (['Num', 'case.x1'], {}), '(Num, case.x1)\n', (6085, 6099), False, 'from typing import cast, List, Optional\n'), ((6113, 6131), 'typing.cast', 'cast', (['Num', 'case.y0'], {}), '(Num, case.y0)\n', (6117, 6131), False, 'from typing import cast, List, Optional\n'), ((6145, 6163), 'typing.cast', 'cast', (['Num', 'case.y1'], {}), '(Num, case.y1)\n', (6149, 6163), False, 'from typing import cast, List, Optional\n'), ((6197, 6226), 'pathlib.Path', 'Path', (['self._template_tmp.name'], {}), '(self._template_tmp.name)\n', (6201, 6226), False, 'from pathlib import Path\n'), ((6250, 6268), 'pathlib.Path', 'Path', (['project_path'], {}), '(project_path)\n', (6254, 6268), False, 'from pathlib import Path\n'), 
((8977, 8995), 'typing.cast', 'cast', (['Num', 'case.dx'], {}), '(Num, case.dx)\n', (8981, 8995), False, 'from typing import cast, List, Optional\n'), ((9009, 9027), 'typing.cast', 'cast', (['Num', 'case.dy'], {}), '(Num, case.dy)\n', (9013, 9027), False, 'from typing import cast, List, Optional\n'), ((9041, 9059), 'typing.cast', 'cast', (['Num', 'case.x0'], {}), '(Num, case.x0)\n', (9045, 9059), False, 'from typing import cast, List, Optional\n'), ((9073, 9091), 'typing.cast', 'cast', (['Num', 'case.x1'], {}), '(Num, case.x1)\n', (9077, 9091), False, 'from typing import cast, List, Optional\n'), ((9105, 9123), 'typing.cast', 'cast', (['Num', 'case.y0'], {}), '(Num, case.y0)\n', (9109, 9123), False, 'from typing import cast, List, Optional\n'), ((9137, 9155), 'typing.cast', 'cast', (['Num', 'case.y1'], {}), '(Num, case.y1)\n', (9141, 9155), False, 'from typing import cast, List, Optional\n'), ((10196, 10222), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (10212, 10222), False, 'import os\n'), ((10235, 10249), 'pathlib.Path', 'Path', (['this_dir'], {}), '(this_dir)\n', (10239, 10249), False, 'from pathlib import Path\n'), ((8244, 8262), 'pathlib.Path', 'Path', (['project_path'], {}), '(project_path)\n', (8248, 8262), False, 'from pathlib import Path\n'), ((8620, 8653), 'importlib.metadata.version', 'version', (['"""SNL-Delft3D-CEC-Verify"""'], {}), "('SNL-Delft3D-CEC-Verify')\n", (8627, 8653), False, 'from importlib.metadata import version\n'), ((8680, 8697), 'platform.system', 'platform.system', ([], {}), '()\n', (8695, 8697), False, 'import platform\n'), ((9570, 9600), 'numpy.isclose', 'np.isclose', (['case.turb_pos_x', 'x'], {}), '(case.turb_pos_x, x)\n', (9580, 9600), True, 'import numpy as np\n'), ((9673, 9703), 'numpy.isclose', 'np.isclose', (['case.turb_pos_y', 'y'], {}), '(case.turb_pos_y, y)\n', (9683, 9703), True, 'import numpy as np\n'), ((8541, 8557), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (8555, 
8557), False, 'from datetime import datetime\n')] |
from engine.geometry import calcs
from engine.geometry.calcs import NoSolutionException
from engine.geometry.obstacle.arcFinder.arcCriticalPoint import ArcCriticalPoint
from engine.geometry.obstacle.arcFinder.arcTarget import ArcTarget
from engine.geometry.obstacle.vertexTarget import VertexTarget
import numpy as np
class VertexArcTarget(VertexTarget, ArcTarget):
    """A vertex target that a vehicle approaches while turning along an arc."""

    def __init__(self, position, velocity, normal, pointAngle):
        VertexTarget.__init__(self, position, velocity, normal, pointAngle)

    def notInitiallyReachable(self, arc):
        # Unreachable at the start when the target lies strictly inside
        # the arc's circle.
        offset = self.position - arc.center
        return np.dot(offset, offset) < arc.radius * arc.radius

    def getCriticalPoints(self, arc):
        """Return critical points where the target's ray crosses the arc circle."""
        hitDistances = calcs.getRayCircleIntersections(
            self.position, self.direction, arc.center, arc.radius)

        points = []
        for hitDistance in hitDistances:
            if hitDistance < 0.0:
                # Intersection lies behind the target's direction of travel.
                continue
            hitPoint = self.position + hitDistance * self.direction
            timeToHit = hitDistance / self.speed
            points.append(ArcCriticalPoint(
                vehicleArc=arc.arcToPoint(hitPoint),
                targetArc=timeToHit * arc.angularSpeed))

        return points

    def iterateSolution(self, arc):
        """Solve for an intercept launched from the arc's current end point.

        :raises NoSolutionException: if no intercept exists
        """
        exitAngle = arc.start + arc.length
        exitDirection = calcs.unitVectorOfAngle(exitAngle, arc.rotDirection)
        exitPoint = arc.center + arc.radius * exitDirection

        solution = calcs.hitTargetAtSpeed(
            exitPoint, arc.speed, self.getPosition(arc.arcTime()), self.velocity)
        if solution is None:
            raise NoSolutionException

        return arc.angleOfVelocity(solution.velocity), solution

    def calcAvoidanceRotDirection(self, passingVelocity):
        """
        As the vehicle skirts this target, determine whether this is an avoidance in the CW (-1) or CCW (1) direction (0 == neither).
        """
        if self.normal is None:
            return 0.0

        cross = calcs.cross2(self.normal, passingVelocity)
        if cross > 0.0:
            return 1.0
        if cross < 0.0:
            return -1.0
        return 0.0
"engine.geometry.calcs.unitVectorOfAngle",
"engine.geometry.calcs.getRayCircleIntersections",
"engine.geometry.calcs.cross2",
"numpy.dot",
"engine.geometry.obstacle.vertexTarget.VertexTarget.__init__"
] | [((441, 508), 'engine.geometry.obstacle.vertexTarget.VertexTarget.__init__', 'VertexTarget.__init__', (['self', 'position', 'velocity', 'normal', 'pointAngle'], {}), '(self, position, velocity, normal, pointAngle)\n', (462, 508), False, 'from engine.geometry.obstacle.vertexTarget import VertexTarget\n'), ((729, 819), 'engine.geometry.calcs.getRayCircleIntersections', 'calcs.getRayCircleIntersections', (['self.position', 'self.direction', 'arc.center', 'arc.radius'], {}), '(self.position, self.direction, arc.center,\n arc.radius)\n', (760, 819), False, 'from engine.geometry import calcs\n'), ((1414, 1465), 'engine.geometry.calcs.unitVectorOfAngle', 'calcs.unitVectorOfAngle', (['endAngle', 'arc.rotDirection'], {}), '(endAngle, arc.rotDirection)\n', (1437, 1465), False, 'from engine.geometry import calcs\n'), ((2094, 2136), 'engine.geometry.calcs.cross2', 'calcs.cross2', (['self.normal', 'passingVelocity'], {}), '(self.normal, passingVelocity)\n', (2106, 2136), False, 'from engine.geometry import calcs\n'), ((613, 639), 'numpy.dot', 'np.dot', (['toCenter', 'toCenter'], {}), '(toCenter, toCenter)\n', (619, 639), True, 'import numpy as np\n')] |
""" Redwood Creek Analysis: Landscape generation functions. """
import logging
import os
import pyproj
import numpy as np
import pandas as pd
import cartopy
from cartopy.io.shapereader import Reader
from functools import partial
from shapely.ops import transform
from shapely import geometry
import shapefile
import raster_tools
from Scripts import average_weather
from Scripts import MainOptions
def create_initial_conditions(host_array, out_stub="InitialConditions", seed_inf_cell=(0, 0),
                              host_numbers=False, prop_infected=1.0):
    """Write S, I and R initial condition rasters matching ``host_array``.

    All host-bearing cells start fully susceptible except the seed cell,
    which starts with ``prop_infected`` infected. With ``host_numbers``
    the arrays hold integer host counts; otherwise they hold proportions.
    The R (removed) raster is always zero.

    Arguments:
        host_array: raster of host density or counts
        out_stub: output filename stub under GeneratedData
        seed_inf_cell: (row, col) of the initially infected cell
        host_numbers: if True, write host counts rather than proportions
        prop_infected: proportion of the seed cell initially infected
    """
    header = host_array.header_vals
    nrows, ncols = header['nrows'], header['ncols']

    s_arr = np.zeros((nrows, ncols))
    i_arr = np.zeros((nrows, ncols))
    r_arr = np.zeros((nrows, ncols))

    for row in range(nrows):
        for col in range(ncols):
            hosts = host_array.array[row, col]
            if hosts <= 0:
                # Empty cell: no hosts of any status.
                continue
            if (row, col) == seed_inf_cell:
                if host_numbers:
                    # Round so infected + susceptible counts cover hosts.
                    i_arr[row, col] = int(np.ceil(prop_infected * hosts))
                    s_arr[row, col] = int(np.floor((1 - prop_infected) * hosts))
                else:
                    i_arr[row, col] = prop_infected
                    s_arr[row, col] = 1 - prop_infected
            else:
                s_arr[row, col] = hosts if host_numbers else 1.0

    llcorner = (header['xllcorner'], header['yllcorner'])
    # Write the three rasters in S, I, R order.
    for suffix, arr in (("_S.txt", s_arr), ("_I.txt", i_arr), ("_R.txt", r_arr)):
        raster = raster_tools.RasterData(
            array=arr, cellsize=header['cellsize'], shape=arr.shape,
            llcorner=llcorner)
        raster.to_file(os.path.join("GeneratedData", out_stub + suffix))
def generate_landscape(region, resolution, name):
    """Create necessary host files for a given landscape region.

    Arguments:
        region: Tuple of coordinates for llcorner and urcorner, each (long, lat)
        resolution: Required resolution of output raster
        name: Output landscape name for file outputs
    """

    # Input data lives one directory above this script.
    analysis_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")

    # Change coordinates to same format as raster
    wgs84 = pyproj.Proj("+init=EPSG:4326")
    NAD83_Cali_Albers = pyproj.Proj("+init=EPSG:3310")
    nps_proj = pyproj.Proj("+init=EPSG:4269")  # NOTE(review): unused here - confirm
    llcorner_3310 = pyproj.transform(wgs84, NAD83_Cali_Albers, *region[0])
    urcorner_3310 = pyproj.transform(wgs84, NAD83_Cali_Albers, *region[1])

    input_data_file = os.path.join(analysis_path, "InputData", "combinedHostsScenario0.txt")

    # Extract host density
    host_raster = raster_tools.extract_raster(input_data_file, llcorner_3310, urcorner_3310,
                                              resolution=resolution)
    # Make landscape directory
    os.makedirs(os.path.join("GeneratedData", name), exist_ok=True)
    # Save host density raster to file
    host_raster.to_file(os.path.join("GeneratedData", name, "HostDensity.txt"))
    logging.info("Size of %s raster: %dx%d", name, host_raster.header_vals['nrows'],
                 host_raster.header_vals['ncols'])

    # Extract host numbers
    host_num_raster = raster_tools.extract_raster(
        input_data_file, llcorner_3310, urcorner_3310, resolution=resolution)
    # Density-to-count conversion: presumably 100 hosts per unit density;
    # negative (no-data) cells are left unscaled -- TODO confirm.
    host_num_raster.array = np.multiply(host_num_raster.array,
                                        np.where(host_num_raster.array >= 0, 100, 1)).astype(int)
    host_num_raster.to_file(os.path.join("GeneratedData", name, "HostNumbers.txt"))

    logging.info("Starting NP mask raster")
    np_raster = make_np_mask(host_num_raster.header_vals)
    np_raster.to_file(os.path.join("GeneratedData", name, "NPMask.txt"))

    logging.info("Starting initial conditions")
    # Generate initial conditions
    # Find location of first SODMAP positive in region
    query = "Latitude > " + str(region[0][1]) + " & Latitude < " + str(region[1][1])
    query += " & Longitude > " + str(region[0][0]) + " & Longitude < " + str(region[1][0])
    query += " & State == 'Positive'"
    sodmap_df = pd.read_csv(
        os.path.join(analysis_path, "InputData", "sodmap.csv"), parse_dates=True)
    # Earliest positive record in the region seeds the infection.
    region_df = sodmap_df.query(query).sort_values("Date")
    positive_pos = (region_df['Longitude'].values[0], region_df['Latitude'].values[0])
    # Convert to map projection
    positive_pos_3310 = pyproj.transform(wgs84, NAD83_Cali_Albers, *positive_pos)
    # Find cell in each raster
    cell_pos = raster_tools.find_position_in_raster(positive_pos_3310, host_raster)
    logging.info("Found source cell")

    # Create initial condition files
    base_host_raster = raster_tools.extract_raster(
        os.path.join(analysis_path, "InputData", "combinedHostsScenario0.txt"), llcorner_3310,
        urcorner_3310, resolution=250)
    base_cell_pos = raster_tools.find_position_in_raster(positive_pos_3310, base_host_raster)
    base_inf_density = base_host_raster.array[base_cell_pos]
    # Scale the seed proportion so the coarse cell carries the same
    # infected amount as one 250 m base-resolution cell.
    ncells = (resolution/250)*(resolution/250)
    prop_inf = base_inf_density / (host_raster.array[cell_pos] * ncells)
    create_initial_conditions(
        host_num_raster, out_stub=os.path.join(name, "InitialConditions_Numbers"),
        seed_inf_cell=cell_pos, prop_infected=prop_inf, host_numbers=True)
    create_initial_conditions(
        host_raster, out_stub=os.path.join(name, "InitialConditions_Density"),
        seed_inf_cell=cell_pos, prop_infected=prop_inf, host_numbers=False)

    # Create averaged weather and forest type mask
    avg_mask = average_weather.average_mask(target_header=host_raster.header_vals)
    avg_mask.to_file(os.path.join("GeneratedData", name, "RMSMask.txt"))
def make_np_mask(target_header):
    """Generate mask of cells that are in Redwood National Park"""
    shp_path = os.path.join("InputData", "nps_boundary", "nps_boundary.shp")

    # Pick out the Redwood park boundary from the NPS shapefile.
    redw_records = [rec for rec in Reader(shp_path).records()
                    if 'Redwood' in rec.attributes['UNIT_NAME']]
    redw_shape_nps = redw_records[0].geometry[0]

    # Reproject the boundary from NAD83 geographic to California Albers.
    NAD83_Cali_Albers = pyproj.Proj("+init=EPSG:3310")
    nps_proj = pyproj.Proj("+init=EPSG:4269")
    project = partial(pyproj.transform, nps_proj, NAD83_Cali_Albers)
    redw_shape = transform(project, redw_shape_nps)

    cellsize = target_header['cellsize']
    nrows = target_header['nrows']
    ncols = target_header['ncols']

    # Cell edge coordinates; y runs top-to-bottom like the raster rows.
    lower_x = np.array([target_header['xllcorner'] + j * cellsize
                        for j in range(ncols)])
    upper_x = lower_x + cellsize
    lower_y = np.array([target_header['yllcorner'] + i * cellsize
                        for i in range(nrows)])[::-1]
    upper_y = lower_y + cellsize

    np_array = np.zeros((nrows, ncols))
    for i in range(nrows):
        for j in range(ncols):
            # Fraction of this cell's area inside the park boundary.
            cell = geometry.Polygon([
                [lower_x[j], lower_y[i]], [upper_x[j], lower_y[i]],
                [upper_x[j], upper_y[i]], [lower_x[j], upper_y[i]]])
            np_array[i, j] = redw_shape.intersection(cell).area / (
                cellsize * cellsize)

    return raster_tools.RasterData(
        (nrows, ncols),
        (target_header['xllcorner'], target_header['yllcorner']),
        cellsize, array=np_array)
| [
"functools.partial",
"numpy.ceil",
"shapely.geometry.Polygon",
"os.path.realpath",
"raster_tools.find_position_in_raster",
"numpy.zeros",
"shapely.ops.transform",
"numpy.floor",
"logging.info",
"numpy.where",
"pyproj.Proj",
"pyproj.transform",
"Scripts.average_weather.average_mask",
"os.pa... | [((586, 662), 'numpy.zeros', 'np.zeros', (["(host_array.header_vals['nrows'], host_array.header_vals['ncols'])"], {}), "((host_array.header_vals['nrows'], host_array.header_vals['ncols']))\n", (594, 662), True, 'import numpy as np\n'), ((682, 758), 'numpy.zeros', 'np.zeros', (["(host_array.header_vals['nrows'], host_array.header_vals['ncols'])"], {}), "((host_array.header_vals['nrows'], host_array.header_vals['ncols']))\n", (690, 758), True, 'import numpy as np\n'), ((778, 854), 'numpy.zeros', 'np.zeros', (["(host_array.header_vals['nrows'], host_array.header_vals['ncols'])"], {}), "((host_array.header_vals['nrows'], host_array.header_vals['ncols']))\n", (786, 854), True, 'import numpy as np\n'), ((1770, 1979), 'raster_tools.RasterData', 'raster_tools.RasterData', ([], {'array': 'init_s_array', 'cellsize': "host_array.header_vals['cellsize']", 'shape': 'init_s_array.shape', 'llcorner': "(host_array.header_vals['xllcorner'], host_array.header_vals['yllcorner'])"}), "(array=init_s_array, cellsize=host_array.header_vals\n ['cellsize'], shape=init_s_array.shape, llcorner=(host_array.\n header_vals['xllcorner'], host_array.header_vals['yllcorner']))\n", (1793, 1979), False, 'import raster_tools\n'), ((2007, 2216), 'raster_tools.RasterData', 'raster_tools.RasterData', ([], {'array': 'init_i_array', 'cellsize': "host_array.header_vals['cellsize']", 'shape': 'init_i_array.shape', 'llcorner': "(host_array.header_vals['xllcorner'], host_array.header_vals['yllcorner'])"}), "(array=init_i_array, cellsize=host_array.header_vals\n ['cellsize'], shape=init_i_array.shape, llcorner=(host_array.\n header_vals['xllcorner'], host_array.header_vals['yllcorner']))\n", (2030, 2216), False, 'import raster_tools\n'), ((2244, 2453), 'raster_tools.RasterData', 'raster_tools.RasterData', ([], {'array': 'init_r_array', 'cellsize': "host_array.header_vals['cellsize']", 'shape': 'init_r_array.shape', 'llcorner': "(host_array.header_vals['xllcorner'], 
host_array.header_vals['yllcorner'])"}), "(array=init_r_array, cellsize=host_array.header_vals\n ['cellsize'], shape=init_r_array.shape, llcorner=(host_array.\n header_vals['xllcorner'], host_array.header_vals['yllcorner']))\n", (2267, 2453), False, 'import raster_tools\n'), ((3198, 3228), 'pyproj.Proj', 'pyproj.Proj', (['"""+init=EPSG:4326"""'], {}), "('+init=EPSG:4326')\n", (3209, 3228), False, 'import pyproj\n'), ((3253, 3283), 'pyproj.Proj', 'pyproj.Proj', (['"""+init=EPSG:3310"""'], {}), "('+init=EPSG:3310')\n", (3264, 3283), False, 'import pyproj\n'), ((3299, 3329), 'pyproj.Proj', 'pyproj.Proj', (['"""+init=EPSG:4269"""'], {}), "('+init=EPSG:4269')\n", (3310, 3329), False, 'import pyproj\n'), ((3351, 3405), 'pyproj.transform', 'pyproj.transform', (['wgs84', 'NAD83_Cali_Albers', '*region[0]'], {}), '(wgs84, NAD83_Cali_Albers, *region[0])\n', (3367, 3405), False, 'import pyproj\n'), ((3426, 3480), 'pyproj.transform', 'pyproj.transform', (['wgs84', 'NAD83_Cali_Albers', '*region[1]'], {}), '(wgs84, NAD83_Cali_Albers, *region[1])\n', (3442, 3480), False, 'import pyproj\n'), ((3504, 3574), 'os.path.join', 'os.path.join', (['analysis_path', '"""InputData"""', '"""combinedHostsScenario0.txt"""'], {}), "(analysis_path, 'InputData', 'combinedHostsScenario0.txt')\n", (3516, 3574), False, 'import os\n'), ((3621, 3722), 'raster_tools.extract_raster', 'raster_tools.extract_raster', (['input_data_file', 'llcorner_3310', 'urcorner_3310'], {'resolution': 'resolution'}), '(input_data_file, llcorner_3310, urcorner_3310,\n resolution=resolution)\n', (3648, 3722), False, 'import raster_tools\n'), ((3990, 4109), 'logging.info', 'logging.info', (['"""Size of %s raster: %dx%d"""', 'name', "host_raster.header_vals['nrows']", "host_raster.header_vals['ncols']"], {}), "('Size of %s raster: %dx%d', name, host_raster.header_vals[\n 'nrows'], host_raster.header_vals['ncols'])\n", (4002, 4109), False, 'import logging\n'), ((4172, 4273), 'raster_tools.extract_raster', 
'raster_tools.extract_raster', (['input_data_file', 'llcorner_3310', 'urcorner_3310'], {'resolution': 'resolution'}), '(input_data_file, llcorner_3310, urcorner_3310,\n resolution=resolution)\n', (4199, 4273), False, 'import raster_tools\n'), ((4531, 4570), 'logging.info', 'logging.info', (['"""Starting NP mask raster"""'], {}), "('Starting NP mask raster')\n", (4543, 4570), False, 'import logging\n'), ((4707, 4750), 'logging.info', 'logging.info', (['"""Starting initial conditions"""'], {}), "('Starting initial conditions')\n", (4719, 4750), False, 'import logging\n'), ((5370, 5427), 'pyproj.transform', 'pyproj.transform', (['wgs84', 'NAD83_Cali_Albers', '*positive_pos'], {}), '(wgs84, NAD83_Cali_Albers, *positive_pos)\n', (5386, 5427), False, 'import pyproj\n'), ((5475, 5543), 'raster_tools.find_position_in_raster', 'raster_tools.find_position_in_raster', (['positive_pos_3310', 'host_raster'], {}), '(positive_pos_3310, host_raster)\n', (5511, 5543), False, 'import raster_tools\n'), ((5549, 5582), 'logging.info', 'logging.info', (['"""Found source cell"""'], {}), "('Found source cell')\n", (5561, 5582), False, 'import logging\n'), ((5827, 5900), 'raster_tools.find_position_in_raster', 'raster_tools.find_position_in_raster', (['positive_pos_3310', 'base_host_raster'], {}), '(positive_pos_3310, base_host_raster)\n', (5863, 5900), False, 'import raster_tools\n'), ((6524, 6591), 'Scripts.average_weather.average_mask', 'average_weather.average_mask', ([], {'target_header': 'host_raster.header_vals'}), '(target_header=host_raster.header_vals)\n', (6552, 6591), False, 'from Scripts import average_weather\n'), ((7058, 7088), 'pyproj.Proj', 'pyproj.Proj', (['"""+init=EPSG:3310"""'], {}), "('+init=EPSG:3310')\n", (7069, 7088), False, 'import pyproj\n'), ((7104, 7134), 'pyproj.Proj', 'pyproj.Proj', (['"""+init=EPSG:4269"""'], {}), "('+init=EPSG:4269')\n", (7115, 7134), False, 'import pyproj\n'), ((7150, 7204), 'functools.partial', 'partial', (['pyproj.transform', 'nps_proj', 
'NAD83_Cali_Albers'], {}), '(pyproj.transform, nps_proj, NAD83_Cali_Albers)\n', (7157, 7204), False, 'from functools import partial\n'), ((7223, 7257), 'shapely.ops.transform', 'transform', (['project', 'redw_shape_nps'], {}), '(project, redw_shape_nps)\n', (7232, 7257), False, 'from shapely.ops import transform\n'), ((7674, 7732), 'numpy.zeros', 'np.zeros', (["(target_header['nrows'], target_header['ncols'])"], {}), "((target_header['nrows'], target_header['ncols']))\n", (7682, 7732), True, 'import numpy as np\n'), ((8225, 8409), 'raster_tools.RasterData', 'raster_tools.RasterData', (["(target_header['nrows'], target_header['ncols'])", "(target_header['xllcorner'], target_header['yllcorner'])", "target_header['cellsize']"], {'array': 'np_array'}), "((target_header['nrows'], target_header['ncols']), (\n target_header['xllcorner'], target_header['yllcorner']), target_header[\n 'cellsize'], array=np_array)\n", (8248, 8409), False, 'import raster_tools\n'), ((2488, 2538), 'os.path.join', 'os.path.join', (['"""GeneratedData"""', "(out_stub + '_S.txt')"], {}), "('GeneratedData', out_stub + '_S.txt')\n", (2500, 2538), False, 'import os\n'), ((2566, 2616), 'os.path.join', 'os.path.join', (['"""GeneratedData"""', "(out_stub + '_I.txt')"], {}), "('GeneratedData', out_stub + '_I.txt')\n", (2578, 2616), False, 'import os\n'), ((2644, 2694), 'os.path.join', 'os.path.join', (['"""GeneratedData"""', "(out_stub + '_R.txt')"], {}), "('GeneratedData', out_stub + '_R.txt')\n", (2656, 2694), False, 'import os\n'), ((3813, 3848), 'os.path.join', 'os.path.join', (['"""GeneratedData"""', 'name'], {}), "('GeneratedData', name)\n", (3825, 3848), False, 'import os\n'), ((3929, 3983), 'os.path.join', 'os.path.join', (['"""GeneratedData"""', 'name', '"""HostDensity.txt"""'], {}), "('GeneratedData', name, 'HostDensity.txt')\n", (3941, 3983), False, 'import os\n'), ((4470, 4524), 'os.path.join', 'os.path.join', (['"""GeneratedData"""', 'name', '"""HostNumbers.txt"""'], {}), "('GeneratedData', 
name, 'HostNumbers.txt')\n", (4482, 4524), False, 'import os\n'), ((4651, 4700), 'os.path.join', 'os.path.join', (['"""GeneratedData"""', 'name', '"""NPMask.txt"""'], {}), "('GeneratedData', name, 'NPMask.txt')\n", (4663, 4700), False, 'import os\n'), ((5092, 5146), 'os.path.join', 'os.path.join', (['analysis_path', '"""InputData"""', '"""sodmap.csv"""'], {}), "(analysis_path, 'InputData', 'sodmap.csv')\n", (5104, 5146), False, 'import os\n'), ((5681, 5751), 'os.path.join', 'os.path.join', (['analysis_path', '"""InputData"""', '"""combinedHostsScenario0.txt"""'], {}), "(analysis_path, 'InputData', 'combinedHostsScenario0.txt')\n", (5693, 5751), False, 'import os\n'), ((6613, 6663), 'os.path.join', 'os.path.join', (['"""GeneratedData"""', 'name', '"""RMSMask.txt"""'], {}), "('GeneratedData', name, 'RMSMask.txt')\n", (6625, 6663), False, 'import os\n'), ((6784, 6845), 'os.path.join', 'os.path.join', (['"""InputData"""', '"""nps_boundary"""', '"""nps_boundary.shp"""'], {}), "('InputData', 'nps_boundary', 'nps_boundary.shp')\n", (6796, 6845), False, 'import os\n'), ((3100, 3126), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3116, 3126), False, 'import os\n'), ((6147, 6194), 'os.path.join', 'os.path.join', (['name', '"""InitialConditions_Numbers"""'], {}), "(name, 'InitialConditions_Numbers')\n", (6159, 6194), False, 'import os\n'), ((6332, 6379), 'os.path.join', 'os.path.join', (['name', '"""InitialConditions_Density"""'], {}), "(name, 'InitialConditions_Density')\n", (6344, 6379), False, 'import os\n'), ((7993, 8017), 'shapely.geometry.Polygon', 'geometry.Polygon', (['points'], {}), '(points)\n', (8009, 8017), False, 'from shapely import geometry\n'), ((4383, 4427), 'numpy.where', 'np.where', (['(host_num_raster.array >= 0)', '(100)', '(1)'], {}), '(host_num_raster.array >= 0, 100, 1)\n', (4391, 4427), True, 'import numpy as np\n'), ((1369, 1420), 'numpy.ceil', 'np.ceil', (['(prop_infected * host_array.array[row, col])'], {}), 
'(prop_infected * host_array.array[row, col])\n', (1376, 1420), True, 'import numpy as np\n'), ((1504, 1562), 'numpy.floor', 'np.floor', (['((1 - prop_infected) * host_array.array[row, col])'], {}), '((1 - prop_infected) * host_array.array[row, col])\n', (1512, 1562), True, 'import numpy as np\n')] |
# coding: utf-8
# Object Detection Demo
import argparse
import cv2
import numpy as np
import os
import sys
import time
import tensorflow as tf
from distutils.version import StrictVersion
# The graph-loading API used below requires TensorFlow 1.12 or later.
if StrictVersion(tf.__version__) < StrictVersion('1.12.0'):
    raise ImportError('Please upgrade your TensorFlow installation to v1.12.*.')

# Path to label and frozen detection graph. This is the actual model that is used for the object detection.
parser = argparse.ArgumentParser(description='object_detection_tutorial.')
parser.add_argument("trained", type=str)
parser.add_argument('-m', '--model', default='frozen_inference_graph.pb')
parser.add_argument('-d', '--device', default='normal_cam') # normal_cam / jetson_nano_raspi_cam / jetson_nano_web_cam
parser.add_argument("--camera", type=int, default=0)
args = parser.parse_args()

# Global graph object populated by load_graph() below.
detection_graph = tf.Graph()
mode = 'bbox'
def load_graph():
    """Deserialize the frozen model into the global detection graph.

    download frozen_inference_graph.pb
    $ wget https://raw.githubusercontent.com/victordibia/handtracking/master/model-checkpoint/ssdlitemobilenetv2/frozen_inference_graph.pb
    """
    with detection_graph.as_default():
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(args.model, 'rb') as fid:
            graph_def.ParseFromString(fid.read())
            tf.import_graph_def(graph_def, name='')
    return detection_graph
# Load a (frozen) Tensorflow model into memory.
print('Loading graph...')
detection_graph = load_graph()
print('Graph is loaded')

# Allocate GPU memory on demand rather than grabbing it all up front.
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True

with detection_graph.as_default():
    tf_sess = tf.Session(config=tf_config)
    ops = tf.compat.v1.get_default_graph().get_operations()
    all_tensor_names = {output.name for op in ops for output in op.outputs}
    # Keep only the detection output tensors that exist in this graph
    # (detection_masks is optional in SSD-style models).
    tensor_dict = {}
    for key in [
        'num_detections', 'detection_boxes', 'detection_scores',
        'detection_classes', 'detection_masks'
    ]:
        tensor_name = key + ':0'
        if tensor_name in all_tensor_names:
            tensor_dict[key] = tf.compat.v1.get_default_graph().get_tensor_by_name(
                tensor_name)
    image_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name('image_tensor:0')
def run_inference_for_single_image(image, graph):
    """Run one forward pass of the detection graph on ``image``.

    Uses the module-level session and tensors set up above; the ``graph``
    parameter is kept for interface compatibility.
    """
    # Run inference
    out = tf_sess.run(tensor_dict, feed_dict={image_tensor: image})

    # all outputs are float32 numpy arrays, so convert types as appropriate;
    # strip the leading batch dimension from each output.
    out['num_detections'] = int(out['num_detections'][0])
    out['detection_classes'] = out['detection_classes'][0].astype(np.int64)
    out['detection_boxes'] = out['detection_boxes'][0]
    out['detection_scores'] = out['detection_scores'][0]
    return out
# Switch camera according to device
if args.device == 'normal_cam':
    cam = cv2.VideoCapture(args.camera)
elif args.device == 'jetson_nano_raspi_cam':
    # GStreamer pipeline for the Raspberry Pi camera module on Jetson Nano.
    GST_STR = 'nvarguscamerasrc \
    ! video/x-raw(memory:NVMM), width=3280, height=2464, format=(string)NV12, framerate=(fraction)30/1 \
    ! nvvidconv ! video/x-raw, width=(int)1920, height=(int)1080, format=(string)BGRx \
    ! videoconvert \
    ! appsink'
    cam = cv2.VideoCapture(GST_STR, cv2.CAP_GSTREAMER) # Raspi cam
elif args.device == 'jetson_nano_web_cam':
    # NOTE(review): device index hard-coded to 1, ignoring --camera
    # -- confirm this is intentional.
    cam = cv2.VideoCapture(1)
else:
    print('wrong device')
    sys.exit()
import argparse
import configparser
import logging
logger = logging.getLogger()
import os
import cv2
import chainer
import chainercv
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from models.selector import select_model
from hand_dataset.selector import select_dataset
from hand_dataset.image_utils import normalize_rgb
count_max = 0
logging.basicConfig(level=logging.INFO)
config = configparser.ConfigParser()
path = os.path.expanduser(os.path.join(args.trained, "result", "config.ini"))
logger.info("read {}".format(path))
config.read(path, 'UTF-8')
logger.info("setup devices")
chainer.global_config.autotune = True
chainer.config.cudnn_fast_batch_normalization = True
# dataset_type = config["dataset"]["type"]
use_rgb = config.getboolean("dataset", "use_rgb")
use_depth = config.getboolean("dataset", "use_depth")
assert use_rgb
assert use_rgb ^ use_depth, "XOR(use_rgb, use_depth) must be True"
hand_param = select_dataset(config, return_data=["hand_param"])
model_path = os.path.expanduser(os.path.join(args.trained, "result", "bestmodel.npz"))
logger.info("> restore model")
model = select_model(config, hand_param)
logger.info("> model.device = {}".format(model.device))
logger.info("> restore models")
chainer.serializers.load_npz(model_path, model)
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(121)
ax3 = fig.add_subplot(122, projection="3d")
color_map = hand_param["color_map"]
color = [color_map[k] for k in hand_param["keypoint_names"]]
edge_color = [color_map[s, t] for s, t in hand_param["edges"]]
pred_color = [[255, 255, 255] for k in hand_param["keypoint_names"]]
NUM_KEYPOINTS = 21
KEYPOINT_NAMES = []
for k in ["wrist", "thumb", "index", "middle", "ring", "little"]:
if k == "wrist":
joint_name = "_".join([k])
KEYPOINT_NAMES.append(joint_name)
else:
for p in ["tip", "dip", "pip", "mcp"]:
joint_name = "_".join([k, p])
KEYPOINT_NAMES.append(joint_name)
EDGE_NAMES = []
from utils import pairwise
for f in ["index", "middle", "ring", "little", "thumb"]:
for p, q in pairwise(["wrist", "mcp", "pip", "dip", "tip"]):
if p != "wrist":
p = "_".join([f, p])
q = "_".join([f, q])
EDGE_NAMES.append([p, q])
EDGES = [[KEYPOINT_NAMES.index(s), KEYPOINT_NAMES.index(t)]
for s, t in EDGE_NAMES]
from hand_dataset.common_dataset import EDGES
def ch_inferenec(image):
    """Estimate 2D hand keypoints on a cropped RGB hand image.

    (Name kept as-is -- sic "inferenec" -- because callers use it.)

    The image is downsampled 4x and resized back to the network input size
    (presumably matching the training-time preprocessing -- confirm), then
    fed to the Chainer ``model``. Anchor joints (wrist/root and each mcp)
    are located by a global argmax over the response*confidence map; the
    remaining joints are found by a windowed argmax around their parent.

    Returns an (n_keypoints, 2) array of (y, x) coordinates in the resized
    network-input frame, ordered as ``hand_param["keypoint_names"]``.
    """
    image = cv2.resize(image, (hand_param["inW"] // 4, hand_param["inH"] // 4))
    image = image.transpose(2, 0, 1)  # HWC -> CHW
    image = chainercv.transforms.resize(image, (hand_param["inH"], hand_param["inW"]))
    ret = model.predict(np.expand_dims(normalize_rgb(image), axis=0))
    # Some model variants additionally return an edge map `e` (unused here).
    if len(ret) == 7:
        resp, conf, x, y, w, h, v = ret
    else:
        resp, conf, x, y, w, h, e, v = ret
    resp = chainer.backends.cuda.to_cpu(resp.array)
    conf = chainer.backends.cuda.to_cpu(conf.array)
    w = chainer.backends.cuda.to_cpu(w.array)
    h = chainer.backends.cuda.to_cpu(h.array)
    x = chainer.backends.cuda.to_cpu(x.array)
    y = chainer.backends.cuda.to_cpu(y.array)
    v = chainer.backends.cuda.to_cpu(v.array)
    # Drop the batch axis from every output.
    resp = np.squeeze(resp, axis=0)
    conf = np.squeeze(conf, axis=0)
    x = np.squeeze(x, axis=0)
    y = np.squeeze(y, axis=0)
    w = np.squeeze(w, axis=0)
    h = np.squeeze(h, axis=0)
    v = np.squeeze(v, axis=0)
    keypoint_names = hand_param["keypoint_names"]
    edges = hand_param["edges"]
    delta = resp * conf  # per-cell keypoint score map
    # Grid-cell size of the output map, in input-frame pixels.
    scaleH = hand_param["inH"] / model.outsize[1]
    scaleW = hand_param["inW"] / model.outsize[0]
    joint2d = {}
    grid_position = {}
    # Anchor joints: global argmax over the whole score map.
    for kname in keypoint_names:
        if "mcp" in kname or "root" == kname:
            i = keypoint_names.index(kname)
            u_ind = np.unravel_index(np.argmax(delta[i]), delta[i].shape)
            y_offset, x_offset = u_ind
            joint2d[kname] = [
                scaleH * (y_offset + y[i][u_ind]),
                scaleW * (x_offset + x[i][u_ind])
            ]
            grid_position[kname] = u_ind
    # Chain the remaining joints: search a small window around the parent
    # (mcp -> pip -> dip -> tip) instead of the whole map.
    for f in ["thumb", "index", "middle", "ring", "little"]:
        for p, q in zip(["mcp", "pip", "dip"], ["pip", "dip", "tip"]):
            f_p = "_".join([f, p])
            f_q = "_".join([f, q])
            p_h, p_w = grid_position[f_p]
            i = keypoint_names.index(f_q)
            # BUG FIX: the original wrote `q == ["tip", "dip"]`, comparing a
            # string against a list -- always False, so the window was always
            # +/-2 cells. Membership gives tip/dip the intended tighter window.
            sz = 1 if q in ("tip", "dip") else 2
            hslice = slice(max(0, p_h - sz), min(model.outsize[1], p_h + sz + 1))
            wslice = slice(max(0, p_w - sz), min(model.outsize[0], p_w + sz + 1))
            target = delta[i][hslice, wslice]
            q_h, q_w = np.unravel_index(np.argmax(target), target.shape)
            # Convert the window-relative argmax back to absolute grid cells
            # (the window start is clamped at 0 by the slices above).
            y_offset = (p_h - sz) + q_h if p_h - sz >= 0 else q_h
            x_offset = (p_w - sz) + q_w if p_w - sz >= 0 else q_w
            joint2d[f_q] = [
                scaleH * (y_offset + y[i][(y_offset, x_offset)]),
                scaleW * (x_offset + x[i][(y_offset, x_offset)])
            ]
            grid_position[f_q] = (y_offset, x_offset)
    # 3D skeleton accumulated from predicted per-edge unit orientations.
    # Computed but not returned; kept for parity with the original code.
    kp_zyx = np.zeros((len(keypoint_names), 3))
    for ei, (s, t) in enumerate(edges):
        u_ind = grid_position[keypoint_names[s]]
        orien = v[ei, :, u_ind[0], u_ind[1]]
        elen = 1.5 if s == 0 else 1  # wrist-anchored bones get a longer unit length
        kp_zyx[t] = kp_zyx[s] + orien * elen
    joint2d = np.array([joint2d[k] for k in keypoint_names])
    return joint2d
if __name__ == '__main__':
    # Main capture loop: read frames from `cam`, run the TF hand detector,
    # and for each confident detection run the Chainer keypoint model on the
    # cropped hand region. ESC exits.
    count = 0
    labels = ['blank', 'hand']  # detector class-id -> display label
    while True:
        ret, img = cam.read()
        if not ret:
            print('error')
            break
        key = cv2.waitKey(1)
        if key == 27: # when ESC key is pressed break
            break
        count += 1
        if count > count_max:
            # Detector input: 300x300 RGB image with a batch axis.
            img_bgr = cv2.resize(img, (300, 300))
            # convert bgr to rgb
            image_np = img_bgr[:, :, ::-1]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            start = time.time()
            output_dict = run_inference_for_single_image(image_np_expanded, detection_graph)
            elapsed_time = time.time() - start
            for i in range(output_dict['num_detections']):
                class_id = output_dict['detection_classes'][i]
                if class_id < len(labels):
                    label = labels[class_id]
                else:
                    label = 'unknown'
                detection_score = output_dict['detection_scores'][i]
                if detection_score > 0.5:
                    # Define bounding box
                    # Boxes are normalized [ymin, xmin, ymax, xmax];
                    # scale to full-frame pixel coordinates.
                    h, w, c = img.shape
                    box = output_dict['detection_boxes'][i] * np.array(
                        [h, w, h, w])
                    ymin, xmin, ymax, xmax = box.astype(int)
                    # Expand to a square crop 1.5x the longer box side,
                    # centered on the detection and clamped to the frame.
                    ulen = xmax - xmin
                    vlen = ymax - ymin
                    boxscale = 1.5
                    boxlen = int(boxscale * max(ulen, vlen))
                    uc = int((xmax + xmin) / 2)
                    vc = int((ymax + ymin) / 2)
                    umin = max(0, uc - boxlen // 2)
                    umax = min(w, uc + boxlen // 2)
                    vmin = max(0, vc - boxlen // 2)
                    vmax = min(h, vc + boxlen // 2)
                    # Crop and flip BGR -> RGB for the keypoint model.
                    crop_img = img[vmin:vmax, umin:umax][:, :, ::-1]
                    oriH, oriW, _ = crop_img.shape
                    joint2d = ch_inferenec(crop_img)
                    if joint2d is not None:
                        # Map keypoints from the model input frame back to
                        # full-image (v, u) pixel coordinates (assumes the
                        # keypoints are in a model.inH x model.inW frame --
                        # TODO confirm against ch_inferenec).
                        joint2d = np.array([[oriH / model.inH, oriW / model.inW]]) * joint2d + np.array([[vmin, umin]])
                        for v, u in joint2d:
                            cv2.circle(img, (int(u), int(v)), 3, (255, 0, 0), -1)
                        for s, t in EDGES:
                            sy, sx = joint2d[s].astype(int)
                            ty, tx = joint2d[t].astype(int)
                            cv2.line(img, (sx, sy), (tx, ty), (255, 0, 0), 5)
                        speed_info = '%s: %f' % ('speed=', elapsed_time)
                        cv2.putText(img, speed_info, (10, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
                        cv2.rectangle(img,
                            (umin, vmin), (umax, vmax), (0, 255, 0), 3)
                else:
                    # Draw bounding box
                    cv2.rectangle(img,
                        (xmin, ymin), (xmax, ymax), (0, 0, 255), 3)
                    # Put label near bounding box
                    information = '%s: %f' % (label, output_dict['detection_scores'][i])
                    cv2.putText(img, information, (xmin, ymax),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
            cv2.imshow('detection result', img)
            count = 0
    # Release the session, the camera and all display windows on exit.
    tf_sess.close()
    cam.release()
    cv2.destroyAllWindows()
| [
"argparse.ArgumentParser",
"distutils.version.StrictVersion",
"numpy.argmax",
"models.selector.select_model",
"tensorflow.ConfigProto",
"matplotlib.pyplot.figure",
"cv2.rectangle",
"cv2.imshow",
"os.path.join",
"cv2.line",
"utils.pairwise",
"chainer.serializers.load_npz",
"hand_dataset.selec... | [((448, 513), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""object_detection_tutorial."""'}), "(description='object_detection_tutorial.')\n", (471, 513), False, 'import argparse\n'), ((848, 858), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (856, 858), True, 'import tensorflow as tf\n'), ((1551, 1567), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1565, 1567), True, 'import tensorflow as tf\n'), ((3519, 3538), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3536, 3538), False, 'import logging\n'), ((3614, 3635), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (3628, 3635), False, 'import matplotlib\n'), ((3891, 3930), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (3910, 3930), False, 'import logging\n'), ((3941, 3968), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (3966, 3968), False, 'import configparser\n'), ((4475, 4525), 'hand_dataset.selector.select_dataset', 'select_dataset', (['config'], {'return_data': "['hand_param']"}), "(config, return_data=['hand_param'])\n", (4489, 4525), False, 'from hand_dataset.selector import select_dataset\n'), ((4653, 4685), 'models.selector.select_model', 'select_model', (['config', 'hand_param'], {}), '(config, hand_param)\n', (4665, 4685), False, 'from models.selector import select_model\n'), ((4775, 4822), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (['model_path', 'model'], {}), '(model_path, model)\n', (4803, 4822), False, 'import chainer\n'), ((4830, 4857), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (4840, 4857), True, 'from matplotlib import pyplot as plt\n'), ((192, 221), 'distutils.version.StrictVersion', 'StrictVersion', (['tf.__version__'], {}), '(tf.__version__)\n', (205, 221), False, 'from distutils.version import StrictVersion\n'), ((224, 247), 
'distutils.version.StrictVersion', 'StrictVersion', (['"""1.12.0"""'], {}), "('1.12.0')\n", (237, 247), False, 'from distutils.version import StrictVersion\n'), ((1659, 1687), 'tensorflow.Session', 'tf.Session', ([], {'config': 'tf_config'}), '(config=tf_config)\n', (1669, 1687), True, 'import tensorflow as tf\n'), ((2931, 2960), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.camera'], {}), '(args.camera)\n', (2947, 2960), False, 'import cv2\n'), ((3996, 4046), 'os.path.join', 'os.path.join', (['args.trained', '"""result"""', '"""config.ini"""'], {}), "(args.trained, 'result', 'config.ini')\n", (4008, 4046), False, 'import os\n'), ((4558, 4611), 'os.path.join', 'os.path.join', (['args.trained', '"""result"""', '"""bestmodel.npz"""'], {}), "(args.trained, 'result', 'bestmodel.npz')\n", (4570, 4611), False, 'import os\n'), ((5626, 5673), 'utils.pairwise', 'pairwise', (["['wrist', 'mcp', 'pip', 'dip', 'tip']"], {}), "(['wrist', 'mcp', 'pip', 'dip', 'tip'])\n", (5634, 5673), False, 'from utils import pairwise\n'), ((5976, 6043), 'cv2.resize', 'cv2.resize', (['image', "(hand_param['inW'] // 4, hand_param['inH'] // 4)"], {}), "(image, (hand_param['inW'] // 4, hand_param['inH'] // 4))\n", (5986, 6043), False, 'import cv2\n'), ((6107, 6181), 'chainercv.transforms.resize', 'chainercv.transforms.resize', (['image', "(hand_param['inH'], hand_param['inW'])"], {}), "(image, (hand_param['inH'], hand_param['inW']))\n", (6134, 6181), False, 'import chainercv\n'), ((6378, 6418), 'chainer.backends.cuda.to_cpu', 'chainer.backends.cuda.to_cpu', (['resp.array'], {}), '(resp.array)\n', (6406, 6418), False, 'import chainer\n'), ((6430, 6470), 'chainer.backends.cuda.to_cpu', 'chainer.backends.cuda.to_cpu', (['conf.array'], {}), '(conf.array)\n', (6458, 6470), False, 'import chainer\n'), ((6479, 6516), 'chainer.backends.cuda.to_cpu', 'chainer.backends.cuda.to_cpu', (['w.array'], {}), '(w.array)\n', (6507, 6516), False, 'import chainer\n'), ((6525, 6562), 'chainer.backends.cuda.to_cpu', 
'chainer.backends.cuda.to_cpu', (['h.array'], {}), '(h.array)\n', (6553, 6562), False, 'import chainer\n'), ((6571, 6608), 'chainer.backends.cuda.to_cpu', 'chainer.backends.cuda.to_cpu', (['x.array'], {}), '(x.array)\n', (6599, 6608), False, 'import chainer\n'), ((6617, 6654), 'chainer.backends.cuda.to_cpu', 'chainer.backends.cuda.to_cpu', (['y.array'], {}), '(y.array)\n', (6645, 6654), False, 'import chainer\n'), ((6711, 6748), 'chainer.backends.cuda.to_cpu', 'chainer.backends.cuda.to_cpu', (['v.array'], {}), '(v.array)\n', (6739, 6748), False, 'import chainer\n'), ((6760, 6784), 'numpy.squeeze', 'np.squeeze', (['resp'], {'axis': '(0)'}), '(resp, axis=0)\n', (6770, 6784), True, 'import numpy as np\n'), ((6796, 6820), 'numpy.squeeze', 'np.squeeze', (['conf'], {'axis': '(0)'}), '(conf, axis=0)\n', (6806, 6820), True, 'import numpy as np\n'), ((6829, 6850), 'numpy.squeeze', 'np.squeeze', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (6839, 6850), True, 'import numpy as np\n'), ((6859, 6880), 'numpy.squeeze', 'np.squeeze', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (6869, 6880), True, 'import numpy as np\n'), ((6889, 6910), 'numpy.squeeze', 'np.squeeze', (['w'], {'axis': '(0)'}), '(w, axis=0)\n', (6899, 6910), True, 'import numpy as np\n'), ((6919, 6940), 'numpy.squeeze', 'np.squeeze', (['h'], {'axis': '(0)'}), '(h, axis=0)\n', (6929, 6940), True, 'import numpy as np\n'), ((6981, 7002), 'numpy.squeeze', 'np.squeeze', (['v'], {'axis': '(0)'}), '(v, axis=0)\n', (6991, 7002), True, 'import numpy as np\n'), ((9019, 9065), 'numpy.array', 'np.array', (['[joint2d[k] for k in keypoint_names]'], {}), '([joint2d[k] for k in keypoint_names])\n', (9027, 9065), True, 'import numpy as np\n'), ((12568, 12591), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (12589, 12591), False, 'import cv2\n'), ((1150, 1163), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1161, 1163), True, 'import tensorflow as tf\n'), ((3279, 3323), 'cv2.VideoCapture', 
'cv2.VideoCapture', (['GST_STR', 'cv2.CAP_GSTREAMER'], {}), '(GST_STR, cv2.CAP_GSTREAMER)\n', (3295, 3323), False, 'import cv2\n'), ((9286, 9300), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (9297, 9300), False, 'import cv2\n'), ((1177, 1209), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['args.model', '"""rb"""'], {}), "(args.model, 'rb')\n", (1191, 1209), True, 'import tensorflow as tf\n'), ((1331, 1373), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (1350, 1373), True, 'import tensorflow as tf\n'), ((1698, 1730), 'tensorflow.compat.v1.get_default_graph', 'tf.compat.v1.get_default_graph', ([], {}), '()\n', (1728, 1730), True, 'import tensorflow as tf\n'), ((2191, 2223), 'tensorflow.compat.v1.get_default_graph', 'tf.compat.v1.get_default_graph', ([], {}), '()\n', (2221, 2223), True, 'import tensorflow as tf\n'), ((3390, 3409), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(1)'], {}), '(1)\n', (3406, 3409), False, 'import cv2\n'), ((3446, 3456), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3454, 3456), False, 'import sys\n'), ((6221, 6241), 'hand_dataset.image_utils.normalize_rgb', 'normalize_rgb', (['image'], {}), '(image)\n', (6234, 6241), False, 'from hand_dataset.image_utils import normalize_rgb\n'), ((9447, 9474), 'cv2.resize', 'cv2.resize', (['img', '(300, 300)'], {}), '(img, (300, 300))\n', (9457, 9474), False, 'import cv2\n'), ((9584, 9616), 'numpy.expand_dims', 'np.expand_dims', (['image_np'], {'axis': '(0)'}), '(image_np, axis=0)\n', (9598, 9616), True, 'import numpy as np\n'), ((9637, 9648), 'time.time', 'time.time', ([], {}), '()\n', (9646, 9648), False, 'import time\n'), ((12467, 12502), 'cv2.imshow', 'cv2.imshow', (['"""detection result"""', 'img'], {}), "('detection result', img)\n", (12477, 12502), False, 'import cv2\n'), ((7497, 7516), 'numpy.argmax', 'np.argmax', (['delta[i]'], {}), '(delta[i])\n', (7506, 7516), True, 'import numpy as np\n'), ((8346, 8363), 
'numpy.argmax', 'np.argmax', (['target'], {}), '(target)\n', (8355, 8363), True, 'import numpy as np\n'), ((9769, 9780), 'time.time', 'time.time', ([], {}), '()\n', (9778, 9780), False, 'import time\n'), ((2089, 2121), 'tensorflow.compat.v1.get_default_graph', 'tf.compat.v1.get_default_graph', ([], {}), '()\n', (2119, 2121), True, 'import tensorflow as tf\n'), ((12320, 12425), 'cv2.putText', 'cv2.putText', (['img', 'information', '(xmin, ymax)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(1)', 'cv2.LINE_AA'], {}), '(img, information, (xmin, ymax), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,\n 0, 255), 1, cv2.LINE_AA)\n', (12331, 12425), False, 'import cv2\n'), ((10317, 10339), 'numpy.array', 'np.array', (['[h, w, h, w]'], {}), '([h, w, h, w])\n', (10325, 10339), True, 'import numpy as np\n'), ((11707, 11808), 'cv2.putText', 'cv2.putText', (['img', 'speed_info', '(10, 50)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(1)', 'cv2.LINE_AA'], {}), '(img, speed_info, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, \n 255), 1, cv2.LINE_AA)\n', (11718, 11808), False, 'import cv2\n'), ((11864, 11926), 'cv2.rectangle', 'cv2.rectangle', (['img', '(umin, vmin)', '(umax, vmax)', '(0, 255, 0)', '(3)'], {}), '(img, (umin, vmin), (umax, vmax), (0, 255, 0), 3)\n', (11877, 11926), False, 'import cv2\n'), ((12059, 12121), 'cv2.rectangle', 'cv2.rectangle', (['img', '(xmin, ymin)', '(xmax, ymax)', '(0, 0, 255)', '(3)'], {}), '(img, (xmin, ymin), (xmax, ymax), (0, 0, 255), 3)\n', (12072, 12121), False, 'import cv2\n'), ((11216, 11240), 'numpy.array', 'np.array', (['[[vmin, umin]]'], {}), '([[vmin, umin]])\n', (11224, 11240), True, 'import numpy as np\n'), ((11560, 11609), 'cv2.line', 'cv2.line', (['img', '(sx, sy)', '(tx, ty)', '(255, 0, 0)', '(5)'], {}), '(img, (sx, sy), (tx, ty), (255, 0, 0), 5)\n', (11568, 11609), False, 'import cv2\n'), ((11155, 11203), 'numpy.array', 'np.array', (['[[oriH / model.inH, oriW / model.inW]]'], {}), '([[oriH / model.inH, oriW / model.inW]])\n', 
(11163, 11203), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
class SelectMajorCategories:
    """Group rare categorical values into a single "minor" label.

    A category in a fitted column is "major" when its frequency exceeds
    ``perc`` of the number of rows seen during :meth:`fit`; every other
    value is replaced by ``minor_label`` in :meth:`transform`.
    """

    def __init__(self, columns: list, perc: float = 0.1, minor_label='<other>', dropna=True):
        # Columns to process; None means no columns (transform is a no-op).
        self.columns = columns if columns is not None else []
        self.perc = perc  # frequency threshold as a fraction of fitted rows
        self.major_categories = {}  # column name -> Index of major category values
        self.minor_label = minor_label
        self.dropna = dropna  # forwarded to value_counts: whether NaN is excluded

    def fit(self, x_df):
        """Learn, per configured column, which categories occur often enough to keep."""
        for col in self.columns:
            col_value_counts = x_df[col].value_counts(dropna=self.dropna)
            col_major_counts = col_value_counts[col_value_counts > self.perc * x_df.shape[0]]
            self.major_categories[col] = col_major_counts.index
        return self

    def transform(self, x_df):
        """Return a copy of ``x_df`` with minor categories replaced by ``minor_label``."""
        x_df_ = x_df.copy()
        for col in self.columns:
            # BUG FIX: the original used chained indexing
            # (x_df_[col][mask] = ...), which assigns through a temporary and
            # can silently fail (pandas SettingWithCopyWarning). Use .loc.
            minor_mask = ~np.isin(x_df_[col], self.major_categories[col])
            x_df_.loc[minor_mask, col] = self.minor_label
        return x_df_
class CycleEncoder:
    """Encode a periodic quantity as a point on the unit circle.

    Values ``x`` with period ``period`` are mapped to
    ``(cos(2*pi*x/period), sin(2*pi*x/period))``, so that values one full
    period apart encode identically.
    """

    def __init__(self, period):
        self.period = period

    def fit(self, x_array):
        # Nothing to learn; present only for API symmetry with other encoders.
        pass

    def transform(self, x_array):
        """Return the (cosine, sine) components of ``x_array`` on the cycle."""
        angle = x_array * (2 * np.pi / self.period)
        return np.cos(angle), np.sin(angle)
| [
"numpy.isin",
"numpy.sin",
"numpy.cos"
] | [((1065, 1108), 'numpy.cos', 'np.cos', (['(x_array * (2 * np.pi / self.period))'], {}), '(x_array * (2 * np.pi / self.period))\n', (1071, 1108), True, 'import numpy as np\n'), ((1127, 1170), 'numpy.sin', 'np.sin', (['(x_array * (2 * np.pi / self.period))'], {}), '(x_array * (2 * np.pi / self.period))\n', (1133, 1170), True, 'import numpy as np\n'), ((794, 840), 'numpy.isin', 'np.isin', (['x_df[col]', 'self.major_categories[col]'], {}), '(x_df[col], self.major_categories[col])\n', (801, 840), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from numcube.experimental import MultiAxis
class MultiAxisTests(unittest.TestCase):
    """Basic construction checks for numcube's experimental MultiAxis."""

    def test_create(self):
        record_dtype = [('A', float), ('B', int), ('C', str)]
        records = np.array([(1.5, 1, "x"), (0.5, 1, "y")], dtype=record_dtype)
        axis = MultiAxis(records)
        # The axis name is the tuple of field names; its length is the row count.
        self.assertEqual(axis.name, ("A", "B", "C"))
        self.assertEqual(len(axis), 2)
| [
"numcube.experimental.MultiAxis",
"numpy.array"
] | [((167, 258), 'numpy.array', 'np.array', (["[(1.5, 1, 'x'), (0.5, 1, 'y')]"], {'dtype': "[('A', float), ('B', int), ('C', str)]"}), "([(1.5, 1, 'x'), (0.5, 1, 'y')], dtype=[('A', float), ('B', int), (\n 'C', str)])\n", (175, 258), True, 'import numpy as np\n'), ((266, 283), 'numcube.experimental.MultiAxis', 'MultiAxis', (['values'], {}), '(values)\n', (275, 283), False, 'from numcube.experimental import MultiAxis\n')] |
import unittest
from frds.measures import bank
import numpy as np
class AbsorptionRatioCase(unittest.TestCase):
def setUp(self) -> None:
# The data in the doc by <NAME>, <NAME>, and <NAME>
self.data = np.array(
[
[0.015, 0.031, 0.007, 0.034, 0.014, 0.011],
[0.012, 0.063, 0.027, 0.023, 0.073, 0.055],
[0.072, 0.043, 0.097, 0.078, 0.036, 0.083],
]
)
np.random.seed(0)
self.more_data = np.random.normal(0, 1, (50, 10))
self.more_data = np.round(self.more_data / 100, 3).T
def test_original_data(self):
# Results computed using the Matlab code by <NAME>, <NAME>, and <NAME>
self.assertAlmostEqual(bank.absorption_ratio(self.data, 0.1), 0.0, 4)
self.assertAlmostEqual(bank.absorption_ratio(self.data, 0.2), 0.7747, 4)
self.assertAlmostEqual(bank.absorption_ratio(self.data, 0.3), 0.7747, 4)
self.assertAlmostEqual(bank.absorption_ratio(self.data, 0.4), 0.7747, 4)
self.assertAlmostEqual(bank.absorption_ratio(self.data, 0.5), 0.9435, 4)
self.assertAlmostEqual(bank.absorption_ratio(self.data, 0.6), 0.9435, 4)
self.assertAlmostEqual(bank.absorption_ratio(self.data, 0.7), 0.9435, 4)
self.assertAlmostEqual(bank.absorption_ratio(self.data, 0.8), 0.9435, 4)
self.assertAlmostEqual(bank.absorption_ratio(self.data, 0.9), 1, 4)
self.assertAlmostEqual(bank.absorption_ratio(self.data, 1), 1, 4)
def test_more_data(self):
# Results computed using the Matlab code by <NAME>, <NAME>, and <NAME>
self.assertAlmostEqual(bank.absorption_ratio(self.more_data, 0.1), 0.1851, 4)
self.assertAlmostEqual(bank.absorption_ratio(self.more_data, 0.2), 0.3234, 4)
self.assertAlmostEqual(bank.absorption_ratio(self.more_data, 0.3), 0.4594, 4)
self.assertAlmostEqual(bank.absorption_ratio(self.more_data, 0.4), 0.5752, 4)
self.assertAlmostEqual(bank.absorption_ratio(self.more_data, 0.5), 0.6743, 4)
self.assertAlmostEqual(bank.absorption_ratio(self.more_data, 0.6), 0.7596, 4)
self.assertAlmostEqual(bank.absorption_ratio(self.more_data, 0.7), 0.8405, 4)
self.assertAlmostEqual(bank.absorption_ratio(self.more_data, 0.8), 0.9103, 4)
self.assertAlmostEqual(bank.absorption_ratio(self.more_data, 0.9), 0.9740, 4)
self.assertAlmostEqual(bank.absorption_ratio(self.more_data, 1), 1, 4)
class MarginalExpectedShortfallCase(unittest.TestCase):
    """Tests for bank.marginal_expected_shortfall (MES)."""

    def setUp(self):
        # The data in the doc by <NAME>, <NAME>, and <NAME>
        self.firm_returns = np.array(
            [
                -0.1595,
                0.1211,
                -0.0806,
                -0.0291,
                0.0897,
                -0.0254,
                0.1210,
                0.0132,
                -0.1214,
                0.1901,
                0.0243,
            ]
        )
        self.market_returns = np.array(
            [
                -0.0205,
                -0.0510,
                0.0438,
                0.0914,
                0.0243,
                -0.1051,
                0.0121,
                0.0221,
                -0.0401,
                -0.0111,
                -0.0253,
            ]
        )
        # simulated data
        # Fixed seed so the simulated series (and expected MES) are reproducible.
        np.random.seed(0)
        self.sim_firm_returns = np.round(np.random.normal(0, 1, (100,)) / 100, 3)
        self.sim_market_returns = np.round(np.random.normal(0, 1, (100,)) / 100, 3)
    def test_mes(self):
        # Results computed using the Matlab code by <NAME>, <NAME>, and <NAME>
        mes = bank.marginal_expected_shortfall(
            self.firm_returns, self.market_returns, q=0.05
        )
        self.assertAlmostEqual(mes, -0.0254, 4)
    def test_simulated_data(self):
        # Results computed using the Matlab code by <NAME>, <NAME>, and <NAME>
        mes = bank.marginal_expected_shortfall(
            self.sim_firm_returns, self.sim_market_returns, q=0.01
        )
        self.assertAlmostEqual(mes, -0.015, 4)
        # A looser quantile over the same data gives a different tail average.
        mes = bank.marginal_expected_shortfall(
            self.sim_firm_returns, self.sim_market_returns, q=0.05
        )
        self.assertAlmostEqual(mes, 0.0016, 4)
class SystemicExpectedShortfallCase(unittest.TestCase):
    """Test bank.systemic_expected_shortfall (SES) on the reference example."""

    def test_ses(self) -> None:
        # Training sample and expected value from the reference Matlab code
        # by <NAME>, <NAME>, and <NAME>.
        mes_sample = np.array([-0.023, -0.07, 0.01])
        lvg_sample = np.array([1.8, 1.5, 2.2])
        ses_sample = np.array([0.3, 0.4, -0.2])
        # Firm under test: MES = 0.04, leverage = 1.7.
        result = bank.systemic_expected_shortfall(
            mes_sample, lvg_sample, ses_sample, 0.04, 1.7
        )
        self.assertAlmostEqual(result, -0.333407572383073, 6)
class DistressInsurancePremiumCase(unittest.TestCase):
    """Test bank.distress_insurance_premium (DIP) on the reference example."""

    def test_dip(self):
        # Data from and results computed using the Matlab code by <NAME>, <NAME>, and <NAME>
        default_probabilities = np.array([0.02, 0.10, 0.03, 0.20, 0.50, 0.15])
        # Pairwise correlation matrix (symmetric with unit diagonal).
        correlations = np.array(
            [
                [1, -0.1260125, -0.6366762, 0.1744837, 0.4689378, 0.2831761],
                [-0.1260125, 1, 0.294223, 0.673963, 0.1499695, 0.05250343],
                [-0.6366762, 0.294223, 1, 0.07259309, -0.6579669, -0.0848825],
                [0.1744837, 0.673963, 0.07259309, 1, 0.2483188, 0.5078022],
                [0.4689378, 0.1499695, -0.6579669, 0.2483188, 1, -0.3703121],
                [0.2831761, 0.05250343, -0.0848825, 0.5078022, -0.3703121, 1],
            ]
        )
        dip = bank.distress_insurance_premium(default_probabilities, correlations)
        # Only two decimal places are checked (presumably the estimate is
        # approximate/stochastic -- confirm against frds docs).
        self.assertAlmostEqual(dip, 0.29, 2)
class CCACase(unittest.TestCase):
    """Test bank.cca (contingent claims analysis) on fixed example inputs."""

    def test_cca(self):
        # Positional inputs: equity, volatility, risk-free rate,
        # default barrier, time to maturity, CDS spread.
        inputs = (5, 1.2, 0.02, 10, 20, 1.5)
        put_price, srisk_contribution = bank.cca(*inputs)
        self.assertAlmostEqual(put_price, 6.6594, 2)
        self.assertAlmostEqual(srisk_contribution, 3.3468, 3)
class bank_z_score(unittest.TestCase):
def test_z_score(self):
z = bank.z_score(0.1, 0.3, np.array([0.14, 0.15, 0.12, 0.13]))
self.assertAlmostEqual(z, 35.777088, 4) | [
"frds.measures.bank.distress_insurance_premium",
"frds.measures.bank.cca",
"frds.measures.bank.absorption_ratio",
"frds.measures.bank.marginal_expected_shortfall",
"numpy.random.seed",
"frds.measures.bank.systemic_expected_shortfall",
"numpy.array",
"numpy.random.normal",
"numpy.round"
] | [((223, 369), 'numpy.array', 'np.array', (['[[0.015, 0.031, 0.007, 0.034, 0.014, 0.011], [0.012, 0.063, 0.027, 0.023, \n 0.073, 0.055], [0.072, 0.043, 0.097, 0.078, 0.036, 0.083]]'], {}), '([[0.015, 0.031, 0.007, 0.034, 0.014, 0.011], [0.012, 0.063, 0.027,\n 0.023, 0.073, 0.055], [0.072, 0.043, 0.097, 0.078, 0.036, 0.083]])\n', (231, 369), True, 'import numpy as np\n'), ((459, 476), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (473, 476), True, 'import numpy as np\n'), ((502, 534), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(50, 10)'], {}), '(0, 1, (50, 10))\n', (518, 534), True, 'import numpy as np\n'), ((2635, 2741), 'numpy.array', 'np.array', (['[-0.1595, 0.1211, -0.0806, -0.0291, 0.0897, -0.0254, 0.121, 0.0132, -0.1214,\n 0.1901, 0.0243]'], {}), '([-0.1595, 0.1211, -0.0806, -0.0291, 0.0897, -0.0254, 0.121, 0.0132,\n -0.1214, 0.1901, 0.0243])\n', (2643, 2741), True, 'import numpy as np\n'), ((2982, 3089), 'numpy.array', 'np.array', (['[-0.0205, -0.051, 0.0438, 0.0914, 0.0243, -0.1051, 0.0121, 0.0221, -0.0401,\n -0.0111, -0.0253]'], {}), '([-0.0205, -0.051, 0.0438, 0.0914, 0.0243, -0.1051, 0.0121, 0.0221,\n -0.0401, -0.0111, -0.0253])\n', (2990, 3089), True, 'import numpy as np\n'), ((3333, 3350), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3347, 3350), True, 'import numpy as np\n'), ((3635, 3720), 'frds.measures.bank.marginal_expected_shortfall', 'bank.marginal_expected_shortfall', (['self.firm_returns', 'self.market_returns'], {'q': '(0.05)'}), '(self.firm_returns, self.market_returns, q=0.05\n )\n', (3667, 3720), False, 'from frds.measures import bank\n'), ((3915, 4008), 'frds.measures.bank.marginal_expected_shortfall', 'bank.marginal_expected_shortfall', (['self.sim_firm_returns', 'self.sim_market_returns'], {'q': '(0.01)'}), '(self.sim_firm_returns, self.\n sim_market_returns, q=0.01)\n', (3947, 4008), False, 'from frds.measures import bank\n'), ((4088, 4181), 
'frds.measures.bank.marginal_expected_shortfall', 'bank.marginal_expected_shortfall', (['self.sim_firm_returns', 'self.sim_market_returns'], {'q': '(0.05)'}), '(self.sim_firm_returns, self.\n sim_market_returns, q=0.05)\n', (4120, 4181), False, 'from frds.measures import bank\n'), ((4460, 4491), 'numpy.array', 'np.array', (['[-0.023, -0.07, 0.01]'], {}), '([-0.023, -0.07, 0.01])\n', (4468, 4491), True, 'import numpy as np\n'), ((4522, 4547), 'numpy.array', 'np.array', (['[1.8, 1.5, 2.2]'], {}), '([1.8, 1.5, 2.2])\n', (4530, 4547), True, 'import numpy as np\n'), ((4578, 4604), 'numpy.array', 'np.array', (['[0.3, 0.4, -0.2]'], {}), '([0.3, 0.4, -0.2])\n', (4586, 4604), True, 'import numpy as np\n'), ((4667, 4786), 'frds.measures.bank.systemic_expected_shortfall', 'bank.systemic_expected_shortfall', (['mes_training_sample', 'lvg_training_sample', 'ses_training_sample', 'mes_firm', 'lvg_firm'], {}), '(mes_training_sample, lvg_training_sample,\n ses_training_sample, mes_firm, lvg_firm)\n', (4699, 4786), False, 'from frds.measures import bank\n'), ((5121, 5164), 'numpy.array', 'np.array', (['[0.02, 0.1, 0.03, 0.2, 0.5, 0.15]'], {}), '([0.02, 0.1, 0.03, 0.2, 0.5, 0.15])\n', (5129, 5164), True, 'import numpy as np\n'), ((5191, 5594), 'numpy.array', 'np.array', (['[[1, -0.1260125, -0.6366762, 0.1744837, 0.4689378, 0.2831761], [-0.1260125,\n 1, 0.294223, 0.673963, 0.1499695, 0.05250343], [-0.6366762, 0.294223, 1,\n 0.07259309, -0.6579669, -0.0848825], [0.1744837, 0.673963, 0.07259309, \n 1, 0.2483188, 0.5078022], [0.4689378, 0.1499695, -0.6579669, 0.2483188,\n 1, -0.3703121], [0.2831761, 0.05250343, -0.0848825, 0.5078022, -\n 0.3703121, 1]]'], {}), '([[1, -0.1260125, -0.6366762, 0.1744837, 0.4689378, 0.2831761], [-\n 0.1260125, 1, 0.294223, 0.673963, 0.1499695, 0.05250343], [-0.6366762, \n 0.294223, 1, 0.07259309, -0.6579669, -0.0848825], [0.1744837, 0.673963,\n 0.07259309, 1, 0.2483188, 0.5078022], [0.4689378, 0.1499695, -0.6579669,\n 0.2483188, 1, -0.3703121], [0.2831761, 
0.05250343, -0.0848825, \n 0.5078022, -0.3703121, 1]])\n', (5199, 5594), True, 'import numpy as np\n'), ((5719, 5787), 'frds.measures.bank.distress_insurance_premium', 'bank.distress_insurance_premium', (['default_probabilities', 'correlations'], {}), '(default_probabilities, correlations)\n', (5750, 5787), False, 'from frds.measures import bank\n'), ((6092, 6187), 'frds.measures.bank.cca', 'bank.cca', (['equity', 'volatility', 'risk_free_rate', 'default_barrier', 'time_to_maturity', 'cds_spread'], {}), '(equity, volatility, risk_free_rate, default_barrier,\n time_to_maturity, cds_spread)\n', (6100, 6187), False, 'from frds.measures import bank\n'), ((560, 593), 'numpy.round', 'np.round', (['(self.more_data / 100)', '(3)'], {}), '(self.more_data / 100, 3)\n', (568, 593), True, 'import numpy as np\n'), ((741, 778), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.data', '(0.1)'], {}), '(self.data, 0.1)\n', (762, 778), False, 'from frds.measures import bank\n'), ((819, 856), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.data', '(0.2)'], {}), '(self.data, 0.2)\n', (840, 856), False, 'from frds.measures import bank\n'), ((900, 937), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.data', '(0.3)'], {}), '(self.data, 0.3)\n', (921, 937), False, 'from frds.measures import bank\n'), ((981, 1018), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.data', '(0.4)'], {}), '(self.data, 0.4)\n', (1002, 1018), False, 'from frds.measures import bank\n'), ((1062, 1099), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.data', '(0.5)'], {}), '(self.data, 0.5)\n', (1083, 1099), False, 'from frds.measures import bank\n'), ((1143, 1180), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.data', '(0.6)'], {}), '(self.data, 0.6)\n', (1164, 1180), False, 'from frds.measures import bank\n'), ((1224, 1261), 'frds.measures.bank.absorption_ratio', 
'bank.absorption_ratio', (['self.data', '(0.7)'], {}), '(self.data, 0.7)\n', (1245, 1261), False, 'from frds.measures import bank\n'), ((1305, 1342), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.data', '(0.8)'], {}), '(self.data, 0.8)\n', (1326, 1342), False, 'from frds.measures import bank\n'), ((1386, 1423), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.data', '(0.9)'], {}), '(self.data, 0.9)\n', (1407, 1423), False, 'from frds.measures import bank\n'), ((1462, 1497), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.data', '(1)'], {}), '(self.data, 1)\n', (1483, 1497), False, 'from frds.measures import bank\n'), ((1646, 1688), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.more_data', '(0.1)'], {}), '(self.more_data, 0.1)\n', (1667, 1688), False, 'from frds.measures import bank\n'), ((1732, 1774), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.more_data', '(0.2)'], {}), '(self.more_data, 0.2)\n', (1753, 1774), False, 'from frds.measures import bank\n'), ((1818, 1860), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.more_data', '(0.3)'], {}), '(self.more_data, 0.3)\n', (1839, 1860), False, 'from frds.measures import bank\n'), ((1904, 1946), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.more_data', '(0.4)'], {}), '(self.more_data, 0.4)\n', (1925, 1946), False, 'from frds.measures import bank\n'), ((1990, 2032), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.more_data', '(0.5)'], {}), '(self.more_data, 0.5)\n', (2011, 2032), False, 'from frds.measures import bank\n'), ((2076, 2118), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.more_data', '(0.6)'], {}), '(self.more_data, 0.6)\n', (2097, 2118), False, 'from frds.measures import bank\n'), ((2162, 2204), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.more_data', 
'(0.7)'], {}), '(self.more_data, 0.7)\n', (2183, 2204), False, 'from frds.measures import bank\n'), ((2248, 2290), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.more_data', '(0.8)'], {}), '(self.more_data, 0.8)\n', (2269, 2290), False, 'from frds.measures import bank\n'), ((2334, 2376), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.more_data', '(0.9)'], {}), '(self.more_data, 0.9)\n', (2355, 2376), False, 'from frds.measures import bank\n'), ((2420, 2460), 'frds.measures.bank.absorption_ratio', 'bank.absorption_ratio', (['self.more_data', '(1)'], {}), '(self.more_data, 1)\n', (2441, 2460), False, 'from frds.measures import bank\n'), ((6487, 6521), 'numpy.array', 'np.array', (['[0.14, 0.15, 0.12, 0.13]'], {}), '([0.14, 0.15, 0.12, 0.13])\n', (6495, 6521), True, 'import numpy as np\n'), ((3392, 3422), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(100,)'], {}), '(0, 1, (100,))\n', (3408, 3422), True, 'import numpy as np\n'), ((3476, 3506), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(100,)'], {}), '(0, 1, (100,))\n', (3492, 3506), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
# In[16]:
def PairSelection(self, date):
    '''Select the ETF pair with the maximum Kendall tau rank correlation.

    Called by the universe-selection model; re-selection happens only on
    the first data day of each calendar month.

    NOTE(review): relies on QuantConnect framework names (`Universe`,
    `Symbol`, `SecurityType`, `Market`) and on `kendalltau` (presumably
    `scipy.stats.kendalltau`) being in scope at runtime -- confirm against
    the full algorithm file.
    '''
    # Skip until the month changes; `self.month` caches the last run.
    if date.month == self.month:
        return Universe.Unchanged
    # Candidate universe: hard-coded list of pre-paired ETFs (each
    # consecutive pair of tickers is one candidate pair).
    symbols = [ Symbol.Create(x, SecurityType.Equity, Market.USA)
        for x in [
            "QQQ", "XLK",
            "XME", "EWG",
            "TNA", "TLT",
            "FAS", "FAZ",
            "XLF", "XLU",
            "EWC", "EWA",
            "QLD", "QID"
        ] ]
    logreturns = self._get_historical_returns(symbols, self.lookbackdays)
    # Track the running maximum tau over the candidate pairs.
    tau = 0
    for i in range(0, len(symbols), 2):
        x = logreturns[str(symbols[i])]
        y = logreturns[str(symbols[i+1])]
        # Estimate Kendall rank correlation for each pair
        tau_ = kendalltau(x, y)[0]
        if tau > tau_:
            continue
        # New best pair found; remember it on the algorithm instance.
        tau = tau_
        self.pair = symbols[i:i+2]
    return [x.Value for x in self.pair]
# In[17]:
def _parameter(self, family, tau):
    ''' Estimate the parameter of an Archimedean copula from Kendall's tau.

    Uses the closed-form association between each copula family
    ('clayton', 'frank', 'gumbel') and the Kendall rank correlation.

    NOTE(review): `quad`, `minimize` and `sys` are not imported in this
    cell -- they are presumably `scipy.integrate.quad`,
    `scipy.optimize.minimize` and the `sys` module imported elsewhere in
    the notebook; confirm before reuse.
    '''
    if family == 'clayton':
        # Clayton: theta = 2*tau / (1 - tau)
        return 2 * tau / (1 - tau)
    elif family == 'frank':
        '''
        debye = quad(integrand, sys.float_info.epsilon, theta)[0]/theta is first order Debye function
        frank_fun is the squared difference
        Minimize the frank_fun would give the parameter theta for the frank copula
        '''
        integrand = lambda t: t / (np.exp(t) - 1) # generate the integrand
        frank_fun = lambda theta: ((tau - 1) / 4.0 - (quad(integrand, sys.float_info.epsilon, theta)[0] / theta - 1) / theta) ** 2
        # Note: returns a 1-element ndarray (`.x` of the optimizer result).
        return minimize(frank_fun, 4, method='BFGS', tol=1e-5).x
    elif family == 'gumbel':
        # Gumbel: theta = 1 / (1 - tau)
        return 1 / (1 - tau)
# In[18]:
def _lpdf_copula(self, family, theta, u, v):
'''Estimate the log probability density function of three kinds of Archimedean copulas
'''
if family == 'clayton':
pdf = (theta + 1) * ((u ** (-theta) + v ** (-theta) - 1) ** (-2 - 1 / theta)) * (u ** (-theta - 1) * v ** (-theta - 1))
elif family == 'frank':
num = -theta * (np.exp(-theta) - 1) * (np.exp(-theta * (u + v)))
denom = ((np.exp(-theta * u) - 1) * (np.exp(-theta * v) - 1) + (np.exp(-theta) - 1)) ** 2
pdf = num / denom
elif family == 'gumbel':
A = (-np.log(u)) ** theta + (-np.log(v)) ** theta
c = np.exp(-A ** (1 / theta))
pdf = c * (u * v) ** (-1) * (A ** (-2 + 2 / theta)) * ((np.log(u) * np.log(v)) ** (theta - 1)) * (1 + (theta - 1) * A ** (-1 / theta))
return np.log(pdf)
# In[19]:
def SetSignal(self, slice):
    '''Fit the best copula for the current pair and refresh trading state.

    Runs only on the first data day of each month: selects the copula
    family with minimum AIC, estimates its parameter theta, and caches the
    empirical CDFs and the hedge-ratio regression coefficient on `self`.

    NOTE(review): `ECDF` (presumably statsmodels), `kendalltau` and
    `stats` (presumably scipy.stats) are imported elsewhere in the
    notebook -- confirm before reuse.
    '''
    # Run once per month; self.month is updated at the end.
    if self.Time.month == self.month:
        return
    ## Compute the best copula
    # Pull historical log returns used to determine copula
    logreturns = self._get_historical_returns(self.pair, self.numdays)
    x, y = logreturns[str(self.pair[0])], logreturns[str(self.pair[1])]
    # Convert the two returns series to two uniform values u and v using the empirical distribution functions
    ecdf_x, ecdf_y = ECDF(x), ECDF(y)
    u, v = [ecdf_x(a) for a in x], [ecdf_y(a) for a in y]
    # Compute the Akaike Information Criterion (AIC) for different copulas and choose copula with minimum AIC
    tau = kendalltau(x, y)[0]  # estimate Kendall rank correlation
    AIC ={} # dict keyed by copula family, value = [theta, AIC]
    for i in ['clayton', 'frank', 'gumbel']:
        param = self._parameter(i, tau)
        lpdf = [self._lpdf_copula(i, param, x, y) for (x, y) in zip(u, v)]
        # Replace nan with zero and inf with finite numbers in lpdf list
        lpdf = np.nan_to_num(lpdf)
        loglikelihood = sum(lpdf)
        # AIC = -2*logL + 2*k with k = 1 free parameter (theta).
        AIC[i] = [param, -2 * loglikelihood + 2]
    # Choose the copula with the minimum AIC
    self.copula = min(AIC.items(), key = lambda x: x[1][1])[0]
    ## Compute the signals
    # Generate the log return series of the selected trading pair
    logreturns = logreturns.tail(self.lookbackdays)
    x, y = logreturns[str(self.pair[0])], logreturns[str(self.pair[1])]
    # Estimate Kendall rank correlation on the shorter lookback window
    tau = kendalltau(x, y)[0]
    # Estimate the copula parameter: theta
    self.theta = self._parameter(self.copula, tau)
    # Simulate the empirical distribution function for returns of selected trading pair
    self.ecdf_x, self.ecdf_y = ECDF(x), ECDF(y)
    # Run linear regression over the two history return series and return the desired trading size ratio
    self.coef = stats.linregress(x,y).slope
    self.month = self.Time.month
# In[6]:
def _parameter(self, family, tau):
    ''' Estimate the parameter of an Archimedean copula from Kendall's tau
    according to the association between Archimedean copulas and the
    Kendall rank correlation measure.

    NOTE(review): this notebook cell is a duplicate of the earlier
    `_parameter` definition and redefines it at import time; `quad`,
    `minimize` and `sys` must be in scope at runtime -- confirm.
    '''
    if family == 'clayton':
        # Clayton: theta = 2*tau / (1 - tau)
        return 2 * tau / (1 - tau)
    elif family == 'frank':
        # Frank has no closed form: minimize the squared difference between
        # the Debye-function relation and the observed tau.
        integrand = lambda t: t / (np.exp(t) - 1) # generate the integrand
        frank_fun = lambda theta: ((tau - 1) / 4.0 - (quad(integrand, sys.float_info.epsilon, theta)[0] / theta - 1) / theta) ** 2
        return minimize(frank_fun, 4, method='BFGS', tol=1e-5).x
    elif family == 'gumbel':
        # Gumbel: theta = 1 / (1 - tau)
        return 1 / (1 - tau)
# In[22]:
def OnData(self, slice):
    '''Main event handler. Implement trading logic.

    Rebalances at most once per day: computes the mispricing indices from
    the fitted copula and opens a hedged long/short position when one leg
    is outside the confidence band (floor_CL / cap_CL).
    '''
    self.SetSignal(slice) # only executed at first day of each month
    # Daily rebalance
    if self.Time.day == self.day:
        return
    # NOTE(review): `long` shadows the builtin name; left unchanged here.
    long, short = self.pair[0], self.pair[1]
    # Update current price to trading pair's historical price series
    for kvp in self.Securities:
        symbol = kvp.Key
        if symbol in self.pair:
            price = kvp.Value.Price
            self.window[symbol].append(price)
    # Need at least two observations per leg before trading.
    if len(self.window[long]) < 2 or len(self.window[short]) < 2:
        return
    # Compute the mispricing indices for u and v by using estimated copula
    MI_u_v, MI_v_u = self._misprice_index()
    # Placing orders: if long is relatively underpriced, buy the pair
    if MI_u_v < self.floor_CL and MI_v_u > self.cap_CL:
        self.SetHoldings(short, -self.weight_v, False, f'Coef: {self.coef}')
        # Size the long leg by the regression hedge ratio and price ratio.
        self.SetHoldings(long, self.weight_v * self.coef * self.Portfolio[long].Price / self.Portfolio[short].Price)
    # Placing orders: if short is relatively underpriced, sell the pair
    elif MI_u_v > self.cap_CL and MI_v_u < self.floor_CL:
        self.SetHoldings(short, self.weight_v, False, f'Coef: {self.coef}')
        self.SetHoldings(long, -self.weight_v * self.coef * self.Portfolio[long].Price / self.Portfolio[short].Price)
    self.day = self.Time.day
# In[23]:
        # NOTE(review): notebook-cell fragment -- it references `self` at the
        # top level, so it only runs when pasted back into the owning method
        # of the algorithm class (presumably an OnData-style handler).
        # Cointegration-style spread trading on log prices.
        log_close_x = np.log(self.closes_by_symbol[self.x_symbol])
        log_close_y = np.log(self.closes_by_symbol[self.y_symbol])
        # `regr` returns the residual spread series and the hedge ratio beta.
        spread, beta = self.regr(log_close_x, log_close_y)
        mean = np.mean(spread)
        std = np.std(spread)
        x_holdings = self.Portfolio[self.x_symbol]
        if x_holdings.Invested:
            # Exit when the spread has reverted through its mean.
            if x_holdings.IsShort and spread[-1] <= mean or x_holdings.IsLong and spread[-1] >= mean:
                self.Liquidate()
        else:
            # Normalize leg weights so the larger leg gets weight 0.5.
            if beta < 1:
                x_weight = 0.5
                y_weight = 0.5 / beta
            else:
                x_weight = 0.5 / beta
                y_weight = 0.5
            # Enter when the spread deviates more than `threshold` stds.
            if spread[-1] < mean - self.threshold * std:
                self.SetHoldings(self.y_symbol, -y_weight)
                self.SetHoldings(self.x_symbol, x_weight)
            if spread[-1] > mean + self.threshold * std:
                self.SetHoldings(self.x_symbol, -x_weight)
                self.SetHoldings(self.y_symbol, y_weight)
# In[ ]:
| [
"numpy.nan_to_num",
"numpy.log",
"numpy.std",
"numpy.mean",
"numpy.exp"
] | [((7129, 7173), 'numpy.log', 'np.log', (['self.closes_by_symbol[self.x_symbol]'], {}), '(self.closes_by_symbol[self.x_symbol])\n', (7135, 7173), True, 'import numpy as np\n'), ((7188, 7232), 'numpy.log', 'np.log', (['self.closes_by_symbol[self.y_symbol]'], {}), '(self.closes_by_symbol[self.y_symbol])\n', (7194, 7232), True, 'import numpy as np\n'), ((7293, 7308), 'numpy.mean', 'np.mean', (['spread'], {}), '(spread)\n', (7300, 7308), True, 'import numpy as np\n'), ((7315, 7329), 'numpy.std', 'np.std', (['spread'], {}), '(spread)\n', (7321, 7329), True, 'import numpy as np\n'), ((2898, 2909), 'numpy.log', 'np.log', (['pdf'], {}), '(pdf)\n', (2904, 2909), True, 'import numpy as np\n'), ((4083, 4102), 'numpy.nan_to_num', 'np.nan_to_num', (['lpdf'], {}), '(lpdf)\n', (4096, 4102), True, 'import numpy as np\n'), ((2467, 2491), 'numpy.exp', 'np.exp', (['(-theta * (u + v))'], {}), '(-theta * (u + v))\n', (2473, 2491), True, 'import numpy as np\n'), ((2717, 2742), 'numpy.exp', 'np.exp', (['(-A ** (1 / theta))'], {}), '(-A ** (1 / theta))\n', (2723, 2742), True, 'import numpy as np\n'), ((1776, 1785), 'numpy.exp', 'np.exp', (['t'], {}), '(t)\n', (1782, 1785), True, 'import numpy as np\n'), ((2444, 2458), 'numpy.exp', 'np.exp', (['(-theta)'], {}), '(-theta)\n', (2450, 2458), True, 'import numpy as np\n'), ((2565, 2579), 'numpy.exp', 'np.exp', (['(-theta)'], {}), '(-theta)\n', (2571, 2579), True, 'import numpy as np\n'), ((5359, 5368), 'numpy.exp', 'np.exp', (['t'], {}), '(t)\n', (5365, 5368), True, 'import numpy as np\n'), ((2511, 2529), 'numpy.exp', 'np.exp', (['(-theta * u)'], {}), '(-theta * u)\n', (2517, 2529), True, 'import numpy as np\n'), ((2538, 2556), 'numpy.exp', 'np.exp', (['(-theta * v)'], {}), '(-theta * v)\n', (2544, 2556), True, 'import numpy as np\n'), ((2661, 2670), 'numpy.log', 'np.log', (['u'], {}), '(u)\n', (2667, 2670), True, 'import numpy as np\n'), ((2685, 2694), 'numpy.log', 'np.log', (['v'], {}), '(v)\n', (2691, 2694), True, 'import numpy as 
np\n'), ((2807, 2816), 'numpy.log', 'np.log', (['u'], {}), '(u)\n', (2813, 2816), True, 'import numpy as np\n'), ((2819, 2828), 'numpy.log', 'np.log', (['v'], {}), '(v)\n', (2825, 2828), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import inferbeddings.nli.util as util
import logging
import pytest
logger = logging.getLogger(__name__)
def get_train(has_bos, has_eos, has_unk):
    """Build the tokenised SNLI training set.

    The vocabulary is derived from all three splits (train/dev/test),
    ordered by descending token frequency with lexicographic
    tie-breaking, and enumerated after the special indices that are
    reserved for padding / BOS / EOS / UNK.
    """
    train_instances, dev_instances, test_instances = util.SNLI.generate()
    all_instances = train_instances + dev_instances + test_instances

    # Count occurrences of every token over both sentences of every instance.
    token_counts = {}
    for instance in all_instances:
        for token in instance['sentence1_parse_tokens'] + instance['sentence2_parse_tokens']:
            token_counts[token] = token_counts.get(token, 0) + 1

    # Most frequent tokens first; ties broken lexicographically.
    sorted_vocabulary = sorted(token_counts.keys(), key=lambda t: (- token_counts[t], t))

    # Reserved indices: 0 PADDING, 1 START_OF_SENTENCE, 2 END_OF_SENTENCE,
    # 3 UNKNOWN_WORD.  Vocabulary enumeration starts after the enabled ones.
    bos_idx, eos_idx, unk_idx = 1, 2, 3
    start_idx = 1 + sum(1 for flag in (has_bos, has_eos, has_unk) if flag)

    index_to_token = dict(enumerate(sorted_vocabulary, start=start_idx))
    token_to_index = {token: index for index, token in index_to_token.items()}

    label_to_index = {
        'entailment': 0,
        'neutral': 1,
        'contradiction': 2
    }

    return util.instances_to_dataset(train_instances, token_to_index, label_to_index,
                                     has_bos=has_bos, has_eos=has_eos, has_unk=has_unk,
                                     bos_idx=bos_idx, eos_idx=eos_idx, unk_idx=unk_idx,
                                     max_len=None)
@pytest.mark.light
def test_nli_util():
    """Enabling BOS/EOS must lengthen every sentence by exactly two tokens."""
    with_specials = get_train(has_bos=True, has_eos=True, has_unk=True)
    without_specials = get_train(has_bos=False, has_eos=False, has_unk=False)

    for field in ('sentence1_length', 'sentence2_length'):
        np.testing.assert_allclose(np.array(without_specials[field]) + 2, with_specials[field])
if __name__ == '__main__':
    # Verbose logging makes failures easier to diagnose when the test
    # module is executed directly instead of through the pytest CLI.
    logging.basicConfig(level=logging.DEBUG)
    pytest.main([__file__])
| [
"logging.basicConfig",
"inferbeddings.nli.util.instances_to_dataset",
"pytest.main",
"inferbeddings.nli.util.SNLI.generate",
"numpy.array",
"logging.getLogger"
] | [((122, 149), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (139, 149), False, 'import logging\n'), ((247, 267), 'inferbeddings.nli.util.SNLI.generate', 'util.SNLI.generate', ([], {}), '()\n', (265, 267), True, 'import inferbeddings.nli.util as util\n'), ((1641, 1842), 'inferbeddings.nli.util.instances_to_dataset', 'util.instances_to_dataset', (['train_instances', 'token_to_index', 'label_to_index'], {'has_bos': 'has_bos', 'has_eos': 'has_eos', 'has_unk': 'has_unk', 'bos_idx': 'bos_idx', 'eos_idx': 'eos_idx', 'unk_idx': 'unk_idx', 'max_len': 'max_len'}), '(train_instances, token_to_index, label_to_index,\n has_bos=has_bos, has_eos=has_eos, has_unk=has_unk, bos_idx=bos_idx,\n eos_idx=eos_idx, unk_idx=unk_idx, max_len=max_len)\n', (1666, 1842), True, 'import inferbeddings.nli.util as util\n'), ((2585, 2625), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (2604, 2625), False, 'import logging\n'), ((2630, 2653), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (2641, 2653), False, 'import pytest\n'), ((2342, 2388), 'numpy.array', 'np.array', (["train_dataset_v2['sentence1_length']"], {}), "(train_dataset_v2['sentence1_length'])\n", (2350, 2388), True, 'import numpy as np\n'), ((2463, 2509), 'numpy.array', 'np.array', (["train_dataset_v2['sentence2_length']"], {}), "(train_dataset_v2['sentence2_length'])\n", (2471, 2509), True, 'import numpy as np\n')] |
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from __future__ import absolute_import, division, print_function
import numpy as np
import torch
from torch import nn
from torch.cuda.amp import GradScaler, autocast
from torchreid import metrics
from torchreid.engine.engine import Engine
from torchreid.losses import AMSoftmaxLoss, CrossEntropyLoss
from torchreid.losses import AsymmetricLoss, AMBinaryLoss
from torchreid.optim import SAM
class MultiheadEngine(Engine):
    r"""Training engine for multihead image classification.

    Combines a softmax-type loss ('softmax' or 'am_softmax') over the
    exclusive multiclass heads with a multilabel loss ('asl', 'bce' or
    'am_binary') over the remaining classes.  Supports SAM optimizers,
    gradient clipping, NNCF compression and mixed-precision training
    via ``torch.cuda.amp``.
    """
    def __init__(self, datamanager, models, optimizers, schedulers, use_gpu, save_all_chkpts,
                 train_patience, early_stopping, lr_decay_factor, loss_name, label_smooth,
                 margin_type, aug_type, decay_power, alpha, lr_finder, aug_prob,
                 conf_penalty, pr_product, m, amb_k, amb_t, clip_grad, symmetric_ce, enable_rsc,
                 should_freeze_aux_models, nncf_metainfo, compression_ctrl, initial_lr,
                 target_metric, use_ema_decay, ema_decay, asl_gamma_pos, asl_gamma_neg, asl_p_m,
                 mix_precision, **kwargs):
        super().__init__(datamanager,
                         models=models,
                         optimizers=optimizers,
                         schedulers=schedulers,
                         use_gpu=use_gpu,
                         save_all_chkpts=save_all_chkpts,
                         train_patience=train_patience,
                         lr_decay_factor=lr_decay_factor,
                         early_stopping=early_stopping,
                         should_freeze_aux_models=should_freeze_aux_models,
                         nncf_metainfo=nncf_metainfo,
                         compression_ctrl=compression_ctrl,
                         initial_lr=initial_lr,
                         target_metric=target_metric,
                         lr_finder=lr_finder,
                         use_ema_decay=use_ema_decay,
                         ema_decay=ema_decay)

        # `loss_name` is a comma-separated pair: one softmax-type loss for
        # the multiclass heads and one multilabel loss, in either order.
        loss_names = loss_name.split(',')
        assert len(loss_names) == 2
        if loss_names[0] in ['softmax', 'am_softmax']:
            sm_loss_name, multilabel_loss_name = loss_names[0], loss_names[1]
        else:
            sm_loss_name, multilabel_loss_name = loss_names[1], loss_names[0]
        assert sm_loss_name in ['softmax', 'am_softmax']
        assert multilabel_loss_name in ['am_binary', 'bce', 'asl']
        # BUGFIX: the original compared the joined `loss_name` string to
        # 'am_binary'; that string always contains a comma here (it holds
        # two names), so the margin check could never fire for the
        # AM-binary loss.  Compare the parsed multilabel loss name instead.
        if sm_loss_name == 'am_softmax' or multilabel_loss_name == 'am_binary':
            assert m >= 0.0

        self.clip_grad = clip_grad
        self.enable_rsc = enable_rsc
        # SAM requires a two-step update; all models must agree on it.
        self.enable_sam = isinstance(self.optims[self.main_model_name], SAM)
        for model_name in self.get_model_names():
            assert isinstance(self.optims[model_name], SAM) == self.enable_sam, "SAM must be enabled \
                for all models or none of them"
        self.prev_smooth_metric = 0.
        self.mix_precision = mix_precision
        self.scaler = GradScaler(enabled=mix_precision)
        self.ml_losses = []
        self.loss_kl = nn.KLDivLoss(reduction='batchmean')

        # Head layout (number of multiclass heads, per-head logit ranges,
        # number of multilabel classes) comes from the training dataset.
        self.mixed_cls_heads_info = self.datamanager.train_loader.dataset.mixed_cls_heads_info
        self.multiclass_loss = None
        self.multilabel_loss = None
        if self.mixed_cls_heads_info['num_multiclass_heads'] > 0:
            if sm_loss_name == 'softmax':
                self.multiclass_loss = CrossEntropyLoss(
                    use_gpu=self.use_gpu,
                    label_smooth=label_smooth,
                    conf_penalty=conf_penalty,
                    scale=self.am_scale
                )
            elif sm_loss_name == 'am_softmax':
                self.multiclass_loss = AMSoftmaxLoss(
                    use_gpu=self.use_gpu,
                    label_smooth=label_smooth,
                    margin_type=margin_type,
                    aug_type=aug_type,
                    conf_penalty=conf_penalty,
                    m=m,
                    s=self.am_scale,
                    pr_product=pr_product,
                    symmetric_ce=symmetric_ce,
                )

        if self.mixed_cls_heads_info['num_multilabel_classes'] > 0:
            if multilabel_loss_name == 'asl':
                self.multilabel_loss = AsymmetricLoss(
                    gamma_neg=asl_gamma_neg,
                    gamma_pos=asl_gamma_pos,
                    probability_margin=asl_p_m,
                    label_smooth=label_smooth,
                )
            elif multilabel_loss_name == 'bce':
                # BCE is ASL with both focusing parameters and margin at 0.
                self.multilabel_loss = AsymmetricLoss(
                    gamma_neg=0,
                    gamma_pos=0,
                    probability_margin=0,
                    label_smooth=label_smooth,
                )
            elif multilabel_loss_name == 'am_binary':
                self.multilabel_loss = AMBinaryLoss(
                    m=m,
                    k=amb_k,
                    t=amb_t,
                    s=self.am_scale,
                    gamma_neg=asl_gamma_neg,
                    gamma_pos=asl_gamma_pos,
                    label_smooth=label_smooth,
                )

    @staticmethod
    def _valid(value):
        # True for values that are set and strictly positive.
        return value is not None and value > 0

    def forward_backward(self, data):
        """Run one optimization step on a batch.

        Returns a (loss_summary, main_model_accuracy) tuple.  When SAM is
        enabled (and the LR finder is not running) two forward/backward
        passes are performed: the perturbation step and the actual update.
        """
        n_iter = self.epoch * self.num_batches + self.batch_idx

        imgs, targets = self.parse_data_for_train(data, self.use_gpu)

        model_names = self.get_model_names()
        num_models = len(model_names)
        assert num_models == 1  # mutual learning is not supported in case of multihead training

        steps = [1, 2] if self.enable_sam and not self.lr_finder else [1]
        for step in steps:
            # if sam is enabled then statistics will be written each step, but will be saved only the second time
            # this is made just for convenience
            loss_summary = {}

            for i, model_name in enumerate(model_names):
                loss, model_loss_summary, acc, _ = self._single_model_losses(
                    self.models[model_name], imgs, targets, n_iter, model_name
                )
                loss_summary.update(model_loss_summary)
                if i == 0:  # main model
                    main_acc = acc

            for i, model_name in enumerate(model_names):
                self.optims[model_name].zero_grad()
                if self.compression_ctrl:
                    compression_loss = self.compression_ctrl.loss()
                    loss_summary['compression_loss'] = compression_loss
                    loss += compression_loss

                # backward pass (scaled for mixed precision)
                self.scaler.scale(loss).backward()
                if not self.models[model_name].training:
                    continue

                if self.clip_grad != 0 and step == 1:
                    # Gradients must be unscaled before clipping.
                    self.scaler.unscale_(self.optims[model_name])
                    torch.nn.utils.clip_grad_norm_(self.models[model_name].parameters(), self.clip_grad)
                if not self.enable_sam and step == 1:
                    self.scaler.step(self.optims[model_name])
                    self.scaler.update()
                elif step == 1:
                    assert self.enable_sam
                    if self.clip_grad == 0:
                        # if self.clip_grad == 0 this means that unscale_ wasn't applied,
                        # so we manually unscale the parameters to perform SAM manipulations
                        self.scaler.unscale_(self.optims[model_name])
                    overflow = self.optims[model_name].first_step()
                    self.scaler.update()  # update scaler after first step
                    if overflow:
                        print("Overflow occurred. Skipping step ...")
                        loss_summary['loss'] = loss.item()
                        # skip second step if overflow occurred
                        return loss_summary, main_acc
                else:
                    assert self.enable_sam and step==2
                    # unscale the parameters to perform SAM manipulations
                    self.scaler.unscale_(self.optims[model_name])
                    self.optims[model_name].second_step()
                    self.scaler.update()

        loss_summary['loss'] = loss.item()

        return loss_summary, main_acc

    def _single_model_losses(self, model, imgs, targets, n_iter, model_name):
        """Compute the combined multiclass + multilabel loss for one model.

        Returns (loss, loss_summary, mean_accuracy, scaled_logits).
        Targets with negative labels are treated as "ignore" entries.
        """
        with autocast(enabled=self.mix_precision):
            model_output = model(imgs)
            all_logits = model_output[0] if isinstance(model_output, (tuple, list)) else model_output
            loss_summary = {}
            acc = 0
            trg_num_samples = targets.numel()
            if trg_num_samples == 0:
                raise RuntimeError("There is no samples in a batch!")

            loss = 0.
            for i in range(self.mixed_cls_heads_info['num_multiclass_heads']):
                # Column i of `targets` holds the label of head i; the head's
                # logits occupy a contiguous slice of `all_logits`.
                head_gt = targets[:,i]
                head_logits = all_logits[:,self.mixed_cls_heads_info['head_idx_to_logits_range'][i][0] :
                                           self.mixed_cls_heads_info['head_idx_to_logits_range'][i][1]]
                valid_mask = head_gt >= 0
                head_gt = head_gt[valid_mask].long()
                head_logits = head_logits[valid_mask,:]
                loss += self.multiclass_loss(head_logits, head_gt, scale=self.scales[model_name])
                acc += metrics.accuracy(head_logits, head_gt)[0].item()

            # Average the multiclass contribution over the heads.
            if self.mixed_cls_heads_info['num_multiclass_heads'] > 1:
                loss /= self.mixed_cls_heads_info['num_multiclass_heads']

            if self.multilabel_loss:
                # Remaining target columns / logit columns form one
                # multilabel head.
                head_gt = targets[:,self.mixed_cls_heads_info['num_multiclass_heads']:]
                head_logits = all_logits[:,self.mixed_cls_heads_info['num_single_label_classes']:]
                valid_mask = head_gt >= 0
                head_gt = head_gt[valid_mask].view(*valid_mask.shape)
                head_logits = head_logits[valid_mask].view(*valid_mask.shape)
                # multilabel_loss is assumed to perform no batch averaging
                loss += self.multilabel_loss(head_logits, head_gt, scale=self.scales[model_name]) / head_logits.shape[0]

                acc += metrics.accuracy_multilabel(head_logits * self.scales[model_name], head_gt).item()

        # Mean accuracy over all heads (multiclass + optional multilabel).
        acc /= self.mixed_cls_heads_info['num_multiclass_heads'] + int(self.multilabel_loss is not None)
        loss_summary[model_name] = loss.item()
        scaled_logits = self.scales[model_name] * all_logits

        return loss, loss_summary, acc, scaled_logits

    def exit_on_plateau_and_choose_best(self, accuracy):
        '''
        The function returns a pair (should_exit, is_candidate_for_best).
        The function sets this checkpoint as a candidate for best if either it is the first checkpoint
        for this LR or this checkpoint is better then the previous best.
        The function sets should_exit = True if the LR is the minimal allowed
        LR (i.e. self.lb_lr) and the best checkpoint is not changed for self.train_patience
        epochs.
        '''
        # Note that we take LR of the previous iter, not self.get_current_lr(),
        # since typically the method exit_on_plateau_and_choose_best is called after
        # the method update_lr, so LR drop happens before.
        # If we had used the method self.get_current_lr(), the last epoch
        # before LR drop would be used as the first epoch with the new LR.
        should_exit = False
        is_candidate_for_best = False
        current_metric = np.round(accuracy, 4)
        if self.best_metric >= current_metric:
            # one drop has been done -> start early stopping
            if round(self.current_lr, 8) < round(self.initial_lr, 8):
                self.iter_to_wait += 1
                if self.iter_to_wait >= self.train_patience:
                    print("LOG:: The training should be stopped due to no improvements",
                          f"for {self.train_patience} epochs")
                    should_exit = True
        else:
            self.best_metric = current_metric
            self.iter_to_wait = 0
            is_candidate_for_best = True

        return should_exit, is_candidate_for_best

    @torch.no_grad()
    def _evaluate(self, model, epoch, data_loader, model_name, topk, lr_finder):
        """Run multihead validation; returns the average class accuracy."""
        mhacc, acc, mAP = metrics.evaluate_multihead_classification(data_loader, model, self.use_gpu,
                                                                   self.mixed_cls_heads_info)

        if self.writer is not None and not lr_finder:
            self.writer.add_scalar(f'Val/{model_name}/MHAcc', mhacc, epoch + 1)

        if not lr_finder:
            print(f'** Results ({model_name}) **')
            print(f'MHAcc: {mhacc:.2%}')
            print(f'mAP: {mAP:.2%}')
            print(f'avgClsAcc: {acc:.2%}')

        return acc

    def _finalize_training(self):
        # Nothing to finalize for this engine.
        pass
| [
"torch.cuda.amp.autocast",
"torchreid.losses.AMBinaryLoss",
"torchreid.metrics.accuracy_multilabel",
"torch.nn.KLDivLoss",
"torchreid.losses.CrossEntropyLoss",
"torchreid.metrics.evaluate_multihead_classification",
"torchreid.losses.AMSoftmaxLoss",
"torchreid.metrics.accuracy",
"torch.cuda.amp.GradS... | [((12411, 12426), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12424, 12426), False, 'import torch\n'), ((3083, 3116), 'torch.cuda.amp.GradScaler', 'GradScaler', ([], {'enabled': 'mix_precision'}), '(enabled=mix_precision)\n', (3093, 3116), False, 'from torch.cuda.amp import GradScaler, autocast\n'), ((3169, 3204), 'torch.nn.KLDivLoss', 'nn.KLDivLoss', ([], {'reduction': '"""batchmean"""'}), "(reduction='batchmean')\n", (3181, 3204), False, 'from torch import nn\n'), ((11728, 11749), 'numpy.round', 'np.round', (['accuracy', '(4)'], {}), '(accuracy, 4)\n', (11736, 11749), True, 'import numpy as np\n'), ((12534, 12640), 'torchreid.metrics.evaluate_multihead_classification', 'metrics.evaluate_multihead_classification', (['data_loader', 'model', 'self.use_gpu', 'self.mixed_cls_heads_info'], {}), '(data_loader, model, self.use_gpu,\n self.mixed_cls_heads_info)\n', (12575, 12640), False, 'from torchreid import metrics\n'), ((8541, 8577), 'torch.cuda.amp.autocast', 'autocast', ([], {'enabled': 'self.mix_precision'}), '(enabled=self.mix_precision)\n', (8549, 8577), False, 'from torch.cuda.amp import GradScaler, autocast\n'), ((3521, 3638), 'torchreid.losses.CrossEntropyLoss', 'CrossEntropyLoss', ([], {'use_gpu': 'self.use_gpu', 'label_smooth': 'label_smooth', 'conf_penalty': 'conf_penalty', 'scale': 'self.am_scale'}), '(use_gpu=self.use_gpu, label_smooth=label_smooth,\n conf_penalty=conf_penalty, scale=self.am_scale)\n', (3537, 3638), False, 'from torchreid.losses import AMSoftmaxLoss, CrossEntropyLoss\n'), ((4378, 4501), 'torchreid.losses.AsymmetricLoss', 'AsymmetricLoss', ([], {'gamma_neg': 'asl_gamma_neg', 'gamma_pos': 'asl_gamma_pos', 'probability_margin': 'asl_p_m', 'label_smooth': 'label_smooth'}), '(gamma_neg=asl_gamma_neg, gamma_pos=asl_gamma_pos,\n probability_margin=asl_p_m, label_smooth=label_smooth)\n', (4392, 4501), False, 'from torchreid.losses import AsymmetricLoss, AMBinaryLoss\n'), ((3819, 4034), 
'torchreid.losses.AMSoftmaxLoss', 'AMSoftmaxLoss', ([], {'use_gpu': 'self.use_gpu', 'label_smooth': 'label_smooth', 'margin_type': 'margin_type', 'aug_type': 'aug_type', 'conf_penalty': 'conf_penalty', 'm': 'm', 's': 'self.am_scale', 'pr_product': 'pr_product', 'symmetric_ce': 'symmetric_ce'}), '(use_gpu=self.use_gpu, label_smooth=label_smooth, margin_type=\n margin_type, aug_type=aug_type, conf_penalty=conf_penalty, m=m, s=self.\n am_scale, pr_product=pr_product, symmetric_ce=symmetric_ce)\n', (3832, 4034), False, 'from torchreid.losses import AMSoftmaxLoss, CrossEntropyLoss\n'), ((4684, 4778), 'torchreid.losses.AsymmetricLoss', 'AsymmetricLoss', ([], {'gamma_neg': '(0)', 'gamma_pos': '(0)', 'probability_margin': '(0)', 'label_smooth': 'label_smooth'}), '(gamma_neg=0, gamma_pos=0, probability_margin=0, label_smooth\n =label_smooth)\n', (4698, 4778), False, 'from torchreid.losses import AsymmetricLoss, AMBinaryLoss\n'), ((4966, 5100), 'torchreid.losses.AMBinaryLoss', 'AMBinaryLoss', ([], {'m': 'm', 'k': 'amb_k', 't': 'amb_t', 's': 'self.am_scale', 'gamma_neg': 'asl_gamma_neg', 'gamma_pos': 'asl_gamma_pos', 'label_smooth': 'label_smooth'}), '(m=m, k=amb_k, t=amb_t, s=self.am_scale, gamma_neg=\n asl_gamma_neg, gamma_pos=asl_gamma_pos, label_smooth=label_smooth)\n', (4978, 5100), False, 'from torchreid.losses import AsymmetricLoss, AMBinaryLoss\n'), ((10373, 10448), 'torchreid.metrics.accuracy_multilabel', 'metrics.accuracy_multilabel', (['(head_logits * self.scales[model_name])', 'head_gt'], {}), '(head_logits * self.scales[model_name], head_gt)\n', (10400, 10448), False, 'from torchreid import metrics\n'), ((9545, 9583), 'torchreid.metrics.accuracy', 'metrics.accuracy', (['head_logits', 'head_gt'], {}), '(head_logits, head_gt)\n', (9561, 9583), False, 'from torchreid import metrics\n')] |
from matplotlib import pylab as plt
from numpy import arange, cos, sin
def calculate_split(seisR, seisT, azimuth, plot=False, ax=None):
    """Grid-search shear-wave splitting parameters.

    Scans fast-axis angles `phi` and delay times `dt`, minimising the
    second eigenvalue of the covariance matrix of the rotated,
    delay-corrected components (lambda2, as in Silver and Chan).

    NOTE(review): `seisR`/`seisT` expose `.stats.delta` and `.data`, so
    they look like ObsPy Trace objects -- confirm against the callers.

    Returns
    -------
    tuple
        (best delay time in seconds, best angle in radians).
    """
    from numpy import zeros, pi
    from numpy.linalg import eig
    from scipy.signal import tukey
    C = zeros(4).reshape(2, 2)
    # Search grids: angles in radians, delay times in seconds.
    phis = arange(-pi / 2., pi / 2., 0.05)
    dts = arange(0.0, 4, 0.05)
    lams = zeros(len(phis) * len(dts)).reshape(len(dts), len(phis))
    minlam = 1E25
    delta=seisR.stats.delta
    for ii, dt in enumerate(dts):
        for jj, phi in enumerate(phis):
            nsamp = int(dt / delta)  # trial delay expressed in samples
            #phi is angle clockwise of seisE
            phir = azimuth - phi
            # NOTE(review): assumes azimuth is near zero (radians) -- confirm.
            assert abs(azimuth) < 10
            # Rotate R/T data into the trial fast/slow frame.
            seis1 = cos(phir) * seisR.data + sin(phir) * seisT.data
            seis2 = -sin(phir) * seisR.data + cos(phir) * seisT.data
            # Taper both components before correlating.
            seis2 = seis2 * tukey(len(seis2), alpha=0.3)
            seis1 = seis1 * tukey(len(seis1), alpha=0.3)
            # Advance the slow component by nsamp samples.
            u2 = seis2[nsamp:]
            u1 = seis1[:len(u2)]
            # u1=u1*tukey(len(u1),alpha=1.0)
            # u1=u1*tukey(len(u1),alpha=1.0)
            # u1=u1*tukey(len(u2))
            # u2=u2*tukey(len(u2))
            # 2x2 covariance matrix of the corrected components.
            c12 = sum(u1 * u2) * delta
            c21 = sum(u2 * u1) * delta
            c11 = sum(u1 * u1) * delta
            c22 = sum(u2 * u2) * delta
            C[0, 0] = c11
            C[1, 0] = c21
            C[0, 1] = c12
            C[1, 1] = c22
            lam, v = eig(C)
            # Get minimum eigenvalue, lambda2 in Silver and Chan
            lams[ii, jj] = min(lam)
            minlam = min(minlam, lams[ii, jj])
            if minlam == lams[ii, jj]:
                iimin = ii
                jjmin = jj
    # Per delay time, count angles whose lambda2 is within 1.5x the minimum;
    # used to report the spread of acceptable split times.
    tmp = sum(lams.T - minlam * 1.50 < 0)
    dtmin = max(dts)
    dtmax = min(dts)
    for ii, each in enumerate(tmp):
        if each > 0:
            dtmin = min([dtmin, dts[ii]])
            dtmax = max([dtmax, dts[ii]])
            continue
    if plot:
        ax.contourf(dts, phis * 180 / pi, lams.T, 25)
        ax.plot(dts[iimin], phis[jjmin] * 180 / pi, '+', color='white', markersize='10', zorder=500000)
        # plt.colorbar()
        if minlam > 0:
            print('min. lambda 2 = %e' % minlam)
            h = plt.contour(dts, phis * 180 / pi, lams.T, levels=[minlam * 1.00, minlam * 2.00], colors=('w'))
        #ax.set_ylabel('Angle (degrees)')
        #ax.set_xlabel('Split Time (s)')
        ax.set_ylabel(r'$\phi^S$',rotation='horizontal')
        ax.set_xlabel(r'$\delta t$')
        ax.tick_params(labelsize=6)
        ax.yaxis.set_label_coords(-0.05, 1.02)
        ax.xaxis.set_label_coords(1.02, -0.05)
        plt.xticks(rotation=90)
        plt.yticks(rotation=90)
        for tick in ax.yaxis.get_majorticklabels():
            tick.set_horizontalalignment("right")
        # plt.title('Split Range: %.1f - %.1f s' % (dtmin, dtmax))
        # plt.show()
    return dts[iimin], phis[jjmin]
"matplotlib.pylab.xticks",
"numpy.zeros",
"numpy.linalg.eig",
"matplotlib.pylab.contour",
"matplotlib.pylab.yticks",
"numpy.sin",
"numpy.arange",
"numpy.cos"
] | [((281, 314), 'numpy.arange', 'arange', (['(-pi / 2.0)', '(pi / 2.0)', '(0.05)'], {}), '(-pi / 2.0, pi / 2.0, 0.05)\n', (287, 314), False, 'from numpy import arange, cos, sin\n'), ((323, 343), 'numpy.arange', 'arange', (['(0.0)', '(4)', '(0.05)'], {}), '(0.0, 4, 0.05)\n', (329, 343), False, 'from numpy import arange, cos, sin\n'), ((2647, 2670), 'matplotlib.pylab.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (2657, 2670), True, 'from matplotlib import pylab as plt\n'), ((2679, 2702), 'matplotlib.pylab.yticks', 'plt.yticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (2689, 2702), True, 'from matplotlib import pylab as plt\n'), ((246, 254), 'numpy.zeros', 'zeros', (['(4)'], {}), '(4)\n', (251, 254), False, 'from numpy import zeros, pi\n'), ((1455, 1461), 'numpy.linalg.eig', 'eig', (['C'], {}), '(C)\n', (1458, 1461), False, 'from numpy.linalg import eig\n'), ((2237, 2332), 'matplotlib.pylab.contour', 'plt.contour', (['dts', '(phis * 180 / pi)', 'lams.T'], {'levels': '[minlam * 1.0, minlam * 2.0]', 'colors': '"""w"""'}), "(dts, phis * 180 / pi, lams.T, levels=[minlam * 1.0, minlam * \n 2.0], colors='w')\n", (2248, 2332), True, 'from matplotlib import pylab as plt\n'), ((712, 721), 'numpy.cos', 'cos', (['phir'], {}), '(phir)\n', (715, 721), False, 'from numpy import arange, cos, sin\n'), ((737, 746), 'numpy.sin', 'sin', (['phir'], {}), '(phir)\n', (740, 746), False, 'from numpy import arange, cos, sin\n'), ((806, 815), 'numpy.cos', 'cos', (['phir'], {}), '(phir)\n', (809, 815), False, 'from numpy import arange, cos, sin\n'), ((781, 790), 'numpy.sin', 'sin', (['phir'], {}), '(phir)\n', (784, 790), False, 'from numpy import arange, cos, sin\n')] |
"""
Auxilary functions for working with persistence diagrams.
"""
import itertools
import numpy as np
def union_vals(A, B):
    """Helper function for summing grid landscapes.

    Pads whichever of the two arrays has fewer rows with all-zero rows
    so that both come back with the same number of rows.
    """
    gap = B.shape[0] - A.shape[0]
    if gap > 0:
        # A is shorter: append `gap` zero rows.
        A = np.pad(A, pad_width=((0, gap), (0, 0)))
    elif gap < 0:
        # B is shorter: append `-gap` zero rows.
        B = np.pad(B, pad_width=((0, -gap), (0, 0)))
    return A, B
def union_crit_pairs(A, B):
    """Helper function for summing landscapes.

    Pairs up the depth-k functions of the two landscapes and sums them
    pointwise; when one landscape has more depth levels, its extra
    functions are carried over unchanged.
    """
    A.compute_landscape()
    B.compute_landscape()
    summed = []
    # Walk the two lists of functions in lockstep, padding with None.
    for fn_a, fn_b in itertools.zip_longest(A.critical_pairs, B.critical_pairs):
        if fn_a is None:
            # B has more depth levels.
            summed.append(fn_b)
        elif fn_b is None:
            # A has more depth levels.
            summed.append(fn_a)
        else:
            # Sum in slope representation, then convert back to positions.
            combined = sum_slopes(pos_to_slope_interp(fn_a), pos_to_slope_interp(fn_b))
            summed.append(slope_to_pos_interp(combined))
    return summed
def pos_to_slope_interp(l: list) -> list:
    """Convert positions of critical pairs to (x-value, slope) pairs.

    Intended for internal use. Inverse function of `slope_to_pos_interp`.

    Result
    ------
    list
        [(xi,mi)] for i in len(function in landscape)
    """
    # Slope of each segment between consecutive critical points.
    pairs = [
        [p0[0], (p1[1] - p0[1]) / (p1[0] - p0[0])]
        for p0, p1 in zip(l, l[1:])
    ]
    # The function is flat (slope 0) after the final critical point.
    pairs.append([l[-1][0], 0])
    return pairs
def slope_to_pos_interp(l: list) -> list:
    """Convert positions of (x-value, slope) pairs to critical pairs.

    Intended for internal use. Inverse function of `pos_to_slope_interp`.

    Result
    ------
    list
        [(xi, yi)]_i for i in len(function in landscape)
    """
    # By construction the first critical pair sits on the x-axis.
    result = [[l[0][0], 0]]
    y = 0
    # Integrate each slope segment to recover the y-values.
    for (x0, m), (x1, _) in zip(l, l[1:]):
        y = y + (x1 - x0) * m
        result.append([x1, y])
    return result
def sum_slopes(a: list, b: list) -> list:
    """
    Sum two piecewise linear functions, each represented as a list
    of pairs (xi,mi), where each xi is the x-value of a critical pair and
    mi is the slope. The input should be of the form of the output of the
    `pos_to_slope_interp' function.

    Result
    ------
    list
    """
    merged = []
    slope_a = slope_b = 0  # running slope of each function
    i = j = 0  # read cursors into a and b (avoids repeated list slicing)
    while i < len(a) or j < len(b):
        a_left = i < len(a)
        b_left = j < len(b)
        if not a_left or (a_left and b_left and a[i][0] > b[j][0]):
            # The next critical x-value comes from b.
            x, slope_b = b[j]
            j += 1
        elif not b_left or (a_left and b_left and a[i][0] < b[j][0]):
            # The next critical x-value comes from a.
            x, slope_a = a[i]
            i += 1
        else:
            # Both functions have a critical pair at the same x-value.
            x, slope_a = a[i]
            x, slope_b = b[j]
            i += 1
            j += 1
        merged.append([x, slope_a + slope_b])
    return merged
def ndsnap_regular(points, *grid_axes):
    """Snap points to the 2d grid determined by grid_axes"""
    # https://stackoverflow.com/q/8457645/717525
    cols = []
    for dim, axis_vals in enumerate(grid_axes):
        # Distance from every grid value to every point along this dimension.
        dist = np.abs(axis_vals[:, np.newaxis] - points[:, dim])
        nearest = dist.argmin(axis=0)
        cols.append(axis_vals[nearest])
    return np.array(cols).T
def _p_norm(p: float, critical_pairs: list = []):
"""
Compute `p` norm of interpolated piecewise linear function defined from list of
critical pairs.
"""
result = 0.0
for l in critical_pairs:
for [[x0, y0], [x1, y1]] in zip(l, l[1:]):
if y0 == y1:
# horizontal line segment
result += (np.abs(y0) ** p) * (x1 - x0)
continue
# slope is well-defined
slope = (y1 - y0) / (x1 - x0)
b = y0 - slope * x0
# segment crosses the x-axis
if (y0 < 0 and y1 > 0) or (y0 > 0 and y1 < 0):
z = -b / slope
ev_x1 = (slope * x1 + b) ** (p + 1) / (slope * (p + 1))
ev_x0 = (slope * x0 + b) ** (p + 1) / (slope * (p + 1))
ev_z = (slope * z + +b) ** (p + 1) / (slope * (p + 1))
result += np.abs(ev_x1 + ev_x0 - 2 * ev_z)
# segment does not cross the x-axis
else:
ev_x1 = (slope * x1 + b) ** (p + 1) / (slope * (p + 1))
ev_x0 = (slope * x0 + b) ** (p + 1) / (slope * (p + 1))
result += np.abs(ev_x1 - ev_x0)
return (result) ** (1.0 / p)
| [
"numpy.array",
"numpy.abs",
"numpy.pad",
"itertools.zip_longest"
] | [((896, 953), 'itertools.zip_longest', 'itertools.zip_longest', (['A.critical_pairs', 'B.critical_pairs'], {}), '(A.critical_pairs, B.critical_pairs)\n', (917, 953), False, 'import itertools\n'), ((4019, 4036), 'numpy.array', 'np.array', (['snapped'], {}), '(snapped)\n', (4027, 4036), True, 'import numpy as np\n'), ((510, 550), 'numpy.pad', 'np.pad', (['B'], {'pad_width': '((0, diff), (0, 0))'}), '(B, pad_width=((0, diff), (0, 0)))\n', (516, 550), True, 'import numpy as np\n'), ((3953, 3965), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (3959, 3965), True, 'import numpy as np\n'), ((4939, 4971), 'numpy.abs', 'np.abs', (['(ev_x1 + ev_x0 - 2 * ev_z)'], {}), '(ev_x1 + ev_x0 - 2 * ev_z)\n', (4945, 4971), True, 'import numpy as np\n'), ((5208, 5229), 'numpy.abs', 'np.abs', (['(ev_x1 - ev_x0)'], {}), '(ev_x1 - ev_x0)\n', (5214, 5229), True, 'import numpy as np\n'), ((396, 408), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (402, 408), True, 'import numpy as np\n'), ((4403, 4413), 'numpy.abs', 'np.abs', (['y0'], {}), '(y0)\n', (4409, 4413), True, 'import numpy as np\n')] |
"""trainloss.py: runner for minimizing training loss of a model.
This file runs an optimizer over a model, with the objective of reducing
the training loss as much as possible. Multiple learning rates can be
specified, and the one which leads to the lowest loss is selected.
"""
import json
from argparse import ArgumentParser
import numpy as np
from eve.exp.runners.utils import EXP_SEED
# Seed NumPy's global RNG before Keras is imported below so that runs are reproducible.
np.random.seed(EXP_SEED)
from keras.optimizers import Optimizer
from eve.optim.eve import Eve
from eve.optim.monitor import EveMonitor
from eve.exp.datasets import Dataset
from eve.exp.models import Model
from eve.exp.callbacks import BatchLossHistory, EpochFullLossHistory
from eve.exp.utils import (build_subclass_object, get_subclass_names,
get_subclass_from_name, save_pkl)
def main():
    """Run experiment for training loss optimization.

    Parses command-line arguments, trains the chosen model on the chosen
    dataset once per candidate learning rate, and pickles the results of
    the learning rate that achieved the lowest final full-batch loss.
    """
    arg_parser = ArgumentParser()
    arg_parser.add_argument("--opt", type=str, required=True,
                            choices=get_subclass_names(Optimizer))
    arg_parser.add_argument("--opt-kwargs", type=json.loads, default="{}")
    arg_parser.add_argument("--lrs", type=float, nargs="+", required=True)
    arg_parser.add_argument("--batch-size", type=int, required=True)
    arg_parser.add_argument("--epochs", type=int, required=True)
    arg_parser.add_argument("--dataset", type=str, required=True,
                            choices=get_subclass_names(Dataset))
    arg_parser.add_argument("--dataset-kwargs", type=json.loads, default="{}")
    arg_parser.add_argument("--model", type=str, required=True,
                            choices=get_subclass_names(Model))
    arg_parser.add_argument("--metrics", type=str, nargs="+")
    arg_parser.add_argument("--save-path", type=str, required=True)
    args = arg_parser.parse_args()
    # Load data
    dataset = build_subclass_object(Dataset, args.dataset, args.dataset_kwargs)
    X, y = dataset.train_data
    # Loop over different learning rates
    best_final_loss = None
    for lr in args.lrs:
        print("lr {}".format(lr))
        # A fresh model per learning rate so runs do not share weights.
        model = get_subclass_from_name(Model, args.model)(dataset)
        callbacks = [
            BatchLossHistory(),
            EpochFullLossHistory(X, y, args.batch_size)
        ]
        args.opt_kwargs["lr"] = lr
        if args.opt == "Eve":
            args.opt_kwargs["loss_min"] = model.loss_min
            callbacks.append(EveMonitor())
        opt = build_subclass_object(Optimizer, args.opt, args.opt_kwargs)
        # Compile and train
        model.model.compile(loss=model.loss, optimizer=opt,
                            metrics=args.metrics)
        model.model.fit(X, y, batch_size=args.batch_size,
                        epochs=args.epochs, callbacks=callbacks)
        # Keep the run with the lowest final full-batch loss seen so far.
        full_losses = callbacks[1].losses
        if best_final_loss is None or full_losses[-1] < best_final_loss:
            best_final_loss = full_losses[-1]
            best_full_losses = full_losses
            best_batch_losses = callbacks[0].batch_losses
            best_lr = lr
            # best_eve_monitor is only bound when using the Eve optimizer;
            # the Eve-only block below mirrors this condition.
            if args.opt == "Eve":
                best_eve_monitor = callbacks[2]
        print()
    # Save results
    save_data = {
        "cmd_args": args,
        "best_full_losses": best_full_losses,
        "best_batch_losses": best_batch_losses,
        "best_lr": best_lr
    }
    if args.opt == "Eve":
        best_eve_data = best_eve_monitor.get_data()
        for k, v in best_eve_data.items():
            save_data["best_{}".format(k)] = v
    save_pkl(save_data, args.save_path)
if __name__ == "__main__":
    main()
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"eve.exp.utils.get_subclass_names",
"eve.exp.utils.get_subclass_from_name",
"eve.exp.callbacks.BatchLossHistory",
"eve.exp.utils.build_subclass_object",
"eve.exp.utils.save_pkl",
"eve.exp.callbacks.EpochFullLossHistory",
"eve.optim.monitor.EveMonitor"
... | [((392, 416), 'numpy.random.seed', 'np.random.seed', (['EXP_SEED'], {}), '(EXP_SEED)\n', (406, 416), True, 'import numpy as np\n'), ((887, 903), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (901, 903), False, 'from argparse import ArgumentParser\n'), ((1850, 1915), 'eve.exp.utils.build_subclass_object', 'build_subclass_object', (['Dataset', 'args.dataset', 'args.dataset_kwargs'], {}), '(Dataset, args.dataset, args.dataset_kwargs)\n', (1871, 1915), False, 'from eve.exp.utils import build_subclass_object, get_subclass_names, get_subclass_from_name, save_pkl\n'), ((3513, 3548), 'eve.exp.utils.save_pkl', 'save_pkl', (['save_data', 'args.save_path'], {}), '(save_data, args.save_path)\n', (3521, 3548), False, 'from eve.exp.utils import build_subclass_object, get_subclass_names, get_subclass_from_name, save_pkl\n'), ((2440, 2499), 'eve.exp.utils.build_subclass_object', 'build_subclass_object', (['Optimizer', 'args.opt', 'args.opt_kwargs'], {}), '(Optimizer, args.opt, args.opt_kwargs)\n', (2461, 2499), False, 'from eve.exp.utils import build_subclass_object, get_subclass_names, get_subclass_from_name, save_pkl\n'), ((1002, 1031), 'eve.exp.utils.get_subclass_names', 'get_subclass_names', (['Optimizer'], {}), '(Optimizer)\n', (1020, 1031), False, 'from eve.exp.utils import build_subclass_object, get_subclass_names, get_subclass_from_name, save_pkl\n'), ((1419, 1446), 'eve.exp.utils.get_subclass_names', 'get_subclass_names', (['Dataset'], {}), '(Dataset)\n', (1437, 1446), False, 'from eve.exp.utils import build_subclass_object, get_subclass_names, get_subclass_from_name, save_pkl\n'), ((1627, 1652), 'eve.exp.utils.get_subclass_names', 'get_subclass_names', (['Model'], {}), '(Model)\n', (1645, 1652), False, 'from eve.exp.utils import build_subclass_object, get_subclass_names, get_subclass_from_name, save_pkl\n'), ((2089, 2130), 'eve.exp.utils.get_subclass_from_name', 'get_subclass_from_name', (['Model', 'args.model'], {}), '(Model, args.model)\n', 
(2111, 2130), False, 'from eve.exp.utils import build_subclass_object, get_subclass_names, get_subclass_from_name, save_pkl\n'), ((2175, 2193), 'eve.exp.callbacks.BatchLossHistory', 'BatchLossHistory', ([], {}), '()\n', (2191, 2193), False, 'from eve.exp.callbacks import BatchLossHistory, EpochFullLossHistory\n'), ((2207, 2250), 'eve.exp.callbacks.EpochFullLossHistory', 'EpochFullLossHistory', (['X', 'y', 'args.batch_size'], {}), '(X, y, args.batch_size)\n', (2227, 2250), False, 'from eve.exp.callbacks import BatchLossHistory, EpochFullLossHistory\n'), ((2412, 2424), 'eve.optim.monitor.EveMonitor', 'EveMonitor', ([], {}), '()\n', (2422, 2424), False, 'from eve.optim.monitor import EveMonitor\n')] |
import numpy as np
from astroquery.vizier import Vizier
from astropy.io import ascii
def get_data(koi_star, nplanets, datatable = 'J/ApJS/217/16/table2'):
    """Download Rowe+15 transit-timing data for one KOI system from Vizier.

    Parameters
    ----------
    koi_star : int
        Integer part of the KOI identifier for the host star.
    nplanets : int
        Number of planets (KOI .01, .02, ...) to extract.
    datatable : str
        Vizier catalog identifier to query.

    Returns
    -------
    tuple of three np.ndarray
        (observed transit times, uncertainties, zero-indexed epochs),
        one entry per planet.
    """
    print("Downloading Rowe+15 data from Vizier...")
    Vizier.ROW_LIMIT = 10000 #full catalog
    cats = Vizier.query_constraints(catalog = datatable,
            KOI = '>'+str(koi_star)+' & <' + str(koi_star+1))
    data = cats[0]
    obs, errs, epochs = [], [], []
    for planet_idx in range(nplanets):
        koi_id = koi_star + .01*(planet_idx + 1)
        rows = data[data['KOI']==koi_id]
        # Transit numbers in the catalog count from 1; shift to zero-indexed epochs.
        transit_idx = np.array(rows['n'], dtype = int) - 1
        t_calc = np.array(rows['tn'], dtype = float)
        ttv = np.array(rows['TTVn'], dtype = float)
        sigma = np.array(rows['e_TTVn'], dtype = float)
        # Observed time = linear-ephemeris prediction plus the measured TTV.
        obs.append(t_calc + ttv)
        errs.append(sigma)
        epochs.append(transit_idx)
    print("Data retrieved!")
    return np.array(obs), np.array(errs), np.array(epochs)
| [
"numpy.array"
] | [((580, 616), 'numpy.array', 'np.array', (["cur_dat['tn']"], {'dtype': 'float'}), "(cur_dat['tn'], dtype=float)\n", (588, 616), True, 'import numpy as np\n'), ((627, 665), 'numpy.array', 'np.array', (["cur_dat['TTVn']"], {'dtype': 'float'}), "(cur_dat['TTVn'], dtype=float)\n", (635, 665), True, 'import numpy as np\n'), ((676, 716), 'numpy.array', 'np.array', (["cur_dat['e_TTVn']"], {'dtype': 'float'}), "(cur_dat['e_TTVn'], dtype=float)\n", (684, 716), True, 'import numpy as np\n'), ((826, 839), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (834, 839), True, 'import numpy as np\n'), ((841, 855), 'numpy.array', 'np.array', (['errs'], {}), '(errs)\n', (849, 855), True, 'import numpy as np\n'), ((857, 873), 'numpy.array', 'np.array', (['epochs'], {}), '(epochs)\n', (865, 873), True, 'import numpy as np\n'), ((514, 547), 'numpy.array', 'np.array', (["cur_dat['n']"], {'dtype': 'int'}), "(cur_dat['n'], dtype=int)\n", (522, 547), True, 'import numpy as np\n')] |
"""
Utilises the powerful tools of Selenium to safely navigate and collect data from websites without the use of an API.
"""
from typing import Tuple
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import numpy as np
import pandas as pd
from time import sleep
import uuid
class AirbnbScraper:
    def __init__(self, slow_internet_speed : bool=False, config : str='default', messages : bool=False):
        """A Webscraper that crawls through Airbnb's website and gathers structured/unstructured data.
        When an instance of Scraper is initialized, a Selenium Webdriver gets the homepage by use
        of the `url` attribute. Then it clicks past the cookie wall (if applicable), and navigates onto
        the main products hub.
        Parameters
        ----------
        slow_internet_speed : bool, default=False
            The crawler is designed to allow for lag whilst loading elements on a page, but users with a
            particularly slow internet speed may cause the crawler to miss elements. A `slow_internet_speed` flag
            allows those users to still enjoy the potential of the scraper. It is not recommended to run the full
            scraper `scrape_all()` with `slow_internet_speed` enabled. This will take > 12 hours.
        config : str, default = 'default'
            Option to configure the selenium webdriver to operate in 'headless' mode or 'default' mode.
        messages : bool, default=False
            Option to activate messages of each successful item saved by the scraper, and any errors if applied.
        Attributes
        ----------
        BATCH_ATTEMPTS : int
            It is common that a Scraper can fail to find an element on a webpage for numerous reasons,
            for example that the element hasn't been loaded yet. `BATCH_ATTEMPTS` allows for this and
            offers up to 25 attempts for the Scraper to locate and pull data from each element it is looking
            for, until the Scraper assumes that the element doesn't exist in the particular page. If
            `slow_internet_speed` is enabled, the attempts limit is increased to 50.
        main_url : str
            The URL for Airbnb's home page, provided for the Selenium webdriver to get upon initialization
            of the Scraper object.
        driver : Selenium Webdriver
            The webdriver that is utilized to crawl through Airbnb's website
        slow_internet_speed : bool
            The crawler is designed to allow for lag whilst loading elements on a page, but users with a
            particularly slow internet speed may cause the crawler to miss elements. A `slow_internet_speed` flag
            allows those users to still enjoy the potential of the scraper. It is not recommended to run the full
            scraper `scrape_all()` with `slow_internet_speed` enabled. This will take > 12 hours.
        messages : bool
            Option to activate messages of each successful item saved by the scraper, and any errors if applied.
        """
        self.main_url = "https://www.airbnb.co.uk/"
        self.slow_internet_speed = slow_internet_speed
        self.driver = None
        # Up to 25 element-lookup retries normally; doubled when the connection is slow.
        self.BATCH_ATTEMPTS = 50 if self.slow_internet_speed else 25
        self.messages = messages
        # Set to True once the cookie wall has been dismissed, so it is only clicked once per session.
        self.COOKIE_CLICKED = False
        # Initialising the selenium webdriver
        options = webdriver.ChromeOptions()
        if config == 'default':
            options.add_experimental_option('excludeSwitches', ['enable-logging'])
            options.add_argument("--start-maximized")
            self.driver = webdriver.Chrome(options=options)
        elif config == 'headless':
            options.add_argument('--no-sandbox')
            options.add_experimental_option('excludeSwitches', ['enable-logging'])
            options.add_argument('--log-level=3')
            options.add_argument('--headless')
            options.add_argument('--disable-gpu')
            options.add_argument("--window-size=1920, 1200")
            options.add_argument('--disable-dev-shm-usage')
            self.driver = webdriver.Chrome(options=options)
            print('Running headless scraper. Do NOT close the program or interrupt the terminal.')
        else:
            raise ValueError(f'Configuration option "{config}" not recognised')
    def get_categories(self, count : int):
        """Gets category names and corresponding urls for each product header in Airbnb's main products page.
        This method first clicks past a cookie wall if applicable. Using the `driver` that has been initialised
        with the Scraper object, this method located through and clicks each header button in the top menu bar of
        the main products page. When each header is clicked, the category name and the current url of that clicked
        header are stored into a zip object.
        Parameters
        ----------
        count : int , optional
            When specified, the `count` parameter will set a limit to the number of headers that are clicked through
            and consequently, the number of categories and corresponding urls that are returned. This parameter is optional,
            and defaulted to 25 which is the number of total headers that populate Airbnb's products page.
        Returns
        -------
        zip of < tuples of (str, str) >
            A zipped object of tuples of the category name, followed by the url of opening that header.
        Raises
        ------
        ValueError
            If the count parameter is 0 or negative
        """
        # Getting the Airbnb url and clicking past the cookie wall
        self.driver.get(self.main_url)
        sleep(5 if self.slow_internet_speed else 2)
        self._cookie_check_and_click()
        # Click the I'm flexible to get to the product browser
        flexible_button = self.driver.find_element(By.LINK_TEXT,"I’m flexible")
        flexible_button.click()
        sleep(5 if self.slow_internet_speed else 2)
        # The count variable is an input to stop the header yield at any given index of iteration
        # for example: if count was set to 3, then the loop below to collect header links/titles
        # would break on the third iteration.
        if count > 29: # TODO: the max header count is hard-coded; derive it from the page instead
            count = 29
        if count < 1:
            raise ValueError('Count must be a positive integer greater than 1')
        #self._cookie_check_and_click()
        # START of the header yield code. This uses seleniums webdriver
        # to both click through and catch the header names and urls of each of the
        header_container = self.driver.find_element(By.XPATH, '/html/body/div[5]/div/div/div[1]/div/div/div/div/div/div[1]/div/nav/div/div/div/div/div[2]/div/div[1]/div/div[3]')
        headers = header_container.find_elements(By.XPATH, "./*")
        headers.pop()
        # First, get the text for the headers up to the 'more'. (Not all headers are visible immediately)
        # if the count is lower than current visible headers, this is sliced at the bottom
        categories = []
        category_links = []
        for header in headers:
            categories.append(header.text)
        categories.remove('More')
        categories = categories[:count]
        # Click through the visible headers to get urls for each one (except for 'More')
        counted = 0
        for i in range(len(headers)):
            headers[i].click()
            if i!= len(headers) - 1:
                category_links.append(self.driver.current_url)
                counted +=1
                # Break the entire function if count is met
                if counted == count:
                    return zip(categories, category_links)
            sleep(3 if self.slow_internet_speed else 1)
            # Click the 'More' header and get the elements for rest of headers while they're visible
            if i == len(headers) - 1:
                sleep(1.5 if self.slow_internet_speed else 0.5)
                more_menu = headers[i].find_element(By.XPATH, '//*[@id="flexible-destination-more-menu"]')
                more_headers = more_menu.find_elements(By.XPATH, "./*")
                # The offset means indexing goes 0, 0, 1, 2, 3, 4,... because of the nature of the 'More' column
                for j in range(-1,len(more_headers)-1):
                    if j == -1:
                        j+=1
                    # Click the 'More' header and get the elements for rest of headers while they're visible
                    # the difficulty with such a dynamic page is that this has to be repeatedly done
                    more_menu = headers[i].find_element(By.XPATH, '//*[@id="flexible-destination-more-menu"]')
                    more_headers = more_menu.find_elements(By.XPATH, "./*")
                    sleep(1.5 if self.slow_internet_speed else 0.5)
                    # Get the category name from header
                    categories.append(more_headers[j].text)
                    more_headers[j].click()
                    sleep(1.5 if self.slow_internet_speed else 0.5)
                    # After clicking that header, get the corresponding header url for it
                    category_links.append(self.driver.current_url)
                    headers[i].click()
                    counted+=1
                    # Break the entire function if count is met
                    if counted == count:
                        return zip(categories, category_links)
    def __scroll(self, driver : webdriver, SCROLL_PAUSE_TIME : int):
        """Scroll to the bottom of the page, pausing between scrolls so lazily-loaded content can appear."""
        # Get scroll height
        last_height = driver.execute_script("return document.body.scrollHeight")
        while True:
            # Scroll down to bottom
            self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            # Wait to load page
            sleep(SCROLL_PAUSE_TIME)
            # Calculate new scroll height and compare with last scroll height
            new_height = self.driver.execute_script("return document.body.scrollHeight")
            if new_height == last_height:
                return
            last_height = new_height
    def get_products(self, header_url : str, SCROLLING : bool = True):
        """ Returns an array of the product urls for a homepage with a certain header clicked.
        Parameters
        ----------
        header_url : str
            the url of the header to be opened by the `driver` where the corresponding products can be found.
        SCROLLING : bool , default=True
            When a header page is opened, the lazy loading of the Airbnb's website prevents all products from
            being located. When `SCROLLING` is set to True, this calls a protected method that scrolls through the
            entire page so that every product is loaded and therefore the url can be stored. Setting to False is a
            clever way of electing to only take a sample of the products from each header page. This parameter is
            optional and defaulted to True.
        Returns
        -------
        product_links : np.array of str
            A numpy array of strings containing the urls for each product that has been found.
        """
        self.driver.get(header_url)
        sleep(1.5 if self.slow_internet_speed else 0.5)
        self._cookie_check_and_click()
        self.driver.execute_script("document.body.style.zoom='75%'")
        sleep(5 if self.slow_internet_speed else 2)
        # Set to FALSE when testing/sampling
        if SCROLLING:
            pause_time = 7 if self.slow_internet_speed else 3.5
            self.__scroll(self.driver, pause_time)
        # NOTE(review): if every attempt below fails, `product_links` is never
        # bound and the return statement raises NameError.
        for i in range(self.BATCH_ATTEMPTS):
            try:
                # Store all links for locations listed on page in array
                places_container = self.driver.find_element(By.XPATH, '//*[@id="site-content"]/div/div/div/div/div/div/div/div/div[2]/div/div/div')
                places = places_container.find_elements(By.XPATH, "./*" )
                product_links = np.array([])
                for place in places:
                    href = place.find_element(By.TAG_NAME, 'a')
                    url = f"{href.get_attribute('href')}"
                    product_links = np.append(product_links,url)
            except Exception as e:
                pass
        return product_links
    @staticmethod
    def string_clean(text: str, str_type : str) -> str:
        """ Takes in raw text from elements on Airbnb product pages and formats them into parsable strings.
        Text data from elements in a product page on Airbnb's website come in a variety of forms not so easily
        understandable by machines. This static method is necessary to essentially clean the text from certain elements
        in each product page.
        Parameters
        ----------
        text : str
            The raw text data from the element from the product page.
        str_type : {'info', 'review count', 'amenities'}
            The nature of the text data differs for each element of the product webpage, thus the pythonic
            strategem for cleaning the text data must do the same. Specifying which page element the text comes
            from will specify which set of programmatic instructions the method needs to take in order to clean
            the text data.
        Returns
        -------
        if `str_type` is 'info':
            output: list of [tuples of (str, int)]
                where the strings are labels of guests, bedrooms beds, and bathrooms, and the corresponding
                int is their count.
        if `str_type` is 'review count`:
            output: int
                Number of reviews for product.
        if `str_type` is 'amenities':
            output: int
                Number of amenities for product.
        Raises
        ------
        ValueError
            If the inputted string for `str_type` doesn't match any of the accepted strings.
        """
        if str_type == 'info':
            output = []
            # Organises the text into a clean list of
            # ['x guests', 'x bedrooms', 'x beds', 'x bathrooms']
            # this is much easier to be iterated over and parsed
            text = text.replace('·', '')
            text = text.split(' ')
            clean_info = []
            for i in text:
                clean_info.append(i)
            for val in clean_info:
                label = val.split()[1]
                # unlikely to happen, but if theres an anomaly in the site text,
                # the certain element is ignored and this doesn't mess up the data
                if label not in ['guests', 'guest', 'bedrooms', 'bedroom',
                'beds', 'bed', 'bathrooms' ,'bathroom', 'private bathroom']:
                    pass
                else:
                    # An element with a count of '1' (e.g. 1 bedroom) has no 's' on the end, which
                    # will confuse the dictionary and dataframe. So all singular instances have an 's' added
                    if label[-1] != 's':
                        label += 's'
                    # The output is a list of tuples: [('guests', x), ('bedrooms', x) ...]
                    output.append((label, float(val.split()[0])))
            return output
        elif str_type == 'review count':
            # Gets rid of brackets if they are there
            text = text.replace('(','')
            text = text.replace(')','')
            # Split up the number and reviews string into [x, 'Reviews']
            text = text.split(' ')
            output = text[0]
            return int(output)
        elif str_type == 'amenities':
            # Simply filters out the numerical value in the text:
            # "Show all xx amenities"
            output = int(''.join(filter(str.isdigit, text)))
            return output
        else:
            raise ValueError('Please specify a distinct part of the page to clean. Have you checked your spelling?')
    def __scrape_product_images(self, driver : webdriver):
        """Collect the `src` attribute of every image in a product page's photo gallery."""
        images_container = driver.find_element(By.XPATH, '//*[@id="site-content"]/div/div[1]/div[1]/div[2]/div/div/div/div/div/div/div/div[1]/div')
        images = images_container.find_elements(By.TAG_NAME, 'img')
        if images is None:
            raise Exception
        sources = []
        for image in images:
            sources.append(image.get_attribute('src'))
        return sources
    def scrape_product_data(self, product_url: str, ID : uuid.uuid4, category : str, message : bool=False):
        """Gets a page of an Airbnb product and scrapes structured and unstructured data. Utilises both Selenium and BeautifulSoup.
        Parameters
        ----------
        product_url : str
            The url of the product page to be scraped
        ID : int
            The unique ID assigned to the particular product. This will be used to identify the data in a database/data lake.
        category : str
            The category name corresponding to where a product is found. This can be read on the headers tab on Airbnb's website.
        message : bool, default=False
            With the `message` flag enabled, the product scrape status will be logged to the terminal, as well as whether any
            images were saved.
        Returns
        -------
        product_dict : dict of {str : any}
            Structured data stored in the form of a dictionary containing relevant and human readable information about the product.
        image_data : list of [str, str, ...]
            A tuple of source links for the images found on Airbnb's website. These can be transformed into image files.
        """
        self._cookie_check_and_click()
        # Initialising default dict and adding the passed ID and
        # category parameters
        product_dict = dict()
        product_dict['ID'] = ID
        product_dict['Category'] = category
        # Getting the product page with driver
        self.driver.get(product_url)
        sleep(3 if self.slow_internet_speed else 0.5)
        for i in range(self.BATCH_ATTEMPTS):
            try:
                image_data = self.__scrape_product_images(self.driver)
                if image_data:
                    break
                else:
                    raise Exception
            except Exception as e:
                continue
        # Getting data from page. Looped through multiple attempts
        # to allow for errors due to elements not being loaded yet
        for j in range(self.BATCH_ATTEMPTS):
            try:
                # Product title (str)
                for i in range(self.BATCH_ATTEMPTS):
                    try:
                        title_element = self.driver.find_element(By.TAG_NAME, 'h1')
                        title = title_element.text
                        product_dict['Title'] = title_element.text
                        break
                    except Exception as e:
                        continue
                # Product Location (str)
                for i in range(self.BATCH_ATTEMPTS):
                    try:
                        location_elem = self.driver.find_element(By.XPATH, '//*[@id="site-content"]/div/div[1]/div[1]/div[1]/div/div/div/div/section/div[2]/div[1]/span[5]/button/span')
                        location = location_elem.text.replace(',', '')
                        product_dict['Location'] = location
                        break
                    except Exception as e:
                        continue
                # Counts for beds, bedrooms, beds and bathrooms (all int)
                for i in range(self.BATCH_ATTEMPTS):
                    try:
                        info_container = self.driver.find_element(By.XPATH, '//*[@id="site-content"]/div[1]/div/div/section/div/div/div/div[1]/ol' )
                        info = self.string_clean(
                            info_container.text,
                            str_type = 'info')
                        for val in info:
                            product_dict[val[0]] = val[1]
                        break
                    except Exception as e:
                        continue
                # Number of Reviews (int)
                for i in range(self.BATCH_ATTEMPTS):
                    try:
                        review_elem = self.driver.find_element(By.XPATH, '//*[@id="site-content"]/div/div[1]/div[1]/div[1]/div/div/div/div/section/div[2]/div[1]/span[1]/span[3]/button')
                        reviews = self.string_clean(review_elem.text, 'review count')
                        product_dict['Review_Count'] = reviews
                        break
                    except Exception as e:
                        continue
                # Overall star rating (float)
                for i in range(self.BATCH_ATTEMPTS):
                    try:
                        rating_elem = self.driver.find_element(By.XPATH, '//*[@id="site-content"]/div/div[1]/div[1]/div[1]/div/div/div/div/section/div[2]/div[1]/span[1]/span[2]')
                        overall_rating = rating_elem.text.replace('·', '')
                        product_dict['Overall_Rate'] = float(overall_rating)
                        break
                    except Exception as e:
                        continue
                # Price per night (float)
                for i in range(self.BATCH_ATTEMPTS):
                    try:
                        price_elem = self.driver.find_element(By.XPATH, '//*[@id="site-content"]/div/div[1]/div[3]/div/div[2]/div/div/div[1]/div/div/div/div/div/div/div[1]/div[1]/div[1]/div/div/div/span[1]')
                        price_pNight = price_elem.text[1:] # Gets rid of £
                        product_dict['Price_Night'] = float(price_pNight)
                        break
                    except Exception as e:
                        continue
                # Sub ratings (list of floats)
                for i in range(self.BATCH_ATTEMPTS):
                    try:
                        subratings_container = self.driver.find_element(By.XPATH, '//*[@id="site-content"]/div/div[1]/div[4]/div/div/div/div[2]/div[2]/div/div')
                        subratings_elements = subratings_container.find_elements(By.XPATH, "./*")
                        for elem in subratings_elements:
                            subrating = elem.text.split('\n')
                            product_dict[subrating[0] + '_rate'] = subrating[1]
                        break
                    except Exception as e:
                        continue
                # How many amenities each location has (int)
                for i in range(self.BATCH_ATTEMPTS):
                    try:
                        amenities_elem = self.driver.find_element(By.XPATH, '//*[@id="site-content"]/div[5]/div/div[2]/section/div[4]/a')
                        amenities_count = self.string_clean(amenities_elem.text, 'amenities')
                        product_dict['amenities_count'] = amenities_count
                        break
                    except Exception as e:
                        continue
                # Product URL (str)
                product_dict['url'] = product_url
                # Catches if html hasn't been parsed properly due to loading lag, and re-runs the loop
                if product_dict['Title'] == None \
                    or product_dict['Location'] == None\
                    or product_dict['url'] == None:
                    sleep(1 if self.slow_internet_speed else 0.25)
                    raise ValueError
                else:
                    break
            except Exception as e:
                continue
        if message:
            if image_data:
                print(f'Logged product "{title}" as {ID}. Images found: {len(image_data)}')
            else:
                print(f'Logged product "{title}" as {ID}. FAILED TO SAVE IMAGES.')
        return product_dict, image_data
    def scrape_all(self, sample : bool = False):
        """Crawls through the entire "I'm Feeling Lucky section" of Airbnb and collects structured and unstructured data from each product.
        Structured data is stored in the form of a pandas dataframe, and unstructured data (images) are stored in a dictionary of corresponding
        product IDs as keys, and tuples of source links for each product as the values.
        Parameters
        ----------
        sample : bool, default=False
            Scraping the entirety of Airbnb's products hub is a large task. The `sample` logic, when set to true, severely restricts the number of products
            that the crawler will try to scrape, in the event that one simply wishes to only scrape a few products, or quickly test that the module is functioning.
        Returns
        -------
        df : pandas.DataFrame
            The pandas dataframe containing all of the information for each product scraped in a neat and structured fashion.
        image_dict : dict of {int : tuple of (str, str, ...)}
            Image data is stored in a dictionary of corresponding product IDs as keys, and tuples of source links for each product as the values.
        """
        # Primary key, pandas dataframe and a missing data count initialised
        #ID = 1000
        df = pd.DataFrame()
        image_dict = dict()
        # Establishing parameters to the called functions that are dependant on the boolean condition of sample
        scroll = not sample
        to_count = 2 if sample else 25
        try:
            # Getting the zipped object of header names and urls
            categories = self.get_categories(count = to_count)
            # Iterating through each category yielded
            for header, link in categories:
                # All product links are gathered into self.product_links.
                # When a new category is iterated, self.product_links is reassigned with the new products
                # For a sample, scrolling is locked so only top 20 products are accounted for
                links = self.get_products(link, SCROLLING=scroll)
                # Iterating over each product url in a category
                for prod_url in links:
                    try:
                        ID = uuid.uuid4()
                        # Calling the scrape_product() function and logging data to the initialised pandas dataframe
                        product, images = self.scrape_product_data(prod_url, ID, header, message=self.messages)
                        # NOTE(review): DataFrame.append was removed in pandas 2.0 —
                        # pin pandas<2 or migrate to pd.concat.
                        df = df.append(product, ignore_index=True)
                        image_dict[ID] = images
                    except Exception as e:
                        # When a product page fails to give information, this is logged as missing data and doesn't break code
                        print(f'Error on product{ID}: {e}')
        finally:
            # Regardless of errors or interruptions, all yielded data is returned in a pandas dataframe
            self.driver.quit()
            return df, image_dict
    def _cookie_check_and_click(self):
        """Dismiss Airbnb's cookie wall once per session; a no-op on subsequent calls."""
        if self.COOKIE_CLICKED:
            return
        else:
            for i in range(self.BATCH_ATTEMPTS):
                try:
                    cookie_button = self.driver.find_element(By.XPATH, '/html/body/div[5]/div/div/div[1]/div/div[2]/section/div[2]/div[2]/button')
                    cookie_button.click()
                    self.COOKIE_CLICKED = True
                    return
                except Exception as e:
                    pass
| [
"pandas.DataFrame",
"uuid.uuid4",
"time.sleep",
"numpy.append",
"selenium.webdriver.ChromeOptions",
"numpy.array",
"selenium.webdriver.Chrome"
] | [((3508, 3533), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (3531, 3533), False, 'from selenium import webdriver\n'), ((5847, 5890), 'time.sleep', 'sleep', (['(5 if self.slow_internet_speed else 2)'], {}), '(5 if self.slow_internet_speed else 2)\n', (5852, 5890), False, 'from time import sleep\n'), ((6114, 6157), 'time.sleep', 'sleep', (['(5 if self.slow_internet_speed else 2)'], {}), '(5 if self.slow_internet_speed else 2)\n', (6119, 6157), False, 'from time import sleep\n'), ((11449, 11496), 'time.sleep', 'sleep', (['(1.5 if self.slow_internet_speed else 0.5)'], {}), '(1.5 if self.slow_internet_speed else 0.5)\n', (11454, 11496), False, 'from time import sleep\n'), ((11613, 11656), 'time.sleep', 'sleep', (['(5 if self.slow_internet_speed else 2)'], {}), '(5 if self.slow_internet_speed else 2)\n', (11618, 11656), False, 'from time import sleep\n'), ((18341, 18386), 'time.sleep', 'sleep', (['(3 if self.slow_internet_speed else 0.5)'], {}), '(3 if self.slow_internet_speed else 0.5)\n', (18346, 18386), False, 'from time import sleep\n'), ((25678, 25692), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (25690, 25692), True, 'import pandas as pd\n'), ((3729, 3762), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'options'}), '(options=options)\n', (3745, 3762), False, 'from selenium import webdriver\n'), ((7914, 7957), 'time.sleep', 'sleep', (['(3 if self.slow_internet_speed else 1)'], {}), '(3 if self.slow_internet_speed else 1)\n', (7919, 7957), False, 'from time import sleep\n'), ((10042, 10066), 'time.sleep', 'sleep', (['SCROLL_PAUSE_TIME'], {}), '(SCROLL_PAUSE_TIME)\n', (10047, 10066), False, 'from time import sleep\n'), ((4224, 4257), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'options'}), '(options=options)\n', (4240, 4257), False, 'from selenium import webdriver\n'), ((8114, 8161), 'time.sleep', 'sleep', (['(1.5 if self.slow_internet_speed else 0.5)'], {}), '(1.5 if 
self.slow_internet_speed else 0.5)\n', (8119, 8161), False, 'from time import sleep\n'), ((12228, 12240), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12236, 12240), True, 'import numpy as np\n'), ((9001, 9048), 'time.sleep', 'sleep', (['(1.5 if self.slow_internet_speed else 0.5)'], {}), '(1.5 if self.slow_internet_speed else 0.5)\n', (9006, 9048), False, 'from time import sleep\n'), ((9229, 9276), 'time.sleep', 'sleep', (['(1.5 if self.slow_internet_speed else 0.5)'], {}), '(1.5 if self.slow_internet_speed else 0.5)\n', (9234, 9276), False, 'from time import sleep\n'), ((12436, 12465), 'numpy.append', 'np.append', (['product_links', 'url'], {}), '(product_links, url)\n', (12445, 12465), True, 'import numpy as np\n'), ((23836, 23882), 'time.sleep', 'sleep', (['(1 if self.slow_internet_speed else 0.25)'], {}), '(1 if self.slow_internet_speed else 0.25)\n', (23841, 23882), False, 'from time import sleep\n'), ((26644, 26656), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (26654, 26656), False, 'import uuid\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.predictors.mask_rcnn_box_predictor."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.predictors import mask_rcnn_box_predictor as box_predictor
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class MaskRCNNBoxPredictorTest(test_case.TestCase):
  """Shape-contract tests for MaskRCNNBoxPredictor across prediction stages."""

  def _build_arg_scope_with_hyperparams(self,
                                        op_type=hyperparams_pb2.Hyperparams.FC):
    """Builds a hyperparams fn from a minimal text proto.

    Args:
      op_type: Which op family the hyperparams target, either
        hyperparams_pb2.Hyperparams.FC (default) or .CONV.

    Returns:
      A callable usable as the fc/conv hyperparams_fn by the predictor builder.
    """
    hyperparams = hyperparams_pb2.Hyperparams()
    hyperparams_text_proto = """
      activation: NONE
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    text_format.Merge(hyperparams_text_proto, hyperparams)
    hyperparams.op = op_type
    return hyperparams_builder.build(hyperparams, is_training=True)

  def _predict_second_stage(self, image_features, **predictor_kwargs):
    """Builds a stage-2 box predictor and runs it on image_features.

    Shared by the box-prediction tests below so the predictor construction is
    not duplicated; only extra builder kwargs differ per test.

    Args:
      image_features: float32 numpy array of shape
        [batch, height, width, channels].
      **predictor_kwargs: Extra keyword arguments forwarded to
        box_predictor_builder.build_mask_rcnn_box_predictor
        (e.g. share_box_across_classes=True).

    Returns:
      A (box_encodings, class_predictions_with_background) tuple of numpy
      arrays.
    """
    def graph_fn(image_features):
      mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor(
          is_training=False,
          num_classes=5,
          fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(),
          use_dropout=False,
          dropout_keep_prob=0.5,
          box_code_size=4,
          **predictor_kwargs)
      box_predictions = mask_box_predictor.predict(
          [image_features],
          num_predictions_per_location=[1],
          scope='BoxPredictor',
          prediction_stage=2)
      return (box_predictions[box_predictor.BOX_ENCODINGS],
              box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND])
    return self.execute(graph_fn, [image_features])

  def test_get_boxes_with_five_classes(self):
    image_features = np.random.rand(2, 7, 7, 3).astype(np.float32)
    (box_encodings,
     class_predictions_with_background) = self._predict_second_stage(
         image_features)
    # Per-class encodings: [batch, anchors, num_classes, box_code_size].
    self.assertAllEqual(box_encodings.shape, [2, 1, 5, 4])
    # Class scores include background: num_classes + 1 = 6.
    self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6])

  def test_get_boxes_with_five_classes_share_box_across_classes(self):
    image_features = np.random.rand(2, 7, 7, 3).astype(np.float32)
    (box_encodings,
     class_predictions_with_background) = self._predict_second_stage(
         image_features, share_box_across_classes=True)
    # Sharing boxes across classes collapses the class dimension to 1.
    self.assertAllEqual(box_encodings.shape, [2, 1, 1, 4])
    self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6])

  def test_value_error_on_predict_instance_masks_with_no_conv_hyperparms(self):
    # Mask prediction requires conv hyperparams; the builder must reject
    # their absence up front rather than fail later at graph construction.
    with self.assertRaises(ValueError):
      box_predictor_builder.build_mask_rcnn_box_predictor(
          is_training=False,
          num_classes=5,
          fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(),
          use_dropout=False,
          dropout_keep_prob=0.5,
          box_code_size=4,
          predict_instance_masks=True)

  def test_get_instance_masks(self):
    def graph_fn(image_features):
      mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor(
          is_training=False,
          num_classes=5,
          fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(),
          use_dropout=False,
          dropout_keep_prob=0.5,
          box_code_size=4,
          conv_hyperparams_fn=self._build_arg_scope_with_hyperparams(
              op_type=hyperparams_pb2.Hyperparams.CONV),
          predict_instance_masks=True)
      box_predictions = mask_box_predictor.predict(
          [image_features],
          num_predictions_per_location=[1],
          scope='BoxPredictor',
          prediction_stage=3)
      return (box_predictions[box_predictor.MASK_PREDICTIONS],)
    image_features = np.random.rand(2, 7, 7, 3).astype(np.float32)
    mask_predictions = self.execute(graph_fn, [image_features])
    # Per-class masks: [batch, anchors, num_classes, mask_height, mask_width].
    self.assertAllEqual(mask_predictions.shape, [2, 1, 5, 14, 14])

  def test_do_not_return_instance_masks_without_request(self):
    image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)
    mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor(
        is_training=False,
        num_classes=5,
        fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(),
        use_dropout=False,
        dropout_keep_prob=0.5,
        box_code_size=4)
    box_predictions = mask_box_predictor.predict(
        [image_features],
        num_predictions_per_location=[1],
        scope='BoxPredictor',
        prediction_stage=2)
    # Without predict_instance_masks, only the box and class outputs exist.
    self.assertEqual(len(box_predictions), 2)
    self.assertTrue(box_predictor.BOX_ENCODINGS in box_predictions)
    self.assertTrue(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND
                    in box_predictions)
if __name__ == '__main__':
  # Discover and run all test cases in this module via TF's test runner.
  tf.test.main()
| [
"object_detection.protos.hyperparams_pb2.Hyperparams",
"numpy.random.rand",
"tensorflow.compat.v1.test.main",
"object_detection.utils.tf_version.is_tf2",
"google.protobuf.text_format.Merge",
"object_detection.builders.hyperparams_builder.build",
"tensorflow.compat.v1.random_uniform"
] | [((1230, 1249), 'object_detection.utils.tf_version.is_tf2', 'tf_version.is_tf2', ([], {}), '()\n', (1247, 1249), False, 'from object_detection.utils import tf_version\n'), ((6323, 6337), 'tensorflow.compat.v1.test.main', 'tf.test.main', ([], {}), '()\n', (6335, 6337), True, 'import tensorflow.compat.v1 as tf\n'), ((1478, 1507), 'object_detection.protos.hyperparams_pb2.Hyperparams', 'hyperparams_pb2.Hyperparams', ([], {}), '()\n', (1505, 1507), False, 'from object_detection.protos import hyperparams_pb2\n'), ((1716, 1770), 'google.protobuf.text_format.Merge', 'text_format.Merge', (['hyperparams_text_proto', 'hyperparams'], {}), '(hyperparams_text_proto, hyperparams)\n', (1733, 1770), False, 'from google.protobuf import text_format\n'), ((1811, 1867), 'object_detection.builders.hyperparams_builder.build', 'hyperparams_builder.build', (['hyperparams'], {'is_training': '(True)'}), '(hyperparams, is_training=True)\n', (1836, 1867), False, 'from object_detection.builders import hyperparams_builder\n'), ((5565, 5614), 'tensorflow.compat.v1.random_uniform', 'tf.random_uniform', (['[2, 7, 7, 3]'], {'dtype': 'tf.float32'}), '([2, 7, 7, 3], dtype=tf.float32)\n', (5582, 5614), True, 'import tensorflow.compat.v1 as tf\n'), ((2597, 2623), 'numpy.random.rand', 'np.random.rand', (['(2)', '(7)', '(7)', '(3)'], {}), '(2, 7, 7, 3)\n', (2611, 2623), True, 'import numpy as np\n'), ((3730, 3756), 'numpy.random.rand', 'np.random.rand', (['(2)', '(7)', '(7)', '(3)'], {}), '(2, 7, 7, 3)\n', (3744, 3756), True, 'import numpy as np\n'), ((5303, 5329), 'numpy.random.rand', 'np.random.rand', (['(2)', '(7)', '(7)', '(3)'], {}), '(2, 7, 7, 3)\n', (5317, 5329), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.