code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
'''
Created on 25 Dec 2016
@author: dusted-ipro
'''
import numpy as np
import collections
from environment.sensing import closestStar, closestStarSubSet
from environment import world
class baseClan(object):
'''
Base Clan Class
'''
def __init__(self, starIdx, coords, planetIdx, clanId, energyConsumeRate):
'''
Constructor
'''
self.clanId = clanId
self.clanName = 'clan_{}'.format(self.clanId)
#Where's home - these are the planet coords - not star
self.originCoords = coords
self.originName = ''
#Closest Star - index in world.starCoords
self.originStarIdx = starIdx
#Origin Planet
self.originPlanetIdx = planetIdx
#Array of all the clans agents - dict key lookup to world.agents
self.agents = []
#n Agents * n Agents matrix of link strength (2d array)
self.societyNet = np.zeros((len(self.agents), len(self.agents)))
#n Agents * n Agents matrix of demand and supply (3d array)
self.tradeNet = np.zeros((len(self.agents), len(self.agents), 2))
#Resource Knowledge {starIdx:{planetIdx:{'rawMat':X, 'energy':Y}}, planetIdx:{'rawMat':X, 'energy':Y}}}
#Initially populated with all stars - and no knowledge
self.resourceKnowledge = {}
#Agent job queues
#Explorer - (starIdx, starCoords) - unknown places
self.explorerQ = collections.deque()
#Harvestor - this is basically the resource knowledge sorted by distance
self.harvestorQ = collections.deque()
#Clan Global Resource storage
self.demands = {7:{'store':10.0}}
#{tradableId:storeAmount, ...}
self.store = {0:{'store':0.0}, 1:{'store':0.0}, 2:{'store':0.0},
3:{'store':0.0}, 4:{'store':0.0}, 5:{'store':0.0},
6:{'store':0.0}, 7:{'store':0.0}, 8:{'store':0.0},
9:{'store':0.0}, 10:{'store':0.0}}
self.consumes = {7:{'consumeRate':energyConsumeRate, 'store':10.0}}
def popn(self):
'''
Get the clans total population
::return int total number of agents
'''
return len(self.agents)
def agentPositions(self):
'''
Get the locations of all agents in this clan
::return array of agent.positions
'''
return [agent.position for agent in self.agents]
def societyStrength(self):
'''
Get some basic measure of the society network strength
::return float 0 (weak) -> 1(strong)
'''
return np.sum(self.societyNet)/len(self.agents)
def closestUnknownStar(self, coords):
'''
Get the star index of the closest clan unknown (unscanned) system
'''
uk = []
for k in self.resourceKnowledge.keys():
if self.resourceKnowledge[k] == {}:
uk.append(world.starCoords[k])
return closestStarSubSet(coords, uk)
def rxResourceKnowledge(self, inputKnowledge):
'''
Receive resource knowledge from an explorer or other
'''
for inK in inputKnowledge.keys():
#Check if star knowledge exists
if inK not in self.resourceKnowledge.keys():
#New star
self.resourceKnowledge[inK] = inputKnowledge[inK]
#Add all the planets to the harvestorQ - just the star and planet indexes as tuples
for p in inputKnowledge[inK].keys():
self.harvestorQ.append((inK, p, inputKnowledge[inK][p]))
else:
#Star exists - check we have all the planets
for inP in inputKnowledge[inK]:
if inP not in self.resourceKnowledge[inK].keys():
self.resourceKnowledge[inK][inP] = inputKnowledge[inK][inP]
#Add it to the resource Q
self.harvestorQ.append((inK, inP, inputKnowledge[inK][inP]))
#Reorder Harvestor Q - closest first
self.orderHarvestorQ()
def orderHarvestorQ(self):
'''
Take a moment to re-order the harvestorQ by distance from clan origin
So we can just pop the first one off at any stage
'''
dists = []
for i in self.harvestorQ:
dists.append(np.linalg.norm(world.stars[i[0]].planets[i[1]].position-self.originCoords))
chk = sorted(zip(dists, self.harvestorQ))
self.harvestorQ.clear()
for i in chk:
self.harvestorQ.append(i[1])
| [
"numpy.sum",
"environment.sensing.closestStarSubSet",
"collections.deque",
"numpy.linalg.norm"
] | [((1430, 1449), 'collections.deque', 'collections.deque', ([], {}), '()\n', (1447, 1449), False, 'import collections\n'), ((1557, 1576), 'collections.deque', 'collections.deque', ([], {}), '()\n', (1574, 1576), False, 'import collections\n'), ((2963, 2992), 'environment.sensing.closestStarSubSet', 'closestStarSubSet', (['coords', 'uk'], {}), '(coords, uk)\n', (2980, 2992), False, 'from environment.sensing import closestStar, closestStarSubSet\n'), ((2607, 2630), 'numpy.sum', 'np.sum', (['self.societyNet'], {}), '(self.societyNet)\n', (2613, 2630), True, 'import numpy as np\n'), ((4359, 4435), 'numpy.linalg.norm', 'np.linalg.norm', (['(world.stars[i[0]].planets[i[1]].position - self.originCoords)'], {}), '(world.stars[i[0]].planets[i[1]].position - self.originCoords)\n', (4373, 4435), True, 'import numpy as np\n')] |
import numpy as np
import perceptron.PERCEPTRON as p
training_inputs = []
training_inputs.append(np.array([1, 1]))
training_inputs.append(np.array([1, 0]))
training_inputs.append(np.array([0, 1]))
training_inputs.append(np.array([0, 0]))
labels = np.array([1, 0, 0, 0])
perceptron = p.Perceptron(2)
perceptron.train(training_inputs, labels)
inputs = np.array([1, 1])
print(perceptron.predict(inputs))
inputs = np.array([0, 1])
print(perceptron.predict(inputs))
| [
"numpy.array",
"perceptron.PERCEPTRON.Perceptron"
] | [((249, 271), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (257, 271), True, 'import numpy as np\n'), ((286, 301), 'perceptron.PERCEPTRON.Perceptron', 'p.Perceptron', (['(2)'], {}), '(2)\n', (298, 301), True, 'import perceptron.PERCEPTRON as p\n'), ((354, 370), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (362, 370), True, 'import numpy as np\n'), ((415, 431), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (423, 431), True, 'import numpy as np\n'), ((98, 114), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (106, 114), True, 'import numpy as np\n'), ((139, 155), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (147, 155), True, 'import numpy as np\n'), ((180, 196), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (188, 196), True, 'import numpy as np\n'), ((221, 237), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (229, 237), True, 'import numpy as np\n')] |
"""
Core IO methods for the PypeIt setup.
THIS MODULE IS NOW DEPRECATED AND LIKELY TO BE REMOVED
"""
from __future__ import (print_function, absolute_import, division, unicode_literals)
import os
import glob
import string
import numpy as np
import yaml
import linetools.utils
from pypeit import msgs
from pypeit import utils
from pypeit.core import parse
from pypeit.core import framematch
from pypeit import debugger
def dummy_setup_dict(file_list, setup):
"""
Generates a dummy setup_dict.
.. todo::
- Describe how this is used.
- Improve the description of setup and/or point to the function
used to generate it.
Args:
file_list (:obj:`list`):
List of files to set as science exposures.
setup (:obj:`str`):
String representation of the instrument setup.
Returns:
dict: Dictionary with the instrument setup.
"""
# setup_dict
setup_dict = {}
setup_dict[setup[0]] = {}
# Fill with dummy dicts
for ii in range(1,20): # Dummy detectors
setup_dict[setup[0]][parse.get_dnum(ii)] = dict(binning='1x1')
setup_dict[setup[0]][setup[-2:]] = {}
# Fill up filenames
setup_dict[setup[0]][setup[-2:]]['sci'] = file_list
# Write
# TODO: Why is this called here?
_ = write_calib(setup_dict)
return setup_dict
def calib_set(isetup_dict, fitstbl, sci_ID):
""" Generate a calibration dataset string
Parameters
----------
isetup_dict : dict
fitstbl : PypeItMetaData
sci_ID : int
ID of the science frame
Returns
-------
cb_str : str
Ordering is 'aa', 'ab', ...
"""
cb_strs = []
default = 'aa'
for ll in ['a', 'b', 'c', 'd']:
for lower in string.ascii_lowercase:
cb_strs.append(ll+lower)
# Build cbset from sciexp
new_cbset = {}
cbkeys = ['arc', 'bias', 'trace', 'pixelflat', 'science']
for cbkey in cbkeys:
new_cbset[cbkey], _ = fitstbl.find_frame_files(cbkey, sci_ID=sci_ID)
# Uninitialized?
if default not in isetup_dict.keys():
isetup_dict[default] = new_cbset
return default
# Not in default
for cbkey in cbkeys:
def_names = np.array(isetup_dict[default][cbkey])
if np.array_equal(def_names, new_cbset[cbkey]):
_ = new_cbset.pop(cbkey)
# Science only or exactly the same?
if len(new_cbset) == 0:
return default
elif len(new_cbset) == 1:
assert list(new_cbset.keys()) == ['science']
if new_cbset['science'][0] not in isetup_dict[default]['science']:
isetup_dict[default]['science'] += new_cbset['science']
return default
# New calib set?
for cb_str in cb_strs[1:]:
mtch = True
if cb_str not in isetup_dict.keys():
isetup_dict[cb_str] = new_cbset
break
for key in new_cbset:
if key not in isetup_dict[cb_str]:
mtch = False
break
if key in ['science']:
continue
ar_names = np.array(isetup_dict[default][cbkey])
mtch &= np.array_equal(ar_names, new_cbset[key])
if mtch:
# Add sci frames
for sciframe in new_cbset['science']:
break
# Return
return cb_str
def det_setup(isetup_dict, ddict):
""" Return detector setup on config dict or add to it if new
Parameters
----------
isetup_dict : dict
Selected setup_dict
ddict : dict
detector dict
Returns
-------
dkey : str
Name of the detector string associated to the input ddict
May be new or previously used
"""
det_str = [parse.get_dnum(i+1, prefix=False) for i in range(99)]
# Init
for dkey in det_str:
mtch = True
if dkey not in isetup_dict.keys():
isetup_dict[dkey] = ddict
break
for key in isetup_dict[dkey].keys():
mtch &= isetup_dict[dkey][key] == ddict[key]
if mtch:
break
# Return
return dkey
def is_equal(val1, val2, tol=1e-3):
if isinstance(val1,float):
return np.isclose(val1,val2,rtol=tol)
return val1 == val2
def instr_setup(sci_ID, det, fitstbl, setup_dict=None, must_exist=False, skip_cset=False,
config_name=None, copy=False):
"""
DEPRECATED
Define the instrument configuration.
.. todo::
- This needs to be made a class object and pulled out of core.
configuration ID: A, B, C
detector number: 01, 02, 03, ..
calibration ID: aa, ab, ac, ..
Args:
sci_ID (:obj:`int`):
The selected science frame (binary)
det (:obj:`int`):
The 1-indexed detector
fitstbl (:class:`pypeit.metadata.PypeItMetaData`):
The fits file metadata used by PypeIt
setup_dict (:obj:`dict`, optional):
The dictionary with the instrument configurations that have
already been parsed for this execution of PypeIt. If None,
the dictionary is instantiated from scratch; otherwise, any
new setup information is added to the output dictionary.
must_exist (:obj:`bool`, optional);
The setup must exist in the provided `setup_dict`. If not,
the function will raise an error.
skip_cset (:obj:`bool`, optional):
Skip setting the calibration identifier. This should only
be True when first generating instrument .setup file.
config_name (:obj:`str`, optional):
Can be used to choose a specific configuration ID.
copy (:obj:`bool`, optional):
Do not perform any in-place additions to the setup
dictionary. Instead copy any input dictionary and return
the modified dictionary. By default, modifications are made
to the input dictionary, which is returned instead of a
modified copy.
Returns:
str, dict: Returns the string identifier for the instrument
configuration and a dictionary with the configuration data. The
latter is either a new object, a modified copy of the input
dictionary, or the input dictionary after it has been modified
in place.
"""
debugger.set_trace()
# Labels
cfig_str = string.ascii_uppercase
cstr = '--'
# Find the first arc exposure tied to this science exposure
idx = np.where(fitstbl.find_frames('arc', sci_ID=sci_ID))[0][0]
# Use this exposure to pull the relevant information for the instrument setup
dispname = fitstbl['dispname'][idx] if 'dispname' in fitstbl.keys() else 'none'
dispangle = fitstbl['dispangle'][idx] if 'dispangle' in fitstbl.keys() else 'none'
dichroic = fitstbl['dichroic'][idx] if 'dichroic' in fitstbl.keys() else 'none'
decker = fitstbl['decker'][idx] if 'decker' in fitstbl.keys() else 'none'
slitwid = fitstbl['slitwid'][idx] if 'slitwid' in fitstbl.keys() else 'none'
slitlen = fitstbl['slitlen'][idx] if 'slitlen' in fitstbl.keys() else 'none'
binning = fitstbl['binning'][idx] if 'binning' in fitstbl.keys() else 'none'
# Generate the relevant dictionaries
cdict = dict(disperser={'name':dispname, 'angle':dispangle},
dichroic=dichroic,
slit={'decker':decker, 'slitwid':slitwid, 'slitlen':slitlen})
ddict = {'binning':binning,
'det':det,
'namp':fitstbl.spectrograph.detector[det-1]['numamplifiers']}
# Configuration
setupID = None
if setup_dict is None:
# Generate new configuration dictionary
setupID = 'A' if config_name is None else config_name
_setup_dict = dict()
_setup_dict[setupID] = {}
_setup_dict[setupID][cstr] = cdict
else:
# Try to find the setup in the existing configuration dictionary
for ckey in setup_dict.keys():
mtch = True
for key in setup_dict[ckey][cstr].keys():
# Dict?
if isinstance(setup_dict[ckey][cstr][key], dict):
for ikey in setup_dict[ckey][cstr][key].keys():
mtch &= is_equal(setup_dict[ckey][cstr][key][ikey],cdict[key][ikey])
else:
mtch &= is_equal(setup_dict[ckey][cstr][key], cdict[key])
if mtch:
setupID = ckey
break
# Augment setup_dict?
_setup_dict = setup_dict.copy() if copy else setup_dict
if setupID is None:
if must_exist:
msgs.error('This setup ID is not present in the setup_dict.')
maxs = max(_setup_dict.keys())
setupID = cfig_str[cfig_str.index(maxs)+1]
_setup_dict[setupID] = {}
_setup_dict[setupID][cstr] = cdict
# Detector
dkey = det_setup(_setup_dict[setupID], ddict)
# Calib set
if not skip_cset:
calib_key = calib_set(_setup_dict[setupID], fitstbl, sci_ID)
else:
calib_key = '--'
# Finish and return
return '{:s}_{:s}_{:s}'.format(setupID, dkey, calib_key), _setup_dict
# TODO: This is out of date!
#def get_setup_file(settings_argflag, spectrograph=None):
# """ Passes back name of setup file
# Also checks for existing setup files
#
# Parameters
# ----------
# spectrograph : str, optional
#
# Returns
# -------
# setup_file : str
# Name for the setup file
# nexist : int
# Number of existing setup files (0 or 1)
#
# """
# if spectrograph is None:
# spectrograph = settings_argflag['run']['spectrograph']
# setup_files = glob.glob('./{:s}*.setups'.format(spectrograph))
# nexist = len(setup_files)
# # Require 1 or 0
# if nexist == 1:
# return setup_files[0], nexist
# elif nexist == 0:
# setup_file = settings_argflag['run']['redname'].replace('.pypeit', '.setups')
# if os.path.isfile(setup_file):
# nexist = 1
# #date = str(datetime.date.today().strftime('%Y-%b-%d'))
# #return '{:s}_{:s}.setups'.format(spectrograph,date), nexist
# return setup_file, nexist
# else:
# msgs.error("Found more than one .setup file in the working directory. Limit to one.")
def write_calib(calib_file, setup_dict):
""" Output setup_dict with full calibrations to hard drive
Parameters
----------
setup_dict : dict
setup dict
"""
# Write
ydict = utils.yamlify(setup_dict)
with open(calib_file, 'w') as yamlf:
yamlf.write(yaml.dump(ydict))
def write_setup(setup_dict, ofile, use_json=False):
""" Output setup_dict to hard drive
Parameters
----------
setup_dict : dict
setup dict
use_json : bool, optional
Output with JSON instead of YAML (not recommended)
Returns
-------
"""
# Write
# if setup_file is None:
# setup_file, nexist = get_setup_file()
if use_json:
gddict = linetools.utils.jsonify(setup_dict)
linetools.utils.savejson(setup_file, gddict, easy_to_read=True)
return
ydict = utils.yamlify(setup_dict)
with open(ofile, 'w') as yamlf:
yamlf.write(yaml.dump(ydict))
def load_sorted(sorted_file):
""" Load a .sorted file (mainly to generate a .pypeit file)
Parameters
----------
sorted_file : str
Returns
-------
all_setups : list
list of all setups eg. ['A','B']
all_setuplines : list
list of lists of all setup lines which describe the setup
all_setupfiles : list
list of lists of all setup files including the header
"""
all_setups, all_setuplines, all_setupfiles = [], [], []
try:
with open(sorted_file, 'r') as ff:
# Should begin with ####
fline = ff.readline()
if fline[0:4] != '####':
msgs.error('Bad .sorted fomatting')
# Loop on setups
while fline[0:5] != '##end':
# Setup lines
setuplines = []
while fline[0:2] != '#-':
fline = ff.readline()
# Setup name
if 'Setup' in fline:
all_setups.append(fline[6:].strip())
#
setuplines.append(fline)
all_setuplines.append(setuplines[:-1])
# Data files
datafiles = []
while fline[0:2] != '##':
fline = ff.readline()
datafiles.append(fline)
all_setupfiles.append(datafiles[:-1])
except:
debugger.set_trace()
# Return
return all_setups, all_setuplines, all_setupfiles
def write_sorted(group_file, fitstbl, group_dict, setup_dict):
""" Write the .sorted file
Parameters
----------
group_file : str
fitstbl : PypeItMetaData
group_dict : dict
setup_dict : dict
Returns
-------
"""
# TODO: Not sure we should do this. Instead just check that
# frametype is in the relevant keys
if 'frametype' not in fitstbl.keys():
fitstbl.get_frame_types()
# Output file
ff = open(group_file, 'w')
# Keys
setups = list(group_dict.keys())
setups.sort()
ftypes = list(group_dict[setups[0]].keys())
ftypes.sort()
# Loop on Setup
asciiord = np.array(fitstbl.spectrograph.metadata_keys())
for setup in setups:
ff.write('##########################################################\n')
in_setup = []
ff.write('Setup {:s}\n'.format(setup))
ydict = utils.yamlify(setup_dict[setup])
ff.write(yaml.dump(ydict))
ff.write('#---------------------------------------------------------\n')
# ID files
for key in ftypes:
# Sort
gfiles = group_dict[setup][key]
gfiles.sort()
for ifile in gfiles:
mt = np.where(fitstbl['filename'] == ifile)[0]
if (len(mt) > 0) and (mt not in in_setup):
in_setup.append(mt[0])
# Write overlapping keys
gdkeys = np.in1d(asciiord, np.array(fitstbl.keys()))
subtbl = fitstbl[asciiord[gdkeys].tolist()][np.array(in_setup)]
subtbl.write(ff, format='ascii.fixed_width')
ff.write('##end\n')
ff.close()
def build_group_dict(fitstbl, setupIDs, all_sci_idx, all_sci_ID):
"""
Build a dictionary with the list of exposures of each type for a
given instrumental setup.
Args:
fitstbl (:class:`pypeit.metadata.PypeItMetaData`):
The metadata table for the fits files to reduce.
setupIDs (:obj:`list`):
The list of setups.
all_sci_idx (:obj:`list`):
The indices of the science frames in the data table.
TODO: Why is this needed?
all_sci_ID (:obj:`list`):
The ID number assigned to each science frame.
Returns:
dict: A dictionary with the list of file names associated with
each instrument configuration. It also provides the list of
science and standard star targets. TODO: How are the latter
used, if at all?
"""
type_keys = framematch.FrameTypeBitMask().keys() + ['None']
group_dict = {}
for sc,setupID in enumerate(setupIDs):
#scidx = sciexp[sc]._idx_sci[0]
scidx = all_sci_idx[sc]
sci_ID = all_sci_ID[sc]
# Set group_key
config_key = setupID[0]
# Plan init
if config_key not in group_dict.keys():
group_dict[config_key] = {}
for key in type_keys:
# if key not in ['None', 'dark']:
# group_dict[config_key][key] = []
group_dict[config_key][key] = []
group_dict[config_key]['sciobj'] = []
group_dict[config_key]['stdobj'] = []
# Fill group_dict too
for key in type_keys:
# if key in ['None', 'dark']:
# continue
indices = np.where(fitstbl.find_frames(key))[0] if key in ['None', 'dark'] \
else np.where(fitstbl.find_frames(key, sci_ID=sci_ID))[0]
for idx in indices:
# Only add if new
if fitstbl['filename'][idx] not in group_dict[config_key][key]:
group_dict[config_key][key].append(fitstbl['filename'][idx])
# TODO: How is this used?
if key == 'standard' and 'target' in fitstbl.keys(): # Add target name
group_dict[config_key]['stdobj'].append(fitstbl['target'][idx])
# TODO: How is this used?
if key == 'science' and 'target' in fitstbl.keys(): # Add target name
group_dict[config_key]['sciobj'].append(fitstbl['target'][scidx])
return group_dict
| [
"pypeit.msgs.error",
"numpy.isclose",
"pypeit.core.framematch.FrameTypeBitMask",
"yaml.dump",
"numpy.where",
"pypeit.utils.yamlify",
"numpy.array",
"numpy.array_equal",
"pypeit.debugger.set_trace",
"pypeit.core.parse.get_dnum"
] | [((6332, 6352), 'pypeit.debugger.set_trace', 'debugger.set_trace', ([], {}), '()\n', (6350, 6352), False, 'from pypeit import debugger\n'), ((10523, 10548), 'pypeit.utils.yamlify', 'utils.yamlify', (['setup_dict'], {}), '(setup_dict)\n', (10536, 10548), False, 'from pypeit import utils\n'), ((11170, 11195), 'pypeit.utils.yamlify', 'utils.yamlify', (['setup_dict'], {}), '(setup_dict)\n', (11183, 11195), False, 'from pypeit import utils\n'), ((2233, 2270), 'numpy.array', 'np.array', (['isetup_dict[default][cbkey]'], {}), '(isetup_dict[default][cbkey])\n', (2241, 2270), True, 'import numpy as np\n'), ((2282, 2325), 'numpy.array_equal', 'np.array_equal', (['def_names', 'new_cbset[cbkey]'], {}), '(def_names, new_cbset[cbkey])\n', (2296, 2325), True, 'import numpy as np\n'), ((3727, 3762), 'pypeit.core.parse.get_dnum', 'parse.get_dnum', (['(i + 1)'], {'prefix': '(False)'}), '(i + 1, prefix=False)\n', (3741, 3762), False, 'from pypeit.core import parse\n'), ((4186, 4218), 'numpy.isclose', 'np.isclose', (['val1', 'val2'], {'rtol': 'tol'}), '(val1, val2, rtol=tol)\n', (4196, 4218), True, 'import numpy as np\n'), ((13671, 13703), 'pypeit.utils.yamlify', 'utils.yamlify', (['setup_dict[setup]'], {}), '(setup_dict[setup])\n', (13684, 13703), False, 'from pypeit import utils\n'), ((1095, 1113), 'pypeit.core.parse.get_dnum', 'parse.get_dnum', (['ii'], {}), '(ii)\n', (1109, 1113), False, 'from pypeit.core import parse\n'), ((3096, 3133), 'numpy.array', 'np.array', (['isetup_dict[default][cbkey]'], {}), '(isetup_dict[default][cbkey])\n', (3104, 3133), True, 'import numpy as np\n'), ((3154, 3194), 'numpy.array_equal', 'np.array_equal', (['ar_names', 'new_cbset[key]'], {}), '(ar_names, new_cbset[key])\n', (3168, 3194), True, 'import numpy as np\n'), ((10610, 10626), 'yaml.dump', 'yaml.dump', (['ydict'], {}), '(ydict)\n', (10619, 10626), False, 'import yaml\n'), ((11252, 11268), 'yaml.dump', 'yaml.dump', (['ydict'], {}), '(ydict)\n', (11261, 11268), False, 'import yaml\n'), 
((12693, 12713), 'pypeit.debugger.set_trace', 'debugger.set_trace', ([], {}), '()\n', (12711, 12713), False, 'from pypeit import debugger\n'), ((13721, 13737), 'yaml.dump', 'yaml.dump', (['ydict'], {}), '(ydict)\n', (13730, 13737), False, 'import yaml\n'), ((14299, 14317), 'numpy.array', 'np.array', (['in_setup'], {}), '(in_setup)\n', (14307, 14317), True, 'import numpy as np\n'), ((8641, 8702), 'pypeit.msgs.error', 'msgs.error', (['"""This setup ID is not present in the setup_dict."""'], {}), "('This setup ID is not present in the setup_dict.')\n", (8651, 8702), False, 'from pypeit import msgs\n'), ((11922, 11957), 'pypeit.msgs.error', 'msgs.error', (['"""Bad .sorted fomatting"""'], {}), "('Bad .sorted fomatting')\n", (11932, 11957), False, 'from pypeit import msgs\n'), ((15282, 15311), 'pypeit.core.framematch.FrameTypeBitMask', 'framematch.FrameTypeBitMask', ([], {}), '()\n', (15309, 15311), False, 'from pypeit.core import framematch\n'), ((14009, 14047), 'numpy.where', 'np.where', (["(fitstbl['filename'] == ifile)"], {}), "(fitstbl['filename'] == ifile)\n", (14017, 14047), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 16 2021
@author: <NAME>
"""
# Import Module
import os
import sys
import numpy as np
# Constants
inputPath = "C:/Users/<NAME>/.spyder-py3/AI_HW1_SearchAlgs/input.txt"
SOLUTION = np.array([[1,2,3],[4,0,5],[6,7,8]])
DEPTH_LIMIT = 10
# takes a puzzle matrix and returns the position of a given num as an ordered pair
def findValue(puzzleMat, num):
pos = []
for row in range(0, len(puzzleMat)):
#print("row = " + str(row))
for col in range(0, len(puzzleMat[row])):
#print("col = " + str(col))
if(puzzleMat[row][col] == num):
pos = [row, col]
break
else:
continue
break # break from nested loop
return pos
# end findEmpty
# takes a puzzle matrix and returns the matrix with it's empty tile shifted north, or null if it can't be shifted
def moveNorth(puzzleMat):
# find empty tile
pos = findValue(puzzleMat, 0)
# if empty tile is in the top row, the empty tile cannot move north
if(pos[0] == 0):
return None
else:
resultMat = np.copy(puzzleMat)
tempVal = resultMat[pos[0] - 1][pos[1]]
resultMat[pos[0] - 1][pos[1]] = 0
resultMat[pos[0]][pos[1]] = tempVal
return resultMat
# end moveNorth()
# takes a puzzle matrix and returns the matrix with it's empty tile shifted south, or null if it can't be shifted
def moveSouth(puzzleMat):
# find empty tile
pos = findValue(puzzleMat, 0)
# if empty tile is in the top row, the empty tile cannot move north
if(pos[0] == len(puzzleMat) - 1):
return None
else:
resultMat = np.copy(puzzleMat)
tempVal = resultMat[pos[0] + 1][pos[1]]
resultMat[pos[0] + 1][pos[1]] = 0
resultMat[pos[0]][pos[1]] = tempVal
return resultMat
# end moveSouth()
# takes a puzzle matrix and returns the matrix with it's empty tile shifted east, or null if it can't be shifted
def moveEast(puzzleMat):
# find empty tile
pos = findValue(puzzleMat, 0)
# if empty tile is in the top row, the empty tile cannot move north
if(pos[1] == len(puzzleMat[pos[1]]) - 1):
return None
else:
resultMat = np.copy(puzzleMat)
tempVal = resultMat[pos[0]][pos[1] + 1]
resultMat[pos[0]][pos[1] + 1] = 0
resultMat[pos[0]][pos[1]] = tempVal
return resultMat
# end moveEast()
# takes a puzzle matrix and returns the matrix with it's empty tile shifted west, or null if it can't be shifted
def moveWest(puzzleMat):
# find empty tile
pos = findValue(puzzleMat, 0)
# if empty tile is in the top row, the empty tile cannot move north
if(pos[1] == 0):
return None
else:
resultMat = np.copy(puzzleMat)
tempVal = resultMat[pos[0]][pos[1] - 1]
resultMat[pos[0]][pos[1] - 1] = 0
resultMat[pos[0]][pos[1]] = tempVal
return resultMat
# end moveWest()
# takes a puzzle matrix and counts how many tiles are in the wrong position, compared to the solution matrix
def countWrongPositions(puzzleMat):
wrong = 0
for row in range(0, len(puzzleMat)):
#print("row = " + str(row))
for col in range(0, len(puzzleMat[row])):
#print("col = " + str(col))
if(puzzleMat[row][col] != SOLUTION[row][col]):
wrong += 1
return wrong
# end countWrongPositions()
# calculates the manhattan distance for every value in puzzleMat compared to the Solution Mat and returns the total
def countManhattanDistances(puzzleMat):
totalDist = 0
for row in range(0, len(puzzleMat)):
#print("row = " + str(row))
for col in range(0, len(puzzleMat[row])):
#print("col = " + str(col))
if(puzzleMat[row][col] != SOLUTION[row][col]):
pos = findValue(SOLUTION, puzzleMat[row][col])
totalDist += abs(pos[0] - row)
totalDist += abs(pos[1] - col)
return totalDist
# end countManhattanDistances()
def queueLinearSearch(queue, findCost):
for i in range(0, len(queue)):
if(findCost <= queue[i].cost):
return i
return len(queue)
class TreeNode:
def __init__(self, parentNode, puzzleMat, depth, depthLimit, statesVisited):
self.parent = parentNode
self.currentChild = None # we don't need to keep the whole tree stored in memory
self.puzzle = puzzleMat
self.depth = depth
self.cost = depth
self.depthLimit = depthLimit
self.statesVisited = statesVisited
# end init()
def compareTo(self, node):
if(self.cost < node.cost):
return -1
elif(self.cost > node.cost):
return 1
else:
return 0
# end compareTo
def avoidRepeats(self, newPuzzle):
parent = self.parent
while(parent is not None):
if(np.array_equal(newPuzzle, parent.puzzle)):
return False
else:
parent = parent.parent
return True
def dfs(self):
if(np.array_equal(self.puzzle, SOLUTION)):
self.currentChild = None
return True
elif(self.depth >= self.depthLimit):
self.currentChild = None
return False
else:
# check children
for i in range(0, 4):
nextPuzzle = None
if(i == 0):
nextPuzzle = moveNorth(self.puzzle)
elif(i == 1):
nextPuzzle = moveEast(self.puzzle)
elif(i == 2):
nextPuzzle = moveSouth(self.puzzle)
elif(i == 3):
nextPuzzle = moveWest(self.puzzle)
if(nextPuzzle is not None):
if(self.avoidRepeats(nextPuzzle)):
self.currentChild = TreeNode(self, nextPuzzle, self.depth + 1, self.depthLimit, self.statesVisited + 1)
if(self.currentChild.dfs()):
self.statesVisited = self.currentChild.statesVisited
return True
else:
self.statesVisited = self.currentChild.statesVisited
# end for i
# end else
# end dfs()
def ids(self):
limit = self.depthLimit
for d in range(0, limit + 1):
self.depthLimit = d
#print("Depth limit = " + str(self.depthLimit))
self.statesVisited += 1 # the root node is visited multiple times
if(self.dfs()):
return True
return False
# end ids()
def astar1(self):
queue = []
queue.append(self)
currentNode = None
numEnqueued = 1
while(len(queue) > 0):
currentNode = queue.pop(0)
if(np.array_equal(currentNode.puzzle, SOLUTION)):
break
elif(currentNode.depth >= currentNode.depthLimit):
currentNode = None
else:
# check children
for i in range(0, 4):
nextPuzzle = None
if(i == 0):
nextPuzzle = moveNorth(currentNode.puzzle)
elif(i == 1):
nextPuzzle = moveEast(currentNode.puzzle)
elif(i == 2):
nextPuzzle = moveSouth(currentNode.puzzle)
elif(i == 3):
nextPuzzle = moveWest(currentNode.puzzle)
if(nextPuzzle is not None):
#if(self.avoidRepeats(nextPuzzle)):
numEnqueued += 1
newNode = TreeNode(currentNode, nextPuzzle, currentNode.depth + 1, currentNode.depthLimit, currentNode.statesVisited + 1)
newNode.cost += countWrongPositions(nextPuzzle)
if(len(queue) == 0):
queue.append(newNode)
else:
i = queueLinearSearch(queue, newNode.cost)
queue.insert(i, newNode)
# end if
# end for i
# end else
# end while
if(currentNode is not None):
currentNode.statesVisited = numEnqueued
previousNode = currentNode.parent
while(previousNode is not None):
previousNode.statesVisited = numEnqueued
previousNode.currentChild = currentNode
currentNode = previousNode
previousNode = currentNode.parent
return True
else:
self.statesVisited = numEnqueued
return False
# end astar1()
def astar2(self):
queue = []
queue.append(self)
currentNode = None
numEnqueued = 1
while(len(queue) > 0):
currentNode = queue.pop(0)
if(np.array_equal(currentNode.puzzle, SOLUTION)):
break
elif(currentNode.depth >= currentNode.depthLimit):
currentNode = None
else:
# check children
for i in range(0, 4):
nextPuzzle = None
if(i == 0):
nextPuzzle = moveNorth(currentNode.puzzle)
elif(i == 1):
nextPuzzle = moveEast(currentNode.puzzle)
elif(i == 2):
nextPuzzle = moveSouth(currentNode.puzzle)
elif(i == 3):
nextPuzzle = moveWest(currentNode.puzzle)
if(nextPuzzle is not None):
#if(self.avoidRepeats(nextPuzzle)):
numEnqueued += 1
newNode = TreeNode(currentNode, nextPuzzle, currentNode.depth + 1, currentNode.depthLimit, currentNode.statesVisited + 1)
newNode.cost += countManhattanDistances(nextPuzzle)
if(len(queue) == 0):
queue.append(newNode)
else:
i = queueLinearSearch(queue, newNode.cost)
queue.insert(i, newNode)
# end if
# end for i
# end else
# end while
if(currentNode is not None):
currentNode.statesVisited = numEnqueued
previousNode = currentNode.parent
while(previousNode is not None):
previousNode.statesVisited = numEnqueued
previousNode.currentChild = currentNode
currentNode = previousNode
previousNode = currentNode.parent
return True
else:
self.statesVisited = numEnqueued
return False
# end astar2()
def printMoves(self):
print("Initial State: ")
print(self.puzzle)
numMoves = 0
numStates = 0
child = self.currentChild
while(child is not None):
print("\n")
print(child.puzzle)
numStates = child.statesVisited
numMoves += 1
child = child.currentChild
print("Number of moves = " + str(numMoves))
print("Number of states enqueued = " + str(numStates))
# end printMoves()
# end TreeNode
def parseMatrix(line):
puzzleMat = []
tempLine = []
i = 0
for c in line:
if(c.isdigit()):
tempLine.append(int(c))
i += 1
if(i >= 3):
puzzleMat.append(tempLine)
tempLine = []
i = 0
elif(c == '*'):
tempLine.append(0)
i += 1
if(i >= 2):
puzzleMat.append(tempLine)
tempLine = []
i = 0
puzzleMat = np.array(puzzleMat)
return puzzleMat
# end parseMatrix()
def readInput(path):
assert os.path.isfile(path)
with open(path, "r") as f:
lines = f.readlines()
#print(lines)
return lines
# end readInput()
if __name__ == "__main__":
    args = sys.argv
    if len(args) < 3:
        print("error: cmd syntax is 'homework1' <search alg> <input file path>")
        sys.exit()
    lines = readInput(args[2])
    puzzleMat = parseMatrix(lines[0])
    # Dispatch table: algorithm name -> (label for the failure message,
    # initial statesVisited). dfs historically starts its count at 1,
    # the other searches at 0.
    searches = {
        "dfs": ("DFS", 1),
        "ids": ("IDS", 0),
        "astar1": ("A*1", 0),
        "astar2": ("A*2", 0),
    }
    if args[1] not in searches:
        print("error: search algs available are - dfs, ids, astar1, astar2")
    else:
        label, initial_visited = searches[args[1]]
        root = TreeNode(None, puzzleMat, 0, DEPTH_LIMIT, initial_visited)
        # The method name matches the CLI argument (dfs/ids/astar1/astar2).
        search = getattr(root, args[1])
        if search():
            root.printMoves()
        else:
            print(label + " failed after " + str(root.statesVisited) + " states enqueued")
# end main()
| [
"numpy.copy",
"os.path.isfile",
"numpy.array",
"numpy.array_equal",
"sys.exit"
] | [((225, 268), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 0, 5], [6, 7, 8]]'], {}), '([[1, 2, 3], [4, 0, 5], [6, 7, 8]])\n', (233, 268), True, 'import numpy as np\n'), ((12188, 12207), 'numpy.array', 'np.array', (['puzzleMat'], {}), '(puzzleMat)\n', (12196, 12207), True, 'import numpy as np\n'), ((12282, 12302), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (12296, 12302), False, 'import os\n'), ((1127, 1145), 'numpy.copy', 'np.copy', (['puzzleMat'], {}), '(puzzleMat)\n', (1134, 1145), True, 'import numpy as np\n'), ((1685, 1703), 'numpy.copy', 'np.copy', (['puzzleMat'], {}), '(puzzleMat)\n', (1692, 1703), True, 'import numpy as np\n'), ((2249, 2267), 'numpy.copy', 'np.copy', (['puzzleMat'], {}), '(puzzleMat)\n', (2256, 2267), True, 'import numpy as np\n'), ((2787, 2805), 'numpy.copy', 'np.copy', (['puzzleMat'], {}), '(puzzleMat)\n', (2794, 2805), True, 'import numpy as np\n'), ((5163, 5200), 'numpy.array_equal', 'np.array_equal', (['self.puzzle', 'SOLUTION'], {}), '(self.puzzle, SOLUTION)\n', (5177, 5200), True, 'import numpy as np\n'), ((12633, 12643), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12641, 12643), False, 'import sys\n'), ((4979, 5019), 'numpy.array_equal', 'np.array_equal', (['newPuzzle', 'parent.puzzle'], {}), '(newPuzzle, parent.puzzle)\n', (4993, 5019), True, 'import numpy as np\n'), ((7007, 7051), 'numpy.array_equal', 'np.array_equal', (['currentNode.puzzle', 'SOLUTION'], {}), '(currentNode.puzzle, SOLUTION)\n', (7021, 7051), True, 'import numpy as np\n'), ((9188, 9232), 'numpy.array_equal', 'np.array_equal', (['currentNode.puzzle', 'SOLUTION'], {}), '(currentNode.puzzle, SOLUTION)\n', (9202, 9232), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import io
from sklearn.utils.validation import check_array, column_or_1d, check_consistent_length
from sklearn.utils import assert_all_finite
from sklearn.utils.multiclass import type_of_target
from ._utils import _assure_2d_array
class DoubleMLData:
    """Double machine learning data-backend.
    :class:`DoubleMLData` objects can be initialized from
    :class:`pandas.DataFrame`'s as well as :class:`numpy.ndarray`'s.
    Parameters
    ----------
    data : :class:`pandas.DataFrame`
        The data.
    y_col : str
        The outcome variable.
    d_cols : str or list
        The treatment variable(s).
    x_cols : None, str or list
        The covariates.
        If ``None``, all variables (columns of ``data``) which are neither specified as outcome variable ``y_col``, nor
        treatment variables ``d_cols``, nor instrumental variables ``z_cols`` are used as covariates.
        Default is ``None``.
    z_cols : None, str or list
        The instrumental variable(s).
        Default is ``None``.
    use_other_treat_as_covariate : bool
        Indicates whether in the multiple-treatment case the other treatment variables should be added as covariates.
        Default is ``True``.
    force_all_x_finite : bool or str
        Indicates whether to raise an error on infinite values and / or missings in the covariates ``x``.
        Possible values are: ``True`` (neither missings ``np.nan``, ``pd.NA`` nor infinite values ``np.inf`` are
        allowed), ``False`` (missings and infinite values are allowed), ``'allow-nan'`` (only missings are allowed).
        Note that the choice ``False`` and ``'allow-nan'`` are only reasonable if the machine learning methods used
        for the nuisance functions are capable to provide valid predictions with missings and / or infinite values
        in the covariates ``x``.
        Default is ``True``.
    Examples
    --------
    >>> from doubleml import DoubleMLData
    >>> from doubleml.datasets import make_plr_CCDDHNR2018
    >>> # initialization from pandas.DataFrame
    >>> df = make_plr_CCDDHNR2018(return_type='DataFrame')
    >>> obj_dml_data_from_df = DoubleMLData(df, 'y', 'd')
    >>> # initialization from np.ndarray
    >>> (x, y, d) = make_plr_CCDDHNR2018(return_type='array')
    >>> obj_dml_data_from_array = DoubleMLData.from_arrays(x, y, d)
    """
    def __init__(self,
                 data,
                 y_col,
                 d_cols,
                 x_cols=None,
                 z_cols=None,
                 use_other_treat_as_covariate=True,
                 force_all_x_finite=True):
        if not isinstance(data, pd.DataFrame):
            raise TypeError('data must be of pd.DataFrame type. '
                            f'{str(data)} of type {str(type(data))} was passed.')
        if not data.columns.is_unique:
            raise ValueError('Invalid pd.DataFrame: '
                             'Contains duplicate column names.')
        self._data = data
        self.y_col = y_col
        self.d_cols = d_cols
        self.z_cols = z_cols
        self.x_cols = x_cols
        self._check_disjoint_sets_y_d_x_z()
        self.use_other_treat_as_covariate = use_other_treat_as_covariate
        self.force_all_x_finite = force_all_x_finite
        self._binary_treats = self._check_binary_treats()
        self._set_y_z()
        # by default, we initialize to the first treatment variable
        self.set_x_d(self.d_cols[0])
    def __str__(self):
        data_info = f'Outcome variable: {self.y_col}\n' \
                    f'Treatment variable(s): {self.d_cols}\n' \
                    f'Covariates: {self.x_cols}\n' \
                    f'Instrument variable(s): {self.z_cols}\n' \
                    f'No. Observations: {self.n_obs}\n'
        buf = io.StringIO()
        self.data.info(verbose=False, buf=buf)
        df_info = buf.getvalue()
        res = '================== DoubleMLData Object ==================\n' + \
              '\n------------------ Data summary ------------------\n' + data_info + \
              '\n------------------ DataFrame info ------------------\n' + df_info
        return res
    @classmethod
    def from_arrays(cls, x, y, d, z=None, use_other_treat_as_covariate=True,
                    force_all_x_finite=True):
        """
        Initialize :class:`DoubleMLData` from :class:`numpy.ndarray`'s.
        Parameters
        ----------
        x : :class:`numpy.ndarray`
            Array of covariates.
        y : :class:`numpy.ndarray`
            Array of the outcome variable.
        d : :class:`numpy.ndarray`
            Array of treatment variables.
        z : None or :class:`numpy.ndarray`
            Array of instrumental variables.
            Default is ``None``.
        use_other_treat_as_covariate : bool
            Indicates whether in the multiple-treatment case the other treatment variables should be added as covariates.
            Default is ``True``.
        force_all_x_finite : bool or str
            Indicates whether to raise an error on infinite values and / or missings in the covariates ``x``.
            Possible values are: ``True`` (neither missings ``np.nan``, ``pd.NA`` nor infinite values ``np.inf`` are
            allowed), ``False`` (missings and infinite values are allowed), ``'allow-nan'`` (only missings are allowed).
            Note that the choice ``False`` and ``'allow-nan'`` are only reasonable if the machine learning methods used
            for the nuisance functions are capable to provide valid predictions with missings and / or infinite values
            in the covariates ``x``.
            Default is ``True``.
        Examples
        --------
        >>> from doubleml import DoubleMLData
        >>> from doubleml.datasets import make_plr_CCDDHNR2018
        >>> (x, y, d) = make_plr_CCDDHNR2018(return_type='array')
        >>> obj_dml_data_from_array = DoubleMLData.from_arrays(x, y, d)
        """
        if isinstance(force_all_x_finite, str):
            if force_all_x_finite != 'allow-nan':
                raise ValueError("Invalid force_all_x_finite " + force_all_x_finite + ". " +
                                 "force_all_x_finite must be True, False or 'allow-nan'.")
        elif not isinstance(force_all_x_finite, bool):
            raise TypeError("Invalid force_all_x_finite. " +
                            "force_all_x_finite must be True, False or 'allow-nan'.")
        x = check_array(x, ensure_2d=False, allow_nd=False,
                        force_all_finite=force_all_x_finite)
        d = check_array(d, ensure_2d=False, allow_nd=False)
        y = column_or_1d(y, warn=True)
        x = _assure_2d_array(x)
        d = _assure_2d_array(d)
        y_col = 'y'
        if z is None:
            check_consistent_length(x, y, d)
            z_cols = None
        else:
            z = check_array(z, ensure_2d=False, allow_nd=False)
            z = _assure_2d_array(z)
            check_consistent_length(x, y, d, z)
            if z.shape[1] == 1:
                z_cols = ['z']
            else:
                z_cols = [f'z{i + 1}' for i in np.arange(z.shape[1])]
        if d.shape[1] == 1:
            d_cols = ['d']
        else:
            d_cols = [f'd{i+1}' for i in np.arange(d.shape[1])]
        x_cols = [f'X{i+1}' for i in np.arange(x.shape[1])]
        if z is None:
            data = pd.DataFrame(np.column_stack((x, y, d)),
                                columns=x_cols + [y_col] + d_cols)
        else:
            data = pd.DataFrame(np.column_stack((x, y, d, z)),
                                columns=x_cols + [y_col] + d_cols + z_cols)
        return cls(data, y_col, d_cols, x_cols, z_cols, use_other_treat_as_covariate, force_all_x_finite)
    @property
    def data(self):
        """
        The data.
        """
        return self._data
    @property
    def x(self):
        """
        Array of covariates;
        Dynamic! May depend on the currently set treatment variable;
        To get an array of all covariates (independent of the currently set treatment variable)
        call ``obj.data[obj.x_cols].values``.
        """
        return self._X.values
    @property
    def y(self):
        """
        Array of outcome variable.
        """
        return self._y.values
    @property
    def d(self):
        """
        Array of treatment variable;
        Dynamic! Depends on the currently set treatment variable;
        To get an array of all treatment variables (independent of the currently set treatment variable)
        call ``obj.data[obj.d_cols].values``.
        """
        return self._d.values
    @property
    def z(self):
        """
        Array of instrumental variables.
        """
        if self.z_cols is not None:
            return self._z.values
        else:
            return None
    @property
    def all_variables(self):
        """
        All variables available in the dataset.
        """
        return self.data.columns
    @property
    def n_treat(self):
        """
        The number of treatment variables.
        """
        return len(self.d_cols)
    @property
    def n_instr(self):
        """
        The number of instruments.
        """
        if self.z_cols is not None:
            n_instr = len(self.z_cols)
        else:
            n_instr = 0
        return n_instr
    @property
    def n_obs(self):
        """
        The number of observations.
        """
        return self.data.shape[0]
    @property
    def binary_treats(self):
        """
        Series with logical(s) indicating whether the treatment variable(s) are binary with values 0 and 1.
        """
        return self._binary_treats
    @property
    def x_cols(self):
        """
        The covariates.
        """
        return self._x_cols
    @x_cols.setter
    def x_cols(self, value):
        reset_value = hasattr(self, '_x_cols')
        if value is not None:
            if isinstance(value, str):
                value = [value]
            if not isinstance(value, list):
                raise TypeError('The covariates x_cols must be of str or list type (or None). '
                                f'{str(value)} of type {str(type(value))} was passed.')
            if not len(set(value)) == len(value):
                raise ValueError('Invalid covariates x_cols: '
                                 'Contains duplicate values.')
            if not set(value).issubset(set(self.all_variables)):
                raise ValueError('Invalid covariates x_cols. '
                                 'At least one covariate is no data column.')
            self._x_cols = value
        else:
            # x_cols defaults to all columns but y_col, d_cols and z_cols
            if self.z_cols is not None:
                y_d_z = set.union({self.y_col}, set(self.d_cols), set(self.z_cols))
                x_cols = [col for col in self.data.columns if col not in y_d_z]
            else:
                y_d = set.union({self.y_col}, set(self.d_cols))
                x_cols = [col for col in self.data.columns if col not in y_d]
            self._x_cols = x_cols
        if reset_value:
            self._check_disjoint_sets()
            # by default, we initialize to the first treatment variable
            self.set_x_d(self.d_cols[0])
    @property
    def d_cols(self):
        """
        The treatment variable(s).
        """
        return self._d_cols
    @d_cols.setter
    def d_cols(self, value):
        reset_value = hasattr(self, '_d_cols')
        if isinstance(value, str):
            value = [value]
        if not isinstance(value, list):
            raise TypeError('The treatment variable(s) d_cols must be of str or list type. '
                            f'{str(value)} of type {str(type(value))} was passed.')
        if not len(set(value)) == len(value):
            raise ValueError('Invalid treatment variable(s) d_cols: '
                             'Contains duplicate values.')
        if not set(value).issubset(set(self.all_variables)):
            raise ValueError('Invalid treatment variable(s) d_cols. '
                             'At least one treatment variable is no data column.')
        self._d_cols = value
        if reset_value:
            self._check_disjoint_sets()
            # by default, we initialize to the first treatment variable
            self.set_x_d(self.d_cols[0])
    @property
    def y_col(self):
        """
        The outcome variable.
        """
        return self._y_col
    @y_col.setter
    def y_col(self, value):
        reset_value = hasattr(self, '_y_col')
        if not isinstance(value, str):
            raise TypeError('The outcome variable y_col must be of str type. '
                            f'{str(value)} of type {str(type(value))} was passed.')
        if value not in self.all_variables:
            raise ValueError('Invalid outcome variable y_col. '
                             f'{value} is no data column.')
        self._y_col = value
        if reset_value:
            self._check_disjoint_sets()
            self._set_y_z()
    @property
    def z_cols(self):
        """
        The instrumental variable(s).
        """
        return self._z_cols
    @z_cols.setter
    def z_cols(self, value):
        reset_value = hasattr(self, '_z_cols')
        if value is not None:
            if isinstance(value, str):
                value = [value]
            if not isinstance(value, list):
                raise TypeError('The instrumental variable(s) z_cols must be of str or list type (or None). '
                                f'{str(value)} of type {str(type(value))} was passed.')
            if not len(set(value)) == len(value):
                raise ValueError('Invalid instrumental variable(s) z_cols: '
                                 'Contains duplicate values.')
            if not set(value).issubset(set(self.all_variables)):
                raise ValueError('Invalid instrumental variable(s) z_cols. '
                                 'At least one instrumental variable is no data column.')
            self._z_cols = value
        else:
            self._z_cols = None
        if reset_value:
            self._check_disjoint_sets()
            self._set_y_z()
    @property
    def use_other_treat_as_covariate(self):
        """
        Indicates whether in the multiple-treatment case the other treatment variables should be added as covariates.
        """
        return self._use_other_treat_as_covariate
    @use_other_treat_as_covariate.setter
    def use_other_treat_as_covariate(self, value):
        reset_value = hasattr(self, '_use_other_treat_as_covariate')
        if not isinstance(value, bool):
            raise TypeError('use_other_treat_as_covariate must be True or False. '
                            f'Got {str(value)}.')
        self._use_other_treat_as_covariate = value
        if reset_value:
            # by default, we initialize to the first treatment variable
            self.set_x_d(self.d_cols[0])
    @property
    def force_all_x_finite(self):
        """
        Indicates whether to raise an error on infinite values and / or missings in the covariates ``x``.
        """
        return self._force_all_x_finite
    @force_all_x_finite.setter
    def force_all_x_finite(self, value):
        reset_value = hasattr(self, '_force_all_x_finite')
        if isinstance(value, str):
            if value != 'allow-nan':
                raise ValueError("Invalid force_all_x_finite " + value + ". " +
                                 "force_all_x_finite must be True, False or 'allow-nan'.")
        elif not isinstance(value, bool):
            raise TypeError("Invalid force_all_x_finite. " +
                            "force_all_x_finite must be True, False or 'allow-nan'.")
        self._force_all_x_finite = value
        if reset_value:
            # by default, we initialize to the first treatment variable
            self.set_x_d(self.d_cols[0])
    def _set_y_z(self):
        # cache the outcome (and instrument) columns; fail fast on missings
        # or infinite values in y and z, which are never allowed
        assert_all_finite(self.data.loc[:, self.y_col])
        self._y = self.data.loc[:, self.y_col]
        if self.z_cols is None:
            self._z = None
        else:
            assert_all_finite(self.data.loc[:, self.z_cols])
            self._z = self.data.loc[:, self.z_cols]
    def set_x_d(self, treatment_var):
        """
        Function that assigns the role for the treatment variables in the multiple-treatment case.
        Parameters
        ----------
        treatment_var : str
            Active treatment variable that will be set to d.
        """
        if not isinstance(treatment_var, str):
            raise TypeError('treatment_var must be of str type. '
                            f'{str(treatment_var)} of type {str(type(treatment_var))} was passed.')
        if treatment_var not in self.d_cols:
            raise ValueError('Invalid treatment_var. '
                             f'{treatment_var} is not in d_cols.')
        if self.use_other_treat_as_covariate:
            # note that the following line needs to be adapted in case an intersection of x_cols and d_cols as allowed
            # (see https://github.com/DoubleML/doubleml-for-py/issues/83)
            xd_list = self.x_cols + self.d_cols
            xd_list.remove(treatment_var)
        else:
            xd_list = self.x_cols
        assert_all_finite(self.data.loc[:, treatment_var])
        if self.force_all_x_finite:
            assert_all_finite(self.data.loc[:, xd_list],
                              allow_nan=self.force_all_x_finite == 'allow-nan')
        self._d = self.data.loc[:, treatment_var]
        self._X = self.data.loc[:, xd_list]
    def _check_binary_treats(self):
        # a treatment counts as binary only if sklearn classifies it as
        # 'binary' AND its values are exactly 0/1 (d^2 - d == 0 elementwise)
        is_binary = pd.Series(dtype=bool, index=self.d_cols)
        for treatment_var in self.d_cols:
            this_d = self.data.loc[:, treatment_var]
            binary_treat = (type_of_target(this_d) == 'binary')
            zero_one_treat = np.all((np.power(this_d, 2) - this_d) == 0)
            is_binary[treatment_var] = (binary_treat & zero_one_treat)
        return is_binary
    def _check_disjoint_sets(self):
        # this function can be extended in inherited subclasses
        self._check_disjoint_sets_y_d_x_z()
    def _check_disjoint_sets_y_d_x_z(self):
        # enforce that y_col, d_cols, x_cols and z_cols are pairwise disjoint
        y_col_set = {self.y_col}
        x_cols_set = set(self.x_cols)
        d_cols_set = set(self.d_cols)
        if not y_col_set.isdisjoint(x_cols_set):
            raise ValueError(f'{str(self.y_col)} cannot be set as outcome variable ``y_col`` and covariate in '
                             '``x_cols``.')
        if not y_col_set.isdisjoint(d_cols_set):
            raise ValueError(f'{str(self.y_col)} cannot be set as outcome variable ``y_col`` and treatment variable in '
                             '``d_cols``.')
        # note that the line xd_list = self.x_cols + self.d_cols in method set_x_d needs adaption if an intersection of
        # x_cols and d_cols as allowed (see https://github.com/DoubleML/doubleml-for-py/issues/83)
        if not d_cols_set.isdisjoint(x_cols_set):
            raise ValueError('At least one variable/column is set as treatment variable (``d_cols``) and as covariate'
                             '(``x_cols``). Consider using parameter ``use_other_treat_as_covariate``.')
        if self.z_cols is not None:
            z_cols_set = set(self.z_cols)
            if not y_col_set.isdisjoint(z_cols_set):
                raise ValueError(f'{str(self.y_col)} cannot be set as outcome variable ``y_col`` and instrumental '
                                 'variable in ``z_cols``.')
            if not d_cols_set.isdisjoint(z_cols_set):
                raise ValueError('At least one variable/column is set as treatment variable (``d_cols``) and '
                                 'instrumental variable in ``z_cols``.')
            if not x_cols_set.isdisjoint(z_cols_set):
                raise ValueError('At least one variable/column is set as covariate (``x_cols``) and instrumental '
                                 'variable in ``z_cols``.')
class DoubleMLClusterData(DoubleMLData):
    """Double machine learning data-backend for data with cluster variables.
    :class:`DoubleMLClusterData` objects can be initialized from
    :class:`pandas.DataFrame`'s as well as :class:`numpy.ndarray`'s.
    Parameters
    ----------
    data : :class:`pandas.DataFrame`
        The data.
    y_col : str
        The outcome variable.
    d_cols : str or list
        The treatment variable(s).
    cluster_cols : str or list
        The cluster variable(s).
    x_cols : None, str or list
        The covariates.
        If ``None``, all variables (columns of ``data``) which are neither specified as outcome variable ``y_col``, nor
        treatment variables ``d_cols``, nor instrumental variables ``z_cols`` are used as covariates.
        Default is ``None``.
    z_cols : None, str or list
        The instrumental variable(s).
        Default is ``None``.
    use_other_treat_as_covariate : bool
        Indicates whether in the multiple-treatment case the other treatment variables should be added as covariates.
        Default is ``True``.
    force_all_x_finite : bool or str
        Indicates whether to raise an error on infinite values and / or missings in the covariates ``x``.
        Possible values are: ``True`` (neither missings ``np.nan``, ``pd.NA`` nor infinite values ``np.inf`` are
        allowed), ``False`` (missings and infinite values are allowed), ``'allow-nan'`` (only missings are allowed).
        Note that the choice ``False`` and ``'allow-nan'`` are only reasonable if the machine learning methods used
        for the nuisance functions are capable to provide valid predictions with missings and / or infinite values
        in the covariates ``x``.
        Default is ``True``.
    Examples
    --------
    >>> from doubleml import DoubleMLClusterData
    >>> from doubleml.datasets import make_pliv_multiway_cluster_CKMS2021
    >>> # initialization from pandas.DataFrame
    >>> df = make_pliv_multiway_cluster_CKMS2021(return_type='DataFrame')
    >>> obj_dml_data_from_df = DoubleMLClusterData(df, 'Y', 'D', ['cluster_var_i', 'cluster_var_j'], z_cols='Z')
    >>> # initialization from np.ndarray
    >>> (x, y, d, cluster_vars, z) = make_pliv_multiway_cluster_CKMS2021(return_type='array')
    >>> obj_dml_data_from_array = DoubleMLClusterData.from_arrays(x, y, d, cluster_vars, z)
    """
    def __init__(self,
                 data,
                 y_col,
                 d_cols,
                 cluster_cols,
                 x_cols=None,
                 z_cols=None,
                 use_other_treat_as_covariate=True,
                 force_all_x_finite=True):
        # we need to set cluster_cols (needs _data) before call to the super __init__ because of the x_cols setter
        if not isinstance(data, pd.DataFrame):
            raise TypeError('data must be of pd.DataFrame type. '
                            f'{str(data)} of type {str(type(data))} was passed.')
        if not data.columns.is_unique:
            raise ValueError('Invalid pd.DataFrame: '
                             'Contains duplicate column names.')
        self._data = data
        self.cluster_cols = cluster_cols
        self._set_cluster_vars()
        super().__init__(data,
                         y_col,
                         d_cols,
                         x_cols,
                         z_cols,
                         use_other_treat_as_covariate,
                         force_all_x_finite)
        self._check_disjoint_sets_cluster_cols()
    def __str__(self):
        data_info = f'Outcome variable: {self.y_col}\n' \
                    f'Treatment variable(s): {self.d_cols}\n' \
                    f'Cluster variable(s): {self.cluster_cols}\n' \
                    f'Covariates: {self.x_cols}\n' \
                    f'Instrument variable(s): {self.z_cols}\n' \
                    f'No. Observations: {self.n_obs}\n'
        buf = io.StringIO()
        self.data.info(verbose=False, buf=buf)
        df_info = buf.getvalue()
        res = '================== DoubleMLClusterData Object ==================\n' + \
              '\n------------------ Data summary ------------------\n' + data_info + \
              '\n------------------ DataFrame info ------------------\n' + df_info
        return res
    @classmethod
    def from_arrays(cls, x, y, d, cluster_vars, z=None, use_other_treat_as_covariate=True,
                    force_all_x_finite=True):
        """
        Initialize :class:`DoubleMLClusterData` from :class:`numpy.ndarray`'s.
        Parameters
        ----------
        x : :class:`numpy.ndarray`
            Array of covariates.
        y : :class:`numpy.ndarray`
            Array of the outcome variable.
        d : :class:`numpy.ndarray`
            Array of treatment variables.
        cluster_vars : :class:`numpy.ndarray`
            Array of cluster variables.
        z : None or :class:`numpy.ndarray`
            Array of instrumental variables.
            Default is ``None``.
        use_other_treat_as_covariate : bool
            Indicates whether in the multiple-treatment case the other treatment variables should be added as covariates.
            Default is ``True``.
        force_all_x_finite : bool or str
            Indicates whether to raise an error on infinite values and / or missings in the covariates ``x``.
            Possible values are: ``True`` (neither missings ``np.nan``, ``pd.NA`` nor infinite values ``np.inf`` are
            allowed), ``False`` (missings and infinite values are allowed), ``'allow-nan'`` (only missings are allowed).
            Note that the choice ``False`` and ``'allow-nan'`` are only reasonable if the machine learning methods used
            for the nuisance functions are capable to provide valid predictions with missings and / or infinite values
            in the covariates ``x``.
            Default is ``True``.
        Examples
        --------
        >>> from doubleml import DoubleMLClusterData
        >>> from doubleml.datasets import make_pliv_multiway_cluster_CKMS2021
        >>> (x, y, d, cluster_vars, z) = make_pliv_multiway_cluster_CKMS2021(return_type='array')
        >>> obj_dml_data_from_array = DoubleMLClusterData.from_arrays(x, y, d, cluster_vars, z)
        """
        dml_data = DoubleMLData.from_arrays(x, y, d, z, use_other_treat_as_covariate, force_all_x_finite)
        cluster_vars = check_array(cluster_vars, ensure_2d=False, allow_nd=False)
        cluster_vars = _assure_2d_array(cluster_vars)
        if cluster_vars.shape[1] == 1:
            cluster_cols = ['cluster_var']
        else:
            cluster_cols = [f'cluster_var{i + 1}' for i in np.arange(cluster_vars.shape[1])]
        data = pd.concat((pd.DataFrame(cluster_vars, columns=cluster_cols), dml_data.data), axis=1)
        return cls(data, dml_data.y_col, dml_data.d_cols,
                   cluster_cols,
                   dml_data.x_cols, dml_data.z_cols,
                   dml_data.use_other_treat_as_covariate, dml_data.force_all_x_finite)
    @property
    def cluster_cols(self):
        """
        The cluster variable(s).
        """
        return self._cluster_cols
    @cluster_cols.setter
    def cluster_cols(self, value):
        reset_value = hasattr(self, '_cluster_cols')
        if isinstance(value, str):
            value = [value]
        if not isinstance(value, list):
            raise TypeError('The cluster variable(s) cluster_cols must be of str or list type. '
                            f'{str(value)} of type {str(type(value))} was passed.')
        if not len(set(value)) == len(value):
            raise ValueError('Invalid cluster variable(s) cluster_cols: '
                             'Contains duplicate values.')
        if not set(value).issubset(set(self.all_variables)):
            raise ValueError('Invalid cluster variable(s) cluster_cols. '
                             'At least one cluster variable is no data column.')
        self._cluster_cols = value
        if reset_value:
            self._check_disjoint_sets()
            self._set_cluster_vars()
    @property
    def n_cluster_vars(self):
        """
        The number of cluster variables.
        """
        return len(self.cluster_cols)
    @property
    def cluster_vars(self):
        """
        Array of cluster variable(s).
        """
        return self._cluster_vars.values
    @DoubleMLData.x_cols.setter
    def x_cols(self, value):
        if value is not None:
            # this call might become much easier with https://github.com/python/cpython/pull/26194
            super(self.__class__, self.__class__).x_cols.__set__(self, value)
        else:
            # default x_cols: everything except y_col, d_cols, z_cols AND cluster_cols
            if self.z_cols is not None:
                y_d_z = set.union({self.y_col}, set(self.d_cols), set(self.z_cols), set(self.cluster_cols))
                x_cols = [col for col in self.data.columns if col not in y_d_z]
            else:
                y_d = set.union({self.y_col}, set(self.d_cols), set(self.cluster_cols))
                x_cols = [col for col in self.data.columns if col not in y_d]
            # this call might become much easier with https://github.com/python/cpython/pull/26194
            super(self.__class__, self.__class__).x_cols.__set__(self, x_cols)
    def _check_disjoint_sets(self):
        # apply the standard checks from the DoubleMLData class
        super(DoubleMLClusterData, self)._check_disjoint_sets()
        self._check_disjoint_sets_cluster_cols()
    def _check_disjoint_sets_cluster_cols(self):
        # special checks for the additional cluster variables; the standard
        # y/d/x/z checks are not repeated here because every call site
        # (__init__ after the base-class __init__ has validated, and
        # _check_disjoint_sets above) already runs them
        cluster_cols_set = set(self.cluster_cols)
        y_col_set = {self.y_col}
        x_cols_set = set(self.x_cols)
        d_cols_set = set(self.d_cols)
        if not y_col_set.isdisjoint(cluster_cols_set):
            raise ValueError(f'{str(self.y_col)} cannot be set as outcome variable ``y_col`` and cluster '
                             'variable in ``cluster_cols``.')
        if not d_cols_set.isdisjoint(cluster_cols_set):
            raise ValueError('At least one variable/column is set as treatment variable (``d_cols``) and '
                             'cluster variable in ``cluster_cols``.')
        # TODO: Is the following combination allowed, or not?
        if not x_cols_set.isdisjoint(cluster_cols_set):
            raise ValueError('At least one variable/column is set as covariate (``x_cols``) and cluster '
                             'variable in ``cluster_cols``.')
        if self.z_cols is not None:
            z_cols_set = set(self.z_cols)
            if not z_cols_set.isdisjoint(cluster_cols_set):
                raise ValueError('At least one variable/column is set as instrumental variable (``z_cols``) and '
                                 'cluster variable in ``cluster_cols``.')
    def _set_cluster_vars(self):
        # cache the cluster columns; missings/inf in cluster variables are never allowed
        assert_all_finite(self.data.loc[:, self.cluster_cols])
        self._cluster_vars = self.data.loc[:, self.cluster_cols]
| [
"pandas.Series",
"sklearn.utils.validation.check_array",
"numpy.power",
"numpy.column_stack",
"sklearn.utils.validation.check_consistent_length",
"sklearn.utils.multiclass.type_of_target",
"sklearn.utils.validation.column_or_1d",
"pandas.DataFrame",
"io.StringIO",
"sklearn.utils.assert_all_finite"... | [((3828, 3841), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (3839, 3841), False, 'import io\n'), ((6500, 6589), 'sklearn.utils.validation.check_array', 'check_array', (['x'], {'ensure_2d': '(False)', 'allow_nd': '(False)', 'force_all_finite': 'force_all_x_finite'}), '(x, ensure_2d=False, allow_nd=False, force_all_finite=\n force_all_x_finite)\n', (6511, 6589), False, 'from sklearn.utils.validation import check_array, column_or_1d, check_consistent_length\n'), ((6621, 6668), 'sklearn.utils.validation.check_array', 'check_array', (['d'], {'ensure_2d': '(False)', 'allow_nd': '(False)'}), '(d, ensure_2d=False, allow_nd=False)\n', (6632, 6668), False, 'from sklearn.utils.validation import check_array, column_or_1d, check_consistent_length\n'), ((6681, 6707), 'sklearn.utils.validation.column_or_1d', 'column_or_1d', (['y'], {'warn': '(True)'}), '(y, warn=True)\n', (6693, 6707), False, 'from sklearn.utils.validation import check_array, column_or_1d, check_consistent_length\n'), ((16162, 16209), 'sklearn.utils.assert_all_finite', 'assert_all_finite', (['self.data.loc[:, self.y_col]'], {}), '(self.data.loc[:, self.y_col])\n', (16179, 16209), False, 'from sklearn.utils import assert_all_finite\n'), ((17498, 17548), 'sklearn.utils.assert_all_finite', 'assert_all_finite', (['self.data.loc[:, treatment_var]'], {}), '(self.data.loc[:, treatment_var])\n', (17515, 17548), False, 'from sklearn.utils import assert_all_finite\n'), ((17873, 17913), 'pandas.Series', 'pd.Series', ([], {'dtype': 'bool', 'index': 'self.d_cols'}), '(dtype=bool, index=self.d_cols)\n', (17882, 17913), True, 'import pandas as pd\n'), ((24185, 24198), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (24196, 24198), False, 'import io\n'), ((26682, 26740), 'sklearn.utils.validation.check_array', 'check_array', (['cluster_vars'], {'ensure_2d': '(False)', 'allow_nd': '(False)'}), '(cluster_vars, ensure_2d=False, allow_nd=False)\n', (26693, 26740), False, 'from 
sklearn.utils.validation import check_array, column_or_1d, check_consistent_length\n'), ((31274, 31328), 'sklearn.utils.assert_all_finite', 'assert_all_finite', (['self.data.loc[:, self.cluster_cols]'], {}), '(self.data.loc[:, self.cluster_cols])\n', (31291, 31328), False, 'from sklearn.utils import assert_all_finite\n'), ((6828, 6860), 'sklearn.utils.validation.check_consistent_length', 'check_consistent_length', (['x', 'y', 'd'], {}), '(x, y, d)\n', (6851, 6860), False, 'from sklearn.utils.validation import check_array, column_or_1d, check_consistent_length\n'), ((6917, 6964), 'sklearn.utils.validation.check_array', 'check_array', (['z'], {'ensure_2d': '(False)', 'allow_nd': '(False)'}), '(z, ensure_2d=False, allow_nd=False)\n', (6928, 6964), False, 'from sklearn.utils.validation import check_array, column_or_1d, check_consistent_length\n'), ((7013, 7048), 'sklearn.utils.validation.check_consistent_length', 'check_consistent_length', (['x', 'y', 'd', 'z'], {}), '(x, y, d, z)\n', (7036, 7048), False, 'from sklearn.utils.validation import check_array, column_or_1d, check_consistent_length\n'), ((16342, 16390), 'sklearn.utils.assert_all_finite', 'assert_all_finite', (['self.data.loc[:, self.z_cols]'], {}), '(self.data.loc[:, self.z_cols])\n', (16359, 16390), False, 'from sklearn.utils import assert_all_finite\n'), ((17597, 17696), 'sklearn.utils.assert_all_finite', 'assert_all_finite', (['self.data.loc[:, xd_list]'], {'allow_nan': "(self.force_all_x_finite == 'allow-nan')"}), "(self.data.loc[:, xd_list], allow_nan=self.\n force_all_x_finite == 'allow-nan')\n", (17614, 17696), False, 'from sklearn.utils import assert_all_finite\n'), ((7372, 7393), 'numpy.arange', 'np.arange', (['x.shape[1]'], {}), '(x.shape[1])\n', (7381, 7393), True, 'import numpy as np\n'), ((7450, 7476), 'numpy.column_stack', 'np.column_stack', (['(x, y, d)'], {}), '((x, y, d))\n', (7465, 7476), True, 'import numpy as np\n'), ((7591, 7620), 'numpy.column_stack', 'np.column_stack', (['(x, y, d, 
z)'], {}), '((x, y, d, z))\n', (7606, 7620), True, 'import numpy as np\n'), ((18037, 18059), 'sklearn.utils.multiclass.type_of_target', 'type_of_target', (['this_d'], {}), '(this_d)\n', (18051, 18059), False, 'from sklearn.utils.multiclass import type_of_target\n'), ((27011, 27059), 'pandas.DataFrame', 'pd.DataFrame', (['cluster_vars'], {'columns': 'cluster_cols'}), '(cluster_vars, columns=cluster_cols)\n', (27023, 27059), True, 'import pandas as pd\n'), ((7311, 7332), 'numpy.arange', 'np.arange', (['d.shape[1]'], {}), '(d.shape[1])\n', (7320, 7332), True, 'import numpy as np\n'), ((26950, 26982), 'numpy.arange', 'np.arange', (['cluster_vars.shape[1]'], {}), '(cluster_vars.shape[1])\n', (26959, 26982), True, 'import numpy as np\n'), ((7177, 7198), 'numpy.arange', 'np.arange', (['z.shape[1]'], {}), '(z.shape[1])\n', (7186, 7198), True, 'import numpy as np\n'), ((18110, 18129), 'numpy.power', 'np.power', (['this_d', '(2)'], {}), '(this_d, 2)\n', (18118, 18129), True, 'import numpy as np\n')] |
from tlxzoo.datasets import DataLoaders
from tlxzoo.module.wav2vec2 import Wav2Vec2Transform
from tlxzoo.speech.automatic_speech_recognition import AutomaticSpeechRecognition
import tensorlayerx as tlx
import numpy as np
if __name__ == '__main__':
transform = Wav2Vec2Transform(vocab_file="./demo/speech/automatic_speech_recognition/wav2vec/vocab.json")
model = AutomaticSpeechRecognition(backbone="wav2vec")
model.load_weights("./demo/speech/automatic_speech_recognition/wav2vec/model.npz")
import soundfile as sf
file = "./demo/speech/automatic_speech_recognition/wav2vec/1272-128104-0000.flac"
speech, _ = sf.read(file)
input_values, input_ids = transform(speech, "")
mask = np.ones(input_values.shape[0], dtype=np.int32)
input_values = tlx.convert_to_tensor([input_values])
pixel_mask = tlx.convert_to_tensor([mask])
logits = model(inputs=input_values, pixel_mask=pixel_mask)
predicted_ids = tlx.argmax(logits, axis=-1)
transcription = transform.ids_to_string(predicted_ids[0])
print(transcription) | [
"tlxzoo.module.wav2vec2.Wav2Vec2Transform",
"tensorlayerx.argmax",
"numpy.ones",
"tlxzoo.speech.automatic_speech_recognition.AutomaticSpeechRecognition",
"tensorlayerx.convert_to_tensor",
"soundfile.read"
] | [((266, 364), 'tlxzoo.module.wav2vec2.Wav2Vec2Transform', 'Wav2Vec2Transform', ([], {'vocab_file': '"""./demo/speech/automatic_speech_recognition/wav2vec/vocab.json"""'}), "(vocab_file=\n './demo/speech/automatic_speech_recognition/wav2vec/vocab.json')\n", (283, 364), False, 'from tlxzoo.module.wav2vec2 import Wav2Vec2Transform\n'), ((373, 419), 'tlxzoo.speech.automatic_speech_recognition.AutomaticSpeechRecognition', 'AutomaticSpeechRecognition', ([], {'backbone': '"""wav2vec"""'}), "(backbone='wav2vec')\n", (399, 419), False, 'from tlxzoo.speech.automatic_speech_recognition import AutomaticSpeechRecognition\n'), ((638, 651), 'soundfile.read', 'sf.read', (['file'], {}), '(file)\n', (645, 651), True, 'import soundfile as sf\n'), ((716, 762), 'numpy.ones', 'np.ones', (['input_values.shape[0]'], {'dtype': 'np.int32'}), '(input_values.shape[0], dtype=np.int32)\n', (723, 762), True, 'import numpy as np\n'), ((783, 820), 'tensorlayerx.convert_to_tensor', 'tlx.convert_to_tensor', (['[input_values]'], {}), '([input_values])\n', (804, 820), True, 'import tensorlayerx as tlx\n'), ((838, 867), 'tensorlayerx.convert_to_tensor', 'tlx.convert_to_tensor', (['[mask]'], {}), '([mask])\n', (859, 867), True, 'import tensorlayerx as tlx\n'), ((952, 979), 'tensorlayerx.argmax', 'tlx.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (962, 979), True, 'import tensorlayerx as tlx\n')] |
import io
import json
import os.path
from typing import Any, BinaryIO, Callable, Dict, List, Optional
import numpy as np
import pandas as pd
import zstandard
from databento.common.data import BIN_COLUMNS, BIN_RECORD_MAP, DERIV_SCHEMAS
from databento.common.enums import Compression, Encoding, Schema
class Bento:
"""The abstract base class for all Bento I/O classes."""
def __init__(
self,
schema: Optional[Schema],
encoding: Optional[Encoding],
compression: Optional[Compression],
):
# Set compression
self._compression = compression or self._infer_compression()
# Set encoding
self._encoding = encoding or self._infer_encoding()
# Set schema
self._schema = schema or self._infer_schema()
self._struct_fmt = np.dtype(BIN_RECORD_MAP[self._schema])
self._struct_size = self._struct_fmt.itemsize
@property
def schema(self) -> Schema:
"""
Return the output schema.
Returns
-------
Schema
"""
return self._schema
@property
def encoding(self) -> Encoding:
"""
Return the output encoding.
Returns
-------
Encoding
"""
return self._encoding
@property
def compression(self) -> Compression:
"""
Return the output compression.
Returns
-------
Compression
"""
return self._compression
@property
def struct_fmt(self) -> np.dtype:
"""
Return the binary struct format for the schema.
Returns
-------
np.dtype
"""
return self._struct_fmt
@property
def struct_size(self) -> int:
"""
Return the schemas binary struct size in bytes.
Returns
-------
int
"""
return self._struct_size
@property
def nbytes(self) -> int:
raise NotImplementedError() # pragma: no cover
@property
def raw(self) -> bytes:
"""
Return the raw data from the I/O stream.
Returns
-------
bytes
"""
raise NotImplementedError() # pragma: no cover
def reader(self, decompress: bool = False) -> BinaryIO:
"""
Return an I/O reader for the data.
Parameters
----------
decompress : bool
If data should be decompressed (if compressed).
Returns
-------
BinaryIO
"""
raise NotImplementedError() # pragma: no cover
def writer(self) -> BinaryIO:
"""
Return a raw I/O writer for the data.
Returns
-------
BinaryIO
"""
raise NotImplementedError() # pragma: no cover
def to_file(self, path: str) -> "FileBento":
"""
Write the data to a file at the given path.
Parameters
----------
path : str
The path to write to.
Returns
-------
FileBento
"""
with open(path, mode="wb") as f:
f.write(self.raw)
return FileBento(
path=path,
schema=self._schema,
encoding=self._encoding,
compression=self._compression,
)
def to_list(self) -> List[Any]:
"""
Return the data as a list records.
- BIN encoding will return a list of `np.void` mixed dtypes.
- CSV encoding will return a list of `str`.
- JSON encoding will return a list of `Dict[str, Any]`.
Returns
-------
List[Any]
"""
if self._encoding == Encoding.BIN:
return self._prepare_list_bin()
elif self._encoding == Encoding.CSV:
return self._prepare_list_csv()
elif self._encoding == Encoding.JSON:
return self._prepare_list_json()
else: # pragma: no cover (design-time error)
raise ValueError(f"invalid encoding, was {self._encoding.value}")
def to_df(self, pretty_ts: bool = False, pretty_px: bool = False) -> pd.DataFrame:
"""
Return the data as a pd.DataFrame.
Parameters
----------
pretty_ts : bool, default False
If the type of any timestamp columns should be converted from UNIX
nanosecond `int` to `pd.Timestamp` (UTC).
pretty_px : bool, default False
If the type of any price columns should be converted from `int` to
`float` at the correct scale (using the fixed precision scalar 1e-9).
Returns
-------
pd.DataFrame
"""
if self._encoding == Encoding.BIN:
df: pd.DataFrame = self._prepare_df_bin()
elif self._encoding == Encoding.CSV:
df = self._prepare_df_csv()
elif self._encoding == Encoding.JSON:
df = self._prepare_df_json()
else: # pragma: no cover (design-time error)
raise ValueError(f"invalid encoding, was {self._encoding.value}")
if pretty_ts:
df.index = pd.to_datetime(df.index, utc=True)
for column in list(df.columns):
if column.startswith("ts_"):
df[column] = pd.to_datetime(df[column], utc=True)
if pretty_px:
for column in list(df.columns):
if (
column in ("price", "open", "high", "low", "close")
or column.startswith("bid_px") # MBP
or column.startswith("ask_px") # MBP
):
df[column] = df[column] * 1e-9
return df
def replay(self, callback: Callable[[Any], None]) -> None:
"""
Pass all data records sequentially to the given callback.
Parameters
----------
callback : callable
The callback to the data handler.
"""
if self._encoding == Encoding.BIN:
self._replay_bin(callback)
elif self._encoding in (Encoding.CSV, Encoding.JSON):
self._replay_csv_or_json(callback)
else: # pragma: no cover (design-time error)
raise ValueError(f"invalid encoding, was {self._encoding.value}")
def _should_decompress(self, decompress: bool) -> bool:
if not decompress:
return False
return self._compression == Compression.ZSTD
def _infer_compression(self) -> Optional[Compression]:
# Infer by checking for zstd header
reader: BinaryIO = self.reader()
header: bytes = reader.read(4)
if header is None:
return None
elif header == b"(\xb5/\xfd":
return Compression.ZSTD
else:
return Compression.NONE
def _infer_encoding(self) -> Optional[Encoding]:
# Infer by checking pattern of initial bytes
reader: BinaryIO = self.reader(decompress=True)
initial: bytes = reader.read(3)
if initial is None:
return None
if initial == b"ts_":
return Encoding.CSV
elif initial == b'{"t':
return Encoding.JSON
else:
return Encoding.BIN
def _infer_schema(self) -> Optional[Schema]:
if hasattr(self, "path"):
path = self.path # type: ignore # (checked above)
else: # pragma: no cover (design-time error)
raise RuntimeError("cannot infer schema without a path to read from")
# Firstly, attempt to infer from file path
extensions: List[str] = path.split(".")
# Iterate schemas in reverse order as MBP-10 needs to be checked prior to MBP-1
for schema in reversed([x for x in Schema]):
if schema.value in extensions:
return schema
raise RuntimeError(
f"unable to infer schema from `path` '{path}' "
f"(add the schema value as an extension e.g. 'my_data.mbo', "
f"or specify the schema explicitly)",
)
def _get_index_column(self) -> str:
return (
"ts_recv"
if self._schema
not in (
Schema.OHLCV_1S,
Schema.OHLCV_1M,
Schema.OHLCV_1H,
Schema.OHLCV_1D,
)
else "ts_event"
)
def _prepare_list_bin(self) -> List[np.void]:
data: bytes = self.reader(decompress=True).read()
return np.frombuffer(data, dtype=BIN_RECORD_MAP[self._schema])
def _prepare_list_csv(self) -> List[str]:
data: bytes = self.reader(decompress=True).read()
return data.decode().splitlines(keepends=False)
def _prepare_list_json(self) -> List[Dict]:
lines: List[str] = self._prepare_list_csv()
return list(map(json.loads, lines))
def _prepare_df_bin(self) -> pd.DataFrame:
df = pd.DataFrame(self.to_list())
df.set_index(self._get_index_column(), inplace=True)
# Cleanup dataframe
if self._schema == Schema.MBO:
df.drop("chan_id", axis=1, inplace=True)
df = df.reindex(columns=BIN_COLUMNS[self._schema])
df["flags"] = df["flags"] & 0xFF # Apply bitmask
df["side"] = df["side"].str.decode("utf-8")
df["action"] = df["action"].str.decode("utf-8")
elif self._schema in DERIV_SCHEMAS:
df.drop(["nwords", "type", "depth"], axis=1, inplace=True)
df = df.reindex(columns=BIN_COLUMNS[self._schema])
df["flags"] = df["flags"] & 0xFF # Apply bitmask
df["side"] = df["side"].str.decode("utf-8")
df["action"] = df["action"].str.decode("utf-8")
else:
df.drop(["nwords", "type"], axis=1, inplace=True)
return df
def _prepare_df_csv(self) -> pd.DataFrame:
data: bytes = self.reader(decompress=True).read()
df = pd.read_csv(io.BytesIO(data), index_col=self._get_index_column())
return df
def _prepare_df_json(self) -> pd.DataFrame:
jsons: List[Dict] = self.to_list()
df = pd.DataFrame.from_dict(jsons, orient="columns")
df.set_index(self._get_index_column(), inplace=True)
return df
def _replay_bin(self, callback: Callable[[Any], None]) -> None:
dtype = BIN_RECORD_MAP[self._schema]
reader: BinaryIO = self.reader(decompress=True)
while True:
raw: bytes = reader.read(self.struct_size)
record = np.frombuffer(raw, dtype=dtype)
if record.size == 0:
break
callback(record[0])
def _replay_csv_or_json(self, callback: Callable[[Any], None]) -> None:
if self._compression == Compression.NONE:
reader: BinaryIO = self.reader(decompress=True)
while True:
record: bytes = reader.readline()
if not record:
break
callback(record.decode().rstrip("\n"))
else:
for record in self.to_list():
callback(record)
class MemoryBento(Bento):
"""
Provides a data container backed by in-memory buffer streaming I/O.
Parameters
----------
schema : Schema
The data record schema.
encoding : Encoding, optional
The data encoding. If ``None`` then will be inferred.
compression : Compression, optional
The data compression mode. If ``None`` then will be inferred.
initial_bytes : bytes, optional
The initial data for the memory buffer.
"""
def __init__(
self,
schema: Schema,
encoding: Optional[Encoding] = None,
compression: Optional[Compression] = None,
initial_bytes: Optional[bytes] = None,
):
self._raw = io.BytesIO(initial_bytes=initial_bytes)
super().__init__(
schema=schema,
encoding=encoding,
compression=compression,
)
@property
def nbytes(self) -> int:
"""
Return the amount of space in bytes that the data array would use in a
contiguous representation.
Returns
-------
int
"""
return self._raw.getbuffer().nbytes
@property
def raw(self) -> bytes:
return self._raw.getvalue()
def reader(self, decompress: bool = False) -> BinaryIO:
self._raw.seek(0) # Ensure reader at start of stream
if self._should_decompress(decompress):
return zstandard.ZstdDecompressor().stream_reader(self._raw.getbuffer())
else:
return self._raw
def writer(self) -> BinaryIO:
return self._raw
class FileBento(Bento):
"""
Provides a data container backed by file streaming I/O.
Parameters
----------
path : str
The path to the data file.
schema : Schema, optional
The data record schema. If ``None`` then will be inferred.
encoding : Encoding, optional
The data encoding. If ``None`` then will be inferred.
compression : Compression, optional
The data compression mode. If ``None`` then will be inferred.
"""
def __init__(
self,
path: str,
schema: Optional[Schema] = None,
encoding: Optional[Encoding] = None,
compression: Optional[Compression] = None,
):
self._path = path
super().__init__(
schema=schema,
encoding=encoding,
compression=compression,
)
@property
def path(self) -> str:
"""
Return the path to the backing data file.
Returns
-------
str
"""
return self._path
@property
def nbytes(self) -> int:
"""
Return the amount of space occupied by the data.
Returns
-------
int
"""
return os.path.getsize(self._path)
@property
def raw(self) -> bytes:
return self.reader().read()
def reader(self, decompress: bool = False) -> BinaryIO:
f = open(self._path, mode="rb")
if self._should_decompress(decompress):
return zstandard.ZstdDecompressor().stream_reader(f)
else:
return f
def writer(self) -> BinaryIO:
return open(self._path, mode="wb")
| [
"io.BytesIO",
"pandas.DataFrame.from_dict",
"numpy.frombuffer",
"numpy.dtype",
"zstandard.ZstdDecompressor",
"pandas.to_datetime"
] | [((815, 853), 'numpy.dtype', 'np.dtype', (['BIN_RECORD_MAP[self._schema]'], {}), '(BIN_RECORD_MAP[self._schema])\n', (823, 853), True, 'import numpy as np\n'), ((8491, 8546), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'BIN_RECORD_MAP[self._schema]'}), '(data, dtype=BIN_RECORD_MAP[self._schema])\n', (8504, 8546), True, 'import numpy as np\n'), ((10124, 10171), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['jsons'], {'orient': '"""columns"""'}), "(jsons, orient='columns')\n", (10146, 10171), True, 'import pandas as pd\n'), ((11814, 11853), 'io.BytesIO', 'io.BytesIO', ([], {'initial_bytes': 'initial_bytes'}), '(initial_bytes=initial_bytes)\n', (11824, 11853), False, 'import io\n'), ((5130, 5164), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {'utc': '(True)'}), '(df.index, utc=True)\n', (5144, 5164), True, 'import pandas as pd\n'), ((9947, 9963), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (9957, 9963), False, 'import io\n'), ((10517, 10548), 'numpy.frombuffer', 'np.frombuffer', (['raw'], {'dtype': 'dtype'}), '(raw, dtype=dtype)\n', (10530, 10548), True, 'import numpy as np\n'), ((5287, 5323), 'pandas.to_datetime', 'pd.to_datetime', (['df[column]'], {'utc': '(True)'}), '(df[column], utc=True)\n', (5301, 5323), True, 'import pandas as pd\n'), ((12526, 12554), 'zstandard.ZstdDecompressor', 'zstandard.ZstdDecompressor', ([], {}), '()\n', (12552, 12554), False, 'import zstandard\n'), ((14183, 14211), 'zstandard.ZstdDecompressor', 'zstandard.ZstdDecompressor', ([], {}), '()\n', (14209, 14211), False, 'import zstandard\n')] |
import torch
from collections import OrderedDict
from torch.nn import utils, functional as F
from torch.optim import Adam, SGD
from torch.autograd import Variable
from torch.backends import cudnn
from model import build_model, weights_init
import scipy.misc as sm
import numpy as np
import os
from utils_bicon import *
import torchvision.utils as vutils
import cv2
from torch.optim import lr_scheduler
import torch.nn.functional as F
import math
from connect_loss import bicon_loss
import time
import sys
import PIL.Image
import scipy.io
import os
import logging
from apex import amp
EPSILON = 1e-8
p = OrderedDict()
from dataset import get_loader
base_model_cfg = 'resnet'
p['lr_bone'] = 2e-5 # Learning rate resnet:5e-5, vgg:2e-5
p['lr_branch'] = 0.025 # Learning rate
p['wd'] = 0.0005 # Weight decay
p['momentum'] = 0.90 # Momentum
lr_decay_epoch = [10,25] # [6, 9], now x3 #15
nAveGrad = 10 # Update the weights once in 'nAveGrad' forward passes
showEvery = 50
tmp_path = 'tmp_see'
save = 'save'
if not os.path.exists(save):
os.makedirs(save)
class Solver(object):
def __init__(self, train_loader, test_loader, config, save_fold=None):
self.train_loader = train_loader
self.test_loader = test_loader
self.config = config
self.loss = bicon_loss()
self.save_fold = save_fold
self.mean = torch.Tensor([123.68, 116.779, 103.939]).view(3, 1, 1) / 255.
# inference: choose the side map (see paper)
if config.visdom:
self.visual = Viz_visdom("trueUnify", 1)
self.build_model()
if self.config.pre_trained: self.net.load_state_dict(torch.load(self.config.pre_trained))
if config.mode == 'train':
self.log_output = open("%s/logs/log.txt" % config.save_fold, 'w')
else:
print('Loading pre-trained model from %s...' % self.config.model)
self.net_bone.load_state_dict(torch.load(self.config.model))
self.net_bone.eval()
def print_network(self, model, name):
num_params = 0
for p in model.parameters():
num_params += p.numel()
print(name)
print(model)
print("The number of parameters: {}".format(num_params))
def get_params(self, base_lr):
ml = []
for name, module in self.net_bone.named_children():
print(name)
if name == 'loss_weight':
ml.append({'params': module.parameters(), 'lr': p['lr_branch']})
else:
ml.append({'params': module.parameters()})
return ml
# build the network
def build_model(self):
self.net_bone = build_model(base_model_cfg)
if self.config.cuda:
self.net_bone = self.net_bone.cuda()
self.net_bone.eval() # use_global_stats = True
self.net_bone.apply(weights_init)
if self.config.mode == 'train':
if self.config.load_bone == '':
if base_model_cfg == 'vgg':
self.net_bone.base.load_pretrained_model(torch.load(self.config.vgg))
elif base_model_cfg == 'resnet':
self.net_bone.base.load_state_dict(torch.load(self.config.resnet))
if self.config.load_bone != '': self.net_bone.load_state_dict(torch.load(self.config.load_bone))
self.lr_bone = p['lr_bone']
self.lr_branch = p['lr_branch']
self.optimizer_bone = Adam(filter(lambda p: p.requires_grad, self.net_bone.parameters()), lr=self.lr_bone, weight_decay=p['wd'])
self.print_network(self.net_bone, 'trueUnify bone part')
# update the learning rate
def update_lr(self, rate):
for param_group in self.optimizer.param_groups:
param_group['lr'] = param_group['lr'] * rate
def Eval_mae(self,pred,gt):
avg_mae, img_num = 0.0, 0.0
#mae_list = [] # for debug
with torch.no_grad():
mea = torch.abs(pred - gt).mean()
return mea.item()
def test(self,epoch, test_mode=0):
mae_ls_binary = []
fmax_ls_binary = []
for batch_id, data_batch in enumerate(self.test_loader):
images_,y_test, name, im_size = data_batch['image'],data_batch['label'], data_batch['name'][0], np.asarray(data_batch['size'])
# print(i)
with torch.no_grad():
images = Variable(images_)
if self.config.cuda:
images = images.cuda()
y_test = y_test.cuda()
# print(images.size())
time_start = time.time()
_, _, up_sal_f = self.net_bone(images)
output_test = up_sal_f[-1].cuda()
torch.cuda.synchronize()
time_end = time.time()
pred=bv_test(output_test)
# print(pred.shape, y_test.shape)
mae_bi = self.Eval_mae(pred[0][0],y_test[0][0])
mae_ls_binary.append(mae_bi)
if batch_id%100 == 0:
print('test [Iteration : ' + str(batch_id) + '/' + str(len(self.test_loader)) + ']' ' ave mae f:%.3f' %(
np.mean(mae_ls_binary)))
with open('save/results.csv', 'a') as f:
f.write('%03d,%0.5f \n' % (
(epoch + 1),
np.mean(mae_ls_binary)))
return(np.mean(mae_ls_binary))
# training phase
def train(self):
scheduler = lr_scheduler.MultiStepLR(self.optimizer_bone, milestones=[15,25],
gamma=0.1)
iter_num = len(self.train_loader.dataset) // self.config.batch_size
aveGrad = 0
F_v = 0
best_mae = 1
csv = 'results.csv'
with open(os.path.join('save', csv), 'w') as f:
f.write('epoch, mae_binary\n')
self.net_bone, self.optimizer_bone = amp.initialize(self.net_bone, self.optimizer_bone, opt_level='O2')
if not os.path.exists(tmp_path):
os.mkdir(tmp_path)
self.test(0)
for epoch in range(self.config.epoch):
r_edge_loss, r_sal_loss, r_sum_loss= 0,0,0
self.net_bone.zero_grad()
for i, data_batch in enumerate(self.train_loader):
sal_image, sal_label, sal_edge, conn = data_batch['sal_image'], data_batch['sal_label'], data_batch['sal_edge'], data_batch['sal_conn']
if sal_image.size()[2:] != sal_label.size()[2:]:
print("Skip this batch")
continue
sal_image, sal_label, sal_edge, conn = Variable(sal_image), Variable(sal_label), Variable(sal_edge), Variable(conn)
if self.config.cuda:
sal_image, sal_label, sal_edge, conn = sal_image.cuda(), sal_label.cuda(), sal_edge.cuda(), conn.cuda()
up_edge, up_sal, up_sal_f = self.net_bone(sal_image)
loss_iter, r_edge_loss, r_sal_loss, r_sum_loss = self.loss(up_edge, up_sal, up_sal_f, sal_label,sal_edge, conn,None,True, False)
loss = loss_iter / (nAveGrad * self.config.batch_size)
with amp.scale_loss(loss, self.optimizer_bone) as scale_loss:
scale_loss.backward()
aveGrad += 1
if aveGrad % nAveGrad == 0:
self.optimizer_bone.step()
self.optimizer_bone.zero_grad()
aveGrad = 0
if i % showEvery == 0:
print('epoch: [%2d/%2d], iter: [%5d/%5d] || Edge : %10.4f || Sal : %10.4f || Sum : %10.4f' % (
epoch, self.config.epoch, i, iter_num, r_edge_loss*(nAveGrad * self.config.batch_size)/showEvery,
r_sal_loss*(nAveGrad * self.config.batch_size)/showEvery,
r_sum_loss*(nAveGrad * self.config.batch_size)/showEvery))
print('Learning rate: ' + str(self.lr_bone))
r_edge_loss, r_sal_loss, r_sum_loss= 0,0,0
current_mae = self.test(epoch)
if current_mae < best_mae:
best_mae = current_mae
torch.save(self.net_bone.state_dict(), '%s/models/best_moel.pth' % (self.config.save_fold))
if (epoch + 1) % self.config.epoch_save == 0:
torch.save(self.net_bone.state_dict(), '%s/models/epoch_%d_bone.pth' % (self.config.save_fold, epoch + 1))
scheduler.step()
torch.save(self.net_bone.state_dict(), '%s/models/final_bone.pth' % self.config.save_fold)
| [
"apex.amp.scale_loss",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.cuda.synchronize",
"apex.amp.initialize",
"os.path.exists",
"numpy.mean",
"numpy.asarray",
"os.mkdir",
"torch.autograd.Variable",
"collections.OrderedDict",
"torch.abs",
"torch.Tensor",
"model.build_model",
"time.time",
... | [((603, 616), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (614, 616), False, 'from collections import OrderedDict\n'), ((1014, 1034), 'os.path.exists', 'os.path.exists', (['save'], {}), '(save)\n', (1028, 1034), False, 'import os\n'), ((1040, 1057), 'os.makedirs', 'os.makedirs', (['save'], {}), '(save)\n', (1051, 1057), False, 'import os\n'), ((1285, 1297), 'connect_loss.bicon_loss', 'bicon_loss', ([], {}), '()\n', (1295, 1297), False, 'from connect_loss import bicon_loss\n'), ((2665, 2692), 'model.build_model', 'build_model', (['base_model_cfg'], {}), '(base_model_cfg)\n', (2676, 2692), False, 'from model import build_model, weights_init\n'), ((5433, 5455), 'numpy.mean', 'np.mean', (['mae_ls_binary'], {}), '(mae_ls_binary)\n', (5440, 5455), True, 'import numpy as np\n'), ((5525, 5602), 'torch.optim.lr_scheduler.MultiStepLR', 'lr_scheduler.MultiStepLR', (['self.optimizer_bone'], {'milestones': '[15, 25]', 'gamma': '(0.1)'}), '(self.optimizer_bone, milestones=[15, 25], gamma=0.1)\n', (5549, 5602), False, 'from torch.optim import lr_scheduler\n'), ((5949, 6015), 'apex.amp.initialize', 'amp.initialize', (['self.net_bone', 'self.optimizer_bone'], {'opt_level': '"""O2"""'}), "(self.net_bone, self.optimizer_bone, opt_level='O2')\n", (5963, 6015), False, 'from apex import amp\n'), ((3918, 3933), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3931, 3933), False, 'import torch\n'), ((6031, 6055), 'os.path.exists', 'os.path.exists', (['tmp_path'], {}), '(tmp_path)\n', (6045, 6055), False, 'import os\n'), ((6070, 6088), 'os.mkdir', 'os.mkdir', (['tmp_path'], {}), '(tmp_path)\n', (6078, 6088), False, 'import os\n'), ((1636, 1671), 'torch.load', 'torch.load', (['self.config.pre_trained'], {}), '(self.config.pre_trained)\n', (1646, 1671), False, 'import torch\n'), ((1920, 1949), 'torch.load', 'torch.load', (['self.config.model'], {}), '(self.config.model)\n', (1930, 1949), False, 'import torch\n'), ((4288, 4318), 'numpy.asarray', 'np.asarray', 
(["data_batch['size']"], {}), "(data_batch['size'])\n", (4298, 4318), True, 'import numpy as np\n'), ((4359, 4374), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4372, 4374), False, 'import torch\n'), ((4418, 4435), 'torch.autograd.Variable', 'Variable', (['images_'], {}), '(images_)\n', (4426, 4435), False, 'from torch.autograd import Variable\n'), ((4627, 4638), 'time.time', 'time.time', ([], {}), '()\n', (4636, 4638), False, 'import time\n'), ((4760, 4784), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (4782, 4784), False, 'import torch\n'), ((4812, 4823), 'time.time', 'time.time', ([], {}), '()\n', (4821, 4823), False, 'import time\n'), ((5822, 5847), 'os.path.join', 'os.path.join', (['"""save"""', 'csv'], {}), "('save', csv)\n", (5834, 5847), False, 'import os\n'), ((1353, 1393), 'torch.Tensor', 'torch.Tensor', (['[123.68, 116.779, 103.939]'], {}), '([123.68, 116.779, 103.939])\n', (1365, 1393), False, 'import torch\n'), ((3310, 3343), 'torch.load', 'torch.load', (['self.config.load_bone'], {}), '(self.config.load_bone)\n', (3320, 3343), False, 'import torch\n'), ((3953, 3973), 'torch.abs', 'torch.abs', (['(pred - gt)'], {}), '(pred - gt)\n', (3962, 3973), False, 'import torch\n'), ((6688, 6707), 'torch.autograd.Variable', 'Variable', (['sal_image'], {}), '(sal_image)\n', (6696, 6707), False, 'from torch.autograd import Variable\n'), ((6709, 6728), 'torch.autograd.Variable', 'Variable', (['sal_label'], {}), '(sal_label)\n', (6717, 6728), False, 'from torch.autograd import Variable\n'), ((6730, 6748), 'torch.autograd.Variable', 'Variable', (['sal_edge'], {}), '(sal_edge)\n', (6738, 6748), False, 'from torch.autograd import Variable\n'), ((6750, 6764), 'torch.autograd.Variable', 'Variable', (['conn'], {}), '(conn)\n', (6758, 6764), False, 'from torch.autograd import Variable\n'), ((7236, 7277), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'self.optimizer_bone'], {}), '(loss, self.optimizer_bone)\n', (7250, 7277), False, 'from 
apex import amp\n'), ((3071, 3098), 'torch.load', 'torch.load', (['self.config.vgg'], {}), '(self.config.vgg)\n', (3081, 3098), False, 'import torch\n'), ((5392, 5414), 'numpy.mean', 'np.mean', (['mae_ls_binary'], {}), '(mae_ls_binary)\n', (5399, 5414), True, 'import numpy as np\n'), ((3204, 3234), 'torch.load', 'torch.load', (['self.config.resnet'], {}), '(self.config.resnet)\n', (3214, 3234), False, 'import torch\n'), ((5232, 5254), 'numpy.mean', 'np.mean', (['mae_ls_binary'], {}), '(mae_ls_binary)\n', (5239, 5254), True, 'import numpy as np\n')] |
"""A bunch of helper functions to generate a dictionary for evaluation metrics."""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, roc_auc_score, average_precision_score
import numpy as np
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2
}
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2
}
def auc(preds, labels):
roc_auc = roc_auc_score(y_true=labels, y_score=preds)
pr_auc = average_precision_score(y_true=labels, y_score=preds)
return {
"roc_auc": roc_auc,
"pr_auc": pr_auc
}
def metrics(preds, labels):
results = acc_and_f1(np.argmax(preds, axis=1), labels)
results.update(auc(preds[:, 1], labels))
return results
| [
"sklearn.metrics.f1_score",
"sklearn.metrics.average_precision_score",
"numpy.argmax",
"sklearn.metrics.roc_auc_score",
"scipy.stats.pearsonr",
"scipy.stats.spearmanr"
] | [((381, 418), 'sklearn.metrics.f1_score', 'f1_score', ([], {'y_true': 'labels', 'y_pred': 'preds'}), '(y_true=labels, y_pred=preds)\n', (389, 418), False, 'from sklearn.metrics import f1_score, roc_auc_score, average_precision_score\n'), ((830, 873), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', ([], {'y_true': 'labels', 'y_score': 'preds'}), '(y_true=labels, y_score=preds)\n', (843, 873), False, 'from sklearn.metrics import f1_score, roc_auc_score, average_precision_score\n'), ((887, 940), 'sklearn.metrics.average_precision_score', 'average_precision_score', ([], {'y_true': 'labels', 'y_score': 'preds'}), '(y_true=labels, y_score=preds)\n', (910, 940), False, 'from sklearn.metrics import f1_score, roc_auc_score, average_precision_score\n'), ((575, 598), 'scipy.stats.pearsonr', 'pearsonr', (['preds', 'labels'], {}), '(preds, labels)\n', (583, 598), False, 'from scipy.stats import pearsonr, spearmanr\n'), ((622, 646), 'scipy.stats.spearmanr', 'spearmanr', (['preds', 'labels'], {}), '(preds, labels)\n', (631, 646), False, 'from scipy.stats import pearsonr, spearmanr\n'), ((1068, 1092), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (1077, 1092), True, 'import numpy as np\n')] |
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import pickle
def poly(x, c2, c3):
return c2*x**2 + c3*x**3
df = pd.read_excel('curved_data.xlsx', sheet_name = 'FEA (3D, c3=0.25, F=-10)')
skip_lines = 0 # -1 of what you think it should be
plt.figure()
for i in range(1): #range(int(len(df.columns)/2)):
ii = 2*i
x = np.array(df.values[skip_lines:, ii])
y = np.array(df.values[skip_lines:, ii+1])
# Remove NaN
x = np.array(list(x[~np.isnan(list(x))]))
y = np.array(list(y[~np.isnan(list(y))]))
# Fit
popt, pcov = curve_fit(poly, x, y)
print(popt)
plt.plot(x, y, 'b', label = 'Raw %i' % i)
plt.plot(x, poly(x, *popt), 'r--', label = 'Fit %i' % i)
plt.show()
| [
"scipy.optimize.curve_fit",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.figure",
"pandas.read_excel",
"matplotlib.pyplot.show"
] | [((179, 251), 'pandas.read_excel', 'pd.read_excel', (['"""curved_data.xlsx"""'], {'sheet_name': '"""FEA (3D, c3=0.25, F=-10)"""'}), "('curved_data.xlsx', sheet_name='FEA (3D, c3=0.25, F=-10)')\n", (192, 251), True, 'import pandas as pd\n'), ((307, 319), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (317, 319), True, 'import matplotlib.pyplot as plt\n'), ((758, 768), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (766, 768), True, 'import matplotlib.pyplot as plt\n'), ((392, 428), 'numpy.array', 'np.array', (['df.values[skip_lines:, ii]'], {}), '(df.values[skip_lines:, ii])\n', (400, 428), True, 'import numpy as np\n'), ((437, 477), 'numpy.array', 'np.array', (['df.values[skip_lines:, ii + 1]'], {}), '(df.values[skip_lines:, ii + 1])\n', (445, 477), True, 'import numpy as np\n'), ((613, 634), 'scipy.optimize.curve_fit', 'curve_fit', (['poly', 'x', 'y'], {}), '(poly, x, y)\n', (622, 634), False, 'from scipy.optimize import curve_fit\n'), ((655, 694), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""b"""'], {'label': "('Raw %i' % i)"}), "(x, y, 'b', label='Raw %i' % i)\n", (663, 694), True, 'import matplotlib.pyplot as plt\n')] |
import gym
from gym import spaces
import numpy as np
import os
from collections import defaultdict
import matplotlib; matplotlib.use('Agg')
import matplotlib.pyplot as plt
class DiscretizedObservationWapper(gym.ObservationWrapper):
    """Discretize a continuous Box observation space into one integer.

    Each observation dimension is split into ``n_bins`` intervals; the
    per-dimension bin indices are packed into a single number so the
    wrapped space can be exposed as ``spaces.Discrete``.
    """

    def __init__(self, env, n_bins=10, low=None, high=None):
        super().__init__(env)
        assert isinstance(env.observation_space, spaces.Box)
        if low is None:
            low = self.observation_space.low
        if high is None:
            high = self.observation_space.high
        self.n_bins = n_bins
        # One array of n_bins+1 bin edges per observation dimension.
        self.val_bins = [np.linspace(lo, hi, n_bins + 1) for lo, hi in zip(low, high)]
        self.observation_space = spaces.Discrete(n_bins ** len(low))

    def _covnert_to_one_number(self, digits):
        # Interpret the digit list as a base-(n_bins+1) number.
        total = 0
        for position, digit in enumerate(digits):
            total += digit * (self.n_bins + 1) ** position
        return total

    def observation(self, observation):
        # Map every raw component onto its bin index, then pack them.
        digits = []
        for value, edges in zip(observation.flatten(), self.val_bins):
            digits.append(np.digitize([value], edges)[0])
        return self._covnert_to_one_number(digits)
class Q_LearningPolicy:
    """Tabular Q-learning agent with an epsilon-greedy behaviour policy."""

    def __init__(self, env, alpha, alpha_decay, gamma, eps, eps_final, Q=None):
        assert isinstance(env.action_space, spaces.Discrete)
        assert isinstance(env.observation_space, spaces.Discrete)
        self.env = env
        self.Q = Q
        self.alpha = alpha
        self.alpha_decay = alpha_decay
        self.gamma = gamma
        self.eps = eps
        self.eps_final = eps_final
        self.actions = range(self.env.action_space.n)

    def act(self, obs):
        """Choose the next action for ``obs`` with the eps-greedy rule."""
        if np.random.random() < self.eps:
            return self.env.action_space.sample()
        action_values = {a: self.Q[obs, a] for a in self.actions}
        best = max(action_values.values())
        # Break ties between equally valued actions at random.
        candidates = [a for a, v in action_values.items() if v == best]
        return np.random.choice(candidates)

    def update_Q(self, s, a, r, s_next, done):
        """One-step Q-learning update for the transition (s, a, r, s_next)."""
        best_next = max(self.Q[s_next, b] for b in self.actions)
        self.Q[s, a] += self.alpha * (r + self.gamma * best_next * (1.0 - done)
                                      - self.Q[s, a])

    def train(self, num_epsiode, warm_epsiode):
        """Run ``num_epsiode`` episodes, annealing eps over ``warm_epsiode``.

        Returns a dict with per-episode rewards ('rwd') and the running
        mean over the last 50 episodes ('rwd_avg').
        """
        eps_step = (self.eps - self.eps_final) / warm_epsiode
        rewards = []
        rewards_avg = []
        for _ in range(num_epsiode):
            obs = self.env.reset()
            done = False
            steps = 0
            total = 0
            while not done:
                action = self.act(obs)
                obs_next, r, done, info = self.env.step(action)
                total += r
                self.update_Q(obs, action, r, obs_next, done)
                obs = obs_next
                steps += 1
            print("Episode terminates at {} steps!".format(steps))
            # Decay the learning rate and anneal exploration.
            self.alpha *= self.alpha_decay
            if self.eps > self.eps_final:
                self.eps -= eps_step
            rewards.append(total)
            rewards_avg.append(np.average(rewards[-50:]))
        return {'rwd': rewards, 'rwd_avg': rewards_avg}
# plot curve
def plot_learning_curve(filename, value_dict, xlabel='step'):
    """Save a stacked line plot, one panel per series, to <repo>/figs/filename."""
    n_series = len(value_dict)
    fig = plt.figure(figsize=(12, 4 * n_series))
    for row, (label, series) in enumerate(value_dict.items(), start=1):
        panel = fig.add_subplot(n_series, 1, row)
        panel.plot(range(len(series)), series)
        panel.set_xlabel(xlabel)
        panel.set_ylabel(label)
        panel.grid('k--', alpha=0.6)
    # Resolve the repository root two levels above this file.
    here = os.path.dirname(__file__)
    root = os.path.abspath(os.path.join(here, '..', '..'))
    plt.tight_layout()
    out_dir = os.path.join(root, 'figs')
    os.makedirs(out_dir, exist_ok=True)
    plt.savefig(os.path.join(out_dir, filename))
if __name__ == '__main__' :
    # env def. as playground
    g_env = gym.make("MsPacman-v0")
    # NOTE(review): these low/high bounds look like CartPole state limits,
    # while MsPacman-v0 observations are image arrays — the wrapper's Box
    # bounds would then not match the env.  Confirm the intended environment.
    g_wenv = DiscretizedObservationWapper(g_env, n_bins = 8,
                low=[-2.4, -2.0, -0.42, -3.5], high=[2.4, 2.0, 0.42, 3.5])
    # Q-Learning learner: unseen (state, action) pairs default to 0.0
    Q = defaultdict(float)
    gamma = 0.99 # discount for future
    alpha = 0.5 # soft param-update
    alpha_decay = 0.998
    eps = 1.0 # eps-greedy for exploration
    eps_final = 0.05
    # param for training iteration
    num_epsiode = 1000
    warm_epsiode = 800
    learner = Q_LearningPolicy(g_wenv, alpha, alpha_decay, gamma, eps, eps_final, Q)
    reward_dict = learner.train(num_epsiode, warm_epsiode)
    # Persist the learning curve, then release the environment.
    plot_learning_curve('rewards_trace.png', reward_dict, xlabel = 'epsiode')
    g_wenv.close()
"numpy.average",
"matplotlib.use",
"numpy.random.choice",
"numpy.random.random",
"numpy.digitize",
"os.path.join",
"os.path.dirname",
"numpy.linspace",
"collections.defaultdict",
"matplotlib.pyplot.tight_layout",
"gym.make"
] | [((119, 140), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (133, 140), False, 'import matplotlib\n'), ((3814, 3832), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3830, 3832), True, 'import matplotlib.pyplot as plt\n'), ((4018, 4041), 'gym.make', 'gym.make', (['"""MsPacman-v0"""'], {}), "('MsPacman-v0')\n", (4026, 4041), False, 'import gym\n'), ((4235, 4253), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (4246, 4253), False, 'from collections import defaultdict\n'), ((1907, 1939), 'numpy.random.choice', 'np.random.choice', (['act_with_max_q'], {}), '(act_with_max_q)\n', (1923, 1939), True, 'import numpy as np\n'), ((3849, 3875), 'os.path.join', 'os.path.join', (['root', '"""figs"""'], {}), "(root, 'figs')\n", (3861, 3875), False, 'import os\n'), ((3908, 3944), 'os.path.join', 'os.path.join', (['root', '"""figs"""', 'filename'], {}), "(root, 'figs', filename)\n", (3920, 3944), False, 'import os\n'), ((584, 613), 'numpy.linspace', 'np.linspace', (['l', 'h', '(n_bins + 1)'], {}), '(l, h, n_bins + 1)\n', (595, 613), True, 'import numpy as np\n'), ((1588, 1606), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1604, 1606), True, 'import numpy as np\n'), ((3770, 3795), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3785, 3795), False, 'import os\n'), ((892, 914), 'numpy.digitize', 'np.digitize', (['[x]', 'bins'], {}), '([x], bins)\n', (903, 914), True, 'import numpy as np\n'), ((3206, 3231), 'numpy.average', 'np.average', (['rewards[-50:]'], {}), '(rewards[-50:])\n', (3216, 3231), True, 'import numpy as np\n')] |
import numpy as np
import scipy.sparse as ssp
import torch
from torch.utils.data import DataLoader
from ogb.linkproppred import PygLinkPropPredDataset, Evaluator
from tqdm import tqdm
def resource_allocation(adj_matrix, link_list, batch_size=32768):
    """Resource-allocation similarity scores for candidate links.

    cite: Predicting missing links via local information
    (https://arxiv.org/pdf/0901.0553.pdf)

    :param adj_matrix: Compressed Sparse Row adjacency matrix
    :param link_list: torch tensor of links, shape [m, 2]
    :return: torch.FloatTensor of RA similarity for each link
    """
    A = adj_matrix
    # Inverse degree per node; isolated nodes (degree 0) get weight 0.
    inv_degree = 1 / A.sum(axis=0)
    inv_degree[np.isinf(inv_degree)] = 0
    # D[i, j] = A[i, j] / degree(j)
    D = A.multiply(inv_degree).tocsr()
    link_index = link_list.t()
    batches = DataLoader(range(link_index.size(1)), batch_size)
    chunks = []
    for idx in tqdm(batches):
        src = link_index[0, idx]
        dst = link_index[1, idx]
        part = np.array(np.sum(A[src].multiply(D[dst]), 1)).flatten()
        chunks.append(part)
    return torch.FloatTensor(np.concatenate(chunks, 0))
def evaluate_hits(evaluator, pos_val_pred, neg_val_pred, pos_test_pred, neg_test_pred):
results = {}
for K in [20, 50, 100]:
evaluator.K = K
valid_hits = evaluator.eval({
'y_pred_pos': pos_val_pred,
'y_pred_neg': neg_val_pred,
})[f'hits@{K}']
test_hits = evaluator.eval({
'y_pred_pos': pos_test_pred,
'y_pred_neg': neg_test_pred,
})[f'hits@{K}']
results[f'Hits@{K}'] = (valid_hits, test_hits)
return results
def main():
    """End-to-end benchmark: score ogbl link-prediction splits with RA.

    Loads the dataset, builds a symmetric sparse adjacency matrix from
    the training edges, scores valid/test splits with resource
    allocation and prints Hits@K.
    """
    # Data settings
    print("Loading data.")
    # NOTE(review): OGB datasets are usually named with a hyphen
    # ('ogbl-ppa') — confirm this underscore variant resolves correctly.
    dataset_name = 'ogbl_ppa'
    dataset = PygLinkPropPredDataset(name=dataset_name)
    evaluator = Evaluator(name=dataset_name)
    data = dataset[0]
    split_edge = dataset.get_edge_split()
    # graph_construct
    print("Constructing graph.")
    # Training edges are stored one-way; append reversed copies so the
    # adjacency matrix is symmetric (undirected graph).
    train_edges_raw = np.array(split_edge['train']['edge'])
    train_edges_reverse = np.array(
        [train_edges_raw[:, 1], train_edges_raw[:, 0]]).transpose()
    train_edges = np.concatenate(
        [train_edges_raw, train_edges_reverse], axis=0)
    edge_weight = torch.ones(train_edges.shape[0], dtype=int)
    A = ssp.csr_matrix(
        (edge_weight, (train_edges[:, 0], train_edges[:, 1])), shape=(
            data.num_nodes, data.num_nodes)
    )
    # test
    print("Benchmark test.")
    batch_size = 1024
    pos_valid_edge = split_edge['valid']['edge']
    neg_valid_edge = split_edge['valid']['edge_neg']
    pos_test_edge = split_edge['test']['edge']
    neg_test_edge = split_edge['test']['edge_neg']
    model_predictor = resource_allocation # use model_name as function
    pos_valid_pred = model_predictor(A, pos_valid_edge, batch_size=batch_size)
    neg_valid_pred = model_predictor(A, neg_valid_edge, batch_size=batch_size)
    # NOTE(review): the test-split calls fall back to the default
    # batch_size (32768) rather than the local batch_size above —
    # confirm this asymmetry is intentional.
    pos_test_pred = model_predictor(A, pos_test_edge)
    neg_test_pred = model_predictor(A, neg_test_edge)
    eval_res = evaluate_hits(evaluator, pos_valid_pred,
                             neg_valid_pred, pos_test_pred, neg_test_pred)
    for key, result in eval_res.items():
        valid_hits, test_hits = result
        print(key)
        print(
            f'Valid: {100 * valid_hits:.2f}%, '
            f'Test: {100 * test_hits:.2f}%')


if __name__ == '__main__':
    main()
| [
"ogb.linkproppred.Evaluator",
"tqdm.tqdm",
"numpy.array",
"ogb.linkproppred.PygLinkPropPredDataset",
"numpy.concatenate",
"scipy.sparse.csr_matrix",
"numpy.isinf",
"torch.FloatTensor",
"torch.ones"
] | [((780, 797), 'tqdm.tqdm', 'tqdm', (['link_loader'], {}), '(link_loader)\n', (784, 797), False, 'from tqdm import tqdm\n'), ((984, 1009), 'numpy.concatenate', 'np.concatenate', (['scores', '(0)'], {}), '(scores, 0)\n', (998, 1009), True, 'import numpy as np\n'), ((1022, 1047), 'torch.FloatTensor', 'torch.FloatTensor', (['scores'], {}), '(scores)\n', (1039, 1047), False, 'import torch\n'), ((1673, 1714), 'ogb.linkproppred.PygLinkPropPredDataset', 'PygLinkPropPredDataset', ([], {'name': 'dataset_name'}), '(name=dataset_name)\n', (1695, 1714), False, 'from ogb.linkproppred import PygLinkPropPredDataset, Evaluator\n'), ((1731, 1759), 'ogb.linkproppred.Evaluator', 'Evaluator', ([], {'name': 'dataset_name'}), '(name=dataset_name)\n', (1740, 1759), False, 'from ogb.linkproppred import PygLinkPropPredDataset, Evaluator\n'), ((1902, 1939), 'numpy.array', 'np.array', (["split_edge['train']['edge']"], {}), "(split_edge['train']['edge'])\n", (1910, 1939), True, 'import numpy as np\n'), ((2062, 2124), 'numpy.concatenate', 'np.concatenate', (['[train_edges_raw, train_edges_reverse]'], {'axis': '(0)'}), '([train_edges_raw, train_edges_reverse], axis=0)\n', (2076, 2124), True, 'import numpy as np\n'), ((2152, 2195), 'torch.ones', 'torch.ones', (['train_edges.shape[0]'], {'dtype': 'int'}), '(train_edges.shape[0], dtype=int)\n', (2162, 2195), False, 'import torch\n'), ((2204, 2318), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['(edge_weight, (train_edges[:, 0], train_edges[:, 1]))'], {'shape': '(data.num_nodes, data.num_nodes)'}), '((edge_weight, (train_edges[:, 0], train_edges[:, 1])), shape\n =(data.num_nodes, data.num_nodes))\n', (2218, 2318), True, 'import scipy.sparse as ssp\n'), ((581, 592), 'numpy.isinf', 'np.isinf', (['w'], {}), '(w)\n', (589, 592), True, 'import numpy as np\n'), ((1966, 2022), 'numpy.array', 'np.array', (['[train_edges_raw[:, 1], train_edges_raw[:, 0]]'], {}), '([train_edges_raw[:, 1], train_edges_raw[:, 0]])\n', (1974, 2022), True, 'import numpy as 
np\n')] |
#!/usr/bin/env ipython
import gpxpy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from geopy.distance import vincenty
def gps_distance_elevation(fname):
    """Read <fname>.gpx, plot elevation vs cumulative distance, save <fname>.png.

    :param fname: GPX file name without the '.gpx' extension
    :return: (distance, elevation) — cumulative metres and raw elevations

    BUG FIX: the original computed ``vincenty(loc[i], loc[i-1])`` with i
    starting at 0, so the first "segment" was the wraparound distance
    from the first to the LAST point, and the true final segment was
    never measured.  Consecutive points are now paired correctly.
    """
    segment = gpxpy.parse(open(fname + '.gpx', 'r')).tracks[0].segments[0]
    elevation = []
    loc = []
    for p in segment.points:
        elevation.append(p.elevation)
        lat, lon = p.latitude, p.longitude
        loc.append((lat, lon))
    # Cumulative distance: 0 at the first point, then consecutive-pair
    # geodesic distances in metres.
    distance = np.array([0] + [vincenty(loc[i + 1], loc[i]).meters
                                for i in range(len(loc) - 1)]).cumsum()
    plt.plot(distance, elevation, label=fname)
    plt.savefig(fname + '.png')
    plt.clf()
    return distance, elevation
def downsample_mountain(fname, length=30):
    """Downsample a GPX elevation trace to ``length`` evenly spaced points.

    Plots and saves the downsampled trace as <fname>_downsampled.png.
    :return: (distance_grid, resampled_elevation), both of size ``length``
    """
    distance, elevation = gps_distance_elevation(fname)
    grid = np.linspace(distance[0], distance[-1], length)
    resampled = np.interp(grid, distance, elevation)
    plt.plot(grid, resampled, label=fname)
    plt.savefig(fname + '_downsampled.png')
    plt.clf()
    assert len(grid) == length
    assert len(resampled) == length
    return grid, resampled
def get_mts():
    """Build, save ('mts.npy') and return the (2, length, 1) trace array."""
    _, elev_mb = downsample_mountain('tour-mont-blanc-anti-clockwise')
    _, elev_ct = downsample_mountain('carrauntoohil')
    # Centre each trace on its mean, then scale by its (post-centre) max.
    for trace in (elev_mb, elev_ct):
        trace -= np.mean(trace)
        trace /= np.max(trace)
    # Stack as (series, timestep, feature) for downstream model input.
    samples = np.array([elev_mb, elev_ct]).reshape(2, -1, 1)
    np.save('mts.npy', samples)
    return samples
| [
"numpy.mean",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"numpy.max",
"numpy.array",
"numpy.linspace",
"geopy.distance.vincenty",
"numpy.interp",
"numpy.save"
] | [((55, 76), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (69, 76), False, 'import matplotlib\n'), ((556, 598), 'matplotlib.pyplot.plot', 'plt.plot', (['distance', 'elevation'], {'label': 'fname'}), '(distance, elevation, label=fname)\n', (564, 598), True, 'import matplotlib.pyplot as plt\n'), ((603, 630), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fname + '.png')"], {}), "(fname + '.png')\n", (614, 630), True, 'import matplotlib.pyplot as plt\n'), ((635, 644), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (642, 644), True, 'import matplotlib.pyplot as plt\n'), ((833, 879), 'numpy.linspace', 'np.linspace', (['distance[0]', 'distance[-1]', 'length'], {}), '(distance[0], distance[-1], length)\n', (844, 879), True, 'import numpy as np\n'), ((888, 921), 'numpy.interp', 'np.interp', (['d', 'distance', 'elevation'], {}), '(d, distance, elevation)\n', (897, 921), True, 'import numpy as np\n'), ((926, 953), 'matplotlib.pyplot.plot', 'plt.plot', (['d', 'e'], {'label': 'fname'}), '(d, e, label=fname)\n', (934, 953), True, 'import matplotlib.pyplot as plt\n'), ((958, 997), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fname + '_downsampled.png')"], {}), "(fname + '_downsampled.png')\n", (969, 997), True, 'import matplotlib.pyplot as plt\n'), ((1002, 1011), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1009, 1011), True, 'import matplotlib.pyplot as plt\n'), ((1249, 1261), 'numpy.mean', 'np.mean', (['e_m'], {}), '(e_m)\n', (1256, 1261), True, 'import numpy as np\n'), ((1273, 1284), 'numpy.max', 'np.max', (['e_m'], {}), '(e_m)\n', (1279, 1284), True, 'import numpy as np\n'), ((1296, 1308), 'numpy.mean', 'np.mean', (['e_c'], {}), '(e_c)\n', (1303, 1308), True, 'import numpy as np\n'), ((1320, 1331), 'numpy.max', 'np.max', (['e_c'], {}), '(e_c)\n', (1326, 1331), True, 'import numpy as np\n'), ((1403, 1430), 'numpy.save', 'np.save', (['"""mts.npy"""', 'samples'], {}), "('mts.npy', samples)\n", (1410, 1430), True, 'import numpy as 
np\n'), ((1360, 1380), 'numpy.array', 'np.array', (['[e_m, e_c]'], {}), '([e_m, e_c])\n', (1368, 1380), True, 'import numpy as np\n'), ((480, 508), 'geopy.distance.vincenty', 'vincenty', (['loc[i]', 'loc[i - 1]'], {}), '(loc[i], loc[i - 1])\n', (488, 508), False, 'from geopy.distance import vincenty\n')] |
"""
An Implementation of the 2048 game
"""
from random import randrange
from numpy import array_equal
from Tile import *
# Directions, DO NOT MODIFY
UP = 1
DOWN = 2
LEFT = 3
RIGHT = 4
# Offsets for computing tile indices in each direction.
# Each value is a (row, col) step applied while walking the grid from
# the corresponding starting edge (e.g. UP starts at the top row and
# steps down one row at a time).
# DO NOT MODIFY this dictionary.
OFFSETS = {UP: (1, 0),
           DOWN: (-1, 0),
           LEFT: (0, 1),
           RIGHT: (0, -1)}
def merge(line):
    """Slide and merge one row/column of 2048 tiles towards the front.

    :param line: list of Tile objects
    :return: new list of the same length with tiles slid and merged
    """
    size = len(line)
    # Keep non-empty tiles in order, pad the tail with empty tiles.
    merged = [tile for tile in line if tile > 0]
    merged += [Tile(0)] * (size - len(merged))
    for pos in range(1, size):
        if merged[pos - 1] == merged[pos]:
            # Combine the pair into the leading slot...
            merged[pos - 1] += merged[pos]
            merged[pos] = Tile(0)
            # ...then bubble the freed slot to the end of the list.
            for tail in range(pos + 1, size):
                merged[tail - 1], merged[tail] = merged[tail], merged[tail - 1]
    return merged
class TwentyFortyEight:
    """
    A 2048 Game Class
    """

    def __init__(self, grid_height, grid_width):
        """
        Initializer

        :param grid_height: number of rows in the board
        :param grid_width: number of columns in the board
        """
        self._grid_height = grid_height
        self._grid_width = grid_width
        self.grid = [[]]
        # Board state and direction of the most recent move (pre-move snapshot).
        self.old_grid = {"grid": [[]], "move": None}
        # (row, col) of every tile spawned by new_tile(), in spawn order.
        self.new_tiles = []
        # Compute indices of initial tiles for each move direction
        _up = [(0, col) for col in range(0, grid_width)]
        down = [(grid_height - 1, col) for col in range(0, grid_width)]
        left = [(row, 0) for row in range(0, grid_height)]
        right = [(col, grid_width - 1) for col in range(0, grid_height)]
        # Dictionary of starting tiles per direction
        self._move_dict = {UP: _up, DOWN: down, LEFT: left, RIGHT: right}
        # Dictionary specifying num_steps to traverse for each direction
        self._steps_dict = {1: self._grid_height, 2: self._grid_height,
                            3: self._grid_width, 4: self._grid_width}
        # init board; new-tile coordinates are recorded in self.new_tiles
        self.reset()

    def reset(self):
        """
        Create a new empty board with 2 tiles and init for gameplay
        """
        self.grid = [[Tile(0, row, col) for col in range(self._grid_width)]
                     for row in range(self._grid_height)]
        self.new_tile()
        self.new_tile()

    def __str__(self):
        """
        Print version of the game
        """
        return str(self.grid)

    def new_tile(self):
        """
        Insert a new tile into a random empty (zero) cell, if any.

        90% chance of a 2, 10% chance of a 4.  Records the spawned
        tile's coordinate in ``self.new_tiles``.
        """
        # Check if there is any zero cell left on the board
        zero = True if 0 in [num for row in self.grid for num in row] else False
        if not zero:
            return

        def get_rand():
            """
            :return: row and col of a randomly chosen empty cell
            """
            row_ = randrange(0, self._grid_height)
            col_ = randrange(0, self._grid_width)
            if self.grid[row_][col_] == 0:
                return row_, col_
            else:
                # Re-draw until an empty cell is hit.
                return get_rand()

        (row, col) = get_rand()
        # Add new tile to the board: 90% chance of 2 and 10% chance of 4
        self.grid[row][col] = Tile(4, row, col) if randrange(1, 11) == 10 else Tile(2, row, col)
        self.new_tiles.append((row, col))

    def get_grid_height(self):
        """
        Returns height of grid
        """
        return self._grid_height

    def get_grid_width(self):
        """
        Returns width of grid
        """
        return self._grid_width

    def set_tile(self, row, col, tile):
        """
        :param row: int
        :param col: int
        :param tile: Tile
        :return: void
        Sets grid[row][col] to tile.
        """
        self.grid[row][col] = tile

    def get_tile(self, row, col):
        """
        Gets the tile at (row, col)
        """
        return self.grid[row][col]

    def check_game_over(self):
        """
        Return True when no move in any direction can change the board.

        BUG FIX: the original for/else returned True after testing only
        the first direction, and reused a single trial board so moves
        accumulated across directions.  Each direction is now tried on
        a fresh copy of the current grid.
        """
        snapshot = [[i for i in j] for j in self.grid]
        for direction in (UP, DOWN, LEFT, RIGHT):
            trial = self.__class__(len(snapshot), len(snapshot[0]))
            trial.grid = [[i for i in j] for j in self.grid]
            trial.move(direction)
            for idx, _ in enumerate(snapshot):
                if not array_equal(snapshot[idx], trial.grid[idx]):
                    # This move changes the board, so the game continues.
                    return False
        return True

    def move(self, direction):
        """
        Slide and merge all tiles in ``direction``; spawn a new tile if
        the board changed.  The pre-move board is kept in ``old_grid``.
        """
        # store old grid before altering
        self.old_grid["grid"] = [[i for i in j] for j in self.grid]
        self.old_grid["move"] = direction

        def make_move_list(start_cell, direc, num_steps):
            """
            :param start_cell: Beginning cell
            :param direc: Direction to move in
            :param num_steps: Number of steps
            :return: tuple of (tiles along the traversal, their indices)
            """
            move = []
            indexes = []
            for step in range(num_steps):
                row_ = start_cell[0] + step * direc[0]
                col_ = start_cell[1] + step * direc[1]
                indexes.append((row_, col_))
                move.append(self.grid[row_][col_])
            return move, indexes

        tiles = self._move_dict[direction]
        steps = self._steps_dict[direction]
        offset = OFFSETS[direction]
        changed = False
        for start_tile in tiles:
            (move_list, indices) = make_move_list(start_tile, offset, steps)
            updated_line = merge(move_list)
            for index, (row, col) in enumerate(indices):
                if self.grid[row][col] != updated_line[index]:
                    changed = True
                self.grid[row][col] = updated_line[index]
        if changed:
            return self.new_tile()
"numpy.array_equal",
"random.randrange"
] | [((3051, 3082), 'random.randrange', 'randrange', (['(0)', 'self._grid_height'], {}), '(0, self._grid_height)\n', (3060, 3082), False, 'from random import randrange\n'), ((3102, 3132), 'random.randrange', 'randrange', (['(0)', 'self._grid_width'], {}), '(0, self._grid_width)\n', (3111, 3132), False, 'from random import randrange\n'), ((3465, 3481), 'random.randrange', 'randrange', (['(1)', '(11)'], {}), '(1, 11)\n', (3474, 3481), False, 'from random import randrange\n'), ((4635, 4685), 'numpy.array_equal', 'array_equal', (['test_grid[i]', 'test_game_class.grid[i]'], {}), '(test_grid[i], test_game_class.grid[i])\n', (4646, 4685), False, 'from numpy import array_equal\n')] |
import numpy as np
class VisualiserConsole:
    """Render an audio peak level as a simple ASCII bar for the console."""

    def __init__(self):
        pass

    @staticmethod
    def calc(data):
        """Return (peak, bars): twice the mean absolute sample and its '#' bar."""
        peak = 2 * np.average(np.abs(data))
        # Scale the bar to at most 200 characters for a 16-bit sample range.
        bars = "#" * int(200 * peak / 2 ** 16)
        return (peak, bars)
| [
"numpy.abs"
] | [((148, 160), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (154, 160), True, 'import numpy as np\n')] |
#!/usr/bin/python
import matplotlib.pyplot as plt
from pylab import *
import numpy as np
import csv
import string
import os
from matplotlib.ticker import ScalarFormatter
from scipy.optimize import curve_fit
# Global plot-styling knobs shared by every figure this script produces.
filetype = ".eps"
ticksfontsize = 12
axisfontsize = 15
legentfontsize = 15
mymarkersize=5
def fitfunc(x, a, b, c):
    """Exponential model a * b**x + c, used as the curve_fit target."""
    return c + a * np.power(b, x)
def mkPlot(bfile, outpath):
    """Plot runtime (with exponential fit) and memory usage from a benchmark CSV.

    :param bfile: CSV with columns (parameter, memory, _, time); cols 0, 1, 3 used
    :param outpath: file name (under ./plots/) for the rendered figure

    BUG FIXES: the original created a stray 9x9 ``plt.figure`` that was
    immediately shadowed by ``plt.subplots()`` (so the figsize was
    silently ignored and a figure leaked), and passed the misspelled
    kwarg ``frameone=False`` to ``savefig``.
    """
    tab = np.loadtxt(bfile,
                     usecols=(0, 1, 3),
                     unpack=True,
                     delimiter=',',
                     dtype=float
                     )
    # Sort all three columns by the parameter value (column 0).
    order = tab[0].argsort()
    tr, tm, tk = tab[:, order]
    # One figure, honouring the intended 9x9 size.
    fig, ax = plt.subplots(figsize=(9, 9))
    linx = np.array(tr)
    liny = np.array(tk)
    linspace = np.linspace(0, max(tr))
    # Fit time = a * b**x + c with non-negative, bounded coefficients.
    popt, pcov = curve_fit(fitfunc, linx, liny, bounds=(0, [100, 10, 20]))
    print("Fitted curve: " + str(popt))
    preflegend = "Execution time"
    memlegend = "Memory usage"
    legend = ""
    plt.yticks(fontsize=ticksfontsize)
    plt.xticks(rotation=40, ha='right', fontsize=ticksfontsize)
    plt.grid(linestyle='dotted', color='gray')
    ax.set_ylabel('Time (seconds)', fontsize=axisfontsize)
    plt.yscale('log')
    # Second y-axis (memory), sharing the x-axis.
    ax2 = ax.twinx()
    ax2.set_yscale("log")
    ax.plot(linspace, fitfunc(linspace, *popt), color='orange', zorder=10)
    ax.plot(tr, tk, marker='o', markersize=mymarkersize, linestyle='None', color='blue', zorder=20)
    ax2.plot(tr, tm, marker='.', markersize=mymarkersize, linestyle='None', color='red', zorder=15)
    ax2.set_ylabel('Memory (kbytes)', fontsize=axisfontsize)
    ax2.legend([memlegend], loc=(0.05, 0.68), fontsize=legentfontsize)
    (ca, cb, cc) = tuple(popt)
    ax.legend(
        [r'$F(x)\approx%5.5f * %5.4f^x$' % (ca, cb),
         preflegend + legend, memlegend], loc=(0.05, 0.8), fontsize=legentfontsize)
    # Axis label depends on which benchmark family the file belongs to.
    xlabel = "m"
    if bfile.find("_A_") > 1:
        xlabel = "n"
    ax.set_xlabel(r'Parameter $' + xlabel + '$', fontsize=axisfontsize)
    plt.xscale('linear')
    plt.ticklabel_format(style='sci', axis='x', useOffset=True)
    plt.savefig('./plots/' + outpath, dpi=300, bbox_inches="tight")
    # Close the figure so repeated calls do not accumulate open figures.
    plt.close(fig)
# plt.show()
# Ensure the output directory exists before rendering any figure.
if not os.path.exists('./plots'):
    os.makedirs('./plots')
# Render every benchmark CSV in the working directory, suffixing the
# output names with consecutive letters (a, b, c, ...).
i = 0
for f in os.listdir("./"):
    if (f.startswith("parametrised-benchmarks_")) and (f.endswith(".csv")):
        # NOTE(review): more than 26 matching files would raise IndexError
        # on string.ascii_lowercase[i] — confirm that is acceptable.
        ostr = f+"plot-"+string.ascii_lowercase[i]+filetype
        print("Converting "+f+" to "+ostr)
        mkPlot(f,ostr)
        i += 1
| [
"scipy.optimize.curve_fit",
"os.path.exists",
"os.listdir",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"os.makedirs",
"numpy.power",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ticklabel_format",
"numpy.loa... | [((2775, 2791), 'os.listdir', 'os.listdir', (['"""./"""'], {}), "('./')\n", (2785, 2791), False, 'import os\n'), ((402, 479), 'numpy.loadtxt', 'np.loadtxt', (['bfile'], {'usecols': '(0, 1, 3)', 'unpack': '(True)', 'delimiter': '""","""', 'dtype': 'float'}), "(bfile, usecols=(0, 1, 3), unpack=True, delimiter=',', dtype=float)\n", (412, 479), True, 'import numpy as np\n'), ((716, 742), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 9)'}), '(figsize=(9, 9))\n', (726, 742), True, 'import matplotlib.pyplot as plt\n'), ((762, 776), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (774, 776), True, 'import matplotlib.pyplot as plt\n'), ((790, 802), 'numpy.array', 'np.array', (['tr'], {}), '(tr)\n', (798, 802), True, 'import numpy as np\n'), ((814, 826), 'numpy.array', 'np.array', (['tk'], {}), '(tk)\n', (822, 826), True, 'import numpy as np\n'), ((920, 977), 'scipy.optimize.curve_fit', 'curve_fit', (['fitfunc', 'linx', 'liny'], {'bounds': '(0, [100, 10, 20])'}), '(fitfunc, linx, liny, bounds=(0, [100, 10, 20]))\n', (929, 977), False, 'from scipy.optimize import curve_fit\n'), ((1146, 1180), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'ticksfontsize'}), '(fontsize=ticksfontsize)\n', (1156, 1180), True, 'import matplotlib.pyplot as plt\n'), ((1185, 1244), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(40)', 'ha': '"""right"""', 'fontsize': 'ticksfontsize'}), "(rotation=40, ha='right', fontsize=ticksfontsize)\n", (1195, 1244), True, 'import matplotlib.pyplot as plt\n'), ((1252, 1294), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'linestyle': '"""dotted"""', 'color': '"""gray"""'}), "(linestyle='dotted', color='gray')\n", (1260, 1294), True, 'import matplotlib.pyplot as plt\n'), ((1361, 1378), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (1371, 1378), True, 'import matplotlib.pyplot as plt\n'), ((2373, 2393), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""linear"""'], 
{}), "('linear')\n", (2383, 2393), True, 'import matplotlib.pyplot as plt\n'), ((2430, 2489), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'style': '"""sci"""', 'axis': '"""x"""', 'useOffset': '(True)'}), "(style='sci', axis='x', useOffset=True)\n", (2450, 2489), True, 'import matplotlib.pyplot as plt\n'), ((2496, 2575), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./plots/' + outpath)"], {'dpi': '(300)', 'bbox_inches': '"""tight"""', 'frameone': '(False)'}), "('./plots/' + outpath, dpi=300, bbox_inches='tight', frameone=False)\n", (2507, 2575), True, 'import matplotlib.pyplot as plt\n'), ((2701, 2726), 'os.path.exists', 'os.path.exists', (['"""./plots"""'], {}), "('./plots')\n", (2715, 2726), False, 'import os\n'), ((2732, 2754), 'os.makedirs', 'os.makedirs', (['"""./plots"""'], {}), "('./plots')\n", (2743, 2754), False, 'import os\n'), ((345, 359), 'numpy.power', 'np.power', (['b', 'x'], {}), '(b, x)\n', (353, 359), True, 'import numpy as np\n')] |
import numpy as np
import os
from qtpy import QtGui, QtCore, QtWidgets
import sharppy.sharptab.params as params
import sharppy.sharptab.winds as winds
import sharppy.sharptab.interp as interp
import sharppy.databases.inset_data as inset_data
from sharppy.sharptab.constants import *
## routine written by <NAME> and <NAME>
## <EMAIL> and <EMAIL>
__all__ = ['backgroundENS', 'plotENS']
class backgroundENS(QtWidgets.QFrame):
    '''
    Draw the background frame and lines for the Theta-E plot frame
    '''
    def __init__(self):
        super(backgroundENS, self).__init__()
        self.initUI()

    def initUI(self):
        ## window configuration settings,
        ## such as padding, width, height, and
        ## min/max plot axes
        # NOTE(review): self.bg_color is only assigned on the plotENS
        # subclass; instantiating backgroundENS directly would fail at
        # the plotBitMap.fill call below — confirm this class is
        # abstract by convention.
        self.setStyleSheet("QFrame {"
        "  background-color: rgb(0, 0, 0);"
        "  border-width: 1px;"
        "  border-style: solid;"
        "  border-color: #3399CC;}")
        if self.physicalDpiX() > 75:
            fsize = 10
        else:
            fsize = 11
        # NOTE(review): the DPI-based fsize above is immediately
        # overwritten by this height-proportional value — confirm the
        # DPI branch is intentionally dead.
        fsize = np.floor(.055 * self.size().height())
        self.fsize = fsize
        self.plot_font = QtGui.QFont('Helvetica', fsize + 1)
        self.box_font = QtGui.QFont('Helvetica', fsize)
        self.plot_metrics = QtGui.QFontMetrics( self.plot_font )
        self.box_metrics = QtGui.QFontMetrics(self.box_font)
        self.plot_height = self.plot_metrics.xHeight() + 5
        self.box_height = self.box_metrics.xHeight() + 5
        # Padding around the drawable area, in pixels.
        self.lpad = 40; self.rpad = 40.
        self.tpad = fsize * 2 + 5; self.bpad = fsize
        self.wid = self.size().width() - self.rpad
        self.hgt = self.size().height() - self.bpad
        # Top-left (tlx, tly) and bottom-right (brx, bry) plot corners.
        self.tlx = self.rpad; self.tly = self.tpad
        self.brx = self.wid; self.bry = self.hgt
        # Data-coordinate ranges for both axes.
        self.ymax = 3000.; self.ymin = 0.
        self.xmax = 3000.; self.xmin = 0.
        self.plotBitMap = QtGui.QPixmap(self.width()-2, self.height()-2)
        self.plotBitMap.fill(self.bg_color)
        self.plotBackground()

    def resizeEvent(self, e):
        '''
        Handles the event the window is resized
        '''
        self.initUI()

    def plotBackground(self):
        '''
        Handles painting the frame onto the cached pixmap.
        '''
        ## initialize a painter object and draw the frame
        qp = QtGui.QPainter()
        qp.begin(self.plotBitMap)
        qp.setRenderHint(qp.Antialiasing)
        qp.setRenderHint(qp.TextAntialiasing)
        self.draw_frame(qp)
        qp.end()

    def setBlackPen(self, qp):
        '''
        Configure qp with a half-transparent black pen/brush and return it.
        '''
        color = QtGui.QColor('#000000')
        color.setAlphaF(.5)
        pen = QtGui.QPen(color, 0, QtCore.Qt.SolidLine)
        brush = QtGui.QBrush(QtCore.Qt.SolidPattern)
        qp.setPen(pen)
        qp.setBrush(brush)
        return qp

    def draw_frame(self, qp):
        '''
        Draw the background frame: title, dashed grid lines and tick labels.
        qp: QtGui.QPainter object
        '''
        ## set a new pen to draw with
        # NOTE(review): the EF* colors below are never used in this
        # method — possibly left over from a copied inset.
        EF1_color = "#006600"
        EF2_color = "#FFCC33"
        EF3_color = "#FF0000"
        EF4_color = "#FF00FF"
        pen = QtGui.QPen(self.fg_color, 2, QtCore.Qt.SolidLine)
        qp.setPen(pen)
        qp.setFont(self.plot_font)
        # Centered title across the top of the inset.
        rect1 = QtCore.QRectF(1.5, 2, self.brx, self.plot_height)
        qp.drawText(rect1, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter,
                    'Ensemble Indices')
        pen = QtGui.QPen(QtCore.Qt.blue, 1, QtCore.Qt.DashLine)
        qp.setPen(pen)
        ytick_fontsize = self.fsize-1
        y_ticks_font = QtGui.QFont('Helvetica', ytick_fontsize)
        qp.setFont(y_ticks_font)
        efstp_inset_data = inset_data.condSTPData()
        #texts = efstp_inset_data['ytexts']
        spacing = self.bry / 10.
        # Fixed tick positions/labels shared by both axes.
        texts = ['0','1000','2000','3000']
        y_ticks = [0,1000,2000,3000]#np.arange(self.tpad, self.bry+spacing, spacing)
        for i in range(len(y_ticks)):
            #print y_ticks[i]
            pen = QtGui.QPen(QtGui.QColor("#0080FF"), 1, QtCore.Qt.DashLine)
            qp.setPen(pen)
            try:
                # Horizontal dashed grid line at this y tick.
                qp.drawLine(self.tlx, self.y_to_ypix(int(texts[i])), self.brx, self.y_to_ypix(int(texts[i])))
            except:
                continue
            color = QtGui.QColor('#000000')
            pen = QtGui.QPen(color, 1, QtCore.Qt.SolidLine)
            qp.setPen(pen)
            # NOTE(review): this ypos is immediately overwritten by the
            # next line — confirm the spacing-based value is dead code.
            ypos = spacing*(i+1) - (spacing/4.)
            ypos = self.y_to_ypix(int(texts[i])) - ytick_fontsize/2
            xpos = self.tlx - 50/2.
            rect = QtCore.QRect(xpos, ypos, 50, ytick_fontsize)
            pen = QtGui.QPen(self.fg_color, 1, QtCore.Qt.SolidLine)
            qp.setPen(pen)
            qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, texts[i])
        width = self.brx / 12
        spacing = self.brx / 12
        # Draw the x tick marks
        qp.setFont(QtGui.QFont('Helvetica', self.fsize-1))
        for i in range(np.asarray(texts).shape[0]):
            pen = QtGui.QPen(QtGui.QColor("#0080FF"), 1, QtCore.Qt.DashLine)
            qp.setPen(pen)
            try:
                # Vertical dashed grid line at this x tick.
                qp.drawLine(self.x_to_xpix(int(texts[i])), self.tly, self.x_to_xpix(int(texts[i])),self.bry)
            except:
                continue
            color = QtGui.QColor('#000000')
            color.setAlpha(0)
            pen = QtGui.QPen(color, 1, QtCore.Qt.SolidLine)
            ypos = self.y_to_ypix(0)
            xpos = self.x_to_xpix(float(texts[i])) - 50 / 2.
            rect = QtCore.QRect(xpos, ypos, 50, ytick_fontsize)
            # Change to a white pen to draw the text below the box and whisker plot
            pen = QtGui.QPen(self.fg_color, 1, QtCore.Qt.SolidLine)
            qp.setPen(pen)
            qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, texts[i])

    def y_to_ypix(self, y):
        # Map a data-space y value onto a pixel row within the frame.
        scl1 = self.ymax - self.ymin
        scl2 = self.ymin + y
        #print scl1, scl2, self.bry, self.tpad, self.tly
        return self.bry - (scl2 / scl1) * (self.bry - self.tpad)

    def x_to_xpix(self, x):
        # Map a data-space x value onto a pixel column within the frame.
        scl1 = self.xmax - self.xmin
        scl2 = self.xmax - x
        return self.brx - (scl2 / scl1) * (self.brx - self.rpad)
class plotENS(backgroundENS):
    '''
    Plot the ensemble scatter data on the frame. Inherits the background
    class that draws the axes/frame; this class tracks the profile
    collections and paints one point per ensemble member.
    '''
    def __init__(self):
        # Default colors; overridden later via setPreferences().
        self.bg_color = QtGui.QColor('#000000')
        self.fg_color = QtGui.QColor('#ffffff')
        # Whether the left-mover (vs. right-mover) storm motion is in use.
        self.use_left = False
        super(plotENS, self).__init__()
        # Active profile, index of the active collection, and all collections.
        self.prof = None
        self.pc_idx = 0
        self.prof_collections = []
    def addProfileCollection(self, prof_coll):
        # Add a profile collection to the scatter plot
        self.prof_collections.append(prof_coll)
    def rmProfileCollection(self, prof_coll):
        # Remove a profile collection from the scatter plot
        self.prof_collections.remove(prof_coll)
    def setActiveCollection(self, pc_idx, **kwargs):
        # Set the active profile collection that is being shown in SPCWindow,
        # cache its highlighted profile, and redraw the data layer.
        self.pc_idx = pc_idx
        prof = self.prof_collections[pc_idx].getHighlightedProf()
        self.prof = prof
        self.hght = prof.hght
        self.clearData()
        self.plotData()
        self.update()
    def setProf(self, prof):
        # Set the current profile being viewed in the SPC window.
        self.prof = prof
        # Some code to show whether or not the left or right mover is being used.
        #if self.use_left:
        #    self.stpc = prof.left_stp_cin
        #else:
        #    self.stpc = prof.right_stp_cin
        self.clearData()
        self.plotBackground()
        self.plotData()
        self.update()
    def setPreferences(self, update_gui=True, **prefs):
        # Set the color preferences for the inset; optionally redraw.
        # NOTE(review): assumes self.prof is set when update_gui is True --
        # confirm callers never pass update_gui=True before a profile exists.
        self.bg_color = QtGui.QColor(prefs['bg_color'])
        self.fg_color = QtGui.QColor(prefs['fg_color'])
        if update_gui:
            if self.use_left:
                self.stpc = self.prof.left_stp_cin
            else:
                self.stpc = self.prof.right_stp_cin
            self.clearData()
            self.plotBackground()
            self.plotData()
            self.update()
    def setDeviant(self, deviant):
        # Set the variable to indicate whether or not the right or left mover is being used.
        self.use_left = deviant == 'left'
        if self.use_left:
            self.stpc = self.prof.left_stp_cin
        else:
            self.stpc = self.prof.right_stp_cin
        self.clearData()
        self.plotBackground()
        self.plotData()
        self.update()
    def resizeEvent(self, e):
        '''
        Handles when the window is resized: let the background re-layout,
        then repaint the data on top of it.
        '''
        super(plotENS, self).resizeEvent(e)
        self.plotData()
    def paintEvent(self, e):
        # Blit the off-screen pixmap (background + data) onto the widget.
        super(plotENS, self).paintEvent(e)
        qp = QtGui.QPainter()
        qp.begin(self)
        qp.drawPixmap(1, 1, self.plotBitMap)
        qp.end()
    def clearData(self):
        '''
        Handles the clearing of the pixmap
        in the frame (recreated at the current widget size).
        '''
        self.plotBitMap = QtGui.QPixmap(self.width(), self.height())
        self.plotBitMap.fill(self.bg_color)
    def plotData(self):
        '''
        Handles drawing of data on the frame.
        '''
        if self.prof is None:
            return
        ## this function handles painting the plot
        ## create a new painter object
        # NOTE(review): the painter begun on the pixmap is never end()ed here --
        # confirm Qt releases it cleanly when qp goes out of scope.
        qp = QtGui.QPainter()
        qp.begin(self.plotBitMap)
        qp.setRenderHint(qp.Antialiasing)
        qp.setRenderHint(qp.TextAntialiasing)
        cur_dt = self.prof_collections[self.pc_idx].getCurrentDate()
        bc_idx = 0
        for idx, prof_coll in enumerate(self.prof_collections):
            # Draw all unhighlighted ensemble members
            if prof_coll.getCurrentDate() == cur_dt:
                proflist = list(prof_coll.getCurrentProfs().values())
                # NOTE(review): both branches below are identical; the split
                # presumably anticipated styling the active collection
                # differently -- confirm intent.
                if idx == self.pc_idx:
                    for prof in proflist:
                        self.draw_ensemble_point(qp, prof)
                else:
                    for prof in proflist:
                        self.draw_ensemble_point(qp, prof)
                        #%bc_idx = (bc_idx + 1) % len(self.background_colors)
        bc_idx = 0
        for idx, prof_coll in enumerate(self.prof_collections):
            # Draw all highlighted members that aren't the active one.
            # NOTE(review): self.all_observed is presumably set by the base
            # class -- it is not defined in this class.
            if idx != self.pc_idx and (prof_coll.getCurrentDate() == cur_dt or self.all_observed):
                prof = prof_coll.getHighlightedProf()
                self.draw_ensemble_point(qp, prof)
                #bc_idx = (bc_idx + 1) % len(self.background_colors)
    def draw_ensemble_point(self, qp, prof):
        # Plot one profile as a point on the scatter plot: x = PBL height,
        # y = surface-parcel CAPE (bplus). params/interp are presumably the
        # sharppy.sharptab modules -- confirm against the file's imports.
        if 'pbl_h' not in dir(prof): # Make sure a PBL top has been found in the profile object
            ppbl_top = params.pbl_top(prof)
            setattr(prof, 'pbl_h', interp.to_agl(prof, interp.hght(prof, ppbl_top)))
        if 'sfcpcl' not in dir(prof): # Make sure a surface parcel has been lifted in the profile object
            setattr(prof, 'sfcpcl', params.parcelx(prof, flag=1 ))
        #x = self.x_to_xpix()
        #y = self.y_to_ypix()
        color = QtCore.Qt.red
        qp.setPen(QtGui.QPen(color))
        qp.setBrush(QtGui.QBrush(color))
        # Offset so the ellipse is centered on the mapped pixel position.
        x = self.x_to_xpix(prof.pbl_h) - 50 / 2.
        y = self.y_to_ypix(prof.sfcpcl.bplus) - (self.fsize-1) / 2
        qp.drawEllipse(x, y, 3, 3)
        return
class DrawTest(QtWidgets.QMainWindow):
    """Minimal window hosting a plotENS frame for manual testing.

    Relies on a module-level ``prof_coll`` (built in the ``__main__`` guard)
    being in scope when the window is constructed.
    """
    def __init__(self, parent=None):
        super(DrawTest, self).__init__(parent)
        # Dead code removed: the original computed random x/y arrays,
        # overwrote them with fixed asarray values, and never used either.
        # Build the scatter frame, hand it the decoded profile collection,
        # and make it the window's central widget.
        self.frame = plotENS()
        self.frame.addProfileCollection(prof_coll)
        self.frame.setActiveCollection(0)
        self.setCentralWidget(self.frame)
if __name__ == "__main__":
    import sys
    import sharppy.io.buf_decoder as buf_decoder

    # Fetch and decode an SREF BUFKIT sounding into a profile collection.
    # NOTE: DrawTest reads the module-level name ``prof_coll``.
    path = 'ftp://ftp.meteo.psu.edu/pub/bufkit/SREF/21/sref_oun.buf'
    decoder = buf_decoder.BufDecoder(path)
    prof_coll = decoder._parse()

    # Spin up the Qt application and show the test window.
    app = QtGui.QApplication(sys.argv)
    window = DrawTest()
    window.show()
    sys.exit(app.exec_())
| [
"sharppy.databases.inset_data.condSTPData",
"qtpy.QtGui.QApplication",
"qtpy.QtGui.QPainter",
"qtpy.QtGui.QBrush",
"numpy.random.rand",
"qtpy.QtGui.QColor",
"qtpy.QtGui.QFontMetrics",
"numpy.asarray",
"qtpy.QtGui.QPen",
"sharppy.sharptab.params.parcelx",
"numpy.random.randint",
"sharppy.sharpt... | [((12241, 12269), 'sharppy.io.buf_decoder.BufDecoder', 'buf_decoder.BufDecoder', (['path'], {}), '(path)\n', (12263, 12269), True, 'import sharppy.io.buf_decoder as buf_decoder\n'), ((12309, 12337), 'qtpy.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (12327, 12337), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((1146, 1181), 'qtpy.QtGui.QFont', 'QtGui.QFont', (['"""Helvetica"""', '(fsize + 1)'], {}), "('Helvetica', fsize + 1)\n", (1157, 1181), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((1206, 1237), 'qtpy.QtGui.QFont', 'QtGui.QFont', (['"""Helvetica"""', 'fsize'], {}), "('Helvetica', fsize)\n", (1217, 1237), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((1266, 1300), 'qtpy.QtGui.QFontMetrics', 'QtGui.QFontMetrics', (['self.plot_font'], {}), '(self.plot_font)\n', (1284, 1300), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((1330, 1363), 'qtpy.QtGui.QFontMetrics', 'QtGui.QFontMetrics', (['self.box_font'], {}), '(self.box_font)\n', (1348, 1363), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((2298, 2314), 'qtpy.QtGui.QPainter', 'QtGui.QPainter', ([], {}), '()\n', (2312, 2314), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((2530, 2553), 'qtpy.QtGui.QColor', 'QtGui.QColor', (['"""#000000"""'], {}), "('#000000')\n", (2542, 2553), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((2596, 2637), 'qtpy.QtGui.QPen', 'QtGui.QPen', (['color', '(0)', 'QtCore.Qt.SolidLine'], {}), '(color, 0, QtCore.Qt.SolidLine)\n', (2606, 2637), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((2654, 2690), 'qtpy.QtGui.QBrush', 'QtGui.QBrush', (['QtCore.Qt.SolidPattern'], {}), '(QtCore.Qt.SolidPattern)\n', (2666, 2690), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((3056, 3105), 'qtpy.QtGui.QPen', 'QtGui.QPen', (['self.fg_color', '(2)', 'QtCore.Qt.SolidLine'], {}), '(self.fg_color, 2, QtCore.Qt.SolidLine)\n', (3066, 3105), False, 'from qtpy import QtGui, 
QtCore, QtWidgets\n'), ((3180, 3229), 'qtpy.QtCore.QRectF', 'QtCore.QRectF', (['(1.5)', '(2)', 'self.brx', 'self.plot_height'], {}), '(1.5, 2, self.brx, self.plot_height)\n', (3193, 3229), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((3352, 3401), 'qtpy.QtGui.QPen', 'QtGui.QPen', (['QtCore.Qt.blue', '(1)', 'QtCore.Qt.DashLine'], {}), '(QtCore.Qt.blue, 1, QtCore.Qt.DashLine)\n', (3362, 3401), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((3486, 3526), 'qtpy.QtGui.QFont', 'QtGui.QFont', (['"""Helvetica"""', 'ytick_fontsize'], {}), "('Helvetica', ytick_fontsize)\n", (3497, 3526), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((3587, 3611), 'sharppy.databases.inset_data.condSTPData', 'inset_data.condSTPData', ([], {}), '()\n', (3609, 3611), True, 'import sharppy.databases.inset_data as inset_data\n'), ((6299, 6322), 'qtpy.QtGui.QColor', 'QtGui.QColor', (['"""#000000"""'], {}), "('#000000')\n", (6311, 6322), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((6347, 6370), 'qtpy.QtGui.QColor', 'QtGui.QColor', (['"""#ffffff"""'], {}), "('#ffffff')\n", (6359, 6370), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((7749, 7780), 'qtpy.QtGui.QColor', 'QtGui.QColor', (["prefs['bg_color']"], {}), "(prefs['bg_color'])\n", (7761, 7780), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((7805, 7836), 'qtpy.QtGui.QColor', 'QtGui.QColor', (["prefs['fg_color']"], {}), "(prefs['fg_color'])\n", (7817, 7836), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((8795, 8811), 'qtpy.QtGui.QPainter', 'QtGui.QPainter', ([], {}), '()\n', (8809, 8811), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((9377, 9393), 'qtpy.QtGui.QPainter', 'QtGui.QPainter', ([], {}), '()\n', (9391, 9393), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((11811, 11849), 'numpy.asarray', 'np.asarray', (['[0, 5, 10, 0]'], {'dtype': 'float'}), '([0, 5, 10, 0], dtype=float)\n', (11821, 11849), True, 'import numpy as np\n'), ((11862, 11901), 
'numpy.asarray', 'np.asarray', (['[0, 5, 10, 20]'], {'dtype': 'float'}), '([0, 5, 10, 20], dtype=float)\n', (11872, 11901), True, 'import numpy as np\n'), ((4181, 4204), 'qtpy.QtGui.QColor', 'QtGui.QColor', (['"""#000000"""'], {}), "('#000000')\n", (4193, 4204), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((4223, 4264), 'qtpy.QtGui.QPen', 'QtGui.QPen', (['color', '(1)', 'QtCore.Qt.SolidLine'], {}), '(color, 1, QtCore.Qt.SolidLine)\n', (4233, 4264), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((4463, 4507), 'qtpy.QtCore.QRect', 'QtCore.QRect', (['xpos', 'ypos', '(50)', 'ytick_fontsize'], {}), '(xpos, ypos, 50, ytick_fontsize)\n', (4475, 4507), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((4526, 4575), 'qtpy.QtGui.QPen', 'QtGui.QPen', (['self.fg_color', '(1)', 'QtCore.Qt.SolidLine'], {}), '(self.fg_color, 1, QtCore.Qt.SolidLine)\n', (4536, 4575), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((4806, 4846), 'qtpy.QtGui.QFont', 'QtGui.QFont', (['"""Helvetica"""', '(self.fsize - 1)'], {}), "('Helvetica', self.fsize - 1)\n", (4817, 4846), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((5194, 5217), 'qtpy.QtGui.QColor', 'QtGui.QColor', (['"""#000000"""'], {}), "('#000000')\n", (5206, 5217), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((5266, 5307), 'qtpy.QtGui.QPen', 'QtGui.QPen', (['color', '(1)', 'QtCore.Qt.SolidLine'], {}), '(color, 1, QtCore.Qt.SolidLine)\n', (5276, 5307), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((5425, 5469), 'qtpy.QtCore.QRect', 'QtCore.QRect', (['xpos', 'ypos', '(50)', 'ytick_fontsize'], {}), '(xpos, ypos, 50, ytick_fontsize)\n', (5437, 5469), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((5572, 5621), 'qtpy.QtGui.QPen', 'QtGui.QPen', (['self.fg_color', '(1)', 'QtCore.Qt.SolidLine'], {}), '(self.fg_color, 1, QtCore.Qt.SolidLine)\n', (5582, 5621), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((10829, 10849), 'sharppy.sharptab.params.pbl_top', 
'params.pbl_top', (['prof'], {}), '(prof)\n', (10843, 10849), True, 'import sharppy.sharptab.params as params\n'), ((11215, 11232), 'qtpy.QtGui.QPen', 'QtGui.QPen', (['color'], {}), '(color)\n', (11225, 11232), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((11254, 11273), 'qtpy.QtGui.QBrush', 'QtGui.QBrush', (['color'], {}), '(color)\n', (11266, 11273), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((11671, 11693), 'numpy.random.rand', 'np.random.rand', (['length'], {}), '(length)\n', (11685, 11693), True, 'import numpy as np\n'), ((11696, 11728), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'length'], {}), '(0, 10, length)\n', (11713, 11728), True, 'import numpy as np\n'), ((11741, 11763), 'numpy.random.rand', 'np.random.rand', (['length'], {}), '(length)\n', (11755, 11763), True, 'import numpy as np\n'), ((11766, 11798), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'length'], {}), '(0, 10, length)\n', (11783, 11798), True, 'import numpy as np\n'), ((3914, 3937), 'qtpy.QtGui.QColor', 'QtGui.QColor', (['"""#0080FF"""'], {}), "('#0080FF')\n", (3926, 3937), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((4927, 4950), 'qtpy.QtGui.QColor', 'QtGui.QColor', (['"""#0080FF"""'], {}), "('#0080FF')\n", (4939, 4950), False, 'from qtpy import QtGui, QtCore, QtWidgets\n'), ((11076, 11104), 'sharppy.sharptab.params.parcelx', 'params.parcelx', (['prof'], {'flag': '(1)'}), '(prof, flag=1)\n', (11090, 11104), True, 'import sharppy.sharptab.params as params\n'), ((4869, 4886), 'numpy.asarray', 'np.asarray', (['texts'], {}), '(texts)\n', (4879, 4886), True, 'import numpy as np\n'), ((10905, 10932), 'sharppy.sharptab.interp.hght', 'interp.hght', (['prof', 'ppbl_top'], {}), '(prof, ppbl_top)\n', (10916, 10932), True, 'import sharppy.sharptab.interp as interp\n')] |
import numpy as np
import pandas as pd
from scipy.stats import beta
from src.base.sampler import Sampler
class BinaryPAL_ACS_Sampler(Sampler):
    """Probabilistic Active Learning for Active Class Selection (PAL-ACS)
    sampler for binary labels.

    For each of the two classes it draws pseudo-instances from a kernel
    density estimate of the known data, scores the expected performance
    gain of acquiring one more instance of that class, and asks for an
    instance of the class with the larger probabilistic gain.
    """
    def __init__(self, *args, **kwargs):
        # n_p: pseudo-instances evaluated per class; M: hypothetical
        # acquisitions per class; step_size appears unused in this class.
        self.n_p = kwargs.pop("n_p") if "n_p" in kwargs else 25
        self.M = kwargs.pop("M") if "M" in kwargs else 1
        self.step_size = kwargs.pop("step_size") if "step_size" in kwargs else 0.01
        super().__init__(*args, **kwargs)
    def inform(self, *args, **kwargs):
        # Cache the number of classification features after the base class
        # has attached the dataset.
        super().inform(*args, **kwargs)
        self.n_features = len(self.dataset.get_cf_names())
    def sample(self):
        """Choose the class whose next acquisition is expected to help most,
        and return an instance of that class."""
        self.update()
        # Fall back to the base sampling strategy until a minimum batch exists.
        if len(self.dataset.complete.index) < self.initial_batch_size:
            return self.initial_sample()
        known_cf = self.dataset.complete[self.dataset.get_cf_names()]
        labels = [0, 1]
        # NOTE(review): this compares the whole DataFrame to the label and
        # takes .index -- presumably the intent was to filter on the label
        # column (get_y_name() == label); confirm.
        class_instances = [
            self.dataset.complete[self.dataset.complete == label].index
            for label in labels
        ]
        # NOTE(review): Index & Index set-intersection via ``&`` is
        # deprecated in modern pandas (use Index.intersection).
        n_known = [
            len(self.dataset.complete.index & class_instances[label])
            for label in labels
        ]
        # Weight each class inversely to how many of its instances are known.
        weights = [np.mean(n_known) + 1 / (i + 1) for i in n_known]
        # Pseudo-instances at which expected performance is evaluated.
        evaluation_instances = np.array(
            [
                self.sample_from_label(class_instances[label], self.n_p)
                for label in labels
            ]
        )
        # Hypothetical new acquisitions (M per class).
        sampled_instances = np.array(
            [self.sample_from_label(class_instances[label], self.M) for label in labels]
        )
        known_data = [
            known_cf[known_cf.index.isin(class_instances[label])] for label in labels
        ]
        # Performance before / after the hypothetical acquisitions.
        current_performance = np.array(
            [
                [self.estimate_exp_perf(i, known_data) for i in instances]
                for label, instances in zip(labels, evaluation_instances)
            ]
        )
        expected_performance = np.array(
            [
                [
                    self.estimate_exp_perf(
                        i,
                        known_data,
                        sampled_data=sampled_instances[label],
                        sampled_label=list(labels).index(label),
                    )
                    for i in instances
                ]
                for label, instances in zip(labels, evaluation_instances)
            ]
        )
        diff = expected_performance - current_performance
        gain = [diff[i] * weights[i] for i in labels]
        # Normalize by the number of hypothetical acquisitions.
        pgain = [np.mean(gain[i]) / self.M for i in labels]
        return self.sample_class(np.argmax(pgain))
    def sample_from_label(self, label, n):
        """Samples n pseudo-instances from the distribution of the given class.

        With probability proportional to how much is known about the class,
        draws near a random known instance (Gaussian around it); otherwise
        draws uniformly from the feature bounding box.
        """
        n_known = len(self.dataset.complete.index & label)
        ratio = n_known / (n_known + 2)
        samples = []
        for _ in range(n):
            if self.rng.random() <= ratio:
                mu = self.dataset.complete[self.dataset.get_cf_names()].loc[
                    self.rng.choice(self.dataset.complete.index & label)
                ]
                samples.append(
                    [
                        self.rng.normal(mu[i], self.parzen_window[i])
                        for i in range(self.n_features)
                    ]
                )
            else:
                samples.append(
                    [
                        self.rng.uniform(self.lower_bound[i], self.upper_bound[i])
                        for i in range(self.n_features)
                    ]
                )
        return np.array(samples)
    def estimate_exp_perf(
        self, p_instance, known_data, sampled_data=None, sampled_label=None
    ):
        """Estimates the expected performance for a given pseudo-instance and its assigned
        label, given the already known data.

        Uses the mean of a Beta(k+1, n-k+1) posterior over the accuracy at
        the pseudo-instance, with (n, k) from get_label_stats.
        """
        n, k = self.get_label_stats(
            p_instance,
            known_data,
            sampled_data=sampled_data,
            sampled_label=sampled_label,
        )
        max_k = max(k)
        return beta.mean(max_k + 1, n - max_k + 1)
    def get_label_stats(
        self, p_instance, known_data, sampled_data=None, sampled_label=None
    ):
        """Determines the label statistics (n and k) for a given pseudo-instance and its
        assigned label, given the already known data.

        n is the kernel-weighted total mass near p_instance; k[i] is the mass
        contributed by class i. Hypothetical acquisitions (sampled_data) are
        temporarily appended to their class before counting.
        """
        n, k = 0, [0, 0]
        for i, label in enumerate(known_data):
            freq = 0
            if sampled_data is not None and sampled_label == i:
                # NOTE(review): DataFrame.append is removed in pandas >= 2.0;
                # this needs pd.concat on modern pandas.
                label = label.append(
                    pd.DataFrame(
                        sampled_data,
                        columns=self.dataset.get_cf_names(),
                        index=[f"s{i}" for i in range(self.M)],
                    )
                )
            for instance in label.iterrows():
                # NOTE(review): a Gaussian/Parzen kernel is normally
                # exp(-d**2 / (2*h**2)); the minus sign is absent here, so
                # weight GROWS with distance -- confirm this is intended.
                freq += np.exp(
                    (np.linalg.norm(instance[1] - p_instance) ** 2)
                    / (2 * np.mean(self.parzen_window) ** 2)
                )
            n += freq
            k[i] = freq
        return n, k
    def update(self):
        """Updates the relevant parameters to the PAL-ACS method:
        per-feature bounds and Parzen-window bandwidths (1/10 of the range)."""
        self.upper_bound = [
            self.dataset.complete[cf].max() for cf in self.dataset.get_cf_names()
        ]
        self.lower_bound = [
            self.dataset.complete[cf].min() for cf in self.dataset.get_cf_names()
        ]
        self.parzen_window = [
            (self.upper_bound[i] - self.lower_bound[i]) / 10
            for i in range(self.n_features)
        ]
    def sample_class(self, lbl):
        """Sample an instance from the given (binary) class; falls back to a
        uniform draw over all incomplete instances if the class is exhausted.

        Raises ValueError for any label other than 0 or 1.
        """
        if not (lbl == 0 or lbl == 1):
            raise ValueError
        return (
            self.rng.choice(
                self.dataset.incomplete[
                    self.dataset.incomplete[self.dataset.get_y_name()] == lbl
                ].index
            )
            if len(
                self.dataset.incomplete[
                    self.dataset.incomplete[self.dataset.get_y_name()] == lbl
                ].index
            )
            > 0
            else self.rng.choice(self.dataset.incomplete.index)
        )
    def get_init(self):
        # Minimum number of initial acquisitions required by this sampler.
        return 2
    def to_string(self):
        # Identifier used when reporting/serializing this sampler.
        return "binary-pal-acs"
| [
"numpy.mean",
"numpy.argmax",
"numpy.array",
"numpy.linalg.norm",
"scipy.stats.beta.mean"
] | [((3525, 3542), 'numpy.array', 'np.array', (['samples'], {}), '(samples)\n', (3533, 3542), True, 'import numpy as np\n'), ((4006, 4041), 'scipy.stats.beta.mean', 'beta.mean', (['(max_k + 1)', '(n - max_k + 1)'], {}), '(max_k + 1, n - max_k + 1)\n', (4015, 4041), False, 'from scipy.stats import beta\n'), ((2535, 2551), 'numpy.argmax', 'np.argmax', (['pgain'], {}), '(pgain)\n', (2544, 2551), True, 'import numpy as np\n'), ((1120, 1136), 'numpy.mean', 'np.mean', (['n_known'], {}), '(n_known)\n', (1127, 1136), True, 'import numpy as np\n'), ((2458, 2474), 'numpy.mean', 'np.mean', (['gain[i]'], {}), '(gain[i])\n', (2465, 2474), True, 'import numpy as np\n'), ((4828, 4868), 'numpy.linalg.norm', 'np.linalg.norm', (['(instance[1] - p_instance)'], {}), '(instance[1] - p_instance)\n', (4842, 4868), True, 'import numpy as np\n'), ((4902, 4929), 'numpy.mean', 'np.mean', (['self.parzen_window'], {}), '(self.parzen_window)\n', (4909, 4929), True, 'import numpy as np\n')] |
import numpy as np
from .domain import Domain
from .bisection import *
class Ringleb(Domain):
    """Ringleb-flow test-case domain.

    The domain is described in hodograph variables (k, q), where q is the
    flow speed and k a streamline constant; (x, y) physical coordinates are
    related to (k, q) through the local sound speed c. Conversions in both
    directions use bisection on the implicit relations.
    """
    # Gas constants and domain limits in hodograph/physical space.
    gamma = 1.4
    gm1 = gamma - 1.
    kmin = 0.7
    kmax = 1.2
    qmin = 0.5
    # Shift applied to physical coordinates so the domain sits in x,y >= 0.
    sx = 1.5
    sy = 0
    name = 'ringleb'
    left = 0
    right = 2.75
    bottom = 0
    top = 2.75
    def compute_J(self,c):
        # J(c) term of the Ringleb solution.
        return 1. / c + 1. / (3. * c ** 3) + 1. / (5. * c ** 5) -.5 * np.log((1. + c) / (1. - c))
    def compute_rho(self,c):
        # Density from sound speed (isentropic relation).
        return c ** (2. / self.gm1)
    def compute_q2(self,c):
        # Squared speed from sound speed.
        return 2. * (1. - c ** 2) / self.gm1
    def eqn(self,x, y, c):
        # Implicit relation linking (x, y) and c; zero at the correct c.
        J = self.compute_J(c)
        rho = self.compute_rho(c)
        q2 = self.compute_q2(c)
        return (x - J / 2.) ** 2 + y ** 2 - 1. / (4. * rho ** 2 * q2 ** 2)
    def compute_c(self,x, y):
        # Solve eqn(x, y, c) = 0 for c by bisection; c is bracketed in
        # (0.5, 0.9999) for this domain.
        clow =.5*np.ones(x.size)
        chigh = 0.9999*np.ones(x.size)
        c = bisection(clow, chigh, lambda cin : self.eqn(np.ravel(x),np.ravel(y),cin) )
        return c.reshape(x.shape)
    def kq2xy(self, k, q):
        """Map hodograph coordinates (k, q) to physical coordinates (x, y)."""
        c = np.sqrt(2. + q**2 - self.gamma * q**2)/np.sqrt(2)
        J = self.compute_J(c)
        rho = self.compute_rho(c)
        q2 = q**2
        k2 = k**2
        x = (0.5/rho) * (1./q2 - 2./k**2.) + 0.5 * J + self.sx
        y = np.sqrt(1. - q2/k**2.)/(k * rho * q ) + self.sy
        return x,y
    def xy2kq(self,x,y):
        """Map physical coordinates (x, y) to hodograph coordinates (k, q)."""
        x = x - self.sx
        y = y - self.sy
        c = self.compute_c(x, y)
        J = self.compute_J(c)
        rho = self.compute_rho(c)
        q2 = self.compute_q2(c)
        q = np.sqrt(q2)
        k = np.sqrt(2. / (1. / q2 - 2. * rho * (x - J / 2.)))
        return k,q
    def in_domain(self,x,y):
        # 1 where (x, y) lies inside the hodograph box [kmin, kmax] x
        # [qmin, inf), else 0.
        k,q = self.xy2kq(x,y)
        c1 = np.greater_equal(q,self.qmin)
        c2 = np.logical_and(np.less_equal(self.kmin, k), np.less_equal(k,self.kmax) )
        return np.logical_and(c1,c2).astype(int)
    def bc_id(self, bid):
        # Collapse boundary codes: k-boundaries (-1/-2) -> -1, others -> -2.
        if bid == -1 or bid == -2:
            return -1
        else:
            return -2
    # DEPRECATE THIS.
    def bc(self,x,y):
        # Classify boundary points: -1 on k=kmin, -2 on k=kmax, -3 on q=qmin.
        k,q = self.xy2kq(x,y)
        dk_min = -1*(np.abs(k-self.kmin) < 1e-13)
        dk_max = -2*(np.abs(k-self.kmax) < 1e-13)
        dq_min = -3*(np.abs(q-self.qmin) < 1e-13)
        # NOTE(review): interior points (all three flags zero) also fail this
        # xor-based check -- presumably bc() is only ever called on boundary
        # points; confirm.
        sanity_check = np.logical_not(np.logical_xor(np.logical_xor(dk_min, dk_max), dq_min))
        num_wrong = np.sum(sanity_check)
        if(num_wrong > 0):
            print("DUPLICATE BOUNDARY CONDITIONS\n")
            quit()
        return dk_min+dk_max+dq_min
    # -1 reflecting, -2 is inflow/outflow
    def vertex2bc(self, x, y):
        # Per-vertex boundary-condition codes: -1 reflecting (k walls),
        # -2 inflow/outflow (q=qmin arc or bottom y=0); 0 where ambiguous.
        k,q = self.xy2kq(x,y)
        dk_min = -1*(np.abs(k-self.kmin) < 1e-13)
        dk_max = -1*(np.abs(k-self.kmax) < 1e-13)
        dq_min = -2*(np.abs(q-self.qmin) < 1e-13)
        bot = -2*(np.abs(y) < 1e-13)
        bc = dk_min+dk_max+dq_min+bot
        # Keep only vertices that match exactly one boundary test.
        decided = np.where(np.logical_xor(np.logical_xor(np.logical_xor(dk_min, dk_max), dq_min), bot))
        out = np.zeros(x.shape)
        out[decided] = bc[decided]
        return out
    def target_ratio(self, wgt, k1, q1, k2, q2, k3, q3) :
        # Arc-length-ratio residual: zero when point 2 splits the segment
        # (1 -> 3) at fraction wgt, measured in physical space.
        X1,Y1 = self.kq2xy(k1,q1)
        X2,Y2 = self.kq2xy(k2,q2)
        X3,Y3 = self.kq2xy(k3,q3)
        l1 = np.sqrt( (X3-X2)**2. + (Y3-Y2)**2. )
        l3 = np.sqrt( (X3-X1)**2. + (Y3-Y1)**2. )
        return l1/l3 - wgt
    def curved_points(self,wgt,X1,Y1,X2,Y2):
        """For each boundary edge (X1,Y1)-(X2,Y2), place len(wgt) points on
        the curved boundary at the physical-space fractions in wgt, solving
        along the appropriate hodograph coordinate by bisection."""
        bc1 = self.bc(X1,Y1)
        # bc2 = self.bc(X2,Y2)
        # sanity_check = np.logical_not( np.equal(bc1,bc2) )
        # num_wrong = np.sum(sanity_check)
        # if(num_wrong > 0):
        #     print("not the same\n")
        #     quit()
        points = np.zeros( (X1.shape[0], wgt.size, 2) )
        for qq in range(wgt.size):
            # boundary -1: k = kmin wall, vary q
            idx1 = np.where( bc1 == -1 )[0]
            k1,q1 = self.xy2kq(X1[idx1], Y1[idx1])
            k3,q3 = self.xy2kq(X2[idx1], Y2[idx1])
            q2 = bisection( q1, q3, lambda qin : self.target_ratio(wgt[qq], self.kmin, q1,
                                                                          self.kmin, qin,
                                                                          self.kmin, q3 ) )
            x2,y2 = self.kq2xy(self.kmin,q2)
            points[idx1,qq,0] = x2
            points[idx1,qq,1] = y2
            # boundary -2: k = kmax wall, vary q
            idx2 = np.where( bc1 == -2)[0]
            k1,q1 = self.xy2kq(X1[idx2], Y1[idx2])
            k3,q3 = self.xy2kq(X2[idx2], Y2[idx2])
            q2 = bisection( q1, q3, lambda qin : self.target_ratio(wgt[qq], self.kmax, q1,
                                                                          self.kmax, qin,
                                                                          self.kmax, q3 ) )
            x2,y2 = self.kq2xy(self.kmax,q2)
            points[idx2,qq,0] = x2
            points[idx2,qq,1] = y2
            # boundary -3: q = qmin arc, vary k
            idx3 = np.where( bc1 == -3)[0]
            k1,q1 = self.xy2kq(X1[idx3], Y1[idx3])
            k3,q3 = self.xy2kq(X2[idx3], Y2[idx3])
            k2 = bisection( k1, k3, lambda kin : self.target_ratio(wgt[qq], k1 , self.qmin,
                                                                          kin, self.qmin,
                                                                          k3 , self.qmin ) )
            x2,y2 = self.kq2xy(k2,self.qmin)
            points[idx3,qq,0] = x2
            points[idx3,qq,1] = y2
        return points
    def get_corner_coord(self,bc1, bc2):
        # Physical coordinates of the domain corners on q = qmin: pick the
        # (kmin, qmin) corner where (bc1, bc2) == (-1, -3), else (kmax, qmin).
        k13,q13 = self.kq2xy(np.array([self.kmin]), np.array([self.qmin]))
        k23,q23 = self.kq2xy(np.array([self.kmax]), np.array([self.qmin]))
        corner13 = np.hstack( (k13,q13) ).reshape( (-1,2) )
        corner23 = np.hstack( (k23,q23) ).reshape( (-1,2) )
        corner_coords = np.zeros( (bc1.shape[0], 2) )
        corner_coords[:,0] = np.where( np.logical_and(bc1 == -1,bc2 == -3) , corner13[:,0], corner23[:,0] )
        corner_coords[:,1] = np.where( np.logical_and(bc1 == -1,bc2 == -3) , corner13[:,1], corner23[:,1] )
        return corner_coords
| [
"numpy.abs",
"numpy.sqrt",
"numpy.ones",
"numpy.logical_and",
"numpy.hstack",
"numpy.where",
"numpy.less_equal",
"numpy.log",
"numpy.logical_xor",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"numpy.ravel",
"numpy.greater_equal"
] | [((1575, 1586), 'numpy.sqrt', 'np.sqrt', (['q2'], {}), '(q2)\n', (1582, 1586), True, 'import numpy as np\n'), ((1599, 1652), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (1.0 / q2 - 2.0 * rho * (x - J / 2.0)))'], {}), '(2.0 / (1.0 / q2 - 2.0 * rho * (x - J / 2.0)))\n', (1606, 1652), True, 'import numpy as np\n'), ((1745, 1775), 'numpy.greater_equal', 'np.greater_equal', (['q', 'self.qmin'], {}), '(q, self.qmin)\n', (1761, 1775), True, 'import numpy as np\n'), ((2379, 2399), 'numpy.sum', 'np.sum', (['sanity_check'], {}), '(sanity_check)\n', (2385, 2399), True, 'import numpy as np\n'), ((3008, 3025), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (3016, 3025), True, 'import numpy as np\n'), ((3267, 3311), 'numpy.sqrt', 'np.sqrt', (['((X3 - X2) ** 2.0 + (Y3 - Y2) ** 2.0)'], {}), '((X3 - X2) ** 2.0 + (Y3 - Y2) ** 2.0)\n', (3274, 3311), True, 'import numpy as np\n'), ((3317, 3361), 'numpy.sqrt', 'np.sqrt', (['((X3 - X1) ** 2.0 + (Y3 - Y1) ** 2.0)'], {}), '((X3 - X1) ** 2.0 + (Y3 - Y1) ** 2.0)\n', (3324, 3361), True, 'import numpy as np\n'), ((3726, 3762), 'numpy.zeros', 'np.zeros', (['(X1.shape[0], wgt.size, 2)'], {}), '((X1.shape[0], wgt.size, 2))\n', (3734, 3762), True, 'import numpy as np\n'), ((5906, 5933), 'numpy.zeros', 'np.zeros', (['(bc1.shape[0], 2)'], {}), '((bc1.shape[0], 2))\n', (5914, 5933), True, 'import numpy as np\n'), ((813, 828), 'numpy.ones', 'np.ones', (['x.size'], {}), '(x.size)\n', (820, 828), True, 'import numpy as np\n'), ((852, 867), 'numpy.ones', 'np.ones', (['x.size'], {}), '(x.size)\n', (859, 867), True, 'import numpy as np\n'), ((1045, 1088), 'numpy.sqrt', 'np.sqrt', (['(2.0 + q ** 2 - self.gamma * q ** 2)'], {}), '(2.0 + q ** 2 - self.gamma * q ** 2)\n', (1052, 1088), True, 'import numpy as np\n'), ((1084, 1094), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1091, 1094), True, 'import numpy as np\n'), ((1803, 1830), 'numpy.less_equal', 'np.less_equal', (['self.kmin', 'k'], {}), '(self.kmin, k)\n', (1816, 1830), True, 'import 
numpy as np\n'), ((1832, 1859), 'numpy.less_equal', 'np.less_equal', (['k', 'self.kmax'], {}), '(k, self.kmax)\n', (1845, 1859), True, 'import numpy as np\n'), ((5631, 5652), 'numpy.array', 'np.array', (['[self.kmin]'], {}), '([self.kmin])\n', (5639, 5652), True, 'import numpy as np\n'), ((5654, 5675), 'numpy.array', 'np.array', (['[self.qmin]'], {}), '([self.qmin])\n', (5662, 5675), True, 'import numpy as np\n'), ((5706, 5727), 'numpy.array', 'np.array', (['[self.kmax]'], {}), '([self.kmax])\n', (5714, 5727), True, 'import numpy as np\n'), ((5729, 5750), 'numpy.array', 'np.array', (['[self.qmin]'], {}), '([self.qmin])\n', (5737, 5750), True, 'import numpy as np\n'), ((5975, 6011), 'numpy.logical_and', 'np.logical_and', (['(bc1 == -1)', '(bc2 == -3)'], {}), '(bc1 == -1, bc2 == -3)\n', (5989, 6011), True, 'import numpy as np\n'), ((6084, 6120), 'numpy.logical_and', 'np.logical_and', (['(bc1 == -1)', '(bc2 == -3)'], {}), '(bc1 == -1, bc2 == -3)\n', (6098, 6120), True, 'import numpy as np\n'), ((386, 415), 'numpy.log', 'np.log', (['((1.0 + c) / (1.0 - c))'], {}), '((1.0 + c) / (1.0 - c))\n', (392, 415), True, 'import numpy as np\n'), ((1279, 1307), 'numpy.sqrt', 'np.sqrt', (['(1.0 - q2 / k ** 2.0)'], {}), '(1.0 - q2 / k ** 2.0)\n', (1286, 1307), True, 'import numpy as np\n'), ((1876, 1898), 'numpy.logical_and', 'np.logical_and', (['c1', 'c2'], {}), '(c1, c2)\n', (1890, 1898), True, 'import numpy as np\n'), ((2127, 2148), 'numpy.abs', 'np.abs', (['(k - self.kmin)'], {}), '(k - self.kmin)\n', (2133, 2148), True, 'import numpy as np\n'), ((2177, 2198), 'numpy.abs', 'np.abs', (['(k - self.kmax)'], {}), '(k - self.kmax)\n', (2183, 2198), True, 'import numpy as np\n'), ((2227, 2248), 'numpy.abs', 'np.abs', (['(q - self.qmin)'], {}), '(q - self.qmin)\n', (2233, 2248), True, 'import numpy as np\n'), ((2318, 2348), 'numpy.logical_xor', 'np.logical_xor', (['dk_min', 'dk_max'], {}), '(dk_min, dk_max)\n', (2332, 2348), True, 'import numpy as np\n'), ((2673, 2694), 'numpy.abs', 
'np.abs', (['(k - self.kmin)'], {}), '(k - self.kmin)\n', (2679, 2694), True, 'import numpy as np\n'), ((2723, 2744), 'numpy.abs', 'np.abs', (['(k - self.kmax)'], {}), '(k - self.kmax)\n', (2729, 2744), True, 'import numpy as np\n'), ((2773, 2794), 'numpy.abs', 'np.abs', (['(q - self.qmin)'], {}), '(q - self.qmin)\n', (2779, 2794), True, 'import numpy as np\n'), ((2823, 2832), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (2829, 2832), True, 'import numpy as np\n'), ((3845, 3864), 'numpy.where', 'np.where', (['(bc1 == -1)'], {}), '(bc1 == -1)\n', (3853, 3864), True, 'import numpy as np\n'), ((4417, 4436), 'numpy.where', 'np.where', (['(bc1 == -2)'], {}), '(bc1 == -2)\n', (4425, 4436), True, 'import numpy as np\n'), ((4990, 5009), 'numpy.where', 'np.where', (['(bc1 == -3)'], {}), '(bc1 == -3)\n', (4998, 5009), True, 'import numpy as np\n'), ((5780, 5801), 'numpy.hstack', 'np.hstack', (['(k13, q13)'], {}), '((k13, q13))\n', (5789, 5801), True, 'import numpy as np\n'), ((5840, 5861), 'numpy.hstack', 'np.hstack', (['(k23, q23)'], {}), '((k23, q23))\n', (5849, 5861), True, 'import numpy as np\n'), ((935, 946), 'numpy.ravel', 'np.ravel', (['x'], {}), '(x)\n', (943, 946), True, 'import numpy as np\n'), ((947, 958), 'numpy.ravel', 'np.ravel', (['y'], {}), '(y)\n', (955, 958), True, 'import numpy as np\n'), ((2938, 2968), 'numpy.logical_xor', 'np.logical_xor', (['dk_min', 'dk_max'], {}), '(dk_min, dk_max)\n', (2952, 2968), True, 'import numpy as np\n')] |
import numpy as np
np.set_printoptions(threshold=np.inf)
from PIL import Image
def main(data_path="/Users/wangbing/Downloads/compare_pic/382_4_11.png",
         ade_path="/Users/wangbing/Downloads/compare_pic/ade.png"):
    """Inspect two label images: print the max pixel value and shape of the first.

    Parameters
    ----------
    data_path : str
        Image whose max value and shape are printed. Defaults to the original
        hard-coded path, so existing callers (the ``__main__`` guard) are
        unchanged.
    ade_path : str
        Reference (ADE) image; its max is computed for optional comparison.
    """
    # Context managers ensure the underlying file handles are closed promptly.
    with Image.open(data_path) as im:
        data_im = np.array(im)
    with Image.open(ade_path) as im:
        ade_im = np.array(im)
    max_data_im = np.max(data_im)
    max_ade_im = np.max(ade_im)
    print("max of data = ", max_data_im)
    #print("max of ade = ", max_ade_im)
    print(data_im.shape)
    #print(ade_im)

if __name__ == '__main__':
    main()
| [
"numpy.max",
"numpy.array",
"PIL.Image.open",
"numpy.set_printoptions"
] | [((19, 56), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (38, 56), True, 'import numpy as np\n'), ((106, 170), 'PIL.Image.open', 'Image.open', (['"""/Users/wangbing/Downloads/compare_pic/382_4_11.png"""'], {}), "('/Users/wangbing/Downloads/compare_pic/382_4_11.png')\n", (116, 170), False, 'from PIL import Image\n'), ((185, 244), 'PIL.Image.open', 'Image.open', (['"""/Users/wangbing/Downloads/compare_pic/ade.png"""'], {}), "('/Users/wangbing/Downloads/compare_pic/ade.png')\n", (195, 244), False, 'from PIL import Image\n'), ((259, 276), 'numpy.array', 'np.array', (['data_im'], {}), '(data_im)\n', (267, 276), True, 'import numpy as np\n'), ((290, 306), 'numpy.array', 'np.array', (['ade_im'], {}), '(ade_im)\n', (298, 306), True, 'import numpy as np\n'), ((325, 340), 'numpy.max', 'np.max', (['data_im'], {}), '(data_im)\n', (331, 340), True, 'import numpy as np\n'), ((358, 372), 'numpy.max', 'np.max', (['ade_im'], {}), '(ade_im)\n', (364, 372), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import scipy as sp
from scipy import optimize
# load data from problem
G_letter = [["","C+","B","B+","A-","C",""],
            ["B-","A-","","","A+","D+","B"],
            ["B-","","B+","","A-","B","B+"],
            ["A+","","B-","A","","A-",""],
            ["","B-","D+","B+","","B","C+"]]

# convert letter grade to GPA
grade_map = {"A+": 4.33, "A": 4, "A-": 3.66, "B+": 3.33, "B": 3, "B-": 2.66,
             "C+": 2.33, "C": 2, "D+": 1.66, "": 0}

# Build the numeric grade matrix row by row (missing grades become 0).
G = np.array([[grade_map[entry] for entry in row] for row in G_letter])

# Locate the graded (nonzero) entries and flatten them into a vector g.
row_idx, col_idx = np.nonzero(G)
nnz = row_idx.size
n, m = G.shape
g = G[row_idx, col_idx]

# Coefficient matrix: observed grade (i, j) is modeled as aptitude_i + easiness_j.
C = np.zeros((nnz, n + m))
obs = np.arange(nnz)
C[obs, row_idx] = 1
C[obs, n + col_idx] = 1

# L1 fit as a linear program: minimize sum(t) subject to -t <= C z - g <= t.
I = np.eye(nnz)
A = np.block([[-C, -I], [C, -I]])
b = np.hstack((-g, g))
c = np.hstack((np.zeros(n + m), np.ones(nnz)))

# solve linear program
sol = sp.optimize.linprog(c, A, b)

# get easiness and aptitude
aptitude = sol.x[:n]
easiness = sol.x[n:n+m]
| [
"numpy.block",
"numpy.eye",
"numpy.ones",
"scipy.optimize.linprog",
"numpy.array",
"numpy.zeros",
"numpy.nonzero",
"numpy.shape",
"numpy.vectorize",
"numpy.arange"
] | [((592, 605), 'numpy.nonzero', 'np.nonzero', (['G'], {}), '(G)\n', (602, 605), True, 'import numpy as np\n'), ((630, 641), 'numpy.shape', 'np.shape', (['G'], {}), '(G)\n', (638, 641), True, 'import numpy as np\n'), ((731, 753), 'numpy.zeros', 'np.zeros', (['(nnz, n + m)'], {}), '((nnz, n + m))\n', (739, 753), True, 'import numpy as np\n'), ((891, 908), 'numpy.block', 'np.block', (['[-g, g]'], {}), '([-g, g])\n', (899, 908), True, 'import numpy as np\n'), ((981, 1009), 'scipy.optimize.linprog', 'sp.optimize.linprog', (['c', 'A', 'b'], {}), '(c, A, b)\n', (1000, 1009), True, 'import scipy as sp\n'), ((491, 543), 'numpy.vectorize', 'np.vectorize', (['(lambda x: grade_map[x])'], {'otypes': '[float]'}), '(lambda x: grade_map[x], otypes=[float])\n', (503, 543), True, 'import numpy as np\n'), ((543, 561), 'numpy.array', 'np.array', (['G_letter'], {}), '(G_letter)\n', (551, 561), True, 'import numpy as np\n'), ((680, 693), 'numpy.nonzero', 'np.nonzero', (['G'], {}), '(G)\n', (690, 693), True, 'import numpy as np\n'), ((753, 767), 'numpy.arange', 'np.arange', (['nnz'], {}), '(nnz)\n', (762, 767), True, 'import numpy as np\n'), ((782, 796), 'numpy.arange', 'np.arange', (['nnz'], {}), '(nnz)\n', (791, 796), True, 'import numpy as np\n'), ((922, 937), 'numpy.zeros', 'np.zeros', (['(n + m)'], {}), '(n + m)\n', (930, 937), True, 'import numpy as np\n'), ((936, 948), 'numpy.ones', 'np.ones', (['nnz'], {}), '(nnz)\n', (943, 948), True, 'import numpy as np\n'), ((855, 866), 'numpy.eye', 'np.eye', (['nnz'], {}), '(nnz)\n', (861, 866), True, 'import numpy as np\n'), ((872, 883), 'numpy.eye', 'np.eye', (['nnz'], {}), '(nnz)\n', (878, 883), True, 'import numpy as np\n')] |
import json
import random
import numpy as np
from corerl.core import ScheduledParameter
from corerl.function import KernelRepresentation
# ==================================================
# A POLK Kernel Q-Greedy Algorithm
class KGreedyQModel(object):
def __init__(self, stateCount, actionCount, config):
self.Q = KernelRepresentation(stateCount + actionCount, 2, config)
# Learning rates
self.eta = ScheduledParameter('LearningRate', config)
self.beta = ScheduledParameter('ExpectationRate', config)
# Regularization
self.lossL = config.getfloat('Regularization', 1e-4)
# Representation error budget
self.eps = config.getfloat('RepresentationError', 1.0)
# Reward discount
self.gamma = config.getfloat('RewardDiscount')
def get_q(self,x):
return self.Q(x)[0][0]
def get_g(self, x):
return self.Q(x)[0][1]
def bellman_error(self, s, a, r, s_):
x = np.concatenate((np.reshape(s, (1, -1)), np.reshape(a, (1, -1))), axis=1)
if s_ is None:
return r - self.get_q(x)
else:
a_ = self.Q.argmax(s_)
x_ = np.concatenate((np.reshape(s_, (1, -1)), np.reshape(a_, (1, -1))), axis=1)
return r + self.gamma * self.get_q(x_) - self.get_q(x)
def bellman_error2(self, x, r, x_):
if x_ is None:
return r - self.get_q(x)
else:
return r + self.gamma * self.get_q(x_) - self.get_q(x)
def model_error(self):
return 0.5 * self.lossL * self.Q.normsq()
@property
def metrics_names(self):
return ('Training Loss', 'Model Order')
def train(self, step, sample):
self.eta.step(step)
self.beta.step(step)
# Unpack sample and compute error
s, a, r, s_ = sample
x = np.concatenate((np.reshape(np.array(s), (1, -1)), np.reshape(np.array(a), (1, -1))), axis=1)
if s_ is None:
x_ = None
else:
a_ = self.Q.argmax(s_)
x_ = np.concatenate((np.reshape(np.array(s_), (1, -1)), np.reshape(np.array(a_), (1, -1))),axis=1)
delta = self.bellman_error2(x, r, x_)
# Gradient step
self.Q.shrink(1. - self.eta.value * self.lossL)
if s_ is None:
W = np.zeros((1, 2))
W[0,0] = self.eta.value * delta
W[0,1] = self.beta.value * (delta - self.get_g(x))
self.Q.append(x, W)
else:
W = np.zeros((2, 2))
W[0,0] = self.eta.value * delta
W[1,0] = - self.eta.value * self.gamma * self.get_g(x)
W[0,1] = self.beta.value * (delta - self.get_g(x))
self.Q.append(np.vstack((x, x_)), W)
# Prune
self.Q.prune(self.eps ** 2 * self.eta.value ** 2 / self.beta.value)
modelOrder_ = self.Q.model_order()
# Compute new error
loss = 0.5 * self.bellman_error2(x, r, x_) ** 2 + self.model_error() # TODO should we have model error here?
return (float(loss), float(modelOrder_))
# ==================================================
# An agent using Q-Learning
class KGreedyQAgent(object):
def __init__(self, env, config):
self.stateCount = env.stateCount
self.actionCount = env.actionCount
# ---- Configure exploration
self.epsilon = ScheduledParameter('ExplorationRate', config)
self.epsilon.step(0)
self.min_act = np.reshape(json.loads(config.get('MinAction')), (-1, 1))
self.max_act = np.reshape(json.loads(config.get('MaxAction')), (-1, 1))
self.act_mult = config.getfloat('ActMultiplier', 1)
# How many steps we have observed
self.steps = 0
self.lastSample = None
# Initialize model
self.model = KGreedyQModel(self.stateCount, self.actionCount, config)
def act(self, s, stochastic=True):
# "Decide what action to take in state s."
if stochastic and (random.random() < self.epsilon.value):
a = np.random.uniform(self.act_mult * self.min_act, self.act_mult * self.max_act)
else:
a = self.model.Q.argmax(s)
return np.reshape(np.clip(a, self.min_act, self.max_act),(-1,))
def observe(self, sample):
self.lastSample = sample
self.steps += 1
self.epsilon.step(self.steps)
def improve(self):
return self.model.train(self.steps, self.lastSample)
def bellman_error(self, s, a, r, s_):
return self.model.bellman_error(s, a, r, s_)
def model_error(self):
return self.model.model_error()
@property
def metrics_names(self):
return self.model.metrics_names
| [
"numpy.clip",
"corerl.core.ScheduledParameter",
"numpy.reshape",
"numpy.array",
"numpy.zeros",
"corerl.function.KernelRepresentation",
"numpy.vstack",
"numpy.random.uniform",
"random.random"
] | [((332, 389), 'corerl.function.KernelRepresentation', 'KernelRepresentation', (['(stateCount + actionCount)', '(2)', 'config'], {}), '(stateCount + actionCount, 2, config)\n', (352, 389), False, 'from corerl.function import KernelRepresentation\n'), ((435, 477), 'corerl.core.ScheduledParameter', 'ScheduledParameter', (['"""LearningRate"""', 'config'], {}), "('LearningRate', config)\n", (453, 477), False, 'from corerl.core import ScheduledParameter\n'), ((498, 543), 'corerl.core.ScheduledParameter', 'ScheduledParameter', (['"""ExpectationRate"""', 'config'], {}), "('ExpectationRate', config)\n", (516, 543), False, 'from corerl.core import ScheduledParameter\n'), ((3369, 3414), 'corerl.core.ScheduledParameter', 'ScheduledParameter', (['"""ExplorationRate"""', 'config'], {}), "('ExplorationRate', config)\n", (3387, 3414), False, 'from corerl.core import ScheduledParameter\n'), ((2316, 2332), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {}), '((1, 2))\n', (2324, 2332), True, 'import numpy as np\n'), ((2502, 2518), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (2510, 2518), True, 'import numpy as np\n'), ((4040, 4117), 'numpy.random.uniform', 'np.random.uniform', (['(self.act_mult * self.min_act)', '(self.act_mult * self.max_act)'], {}), '(self.act_mult * self.min_act, self.act_mult * self.max_act)\n', (4057, 4117), True, 'import numpy as np\n'), ((4197, 4235), 'numpy.clip', 'np.clip', (['a', 'self.min_act', 'self.max_act'], {}), '(a, self.min_act, self.max_act)\n', (4204, 4235), True, 'import numpy as np\n'), ((997, 1019), 'numpy.reshape', 'np.reshape', (['s', '(1, -1)'], {}), '(s, (1, -1))\n', (1007, 1019), True, 'import numpy as np\n'), ((1021, 1043), 'numpy.reshape', 'np.reshape', (['a', '(1, -1)'], {}), '(a, (1, -1))\n', (1031, 1043), True, 'import numpy as np\n'), ((2719, 2737), 'numpy.vstack', 'np.vstack', (['(x, x_)'], {}), '((x, x_))\n', (2728, 2737), True, 'import numpy as np\n'), ((3985, 4000), 'random.random', 'random.random', ([], {}), '()\n', 
(3998, 4000), False, 'import random\n'), ((1196, 1219), 'numpy.reshape', 'np.reshape', (['s_', '(1, -1)'], {}), '(s_, (1, -1))\n', (1206, 1219), True, 'import numpy as np\n'), ((1221, 1244), 'numpy.reshape', 'np.reshape', (['a_', '(1, -1)'], {}), '(a_, (1, -1))\n', (1231, 1244), True, 'import numpy as np\n'), ((1878, 1889), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (1886, 1889), True, 'import numpy as np\n'), ((1912, 1923), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (1920, 1923), True, 'import numpy as np\n'), ((2082, 2094), 'numpy.array', 'np.array', (['s_'], {}), '(s_)\n', (2090, 2094), True, 'import numpy as np\n'), ((2117, 2129), 'numpy.array', 'np.array', (['a_'], {}), '(a_)\n', (2125, 2129), True, 'import numpy as np\n')] |
import numpy as _np
import numba as _numba
from ..util import rotation as _rotation
def get_kernel(Natoms, Ndet, pixelsize, detz, k, detangles=(0, 0), nodist=True, nf=False):
"""
returns a cpu implementation of the wavefield, used internally
Natoms: Number of atoms
Ndet: detector pixels
pixelsize: detector pixelsize
detz: detector distance
k: angular wavenumber
detangles: (theta,phi) angles of detector rotation around origin
returns a function with signature complex64(:,:)(out complex64(:,:), atompositionsandphases float64[:,4])
that will write the wavefield into out
"""
maxx, maxy = int(Ndet[0]), int(Ndet[1])
k = float(k)
pixelsize = float(pixelsize)
detdist = float(detz)
Natoms = int(Natoms)
if _np.any(detangles):
M = _rotation(*detangles, 0)
detrot = True
else:
M = _np.eye(3)
detrot = False
@_numba.njit(inline='always', fastmath=True)
def nfphase(atom, detx, dety, detz):
dist = _np.sqrt((detx - atom[0]) ** 2 + (dety - atom[1]) ** 2 + (detz - atom[2]) ** 2)
phase = (dist - detz + atom[2]) * k + atom[3]
if nodist:
px = _np.cos(phase)
py = _np.sin(phase)
else:
rdist = 1 / dist
px = rdist * _np.cos(phase)
py = rdist * _np.sin(phase)
return px, py
@_numba.njit(inline='always', fastmath=True)
def ffphase(atom, qx, qy, qz, rdist):
phase = -(atom[0] * qx + atom[1] * qy + atom[2] * qz) + atom[3]
if nodist:
px = _np.cos(phase)
py = _np.sin(phase)
else:
px = rdist * _np.cos(phase)
py = rdist * _np.sin(phase)
return px, py
@_numba.njit('complex128[:, ::1],float64[:, ::1]', parallel=True, fastmath={'contract', 'arcp', 'ninf', 'nnan'})
def kernel(ret, atom):
for x in _numba.prange(maxx):
for y in range(maxy):
detx = (x - maxx / 2) * pixelsize
dety = (y - maxy / 2) * pixelsize
detz = detdist
if detrot:
detx, dety, detz = M @ _np.array((detx, dety, detz))
accumx = 0.0
accumy = 0.0
cx = 0.0
cy = 0.0
if not nf:
rdist = 1 / _np.linalg.norm(_np.array([detx, dety, detz]))
qz = k * (detz * rdist - 1)
qx = k * (detx * rdist)
qy = k * (dety * rdist)
for i in range(Natoms):
if nf:
px, py = nfphase(atom[i, :], detx, dety, detz)
else:
px, py = ffphase(atom[i, :], qx, qy, qz, rdist)
# kahan summation
yx = px - cx
yy = py - cy
tx = accumx + yx
ty = accumy + yy
cx = (tx - accumx) - yx
cy = (ty - accumy) - yy
accumx = tx
accumy = ty
ret[x, y] = accumx + 1j * accumy
return kernel
def simulate(Nimg, simobject, Ndet, pixelsize, detz, k=None, settings='', verbose=False, detangles=(0, 0), *args, **kwargs):
"""
returns an array of simulated wavefields
parameters:
Nimg: number of wavefields to simulate
simobject: a simobject whose get() returns an Nx4 array with atoms in the first
and (x,y,z,phase) of each atom in the last dimension
Ndet: pixels on the detector
pixelsize: size of one pixel in same unit as simobjects unit (usually um)
detz: detector distance in same unit as simobjects unit (usually um)
k: angular wavenumber, if None use simobject.k
settings: string
if it contains 'scale', 1/r scaling is performed
if it contains 'nf', no far field approximation is made
detangles: (theta,phi) angles of detector rotation around origin
"""
if k is None:
k = simobject.k
if _np.size(Ndet) == 1:
Ndet = [Ndet, Ndet]
result = _np.empty((Nimg, Ndet[0], Ndet[1]), dtype=_np.complex128)
gen = simulate_gen(simobject, Ndet, pixelsize, detz, k, settings, detangles)
for n in range(0, Nimg):
if verbose:
print(n, end='', flush=True)
result[n] = next(gen)
if verbose:
print('. ', end='', flush=True)
return result
def simulate_gen(simobject, Ndet, pixelsize, detz, k=None, settings='', detangles=(0, 0), *args, **kwargs):
"""
returns a generator that yields simulated wavefields
parameters:
simobject: a simobject whose get() returns an Nx4 array with atoms in the first
and (x,y,z,phase) of each atom in the last dimension
Ndet: pixels on the detector
pixelsize: size of one pixel in same unit as simobjects unit (usually um)
detz: detector distance in same unit as simobjects unit (usually um)
k: angular wavenumber, if None use simobject.k
settings: string
if it contains 'scale', 1/r scaling is performed
if it contains 'nf', no far field approximation is made
detangles: (theta,phi) angles of detector rotation around origin
"""
if k is None:
k = simobject.k
nodist = 'scale' not in settings
nf = 'nf' in settings
if _np.size(Ndet) == 1:
Ndet = [Ndet, Ndet]
result = _np.empty((Ndet[0], Ndet[1]), dtype=_np.complex128)
f = get_kernel(simobject.N, Ndet, pixelsize, detz, k, nodist=nodist, nf=nf, detangles=detangles)
while True:
atoms = simobject.get()
f(result, atoms)
yield result
| [
"numpy.eye",
"numpy.sqrt",
"numpy.size",
"numba.njit",
"numpy.any",
"numpy.array",
"numpy.empty",
"numpy.cos",
"numpy.sin",
"numba.prange"
] | [((781, 799), 'numpy.any', '_np.any', (['detangles'], {}), '(detangles)\n', (788, 799), True, 'import numpy as _np\n'), ((922, 965), 'numba.njit', '_numba.njit', ([], {'inline': '"""always"""', 'fastmath': '(True)'}), "(inline='always', fastmath=True)\n", (933, 965), True, 'import numba as _numba\n'), ((1390, 1433), 'numba.njit', '_numba.njit', ([], {'inline': '"""always"""', 'fastmath': '(True)'}), "(inline='always', fastmath=True)\n", (1401, 1433), True, 'import numba as _numba\n'), ((1753, 1869), 'numba.njit', '_numba.njit', (['"""complex128[:, ::1],float64[:, ::1]"""'], {'parallel': '(True)', 'fastmath': "{'contract', 'arcp', 'ninf', 'nnan'}"}), "('complex128[:, ::1],float64[:, ::1]', parallel=True, fastmath={\n 'contract', 'arcp', 'ninf', 'nnan'})\n", (1764, 1869), True, 'import numba as _numba\n'), ((4132, 4189), 'numpy.empty', '_np.empty', (['(Nimg, Ndet[0], Ndet[1])'], {'dtype': '_np.complex128'}), '((Nimg, Ndet[0], Ndet[1]), dtype=_np.complex128)\n', (4141, 4189), True, 'import numpy as _np\n'), ((5440, 5491), 'numpy.empty', '_np.empty', (['(Ndet[0], Ndet[1])'], {'dtype': '_np.complex128'}), '((Ndet[0], Ndet[1]), dtype=_np.complex128)\n', (5449, 5491), True, 'import numpy as _np\n'), ((882, 892), 'numpy.eye', '_np.eye', (['(3)'], {}), '(3)\n', (889, 892), True, 'import numpy as _np\n'), ((1022, 1101), 'numpy.sqrt', '_np.sqrt', (['((detx - atom[0]) ** 2 + (dety - atom[1]) ** 2 + (detz - atom[2]) ** 2)'], {}), '((detx - atom[0]) ** 2 + (dety - atom[1]) ** 2 + (detz - atom[2]) ** 2)\n', (1030, 1101), True, 'import numpy as _np\n'), ((1909, 1928), 'numba.prange', '_numba.prange', (['maxx'], {}), '(maxx)\n', (1922, 1928), True, 'import numba as _numba\n'), ((4070, 4084), 'numpy.size', '_np.size', (['Ndet'], {}), '(Ndet)\n', (4078, 4084), True, 'import numpy as _np\n'), ((5377, 5391), 'numpy.size', '_np.size', (['Ndet'], {}), '(Ndet)\n', (5385, 5391), True, 'import numpy as _np\n'), ((1192, 1206), 'numpy.cos', '_np.cos', (['phase'], {}), '(phase)\n', (1199, 
1206), True, 'import numpy as _np\n'), ((1224, 1238), 'numpy.sin', '_np.sin', (['phase'], {}), '(phase)\n', (1231, 1238), True, 'import numpy as _np\n'), ((1584, 1598), 'numpy.cos', '_np.cos', (['phase'], {}), '(phase)\n', (1591, 1598), True, 'import numpy as _np\n'), ((1616, 1630), 'numpy.sin', '_np.sin', (['phase'], {}), '(phase)\n', (1623, 1630), True, 'import numpy as _np\n'), ((1307, 1321), 'numpy.cos', '_np.cos', (['phase'], {}), '(phase)\n', (1314, 1321), True, 'import numpy as _np\n'), ((1347, 1361), 'numpy.sin', '_np.sin', (['phase'], {}), '(phase)\n', (1354, 1361), True, 'import numpy as _np\n'), ((1670, 1684), 'numpy.cos', '_np.cos', (['phase'], {}), '(phase)\n', (1677, 1684), True, 'import numpy as _np\n'), ((1710, 1724), 'numpy.sin', '_np.sin', (['phase'], {}), '(phase)\n', (1717, 1724), True, 'import numpy as _np\n'), ((2165, 2194), 'numpy.array', '_np.array', (['(detx, dety, detz)'], {}), '((detx, dety, detz))\n', (2174, 2194), True, 'import numpy as _np\n'), ((2378, 2407), 'numpy.array', '_np.array', (['[detx, dety, detz]'], {}), '([detx, dety, detz])\n', (2387, 2407), True, 'import numpy as _np\n')] |
import torch
import copy
import numpy as np
from typing import Union, Tuple
from codebook import Codebook
from psf import PSF
class ISTDeco:
'''
ISTDECO - Deconvovles 1D or 2D spatial transcriptomic data
Parameters
----------
Y : float
Input image data of shape (rounds, channels, height, width)
D : float
Codebook of shape (ncodes, rounds, channels)
sigma : tuple(float,float)
Tuple of values corresponding to the standard deviation
of the gaussian shaped psf.
b : float
Background offset parameter. Can be a constant or same shape as Y.
Must be positive. Default: 1e-5
scale : float
We can deconvolve the image data to higher/lower spatial resolution.
Scale determines the scale factor. Defalut: 1.0 (no upscaling)
Example
----------
model = ISTDeco(Y, D, sigma, b=1e-5, upscale=1)
model = model.to('cuda') # If we want GPU
X, Q, loss = model.run(niter=100)
'''
def __init__(self, Y, D, sigma, b=1e-8, scale=1):
self.input_shape = Y.shape
self.sigma = sigma
self.scale = scale
m, r, c = D.shape
_,_,self.h,self.w = Y.shape
self.hx = int(np.ceil(self.h * scale)) if self.h > 1 else self.h
self.wx = int(np.ceil(self.w * scale)) if self.w > 1 else self.w
bh = int(2*np.ceil(3*sigma[0]*scale)+1) if self.h > 1 else 1
bw = int(2*np.ceil(3*sigma[1]*scale)+1) if self.w > 1 else 1
self.psf_support_scaled = (bh, bw)
bh = int(2*np.ceil(3*sigma[0])+1) if self.h > 1 else 1
bw = int(2*np.ceil(3*sigma[1])+1) if self.w > 1 else 1
self.psf_support = (bh, bw)
# Set up X
x_shape = (m, self.hx, self.wx)
self.X = torch.ones(x_shape).float()
# Set up Y
self.Y = torch.tensor(Y).float().flatten(start_dim=0, end_dim=1)
# Set up b
self.b = torch.tensor(b).float()
if self.b.ndim == 4:
self.b = self.b.flatten(start_dim=0, end_dim=1)
# Set up codebook
self.codebook = Codebook(D)
# Set up spatial blurr
self.psf = PSF((self.h,self.w),sigma,scale)
# Prepare constant
ones_channels = torch.ones((r*c, 1, 1))
ones_space = torch.ones((1, self.h, self.w))
self.denominator = self.codebook.matmul_t(ones_channels) * \
self.psf.matmul_t(ones_space)
# Compute Yhat = DXG
self.Yhat = self.__forward(self.X)
def to(self, device):
'''
Puts tensors on a device. See pytorch doc for more info.
Useful for moving tensors to cuda.
Example
----------
model = ISTDECO(Y,D,sigma)
model.to('cuda') # Put tensors on GPU
model.to('cpu') # Put tensors on CPU
Parameters
----------
device : str
The device, for instance 'cpu' or 'cuda'
'''
self.Y = self.Y.to(device)
self.Yhat = self.Yhat.to(device)
self.X = self.X.to(device)
self.denominator = self.denominator.to(device)
self.codebook = self.codebook.to(device)
self.psf = self.psf.to(device)
self.b = self.b.to(device)
return self
def __forward(self, tensor):
return self.psf.matmul(self.codebook.matmul(tensor)) + self.b
def __compute_quality(self, tensor):
# Pool intensities spatially
tensor_blurr = torch.nn.functional.avg_pool2d(tensor, \
self.psf_support_scaled,\
stride=1,\
divisor_override=1,\
padding=tuple(t//2 for t in self.psf_support_scaled))
tensor_blurr2 = self.psf.up_pool(torch.nn.functional.relu(self.Y - self.b))
# Compute quality feature
Q = tensor_blurr / self.codebook.matmul_t(tensor_blurr2)
Q[torch.isnan(Q)] = 0
return Q
def run(self, niter=100, acc=1.0, suppress_radius=1):
'''
Run the optimization
Parameters
----------
niter : int
Number of iterations
acc : float
Factor for acceleration the multiplicative updates
If too large, a convergence may be unstalbe. Usually a
value between 1.0 and 1.5 is fine. Default: 1.0
suppress_width : int
Integer indicating the radius of the non-maxima supression footprint.
Default: 1.
Outputs
---------
X : numpy array
A multi-channel image of shape (m, sy, sx) where
m is the number of barcodes, sy and sx are the scaled height and width.
The values in X corresponds to the intensity of different barcodes.
For instance, X_kij is the intensity of the barcode with code k, localized
at i and j.
Q : numpy array
A multi-channel image of shape (m, sy, sx) where
m is the number of barcodes, sy and sx are the scaled height and width.
The values in Q are useful for elimintaing false-positive detections during
pos-processing.
loss : numpy array
An (niter,) shaped array with values
'''
loss = torch.zeros((niter,))
for i in range(niter):
loss[i] = torch.sum(self.Yhat) - \
torch.sum(self.Y*torch.log(self.Yhat+1e-9))
self.X = self.X * (self.codebook.matmul_t(self.psf.matmul_t(self.Y / self.Yhat)) / self.denominator)**acc
self.Yhat = self.__forward(self.X)
Q = self.__compute_quality(self.X)
if suppress_radius is not None:
mask = self.__nonmaxsuppress(suppress_radius, self.X)
self.X = self.X * mask
Q = Q * mask
return self.X.cpu().numpy(), Q.cpu().numpy(), loss
def __nonmaxsuppress(self, radius, tensor):
padd = [radius if self.h > 1 else 0, radius]
kernel_sz = (2*radius+1 if self.h > 1 else 1, 2*radius+1)
mask = torch.nn.functional.max_pool2d(tensor, kernel_sz, stride=1, padding=padd) == tensor
#ints = torch.nn.functional.avg_pool2d(tensor, kernel_sz, stride=1, padding=padd, divisor_override=1)
return mask
| [
"numpy.ceil",
"torch.log",
"psf.PSF",
"codebook.Codebook",
"torch.tensor",
"torch.sum",
"torch.nn.functional.relu",
"torch.nn.functional.max_pool2d",
"torch.isnan",
"torch.zeros",
"torch.ones"
] | [((2113, 2124), 'codebook.Codebook', 'Codebook', (['D'], {}), '(D)\n', (2121, 2124), False, 'from codebook import Codebook\n'), ((2176, 2211), 'psf.PSF', 'PSF', (['(self.h, self.w)', 'sigma', 'scale'], {}), '((self.h, self.w), sigma, scale)\n', (2179, 2211), False, 'from psf import PSF\n'), ((2261, 2286), 'torch.ones', 'torch.ones', (['(r * c, 1, 1)'], {}), '((r * c, 1, 1))\n', (2271, 2286), False, 'import torch\n'), ((2306, 2337), 'torch.ones', 'torch.ones', (['(1, self.h, self.w)'], {}), '((1, self.h, self.w))\n', (2316, 2337), False, 'import torch\n'), ((5560, 5581), 'torch.zeros', 'torch.zeros', (['(niter,)'], {}), '((niter,))\n', (5571, 5581), False, 'import torch\n'), ((3825, 3866), 'torch.nn.functional.relu', 'torch.nn.functional.relu', (['(self.Y - self.b)'], {}), '(self.Y - self.b)\n', (3849, 3866), False, 'import torch\n'), ((3978, 3992), 'torch.isnan', 'torch.isnan', (['Q'], {}), '(Q)\n', (3989, 3992), False, 'import torch\n'), ((6337, 6410), 'torch.nn.functional.max_pool2d', 'torch.nn.functional.max_pool2d', (['tensor', 'kernel_sz'], {'stride': '(1)', 'padding': 'padd'}), '(tensor, kernel_sz, stride=1, padding=padd)\n', (6367, 6410), False, 'import torch\n'), ((1235, 1258), 'numpy.ceil', 'np.ceil', (['(self.h * scale)'], {}), '(self.h * scale)\n', (1242, 1258), True, 'import numpy as np\n'), ((1308, 1331), 'numpy.ceil', 'np.ceil', (['(self.w * scale)'], {}), '(self.w * scale)\n', (1315, 1331), True, 'import numpy as np\n'), ((1783, 1802), 'torch.ones', 'torch.ones', (['x_shape'], {}), '(x_shape)\n', (1793, 1802), False, 'import torch\n'), ((1941, 1956), 'torch.tensor', 'torch.tensor', (['b'], {}), '(b)\n', (1953, 1956), False, 'import torch\n'), ((5635, 5655), 'torch.sum', 'torch.sum', (['self.Yhat'], {}), '(self.Yhat)\n', (5644, 5655), False, 'import torch\n'), ((1379, 1408), 'numpy.ceil', 'np.ceil', (['(3 * sigma[0] * scale)'], {}), '(3 * sigma[0] * scale)\n', (1386, 1408), True, 'import numpy as np\n'), ((1449, 1478), 'numpy.ceil', 'np.ceil', 
(['(3 * sigma[1] * scale)'], {}), '(3 * sigma[1] * scale)\n', (1456, 1478), True, 'import numpy as np\n'), ((1562, 1583), 'numpy.ceil', 'np.ceil', (['(3 * sigma[0])'], {}), '(3 * sigma[0])\n', (1569, 1583), True, 'import numpy as np\n'), ((1626, 1647), 'numpy.ceil', 'np.ceil', (['(3 * sigma[1])'], {}), '(3 * sigma[1])\n', (1633, 1647), True, 'import numpy as np\n'), ((1848, 1863), 'torch.tensor', 'torch.tensor', (['Y'], {}), '(Y)\n', (1860, 1863), False, 'import torch\n'), ((5693, 5721), 'torch.log', 'torch.log', (['(self.Yhat + 1e-09)'], {}), '(self.Yhat + 1e-09)\n', (5702, 5721), False, 'import torch\n')] |
import itertools
import os
import sys
import gym
import numpy as np
import click
sys.path.append(os.path.dirname(sys.path[0]))
from lib import plotting
from lib.tilecoding import representation
class PolicyEstimator():
"""
Policy Function approximator.
"""
def __init__(self, env, weights):
# We create a separate model for each action in the environment's
# action space. Alternatively we could somehow encode the action
# into the features, but this way it's easier to code up.
self.action_length = 1
state_range = [
np.array(env.observation_space.low),
np.array(env.observation_space.high)]
self.NTiling = 25
self.featurizer = representation.TileCoding(
input_indices = [np.arange(env.observation_space.shape[0])], # Tiling in each dimension
ntiles = [20], # Number of tiles per dimension
ntilings = [self.NTiling], # How many layers of tiles
hashing = None,
state_range = state_range)
# After choice of feature type
self.feature_length = self.featurizer._TileCoding__size
self.weights = (env.action_space.high/self.NTiling) * np.random.normal(size=[self.feature_length, self.action_length])
def featurize_state(self, state):
"""
Returns the featurized representation for a state.
"""
## polynomial features function requires 2D matrix
# features = self.poly.fit_transform(state.reshape(1, -1))
features = np.zeros((self.feature_length,1))
features_index = self.featurizer(state)[0:-1]
features[features_index] = 1
return features, features_index
def predict(self, state):
"""
Makes value function predictions.
Args:
s: state to make a prediction for
a: (Optional) action to make a prediction for
Returns
If an action a is given this returns a single number as the prediction.
If no action is given this returns a vector or predictions for all actions
in the environment where pred[i] is the prediction for action i.
"""
#1: Featurise states & action
features, _ = self.featurize_state(state)
#2: Calculate action-value
mean = np.matmul(features.T, self.weights).flatten()
return mean
def update(self, eligibility, alpha, action_score, update_prev, momentum):
"""
Updates the estimator parameters for a given state and action towards
the target y.
"""
update = np.array(alpha * eligibility * action_score).reshape(-1,1) + momentum * update_prev
self.weights += update
return update
class ValueEstimator():
"""
Value Function approximator.
"""
def __init__(self, env, weights):
# We create a separate model for each action in the environment's
# action space. Alternatively we could somehow encode the action
# into the features, but this way it's easier to code up.
state_range = [
np.array(env.observation_space.low),
np.array(env.observation_space.high)]
self.NTiling = 25
self.featurizer = representation.TileCoding(
input_indices = [np.arange(env.observation_space.shape[0])], # Tiling in each dimension
ntiles = [20], # Number of tiles per dimension
ntilings = [self.NTiling], # How many layers of tiles
hashing = None,
state_range = state_range)
# After choice of feature type
self.feature_length = self.featurizer._TileCoding__size
self.weights = np.random.rand(self.feature_length)
def featurize_state(self, state):
"""
Returns the featurized representation for a state.
"""
## polynomial features function requires 2D matrix
# features = self.poly.fit_transform(state.reshape(1, -1))
features = np.zeros((self.feature_length,1))
features_index = self.featurizer(state)[0:-1]
features[features_index] = 1
return features, features_index
def predict(self, state):
"""
Makes value function predictions.
Args:
s: state to make a prediction for
a: (Optional) action to make a prediction for
Returns
If an action a is given this returns a single number as the prediction.
If no action is given this returns a vector or predictions for all actions
in the environment where pred[i] is the prediction for action i.
"""
#1: Featurise states & action
features, _ = self.featurize_state(state)
#2: Calculate action-value
value = np.matmul(features.T, self.weights).flatten()
#3: Select return
return value
def update(self, eligibility, td_error, alpha, update_prev, momentum):
"""
Updates the estimator parameters for a given state and action towards
the target y.
"""
update = alpha * td_error * eligibility + momentum * update_prev
self.weights += update
return update
def ActorCritic(env, policy_estimator, value_estimator, num_episodes, display=True,
epsilon_decay=0.99, epsilon_initial=0.25, visual_updates=10, policy_std=0.1,
value_alpha=1e-3, value_discount_factor=0.9, value_llambda=0.8, value_momentum=0.9,
policy_alpha=1e-3, policy_discount_factor=0.9, policy_llambda=0, policy_momentum=0):
"""
Q-Learning algorithm for fff-policy TD control using Function Approximation.
Finds the optimal greedy policy while following an epsilon-greedy policy.
Args:
env: OpenAI environment.
estimator: Action-Value function estimator
num_episodes: Number of episodes to run for.
discount_factor: Lambda time discount factor.
epsilon: Chance the sample a random action. Float betwen 0 and 1.
epsilon_decay: Each episode, epsilon is decayed by this factor
Returns:
An EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.
"""
# Keeps track of useful statistics
stats = plotting.EpisodeStats(
episode_lengths=np.zeros(num_episodes),
episode_rewards=np.zeros(num_episodes))
with click.progressbar(range(num_episodes)) as bar:
for episode in bar:
observation = env.reset()
counter = 0
episode_tracker = []
value_eligibility = np.zeros(value_estimator.feature_length)
policy_eligibility = np.zeros(policy_estimator.feature_length)
value_update = 0
policy_update = 0
while True:
if (episode%visual_updates == 0) & display:
env.render()
counter += 1
episode_tracker.append(observation)
policy_feature, _ = policy_estimator.featurize_state(observation)
mean_action = policy_estimator.predict(observation)
action = np.random.normal(loc=mean_action, scale=policy_std)
policy_score = ((action-mean_action) * policy_feature / (policy_std**2)).flatten() # See DSilver notes
observation_new, reward, done, _ = env.step(action)
# Value TD error
value = value_estimator.predict(observation)
value_feature, _ = value_estimator.featurize_state(observation)
value_new = value_estimator.predict(observation_new)
value_td_target = reward + value_discount_factor * value_new
value_td_error = value_td_target - value
# Eligibility decay
value_eligibility = value_discount_factor * value_llambda * value_eligibility
value_eligibility += value_feature.T.flatten()/value_estimator.NTiling
policy_eligibility = policy_discount_factor * policy_llambda * policy_eligibility
policy_eligibility += policy_score.flatten()/policy_estimator.NTiling
# Update weights
value_update = value_estimator.update(
value_eligibility, value_td_error, value_alpha, value_update, value_momentum)
policy_update = policy_estimator.update(
policy_eligibility, policy_alpha, policy_score, policy_update, policy_momentum)
# Update statistics
stats.episode_rewards[episode] += reward
# Loop back parameters
observation = observation_new
if done:
stats.episode_lengths[episode] = counter
print(stats.episode_rewards[episode])
break
return stats, value_estimator, policy_estimator | [
"numpy.random.normal",
"numpy.random.rand",
"numpy.array",
"os.path.dirname",
"numpy.zeros",
"numpy.matmul",
"numpy.arange"
] | [((99, 127), 'os.path.dirname', 'os.path.dirname', (['sys.path[0]'], {}), '(sys.path[0])\n', (114, 127), False, 'import os\n'), ((1567, 1601), 'numpy.zeros', 'np.zeros', (['(self.feature_length, 1)'], {}), '((self.feature_length, 1))\n', (1575, 1601), True, 'import numpy as np\n'), ((3772, 3807), 'numpy.random.rand', 'np.random.rand', (['self.feature_length'], {}), '(self.feature_length)\n', (3786, 3807), True, 'import numpy as np\n'), ((4075, 4109), 'numpy.zeros', 'np.zeros', (['(self.feature_length, 1)'], {}), '((self.feature_length, 1))\n', (4083, 4109), True, 'import numpy as np\n'), ((600, 635), 'numpy.array', 'np.array', (['env.observation_space.low'], {}), '(env.observation_space.low)\n', (608, 635), True, 'import numpy as np\n'), ((650, 686), 'numpy.array', 'np.array', (['env.observation_space.high'], {}), '(env.observation_space.high)\n', (658, 686), True, 'import numpy as np\n'), ((1235, 1299), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[self.feature_length, self.action_length]'}), '(size=[self.feature_length, self.action_length])\n', (1251, 1299), True, 'import numpy as np\n'), ((3176, 3211), 'numpy.array', 'np.array', (['env.observation_space.low'], {}), '(env.observation_space.low)\n', (3184, 3211), True, 'import numpy as np\n'), ((3226, 3262), 'numpy.array', 'np.array', (['env.observation_space.high'], {}), '(env.observation_space.high)\n', (3234, 3262), True, 'import numpy as np\n'), ((6385, 6407), 'numpy.zeros', 'np.zeros', (['num_episodes'], {}), '(num_episodes)\n', (6393, 6407), True, 'import numpy as np\n'), ((6433, 6455), 'numpy.zeros', 'np.zeros', (['num_episodes'], {}), '(num_episodes)\n', (6441, 6455), True, 'import numpy as np\n'), ((6675, 6715), 'numpy.zeros', 'np.zeros', (['value_estimator.feature_length'], {}), '(value_estimator.feature_length)\n', (6683, 6715), True, 'import numpy as np\n'), ((6749, 6790), 'numpy.zeros', 'np.zeros', (['policy_estimator.feature_length'], {}), '(policy_estimator.feature_length)\n', 
(6757, 6790), True, 'import numpy as np\n'), ((2362, 2397), 'numpy.matmul', 'np.matmul', (['features.T', 'self.weights'], {}), '(features.T, self.weights)\n', (2371, 2397), True, 'import numpy as np\n'), ((4871, 4906), 'numpy.matmul', 'np.matmul', (['features.T', 'self.weights'], {}), '(features.T, self.weights)\n', (4880, 4906), True, 'import numpy as np\n'), ((7227, 7278), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'mean_action', 'scale': 'policy_std'}), '(loc=mean_action, scale=policy_std)\n', (7243, 7278), True, 'import numpy as np\n'), ((805, 846), 'numpy.arange', 'np.arange', (['env.observation_space.shape[0]'], {}), '(env.observation_space.shape[0])\n', (814, 846), True, 'import numpy as np\n'), ((2668, 2712), 'numpy.array', 'np.array', (['(alpha * eligibility * action_score)'], {}), '(alpha * eligibility * action_score)\n', (2676, 2712), True, 'import numpy as np\n'), ((3381, 3422), 'numpy.arange', 'np.arange', (['env.observation_space.shape[0]'], {}), '(env.observation_space.shape[0])\n', (3390, 3422), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import pandas as pd
import numpy as np
from time import time
import os
import ipdb
import sys
sys.path.append('../utils')
sys.path.append('./pharma')
import preproc_utils
from pharmarec_processing import pivot_pharma_table
def LoadRawHDF5Values(tbl_name,
                      patient_id,
                      variable_ids=None,
                      no_duplicates=True,
                      verbose=False):
    """
    Load the raw records of one patient from the HDF5 store of a table.

    Relies on the module-level globals ``datapath``, ``version`` and
    ``pid_chunkfile_index`` (set in the ``__main__`` block).

    Parameters:
        tbl_name: the name of the table (string)
        patient_id: the selected patient ID
        variable_ids: optional collection of variable (or pharma) IDs; when
            given, records of all other variables are discarded
        no_duplicates: unused; kept for interface compatibility
        verbose: bool; when True, print a summary of the returned dataframe
    Returns:
        df: a dataframe with the selected data, columns renamed to the common
            PatientID / Datetime / VariableID / Value scheme
    """
    input_path = os.path.join(datapath, '1a_hdf5_clean', version,
                              'duplicates_removed', tbl_name)
    # Each patient lives in exactly one chunk file; locate it by its tag.
    chunk_tag = '_%d_' % pid_chunkfile_index.loc[patient_id].ChunkfileIndex
    filename = [f for f in os.listdir(input_path) if chunk_tag in f][0]
    df = pd.read_hdf(os.path.join(input_path, filename),
                     where='PatientID = %d' % patient_id, mode='r')
    # The pharma table uses its own ID column name.
    id_column = 'PharmaID' if tbl_name == 'pharmarec' else 'VariableID'
    # If variables of interest are specified, keep only those records.
    if variable_ids is not None:
        wanted = set(variable_ids) & set(df[id_column].tolist())
        if len(wanted) == 0:
            df = df.loc[[]]
        else:
            df = df[df[id_column].isin(wanted)]
    # Harmonize the pharma-specific column names.
    df = df.rename(columns={'PharmaID': 'VariableID', 'GivenDose': 'Value'})
    if tbl_name == 'pharmarec':
        columns = ['PatientID', 'Datetime', 'VariableID', 'InfusionID',
                   'Rate', 'Value', 'Status']
    else:
        columns = ['PatientID', 'Datetime', 'VariableID', 'Value']
    df = df[columns]
    if verbose:
        if len(df) > 0:
            print('patient ', patient_id,
                  '; # records ', len(df),
                  '; # variables ', len(df.VariableID.unique()))
        else:
            print('No data of interest was found for patient ', patient_id)
    return df
def remove_invalid_chars(variable_name):
    """
    Remove chars that should not appear in the column name of a dataframe.

    Separator-like characters (' ', '-', '(', ')', ',', '/', ':') are
    replaced by underscores, while '.', '*' and '+' are dropped entirely.

    Parameters:
        variable_name: the raw variable name (string)
    Returns:
        The sanitized name, safe to use as a dataframe column.
    """
    # One C-level translate() pass replaces the original per-character
    # replace() loop (the `in` checks there were redundant anyway, since
    # str.replace is a no-op when the char is absent): the first two
    # maketrans arguments map each char of ' -(),/:' to '_', the third
    # lists the characters to delete.
    return variable_name.translate(str.maketrans(' -(),/:', '_______', '.*+'))
def table2matrix(tbl_name, df_tbl, variables_of_interest):
    """
    Export data from table to a feature matrix, where each column represents a variable.
    Parameter:
        tbl_name: name of the source table; 'pharmarec' gets special handling
        df_tbl: the dataframe containing these columns: Datetime, PatientID, VariableID and Value
        variables_of_interest: a table of the mapping between their variable IDs and variable
            names of variables that we are interested in.
    Returns:
        df_mat: the feature matrix, whose columns are associated with variables.
    """
    # Work on a copy so the caller's mapping table is not mutated.
    voi_tmp = variables_of_interest.copy()
    # BUG FIX: the sanitized names were previously computed but discarded
    # (the .apply() result was never assigned back), so invalid characters
    # survived into the column names. Assign the cleaned names back.
    voi_tmp.VariableName = voi_tmp.VariableName.apply(remove_invalid_chars)
    # Attach the variable names; the inner join drops records whose
    # VariableID is not among the variables of interest.
    df_tbl = df_tbl.join(voi_tmp, on='VariableID', how='inner')
    if tbl_name == 'pharmarec':
        df_mat = pivot_pharma_table(df_tbl, switch='steph')
    else:
        df_tbl.drop('VariableID', axis=1, inplace=True)
        df_mat = pd.pivot_table(df_tbl, values='Value', index=['PatientID', 'Datetime'],
                                columns=['VariableName'])
    # Add the variables that are among the variables of interest but were not
    # measured for the patients in df_tbl (0 dose for drugs, NaN otherwise).
    missing_columns = set(voi_tmp.VariableName.tolist()) - set(df_mat.columns)
    for col in missing_columns:
        if tbl_name == 'pharmarec':
            df_mat[col] = 0
        else:
            df_mat[col] = np.nan
    df_mat.reset_index(inplace=True)
    return df_mat
def main(tbl_name, index_chunk, output_to_disk=True):
    """
    Pivot the raw per-patient HDF5 records of one table into a feature matrix
    (one column per variable) for every patient of one chunk file.

    Relies on the module-level globals ``datapath``, ``version`` and
    ``pid_chunkfile_index`` being set (done in the ``__main__`` block).

    Parameters:
        tbl_name: name of the source table (e.g. 'pharmarec')
        index_chunk: index of the chunk file whose patients are processed
        output_to_disk: if True, append each pivoted matrix to one HDF5 file

    Returns:
        1 on success, -1 if the output file already exists.
    """
    pid_list = preproc_utils.get_filtered_patient_ids()
    output_path = os.path.join(datapath, '2_pivoted', version, tbl_name)
    if output_to_disk and not os.path.exists(output_path):
        os.makedirs(output_path)
    # Restrict to the patients stored in the requested chunk file.
    pid_list = np.array(pid_chunkfile_index.index[pid_chunkfile_index.ChunkfileIndex==index_chunk])
    # Output filename encodes table, chunk index and the patient ID range.
    output_path = os.path.join(output_path, '%s__%d__%d--%d.h5'%(tbl_name, index_chunk, np.min(pid_list), np.max(pid_list)))
    if output_to_disk and os.path.exists(output_path):
        print('Already os.path.exists: %s.'%output_path)
        print('Please delete it manually if you want to reproduce a new file.')
        return -1
    # replace_name = False means the variableNames are vIDs (or pIDs for the pharma table)
    variables_of_interest = preproc_utils.voi_id_name_mapping(tbl_name, replace_name=False, version=version)
    vid_list = variables_of_interest.index.tolist()
    # Running row-index offset so appended matrices get a unique index.
    df_idx_start = 0
    num_pid = len(pid_list)
    for i in range(num_pid):
        t_total = 0
        t = time()
        df_tbl = LoadRawHDF5Values(tbl_name, pid_list[i], variable_ids=vid_list, verbose=True)
        t_read = time() - t
        print('Read time', t_read, 'secs')
        t_total += t_read
        if len(df_tbl) > 0:
            t = time()
            df_mat = table2matrix(tbl_name, df_tbl, variables_of_interest)
            t_pivot = time() - t
            print('Table-to-matrix transform time', t_pivot, 'secs')
            t_total += t_pivot
            # Sanity check 1: one matrix row per distinct timestamp
            # (does not hold for pharmarec, which has its own pivoting).
            if tbl_name != 'pharmarec':
                try:
                    assert(len(df_mat)==len(df_tbl.drop_duplicates(['Datetime'])))
                except AssertionError:
                    print('Timestamp number mismatches between the feature matrix and the table.')
                    pass
                    # import ipdb
                    # ipdb.set_trace()
            # Sanity check 2: one non-empty matrix column per distinct variable.
            try:
                assert(len(df_tbl.VariableID.unique())==np.sum(np.sum(pd.notnull(df_mat.iloc[:,2:]), axis=0)>0))
            except AssertionError:
                print('Variable number mismatches between the feature matrix and the table')
                ipdb.set_trace()
            # Sanity check 3: total value mass is preserved by the pivot.
            sum_tbl_value = np.sum(df_tbl.Value)
            if tbl_name == 'pharmarec':
                sum_mat_value = 0
                non_zero_drugs = df_mat.dropna(axis=1, how='all').columns[2:]
                for col in non_zero_drugs:
                    df_drug = df_mat.loc[:, ['Datetime', col]]
                    df_drug.set_index('Datetime', inplace=True)
                    df_drug.sort_index(inplace=True)
                    df_drug.dropna(inplace=True)
                    if df_drug.shape[0] < 2:
                        if df_drug.values.sum() == 0:
                            # carry on
                            continue
                        else:
                            print('instantaneous drug?')
                            ipdb.set_trace()
                    # this seems to be converting back from rate into given doses to compare that the contents of the dataframe are unchanged (in aggregate, at least)
                    # time_difference calculates a time difference in hours, convert to minutes
                    # TODO somethign is a bit weird here, but ignore it for now
                    tdiff = ((df_drug.index[1:] - df_drug.index[:-1]).astype('timedelta64[s]').values)/60
                    total_dose = np.dot(df_drug.values[:-1].reshape(-1, ), tdiff)
                    sum_mat_value += total_dose
            else:
                sum_mat_value = np.nansum(df_mat.iloc[:,2:])
            try:
                # NOTE: due to rescaling, this should not be true for many pharma records
                assert(np.abs(sum_tbl_value - sum_mat_value) < 1e-4)
            except AssertionError:
                # If the big difference was due to the large absolute values then we look at the the relative difference
                if sum_tbl_value != 0 :
                    try:
                        assert(np.abs(sum_tbl_value - sum_mat_value) / sum_tbl_value < 1e-4)
                    except AssertionError:
                        print('The sum of values in the feature matrix does not match with the sum in the table.')
                        print('\t\t table:', sum_tbl_value)
                        print('\t\t matrix:', sum_mat_value)
                        if tbl_name == 'pharmarec':
                            print('... but this is expected behaviour due to drug rescaling')
                        else:
                            ipdb.set_trace()
            # Re-index so rows appended across patients stay unique.
            df_mat.set_index(np.arange(df_idx_start, df_idx_start+len(df_mat)), inplace=True)
            df_idx_start += len(df_mat)
            # Force a uniform float dtype on all value columns before writing.
            for col in df_mat.columns[2:]:
                df_mat[col] = df_mat[col].astype(float)
            if output_to_disk:
                t = time()
                df_mat.to_hdf(output_path, 'pivoted', append=True, complevel=5,
                              complib='blosc:lz4', data_columns=['PatientID'], format='table')
                t_write = time() - t
                print('Write time', t_write, 'secs')
                t_total += t_write
        print('Patient %d / %d finished; Total runtime = %g sec'%(i, num_pid, t_total))
    return 1
if __name__=='__main__':
    import argparse
    # CLI, e.g.: python <script> -tbl_name <table> -version <v> --index_chunk <i> --output_to_disk
    parser = argparse.ArgumentParser()
    parser.add_argument('-tbl_name')
    parser.add_argument('-version')
    parser.add_argument('--index_chunk', type=int, default=None)
    parser.add_argument('--output_to_disk', action='store_true')
    args = parser.parse_args()
    # The following names become module-level globals that are read directly
    # by LoadRawHDF5Values() and main().
    tbl_name = args.tbl_name
    version = args.version
    index_chunk = args.index_chunk
    output_to_disk = args.output_to_disk
    datapath = preproc_utils.get_datapath()
    pid_chunkfile_index = preproc_utils.get_chunking_info(version=version)
    main(tbl_name, index_chunk, output_to_disk)
| [
"preproc_utils.get_chunking_info",
"numpy.array",
"pandas.notnull",
"sys.path.append",
"preproc_utils.voi_id_name_mapping",
"os.path.exists",
"pandas.pivot_table",
"os.listdir",
"preproc_utils.get_datapath",
"argparse.ArgumentParser",
"numpy.max",
"numpy.min",
"numpy.abs",
"ipdb.set_trace"... | [((117, 144), 'sys.path.append', 'sys.path.append', (['"""../utils"""'], {}), "('../utils')\n", (132, 144), False, 'import sys\n'), ((145, 172), 'sys.path.append', 'sys.path.append', (['"""./pharma"""'], {}), "('./pharma')\n", (160, 172), False, 'import sys\n'), ((784, 869), 'os.path.join', 'os.path.join', (['datapath', '"""1a_hdf5_clean"""', 'version', '"""duplicates_removed"""', 'tbl_name'], {}), "(datapath, '1a_hdf5_clean', version, 'duplicates_removed', tbl_name\n )\n", (796, 869), False, 'import os\n'), ((4469, 4509), 'preproc_utils.get_filtered_patient_ids', 'preproc_utils.get_filtered_patient_ids', ([], {}), '()\n', (4507, 4509), False, 'import preproc_utils\n'), ((4533, 4587), 'os.path.join', 'os.path.join', (['datapath', '"""2_pivoted"""', 'version', 'tbl_name'], {}), "(datapath, '2_pivoted', version, tbl_name)\n", (4545, 4587), False, 'import os\n'), ((4695, 4785), 'numpy.array', 'np.array', (['pid_chunkfile_index.index[pid_chunkfile_index.ChunkfileIndex == index_chunk]'], {}), '(pid_chunkfile_index.index[pid_chunkfile_index.ChunkfileIndex ==\n index_chunk])\n', (4703, 4785), True, 'import numpy as np\n'), ((5236, 5321), 'preproc_utils.voi_id_name_mapping', 'preproc_utils.voi_id_name_mapping', (['tbl_name'], {'replace_name': '(False)', 'version': 'version'}), '(tbl_name, replace_name=False, version=version\n )\n', (5269, 5321), False, 'import preproc_utils\n'), ((9884, 9909), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9907, 9909), False, 'import argparse\n'), ((10304, 10332), 'preproc_utils.get_datapath', 'preproc_utils.get_datapath', ([], {}), '()\n', (10330, 10332), False, 'import preproc_utils\n'), ((10359, 10407), 'preproc_utils.get_chunking_info', 'preproc_utils.get_chunking_info', ([], {'version': 'version'}), '(version=version)\n', (10390, 10407), False, 'import preproc_utils\n'), ((1006, 1040), 'os.path.join', 'os.path.join', (['input_path', 'filename'], {}), '(input_path, filename)\n', (1018, 
1040), False, 'import os\n'), ((3727, 3769), 'pharmarec_processing.pivot_pharma_table', 'pivot_pharma_table', (['df_tbl'], {'switch': '"""steph"""'}), "(df_tbl, switch='steph')\n", (3745, 3769), False, 'from pharmarec_processing import pivot_pharma_table\n'), ((3853, 3954), 'pandas.pivot_table', 'pd.pivot_table', (['df_tbl'], {'values': '"""Value"""', 'index': "['PatientID', 'Datetime']", 'columns': "['VariableName']"}), "(df_tbl, values='Value', index=['PatientID', 'Datetime'],\n columns=['VariableName'])\n", (3867, 3954), True, 'import pandas as pd\n'), ((4655, 4679), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (4666, 4679), False, 'import os\n'), ((4932, 4959), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (4946, 4959), False, 'import os\n'), ((5493, 5499), 'time.time', 'time', ([], {}), '()\n', (5497, 5499), False, 'from time import time\n'), ((4618, 4645), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (4632, 4645), False, 'import os\n'), ((5612, 5618), 'time.time', 'time', ([], {}), '()\n', (5616, 5618), False, 'from time import time\n'), ((5737, 5743), 'time.time', 'time', ([], {}), '()\n', (5741, 5743), False, 'from time import time\n'), ((6675, 6695), 'numpy.sum', 'np.sum', (['df_tbl.Value'], {}), '(df_tbl.Value)\n', (6681, 6695), True, 'import numpy as np\n'), ((892, 914), 'os.listdir', 'os.listdir', (['input_path'], {}), '(input_path)\n', (902, 914), False, 'import os\n'), ((4868, 4884), 'numpy.min', 'np.min', (['pid_list'], {}), '(pid_list)\n', (4874, 4884), True, 'import numpy as np\n'), ((4886, 4902), 'numpy.max', 'np.max', (['pid_list'], {}), '(pid_list)\n', (4892, 4902), True, 'import numpy as np\n'), ((5841, 5847), 'time.time', 'time', ([], {}), '()\n', (5845, 5847), False, 'from time import time\n'), ((8077, 8106), 'numpy.nansum', 'np.nansum', (['df_mat.iloc[:, 2:]'], {}), '(df_mat.iloc[:, 2:])\n', (8086, 8106), True, 'import numpy as np\n'), ((9399, 9405), 
'time.time', 'time', ([], {}), '()\n', (9403, 9405), False, 'from time import time\n'), ((8237, 8274), 'numpy.abs', 'np.abs', (['(sum_tbl_value - sum_mat_value)'], {}), '(sum_tbl_value - sum_mat_value)\n', (8243, 8274), True, 'import numpy as np\n'), ((9608, 9614), 'time.time', 'time', ([], {}), '()\n', (9612, 9614), False, 'from time import time\n'), ((6628, 6644), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (6642, 6644), False, 'import ipdb\n'), ((7410, 7426), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (7424, 7426), False, 'import ipdb\n'), ((6429, 6459), 'pandas.notnull', 'pd.notnull', (['df_mat.iloc[:, 2:]'], {}), '(df_mat.iloc[:, 2:])\n', (6439, 6459), True, 'import pandas as pd\n'), ((8536, 8573), 'numpy.abs', 'np.abs', (['(sum_tbl_value - sum_mat_value)'], {}), '(sum_tbl_value - sum_mat_value)\n', (8542, 8573), True, 'import numpy as np\n'), ((9081, 9097), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (9095, 9097), False, 'import ipdb\n')] |
import numpy as np
def mean(matrix):
    """Return the per-row average of *matrix* (the mean along axis 1)."""
    rows = np.asarray(matrix)
    return rows.mean(axis=1)
| [
"numpy.mean"
] | [((49, 72), 'numpy.mean', 'np.mean', (['matrix'], {'axis': '(1)'}), '(matrix, axis=1)\n', (56, 72), True, 'import numpy as np\n')] |
import yaml
import itertools
import numpy as np
from montepython.likelihood_class import Likelihood
import pyccl as ccl
class LotssCross(Likelihood):
    """MontePython likelihood for LoTSS galaxy clustering auto- and
    cross-correlations (galaxy counts 'g' and CMB lensing 'k' tracers)."""
    # initialization routine
    def __init__(self, path, data, command_line):
        # path/data/command_line are the standard MontePython likelihood
        # constructor arguments, forwarded to the base class.
        Likelihood.__init__(self, path, data, command_line)
        def find_cls(data, prefix, tr1):
            # Look up e.g. 'cl_gk'/'nl_gk' trying every ordering of the
            # tracer-type pair, since only one ordering is stored in the file.
            for p in itertools.permutations(tr1):
                try:
                    return data[prefix+''.join(p)]
                except KeyError:
                    pass
            raise KeyError('Cls do not exist')
        def find_cov(data, tr1, tr2):
            # Same idea for covariance blocks: try all orderings of the two
            # tracer-type pairs until a matching 'cov_...' key is found.
            for p_ref in itertools.permutations([tr1, tr2]):
                for p1 in itertools.permutations(p_ref[0]):
                    for p2 in itertools.permutations(p_ref[1]):
                        case = [x for y in [p1, p2] for x in y]
                        try:
                            return data['cov_'+''.join(case)]
                        except KeyError:
                            pass
            raise KeyError('Covariance matrix does not exist')
        # Read ell cuts (fall back to the class default config if the
        # 'maps' argument is not given)
        try:
            with open(data.arguments['maps']) as f:
                self.maps = yaml.safe_load(f)
        except KeyError:
            self.maps = self.def_maps.copy()
        # Read data (measured Cl's, noise, covariance and n(z) in one npz)
        try:
            data_all = np.load(data.arguments['cl_cov_nz'])
        except KeyError:
            data_all = np.load(data.arguments['def_cl_cov_nz'])
        ells = data_all['l_eff']
        # Load Cl's: concatenate the noise-subtracted, ell-cut bandpowers of
        # all configured data vectors into one flat array.
        self.cl_data = np.array([])
        used_tracers = np.array([])
        for dv in self.maps['data_vectors']:
            # Type of tracers
            tp1 = next(x['type'] for x in self.maps['maps']
                       if x['name'] == dv['tracers'][0])
            tp2 = next(x['type'] for x in self.maps['maps']
                       if x['name'] == dv['tracers'][1])
            dv['types'] = [tp1, tp2]
            # Find ell-cuts (intersection of the two tracers' ranges)
            ell_cuts_1 = next(x['ell_cuts'] for x in self.maps['maps']
                              if x['name'] == dv['tracers'][0])
            ell_cuts_2 = next(x['ell_cuts'] for x in self.maps['maps']
                              if x['name'] == dv['tracers'][1])
            dv['ell_cuts'] = [max(ell_cuts_1[0], ell_cuts_2[0]),
                              min(ell_cuts_1[1], ell_cuts_2[1])]
            # Get ells and cls
            dv['mask'] = (ells >= dv['ell_cuts'][0]) & (
                ells <= dv['ell_cuts'][1])
            dv['ells'] = ells[dv['mask']]
            cl_data = find_cls(data_all, 'cl_', dv['types'])
            cl_noise = find_cls(data_all, 'nl_', dv['types'])
            # Subtract the noise bias from the measured bandpowers.
            cl_clean = cl_data[dv['mask']] - cl_noise[dv['mask']]
            self.cl_data = np.append(self.cl_data, cl_clean)
            used_tracers = np.append(used_tracers, dv['tracers'])
        used_tracers = np.unique(used_tracers)
        # Remove unused tracers
        # NOTE(review): pop()-ing from the list while enumerate()-ing it can
        # skip the element following a removed one — verify that no two
        # unused tracers are ever adjacent in the config.
        for ntr, tr in enumerate(self.maps['maps']):
            if tr['name'] not in used_tracers:
                self.maps['maps'].pop(ntr)
        # Load dndz
        self.z_g = data_all['z_g']
        self.nz_g = data_all['nz_g']
        # Load Covmat: assemble the block covariance of the concatenated data
        # vector; (s, e) are each block's offsets within the flat array,
        # computed from the cumulative number of kept bandpowers.
        self.cov = np.zeros((len(self.cl_data), len(self.cl_data)))
        for ndv1, dv1 in enumerate(self.maps['data_vectors']):
            s1 = int(np.array([self.maps['data_vectors'][x]['mask'] for x in
                               range(ndv1)]).sum())
            e1 = s1+dv1['mask'].sum()
            for ndv2, dv2 in enumerate(self.maps['data_vectors']):
                s2 = int(np.array([self.maps['data_vectors'][x]['mask']
                                   for x in range(ndv2)]).sum())
                e2 = s2+dv2['mask'].sum()
                cov = find_cov(data_all, dv1['types'], dv2['types'])
                # Apply both ell masks, then fill the block symmetrically.
                cov = cov[dv1['mask'], :][:, dv2['mask']]
                self.cov[s1:e1, s2:e2] = cov
                self.cov[s2:e2, s1:e1] = cov.T
        # Invert covariance matrix once; loglkl() only needs the inverse.
        self.icov = np.linalg.inv(self.cov)
    # end of initialization
    # compute likelihood
    def loglkl(self, cosmo, data):
        """Return the log-likelihood -chi2/2 of the theory Cl's (computed
        with CCL for the current cosmology and bias) against the data."""
        # Get Tracers
        for tr in self.maps['maps']:
            if tr['type'] == 'g':
                # Bias
                bz_g = data.mcmc_parameters['bias_g']['current']
                bz_g *= data.mcmc_parameters['bias_g']['scale']
                if data.arguments['bias_type'] == 'one_over_growth':
                    # b(z) proportional to 1/D(z)
                    bz_g /= cosmo.get_growth_factor(1./(1.+self.z_g))
                elif data.arguments['bias_type'] == 'constant':
                    bz_g *= np.ones_like(self.z_g)
                else:
                    raise ValueError('Bias type not recognized!')
                # Get tracer
                tr['tracer'] = ccl.NumberCountsTracer(cosmo.cosmo_ccl, False,
                                                       (self.z_g, self.nz_g),
                                                       (self.z_g, bz_g))
            elif tr['type'] == 'k':
                tr['tracer'] = ccl.CMBLensingTracer(cosmo.cosmo_ccl,
                                                     z_source=1100)
            else:
                raise ValueError('Type of tracer can be g, or k!')
        # Get theory Cls, concatenated in the same order as self.cl_data
        cl_theory = np.array([])
        for dv in self.maps['data_vectors']:
            tracer1 = next(x['tracer'] for x in self.maps['maps']
                           if x['name'] == dv['tracers'][0])
            tracer2 = next(x['tracer'] for x in self.maps['maps']
                           if x['name'] == dv['tracers'][1])
            cls = ccl.angular_cl(cosmo.cosmo_ccl, tracer1, tracer2, dv['ells'])
            cl_theory = np.append(cl_theory, cls)
        # Get chi2 = r^T C^-1 r with residual r = data - theory
        chi2 = (self.cl_data-cl_theory).dot(self.icov)
        chi2 = chi2.dot(self.cl_data-cl_theory)
        # Contours:
        # sigma8 vs bias: 2 scenarios (bias constant, bias = c/delta)
        # only auto-g and auto-g + cross-gl
        lkl = - 0.5 * chi2
        return lkl
| [
"numpy.ones_like",
"numpy.unique",
"montepython.likelihood_class.Likelihood.__init__",
"pyccl.CMBLensingTracer",
"pyccl.NumberCountsTracer",
"numpy.append",
"numpy.array",
"yaml.safe_load",
"numpy.linalg.inv",
"itertools.permutations",
"numpy.load",
"pyccl.angular_cl"
] | [((242, 293), 'montepython.likelihood_class.Likelihood.__init__', 'Likelihood.__init__', (['self', 'path', 'data', 'command_line'], {}), '(self, path, data, command_line)\n', (261, 293), False, 'from montepython.likelihood_class import Likelihood\n'), ((1545, 1557), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1553, 1557), True, 'import numpy as np\n'), ((1581, 1593), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1589, 1593), True, 'import numpy as np\n'), ((2880, 2903), 'numpy.unique', 'np.unique', (['used_tracers'], {}), '(used_tracers)\n', (2889, 2903), True, 'import numpy as np\n'), ((4014, 4037), 'numpy.linalg.inv', 'np.linalg.inv', (['self.cov'], {}), '(self.cov)\n', (4027, 4037), True, 'import numpy as np\n'), ((5280, 5292), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5288, 5292), True, 'import numpy as np\n'), ((357, 384), 'itertools.permutations', 'itertools.permutations', (['tr1'], {}), '(tr1)\n', (379, 384), False, 'import itertools\n'), ((627, 661), 'itertools.permutations', 'itertools.permutations', (['[tr1, tr2]'], {}), '([tr1, tr2])\n', (649, 661), False, 'import itertools\n'), ((1342, 1378), 'numpy.load', 'np.load', (["data.arguments['cl_cov_nz']"], {}), "(data.arguments['cl_cov_nz'])\n", (1349, 1378), True, 'import numpy as np\n'), ((2757, 2790), 'numpy.append', 'np.append', (['self.cl_data', 'cl_clean'], {}), '(self.cl_data, cl_clean)\n', (2766, 2790), True, 'import numpy as np\n'), ((2818, 2856), 'numpy.append', 'np.append', (['used_tracers', "dv['tracers']"], {}), "(used_tracers, dv['tracers'])\n", (2827, 2856), True, 'import numpy as np\n'), ((5610, 5671), 'pyccl.angular_cl', 'ccl.angular_cl', (['cosmo.cosmo_ccl', 'tracer1', 'tracer2', "dv['ells']"], {}), "(cosmo.cosmo_ccl, tracer1, tracer2, dv['ells'])\n", (5624, 5671), True, 'import pyccl as ccl\n'), ((5696, 5721), 'numpy.append', 'np.append', (['cl_theory', 'cls'], {}), '(cl_theory, cls)\n', (5705, 5721), True, 'import numpy as np\n'), ((689, 721), 
'itertools.permutations', 'itertools.permutations', (['p_ref[0]'], {}), '(p_ref[0])\n', (711, 721), False, 'import itertools\n'), ((1197, 1214), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (1211, 1214), False, 'import yaml\n'), ((1427, 1467), 'numpy.load', 'np.load', (["data.arguments['def_cl_cov_nz']"], {}), "(data.arguments['def_cl_cov_nz'])\n", (1434, 1467), True, 'import numpy as np\n'), ((4781, 4873), 'pyccl.NumberCountsTracer', 'ccl.NumberCountsTracer', (['cosmo.cosmo_ccl', '(False)', '(self.z_g, self.nz_g)', '(self.z_g, bz_g)'], {}), '(cosmo.cosmo_ccl, False, (self.z_g, self.nz_g), (self\n .z_g, bz_g))\n', (4803, 4873), True, 'import pyccl as ccl\n'), ((753, 785), 'itertools.permutations', 'itertools.permutations', (['p_ref[1]'], {}), '(p_ref[1])\n', (775, 785), False, 'import itertools\n'), ((5044, 5096), 'pyccl.CMBLensingTracer', 'ccl.CMBLensingTracer', (['cosmo.cosmo_ccl'], {'z_source': '(1100)'}), '(cosmo.cosmo_ccl, z_source=1100)\n', (5064, 5096), True, 'import pyccl as ccl\n'), ((4610, 4632), 'numpy.ones_like', 'np.ones_like', (['self.z_g'], {}), '(self.z_g)\n', (4622, 4632), True, 'import numpy as np\n')] |
import numpy as np
from typing import Tuple
from IMLearn.metalearners.adaboost import AdaBoost
from IMLearn.learners.classifiers import DecisionStump
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from IMLearn.metrics import misclassification_error
COLOR_SCALE = ["yellow", "green"]
def generate_data(n: int, noise_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
    """
    Generate a dataset in R^2 of specified size

    Parameters
    ----------
    n: int
        Number of samples to generate
    noise_ratio: float
        Ratio of labels to invert

    Returns
    -------
    X: np.ndarray of shape (n_samples,2)
        Design matrix of samples
    y: np.ndarray of shape (n_samples,)
        Labels of samples
    """
    # Sample points uniformly from the square [-1, 1]^2.
    points = 2 * np.random.rand(n, 2) - 1
    labels = np.ones(n)
    # Points inside the circle of radius 0.5 get the negative class.
    radii_sq = (points ** 2).sum(axis=1)
    labels[radii_sq < 0.25] = -1
    # Flip a random subset of the labels to simulate label noise.
    noisy_idx = np.random.choice(n, int(noise_ratio * n))
    labels[noisy_idx] *= -1
    return points, labels
def fit_and_evaluate_adaboost(noise, n_learners=250, train_size=5000, test_size=500):
    """
    Fit an AdaBoost ensemble of decision stumps on synthetic 2D data and
    produce the four exercise plots (error curves, decision surfaces, best
    ensemble, and weighted training samples).

    Parameters
    ----------
    noise: float
        Label-noise ratio passed to generate_data
    n_learners: int
        Maximum ensemble size to fit
    train_size, test_size: int
        Number of train / test samples to generate
    """
    (train_X, train_y), (test_X, test_y) = generate_data(train_size, noise), generate_data(test_size, noise)
    # Question 1: Train- and test errors of AdaBoost in noiseless case
    training_error, test_error, learners_arr = [], [], np.arange(1, n_learners + 1)
    adaboost = AdaBoost(DecisionStump, n_learners).fit(train_X, train_y)
    # Evaluate the partial ensemble (first T learners) for every T.
    for T in learners_arr:
        training_error.append(adaboost.partial_loss(train_X, train_y, T))
        test_error.append(adaboost.partial_loss(test_X, test_y, T))
    fig = go.Figure([go.Scatter(x=learners_arr, y=training_error, mode='lines', name=r'$Training-Error$'),
                     go.Scatter(x=learners_arr, y=test_error, mode='lines', name=r'$Test-Error$')])
    fig.update_xaxes(title_text="Number of Learners")
    fig.update_yaxes(title_text="Error Value")
    fig.update_layout(title_text=rf"$\textbf{{Adaboost Error Per Number of Learners}}$")
    fig.show()
    # Question 2: Plotting decision surfaces
    T = [5, 50, 100, 250]
    # todo why is first boundary missing
    # Plot limits: data bounding box padded by 0.1 on each side.
    lims = np.array([np.r_[train_X, test_X].min(axis=0), np.r_[train_X, test_X].max(axis=0)]).T + np.array([-.1, .1])
    fig = make_subplots(rows=2, cols=2, subplot_titles=[rf"$\textbf{{{t}}}$" for t in T],
                        horizontal_spacing=0.05, vertical_spacing=.1)
    # Test points drawn on top of every decision surface (reused per subplot).
    test_set_data_points_scattered = go.Scatter(x=test_X[:, 0], y=test_X[:, 1], mode="markers", showlegend=False,
                                             marker=dict(color=test_y, symbol=class_symbols[test_y.astype(int)],
                                                         colorscale=COLOR_SCALE,
                                                         line=dict(color="black", width=1)))
    for i, t in enumerate(T):
        fig.add_traces(
            [decision_surface(lambda X: adaboost.partial_predict(X, t), lims[0], lims[1], showscale=False,colorscale=COLOR_SCALE),
             test_set_data_points_scattered],
            rows=(i // 2) + 1,
            cols=(i % 2) + 1
        )
        acc = np.round(1 - misclassification_error(test_y, adaboost.partial_predict(test_X, t)), 3)
        fig.layout.annotations[i].update(text=f'{t} Iterations, accuracy: {acc}')
    fig.update_layout(width=800, height=1000,
                      title=rf"$\textbf{{AdaBoost Decision Boundary based on number of Learners: noise={noise}}}$",
                      margin=dict(t=100))
    fig.update_xaxes(matches='x', range=[-1, 1], constrain="domain")
    fig.update_yaxes(matches='y', range=[-1, 1], constrain="domain", scaleanchor="x", scaleratio=1)
    fig.show()
    # Question 3: Decision surface of best performing ensemble
    # (ensemble size = index of the lowest test error + 1)
    best_ensemble_idx = np.argmin(test_error)
    acc = np.round(1 - test_error[best_ensemble_idx], 3)
    best_ensemble_t = best_ensemble_idx + 1
    fig = go.Figure(
        [decision_surface(lambda x: adaboost.partial_predict(x, best_ensemble_t), lims[0], lims[1], showscale=False, colorscale=COLOR_SCALE),
         test_set_data_points_scattered])
    fig.update_xaxes(matches='x', range=[-1, 1], constrain="domain")
    fig.update_yaxes(matches='y', range=[-1, 1], constrain="domain", scaleanchor="x", scaleratio=1)
    fig.update_layout(title_text=f"Best Ensemble Size: {best_ensemble_t}, Accuracy: {acc}")
    fig.show()
    # Question 4: Decision surface with weighted samples
    # Marker size is proportional to the final AdaBoost sample weights D_;
    # the scale factor is purely cosmetic (smaller with noise).
    size_factor = 50 if noise == 0 else 5
    training_set_scatter = go.Scatter(
        x=train_X[:, 0],
        y=train_X[:, 1],
        mode="markers",
        showlegend=False,
        marker=dict(color=(train_y == 1).astype(int),
                    size=size_factor * adaboost.D_ / np.max(adaboost.D_),
                    symbol=class_symbols[train_y.astype(int)],
                    colorscale=COLOR_SCALE,
                    line=dict(color="black", width=1))
    )
    fig = go.Figure([decision_surface(adaboost.predict, lims[0], lims[1], showscale=False, colorscale=COLOR_SCALE), training_set_scatter])
    fig.update_xaxes(range=[-1, 1], constrain="domain")
    fig.update_yaxes(range=[-1, 1], constrain="domain", scaleanchor="x", scaleratio=1)
    fig.update_layout(dict1=dict(width=600,
                                 height=600,
                                 title=rf"$\textbf{{Adaboost Training Set sized by Last Iteration Weights, Noise:{noise}}}$"))
    fig.show()
if __name__ == '__main__':
    # Fixed seed so the generated datasets (and the plots) are reproducible.
    np.random.seed(0)
    # Run the full experiment twice: clean labels, then 40% label noise.
    fit_and_evaluate_adaboost(0)
    fit_and_evaluate_adaboost(noise=0.4)
| [
"plotly.subplots.make_subplots",
"numpy.ones",
"numpy.random.rand",
"numpy.arange",
"IMLearn.metalearners.adaboost.AdaBoost",
"numpy.max",
"numpy.array",
"numpy.sum",
"plotly.graph_objects.Scatter",
"numpy.random.seed",
"numpy.argmin",
"numpy.round"
] | [((2422, 2552), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(2)', 'cols': '(2)', 'subplot_titles': "[f'$\\\\textbf{{{t}}}$' for t in T]", 'horizontal_spacing': '(0.05)', 'vertical_spacing': '(0.1)'}), "(rows=2, cols=2, subplot_titles=[f'$\\\\textbf{{{t}}}$' for t in\n T], horizontal_spacing=0.05, vertical_spacing=0.1)\n", (2435, 2552), False, 'from plotly.subplots import make_subplots\n'), ((3941, 3962), 'numpy.argmin', 'np.argmin', (['test_error'], {}), '(test_error)\n', (3950, 3962), True, 'import numpy as np\n'), ((3973, 4019), 'numpy.round', 'np.round', (['(1 - test_error[best_ensemble_idx])', '(3)'], {}), '(1 - test_error[best_ensemble_idx], 3)\n', (3981, 4019), True, 'import numpy as np\n'), ((5627, 5644), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5641, 5644), True, 'import numpy as np\n'), ((1046, 1056), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (1053, 1056), True, 'import numpy as np\n'), ((1498, 1526), 'numpy.arange', 'np.arange', (['(1)', '(n_learners + 1)'], {}), '(1, n_learners + 1)\n', (1507, 1526), True, 'import numpy as np\n'), ((2392, 2413), 'numpy.array', 'np.array', (['[-0.1, 0.1]'], {}), '([-0.1, 0.1])\n', (2400, 2413), True, 'import numpy as np\n'), ((1063, 1085), 'numpy.sum', 'np.sum', (['(X ** 2)'], {'axis': '(1)'}), '(X ** 2, axis=1)\n', (1069, 1085), True, 'import numpy as np\n'), ((1542, 1577), 'IMLearn.metalearners.adaboost.AdaBoost', 'AdaBoost', (['DecisionStump', 'n_learners'], {}), '(DecisionStump, n_learners)\n', (1550, 1577), False, 'from IMLearn.metalearners.adaboost import AdaBoost\n'), ((1790, 1878), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'learners_arr', 'y': 'training_error', 'mode': '"""lines"""', 'name': '"""$Training-Error$"""'}), "(x=learners_arr, y=training_error, mode='lines', name=\n '$Training-Error$')\n", (1800, 1878), True, 'import plotly.graph_objects as go\n'), ((1897, 1972), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'learners_arr', 
'y': 'test_error', 'mode': '"""lines"""', 'name': '"""$Test-Error$"""'}), "(x=learners_arr, y=test_error, mode='lines', name='$Test-Error$')\n", (1907, 1972), True, 'import plotly.graph_objects as go\n'), ((1016, 1036), 'numpy.random.rand', 'np.random.rand', (['n', '(2)'], {}), '(n, 2)\n', (1030, 1036), True, 'import numpy as np\n'), ((4891, 4910), 'numpy.max', 'np.max', (['adaboost.D_'], {}), '(adaboost.D_)\n', (4897, 4910), True, 'import numpy as np\n')] |
"""Detection, localization and segmentation of solar modules"""
import logging
from copy import deepcopy
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from pvinspect.common.transform import (
FullMultiTransform,
FullTransform,
HomographyTransform,
warp_image,
)
from pvinspect.data.exceptions import UnsupportedModalityException
from pvinspect.data.image import *
from pvinspect.data.image import _sequence
from pvinspect.data.io import ObjectAnnotations
from pvinspect.preproc._mdetect.locate import apply
from shapely.geometry import Polygon
from skimage import filters, measure, morphology, transform
from skimage.filters.thresholding import threshold_otsu
from tqdm.auto import tqdm
@_sequence
def locate_module_and_cells(
    sequence: ModuleImageOrSequence,
    estimate_distortion: bool = True,
    orientation: str = None,
    return_bounding_boxes: bool = False,
    enable_background_suppresion: bool = True,
) -> Union[Tuple[ModuleImageOrSequence, ObjectAnnotations], ModuleImageSequence]:
    """Locate a single module and its cells

    Note:
        This methods implements the following paper:
        <NAME>, et al. "Fast and robust detection of solar modules in electroluminescence images."
        International Conference on Computer Analysis of Images and Patterns. Springer, Cham, 2019.

    Args:
        sequence (ModuleImageOrSequence): A single module image or a sequence of module images
        estimate_distortion (bool): Set True to estimate lens distortion, else False
        orientation (str): Orientation of the module ('horizontal' or 'vertical' or None).
            If set to None (default), orientation is automatically determined
        return_bounding_boxes (bool): Indicates, if bounding boxes of returned modules are returned
        enable_background_suppression (bool): Indicate, if background suppresion is enabled. This sometimes causes
            problems with PL images and disabling it may help.

    Returns:
        images: The same image/sequence with location information added; when
            return_bounding_boxes is True, a (sequence, boxes) tuple instead
    """
    # Localization only supports electroluminescence images.
    if sequence[0].modality != EL_IMAGE:
        logging.error("Module localization is not supporting given imaging modality")
        exit()
    result = list()
    failures = 0
    # Per-image detection outputs, collected before (optional) joint fitting:
    # mcs/dts hold corresponding point sets, flags their validity masks
    # (presumably boolean — see _mdetect.locate.apply), transforms the
    # per-image transform estimates.
    mcs = list()
    dts = list()
    flags = list()
    transforms = list()
    for img in tqdm(sequence.images):
        data = img.data.copy()
        # very simple background suppression
        if enable_background_suppresion:
            # Otsu threshold separates module from background; everything
            # below is zeroed out.
            thresh = threshold_otsu(data)
            data[data < thresh] = 0
        t, mc, dt, f = apply(
            data,
            img.cols,
            img.rows,
            is_module_detail=isinstance(img, PartialModuleImage),
            orientation=orientation,
        )
        transforms.append(t)
        flags.append(f)
        mcs.append(mc)
        dts.append(dt)
    if estimate_distortion:
        if sequence.same_camera:
            # do joint estimation
            logging.info(
                "Jointly estimating parameters for lens distortion. This might take some time.."
            )
            # Collect points of images where detection succeeded; remember
            # which images were valid so results can be realigned below.
            mcs_new = list()
            dts_new = list()
            valid = list()
            for mc, dt, f in zip(mcs, dts, flags):
                if mc is not None and dt is not None:
                    mcs_new.append(mc[f])
                    dts_new.append(dt[f])
                    valid.append(True)
                else:
                    valid.append(False)
            transforms = FullMultiTransform(
                mcs_new,
                dts_new,
                image_width=sequence.shape[1],
                image_height=sequence.shape[0],
                n_dist_coeff=1,
            )
            # Re-expand to one entry per image (None for failed detections).
            transforms_new = list()
            i = 0
            for v in valid:
                if v:
                    transforms_new.append(transforms[i])
                    i += 1
                else:
                    transforms_new.append(None)
            transforms = transforms_new
        else:
            # Different cameras: estimate distortion per image.
            transforms = list()
            for mc, dt, f, img in zip(mcs, dts, flags, sequence.images):
                if mc is not None and dt is not None:
                    t = FullTransform(
                        mc[f],
                        dt[f],
                        image_width=img.shape[1],
                        image_height=img.shape[0],
                        n_dist_coeff=1,
                    )
                    transforms.append(t)
                else:
                    transforms.append(None)
    # Attach the transform as image meta; failed images are passed through
    # unchanged (deep copy) and counted.
    for t, img in zip(transforms, sequence.images):
        if t is not None and t.valid:
            img_res = type(img).from_other(img, meta={"transform": t})
            result.append(img_res)
        else:
            result.append(deepcopy(img))
            failures += 1
    if failures > 0:
        # NOTE: "falied" typo is in the emitted log message; left unchanged here.
        logging.warning("Module localization falied for {:d} images".format(failures))
    result = ModuleImageSequence.from_other(sequence, images=result)
    if not return_bounding_boxes:
        return result
    else:
        boxes = dict()
        # compute polygon for every module and accumulate results
        for img in result:
            if img.has_meta("transform"):
                c = img.cols
                r = img.rows
                # Module outline in grid coords, mapped to pixel coords via
                # the estimated transform.
                coords = np.array([[0.0, 0.0], [c, 0.0], [c, r], [0.0, r]])
                coords_transformed = img.get_meta("transform")(coords)
                poly = Polygon(
                    [
                        (x, y)
                        for x, y in zip(
                            coords_transformed[:, 0].tolist(),
                            coords_transformed[:, 1].tolist(),
                        )
                    ]
                )
                boxes[img.path.name] = [("Module", poly)]
            else:
                boxes[img.path.name] = []
        return result, boxes
def segment_module_part(
    image: Image,
    first_col: int,
    first_row: int,
    cols: int,
    rows: int,
    size: int = None,
    padding: float = 0.0,
) -> PartialModuleImage:
    """Segment a part of a module

    Args:
        image (Image): The corresponding image
        first_col (int): First column to appear in the segment
        first_row (int): First row to appear in the segment
        cols (int): Number of columns of the segment
        rows (int): Number of rows of the segment
        size (int): Size of a cell in pixels (automatically chosen by default)
        padding (float): Optional padding around the given segment relative to the cell size
            (must be in [0..1[ )

    Returns:
        segment: The resulting segment

    Note:
        Terminates the process via ``exit()`` if the image has no valid
        "transform" meta or if ``padding`` is out of range.
    """
    # A valid module-coordinate transform is a hard precondition: without it
    # the warp below cannot map module coordinates to pixels.
    if not image.has_meta("transform") or not image.get_meta("transform").valid:
        logging.error(
            "The ModuleImage does not have a valid transform. Did module localization succeed?"
        )
        exit()
    t = image.get_meta("transform")
    if padding >= 1.0 or padding < 0.0:
        logging.error("padding needs to be in [0..1[")
        exit()
    last_col = first_col + cols
    last_row = first_row + rows
    # Default cell size in pixels is the mean scale of the transform.
    size = t.mean_scale() if size is None else size
    # Warp the padded segment from image space into a rectified raster of
    # (cols + 2*padding) x (rows + 2*padding) cells at `size` px per cell.
    result = warp_image(
        image.data,
        t,
        first_col - padding,
        first_row - padding,
        1 / size,
        1 / size,
        cols + 2 * padding,
        rows + 2 * padding,
    )
    result = result.astype(image.data.dtype)
    # New transform for the segment: maps the segment's module coordinates
    # (corners listed clockwise from top-left) to the segment's pixel corners.
    transform = HomographyTransform(
        np.array(
            [
                [first_col - padding, first_row - padding],
                [last_col + padding, first_row - padding],
                [last_col + padding, last_row + padding],
                [first_col - padding, last_row + padding],
            ]
        ),
        np.array(
            [
                [0.0, 0.0],
                [result.shape[1], 0.0],
                [result.shape[1], result.shape[0]],
                [0.0, result.shape[0]],
            ]
        ),
    )
    # bounding box in original image coords
    bb = [
        [first_col - padding, first_row - padding],
        [first_col + cols + padding, first_row + rows + padding],
    ]
    bb = t(np.array(bb))
    bb = Polygon.from_bounds(bb[0][0], bb[0][1], bb[1][0], bb[1][1])
    # Keep a reference to the source image (with the segment's box) as meta.
    original = image.from_other(image, meta={"segment_module_original_box": bb})
    # Negative first_col/first_row shrink the effective segment size and are
    # reported as None — presumably "segment starts outside the module";
    # TODO(review): confirm intended semantics with callers.
    return PartialModuleImage.from_other(
        image,
        drop_meta_types=[Polygon],  # geometric attributes are invalid now..
        data=result,
        cols=cols + min(first_col, 0),
        rows=rows + min(first_row, 0),
        first_col=first_col if first_col >= 0 else None,
        first_row=first_row if first_row >= 0 else None,
        meta={"transform": transform, "segment_module_original": original},
    )
def segment_module(
    image: ModuleImage, size: int = None, padding: float = 0.0
) -> ModuleImage:
    """Obtain a rectified, cropped and undistorted module image

    Args:
        image (ModuleImage): A single module image
        size (int): Size of a cell in pixels (automatically chosen by default)
        padding (float): Optional padding around the given segment relative to the cell size
            (must be in [0..1[ )

    Returns:
        module: The resulting module image
    """
    # A full module is just the segment spanning all of its rows and columns.
    part = segment_module_part(
        image, 0, 0, image.cols, image.rows, size, padding
    )
    return ModuleImage.from_other(part)
def segment_cell(
    image: ModuleImage, row: int, col: int, size: int = None, padding: float = 0.0
) -> CellImage:
    """Obtain a cell image from a module image

    Args:
        image (ModuleImageOrSequence): A single module image
        row (int): The row number (starting at 0)
        col (int): The column number (starting at 0)
        size (int): Size of the resulting cell image in pixels (automatically chosen by default)
        padding (float): Optional padding around the cell relative to the cell size
            (must be in [0..1[ )

    Returns:
        cells: The segmented cell image
    """
    # A cell is a 1x1 segment located at (col, row).
    cell_part = segment_module_part(image, col, row, 1, 1, size, padding)
    return CellImage.from_other(cell_part, row=row, col=col)
@_sequence
def segment_modules(
    sequence: ModuleImageOrSequence, size: int = None
) -> ModuleImageSequence:
    """Obtain rectified, cropped and undistorted module images from a sequence. Note that images that do not have a valid transform,
    possibly because the detection step failed, are silently ignored.

    Args:
        sequence (ModuleImageOrSequence): A single module image or a sequence of module images
        size (int): Size of the resulting cell images in pixels (automatically chosen by default)

    Returns:
        module: The segmented module images
    """
    # Mean cell scale of every image that carries a valid transform.
    valid_scales = [
        img.get_meta("transform").mean_scale()
        for img in sequence.images
        if img.has_meta("transform") and img.get_meta("transform").valid
    ]
    scales = np.array(valid_scales)
    if scales.std() > 0.1 * scales.mean() and size is None:
        logging.warning(
            "The size of cells within the sequences varies by more than 10%. However, segment_modules, \
            creates images of a fixed size. Please consider to split the sequence into multiple sequences \
            with less variation in size."
        )
    if size is None:
        size = int(scales.mean())
    # Images without a valid transform are silently skipped.
    segmented = [
        segment_module(img, size)
        for img in tqdm(sequence.images)
        if img.has_meta("transform") and img.get_meta("transform").valid
    ]
    return type(sequence).from_other(sequence, images=segmented, same_camera=False)
@_sequence(True)
def segment_cells(
    sequence: ModuleImageOrSequence, size: int = None
) -> CellImageSequence:
    """Obtain cell images from a sequence of module images. Note that images that do not have a valid transform,
    possibly because the detection step failed, are silently ignored.

    Args:
        sequence (ModuleImageOrSequence): A single module image or a sequence of module images
        size (int): Size of the resulting cell images in pixels (automatically chosen by default)

    Returns:
        cells: The segmented cell images
    """
    # Mean cell scale of every image that carries a valid transform.
    scales = np.array(
        [
            img.get_meta("transform").mean_scale()
            for img in sequence.images
            if img.has_meta("transform") and img.get_meta("transform").valid
        ]
    )
    if scales.std() > 0.1 * scales.mean() and size is None:
        logging.warning(
            "The size of cells within the sequences varies by more than 10%. However, segment_cells, \
            creates cell images of a fixed size. Please consider to split the sequence into multiple sequences \
            with less variation in size."
        )
    if size is None:
        size = int(scales.mean())
    result = list()
    for img in tqdm(sequence.images):
        # for the moment, we silently ignore images without a valid transform
        # Fix: the previous check `has_meta("transform") is not None` was always
        # truthy (has_meta returns a bool), so get_meta was reached even for
        # images lacking the meta. Check the boolean directly, per-image.
        if img.has_meta("transform") and img.get_meta("transform").valid:
            for row in range(img.rows):
                for col in range(img.cols):
                    result.append(segment_cell(img, row, col, size))
    return CellImageSequence(result)
def _do_locate_multiple_modules(
    image: Image,
    scale: float,
    reject_size_thresh: float,
    reject_fill_thresh: float,
    padding: float,
    cols: int,
    rows: int,
    drop_clipped_modules: bool,
) -> Tuple[List[ModuleImage], List[Polygon]]:
    """Detect individual modules in a single image and crop them.

    Returns the cropped ModuleImages together with their bounding boxes
    (as ("Module", Polygon) annotation tuples) in original image coordinates.
    """
    # downscale + binarize (Otsu)
    # image_f = filters.gaussian(image._data, filter_size)
    small = transform.rescale(image._data, scale)
    binary = small > filters.threshold_otsu(small)
    # connected components and their region properties
    labeled = morphology.label(binary)
    # keep only regions whose bounding box is sufficiently filled
    candidates = [
        r
        for r in measure.regionprops(labeled)
        if r.area / r.bbox_area >= reject_fill_thresh
    ]
    if not candidates:
        return [], []
    largest_area = int(np.max([r.bbox_area for r in candidates]))
    crops = []
    annotations = []
    module_idx = 0
    inv_scale = 1 / scale
    for region in candidates:
        # drop regions that are small relative to the largest one
        if region.bbox_area < reject_size_thresh * largest_area:
            continue
        # optionally drop regions clipped by the image boundary
        touches_border = (
            region.bbox[0] == 0
            or region.bbox[1] == 0
            or region.bbox[2] == labeled.shape[0]
            or region.bbox[3] == labeled.shape[1]
        )
        if touches_border and drop_clipped_modules:
            continue
        # map the bounding box back to the original resolution and pad it
        bbox = [int(v * inv_scale) for v in region.bbox]
        pad = int(np.sqrt(region.bbox_area) * padding * inv_scale)
        y0 = max(0, bbox[0] - pad)
        x0 = max(0, bbox[1] - pad)
        y1 = min(image.shape[0], bbox[2] + pad)
        x1 = min(image.shape[1], bbox[3] + pad)
        annotations.append(("Module", Polygon.from_bounds(x0, y0, x1, y1)))
        crop = image._data[y0:y1, x0:x1]
        p = image.path.parent / "{}_module{:02d}{}".format(
            image.path.stem, module_idx, image.path.suffix
        )
        crops.append(
            ModuleImage(
                data=crop, modality=image.modality, path=p, cols=cols, rows=rows
            )
        )
        module_idx += 1
    return crops, annotations
@_sequence(True)
def locate_multiple_modules(
    sequence: ImageOrSequence,
    scale: float = 0.31,
    reject_size_thresh: float = 0.26,
    reject_fill_thresh: float = 0.42,
    padding: float = 0.05,
    drop_clipped_modules: bool = True,
    cols: int = None,
    rows: int = None,
    return_bounding_boxes: bool = False,
) -> Tuple[ModuleImageSequence, ObjectAnnotations]:
    """Perform localization and segmentation of multiple modules. The method is published in Hoffmann, Mathis, et al.
    "Deep Learning-based Pipeline for Module Power Prediction from EL Measurements." arXiv preprint arXiv:2009.14712 (2020).

    Args:
        sequence (ImageOrSequence): Input images
        scale (float): Image is scaled to this size before processing
        reject_size_thresh (float): Detections smaller than this times the size of the largest detection are rejected
        reject_fill_thresh (float): Detections where more than this fraction of the bounding box is black after thresholding are rejected
        padding (float): Detections are padded by this times the average side length of the bounding box
        drop_clipped_modules (bool): Indicate, if detections that touch the boundary are dropped
        cols (int): Number of columns of cells of a single module
        rows (int): Number of rows of cells of a single module
        return_bounding_boxes (bool): If true, return the bounding boxes in addition to the crops

    Returns:
        The cropped modules as a ModuleImageSequence (or None, if nothing was found),
        as well as (optionally) the bounding boxes per input image path.
    """
    # process sequence
    results = list()
    boxes = dict()
    for img in tqdm(sequence):
        modules, b = _do_locate_multiple_modules(
            img,
            scale,
            reject_size_thresh,
            reject_fill_thresh,
            padding,
            cols,
            rows,
            drop_clipped_modules,
        )
        # add original images with box annotations as meta
        imgs_org = [
            Image.from_other(img, meta={"multimodule_index": i, "multimodule_boxes": b})
            for i in range(len(modules))
        ]
        modules = [
            ModuleImage.from_other(m, meta={"multimodule_original": o})
            for m, o in zip(modules, imgs_org)
        ]
        results += modules
        boxes[img.path] = b
    # an empty detection result is signalled by None rather than an empty sequence
    if return_bounding_boxes:
        if len(results) > 0:
            return ModuleImageSequence(results, same_camera=False), boxes
        return None, dict()
    if len(results) > 0:
        return ModuleImageSequence(results, same_camera=False)
    return None
| [
"copy.deepcopy",
"numpy.sqrt",
"pvinspect.data.image._sequence",
"skimage.measure.regionprops",
"shapely.geometry.Polygon.from_bounds",
"skimage.filters.threshold_otsu",
"logging.warning",
"logging.info",
"pvinspect.common.transform.FullTransform",
"skimage.morphology.label",
"numpy.max",
"num... | [((11735, 11750), 'pvinspect.data.image._sequence', '_sequence', (['(True)'], {}), '(True)\n', (11744, 11750), False, 'from pvinspect.data.image import _sequence\n'), ((15445, 15460), 'pvinspect.data.image._sequence', '_sequence', (['(True)'], {}), '(True)\n', (15454, 15460), False, 'from pvinspect.data.image import _sequence\n'), ((2362, 2383), 'tqdm.auto.tqdm', 'tqdm', (['sequence.images'], {}), '(sequence.images)\n', (2366, 2383), False, 'from tqdm.auto import tqdm\n'), ((7218, 7349), 'pvinspect.common.transform.warp_image', 'warp_image', (['image.data', 't', '(first_col - padding)', '(first_row - padding)', '(1 / size)', '(1 / size)', '(cols + 2 * padding)', '(rows + 2 * padding)'], {}), '(image.data, t, first_col - padding, first_row - padding, 1 /\n size, 1 / size, cols + 2 * padding, rows + 2 * padding)\n', (7228, 7349), False, 'from pvinspect.common.transform import FullMultiTransform, FullTransform, HomographyTransform, warp_image\n'), ((8229, 8288), 'shapely.geometry.Polygon.from_bounds', 'Polygon.from_bounds', (['bb[0][0]', 'bb[0][1]', 'bb[1][0]', 'bb[1][1]'], {}), '(bb[0][0], bb[0][1], bb[1][0], bb[1][1])\n', (8248, 8288), False, 'from shapely.geometry import Polygon\n'), ((11421, 11442), 'tqdm.auto.tqdm', 'tqdm', (['sequence.images'], {}), '(sequence.images)\n', (11425, 11442), False, 'from tqdm.auto import tqdm\n'), ((12935, 12956), 'tqdm.auto.tqdm', 'tqdm', (['sequence.images'], {}), '(sequence.images)\n', (12939, 12956), False, 'from tqdm.auto import tqdm\n'), ((13741, 13778), 'skimage.transform.rescale', 'transform.rescale', (['image._data', 'scale'], {}), '(image._data, scale)\n', (13758, 13778), False, 'from skimage import filters, measure, morphology, transform\n'), ((13869, 13894), 'skimage.morphology.label', 'morphology.label', (['image_f'], {}), '(image_f)\n', (13885, 13894), False, 'from skimage import filters, measure, morphology, transform\n'), ((13909, 13937), 'skimage.measure.regionprops', 'measure.regionprops', (['labeled'], 
{}), '(labeled)\n', (13928, 13937), False, 'from skimage import filters, measure, morphology, transform\n'), ((17104, 17118), 'tqdm.auto.tqdm', 'tqdm', (['sequence'], {}), '(sequence)\n', (17108, 17118), False, 'from tqdm.auto import tqdm\n'), ((2139, 2216), 'logging.error', 'logging.error', (['"""Module localization is not supporting given imaging modality"""'], {}), "('Module localization is not supporting given imaging modality')\n", (2152, 2216), False, 'import logging\n'), ((6803, 6911), 'logging.error', 'logging.error', (['"""The ModuleImage does not have a valid transform. Did module localization succeed?"""'], {}), "(\n 'The ModuleImage does not have a valid transform. Did module localization succeed?'\n )\n", (6816, 6911), False, 'import logging\n'), ((7025, 7071), 'logging.error', 'logging.error', (['"""padding needs to be in [0..1["""'], {}), "('padding needs to be in [0..1[')\n", (7038, 7071), False, 'import logging\n'), ((7507, 7699), 'numpy.array', 'np.array', (['[[first_col - padding, first_row - padding], [last_col + padding, first_row -\n padding], [last_col + padding, last_row + padding], [first_col -\n padding, last_row + padding]]'], {}), '([[first_col - padding, first_row - padding], [last_col + padding, \n first_row - padding], [last_col + padding, last_row + padding], [\n first_col - padding, last_row + padding]])\n', (7515, 7699), True, 'import numpy as np\n'), ((7800, 7911), 'numpy.array', 'np.array', (['[[0.0, 0.0], [result.shape[1], 0.0], [result.shape[1], result.shape[0]], [\n 0.0, result.shape[0]]]'], {}), '([[0.0, 0.0], [result.shape[1], 0.0], [result.shape[1], result.\n shape[0]], [0.0, result.shape[0]]])\n', (7808, 7911), True, 'import numpy as np\n'), ((8206, 8218), 'numpy.array', 'np.array', (['bb'], {}), '(bb)\n', (8214, 8218), True, 'import numpy as np\n'), ((11072, 11313), 'logging.warning', 'logging.warning', (['"""The size of cells within the sequences varies by more than 10%. 
However, segment_modules, creates images of a fixed size. Please consider to split the sequence into multiple sequences with less variation in size."""'], {}), "(\n 'The size of cells within the sequences varies by more than 10%. However, segment_modules, creates images of a fixed size. Please consider to split the sequence into multiple sequences with less variation in size.'\n )\n", (11087, 11313), False, 'import logging\n'), ((12583, 12827), 'logging.warning', 'logging.warning', (['"""The size of cells within the sequences varies by more than 10%. However, segment_cells, creates cell images of a fixed size. Please consider to split the sequence into multiple sequences with less variation in size."""'], {}), "(\n 'The size of cells within the sequences varies by more than 10%. However, segment_cells, creates cell images of a fixed size. Please consider to split the sequence into multiple sequences with less variation in size.'\n )\n", (12598, 12827), False, 'import logging\n'), ((13803, 13834), 'skimage.filters.threshold_otsu', 'filters.threshold_otsu', (['image_f'], {}), '(image_f)\n', (13825, 13834), False, 'from skimage import filters, measure, morphology, transform\n'), ((14166, 14204), 'numpy.max', 'np.max', (['[r.bbox_area for r in regions]'], {}), '([r.bbox_area for r in regions])\n', (14172, 14204), True, 'import numpy as np\n'), ((2525, 2545), 'skimage.filters.thresholding.threshold_otsu', 'threshold_otsu', (['data'], {}), '(data)\n', (2539, 2545), False, 'from skimage.filters.thresholding import threshold_otsu\n'), ((2995, 3099), 'logging.info', 'logging.info', (['"""Jointly estimating parameters for lens distortion. This might take some time.."""'], {}), "(\n 'Jointly estimating parameters for lens distortion. 
This might take some time..'\n )\n", (3007, 3099), False, 'import logging\n'), ((3521, 3640), 'pvinspect.common.transform.FullMultiTransform', 'FullMultiTransform', (['mcs_new', 'dts_new'], {'image_width': 'sequence.shape[1]', 'image_height': 'sequence.shape[0]', 'n_dist_coeff': '(1)'}), '(mcs_new, dts_new, image_width=sequence.shape[1],\n image_height=sequence.shape[0], n_dist_coeff=1)\n', (3539, 3640), False, 'from pvinspect.common.transform import FullMultiTransform, FullTransform, HomographyTransform, warp_image\n'), ((4812, 4825), 'copy.deepcopy', 'deepcopy', (['img'], {}), '(img)\n', (4820, 4825), False, 'from copy import deepcopy\n'), ((5340, 5390), 'numpy.array', 'np.array', (['[[0.0, 0.0], [c, 0.0], [c, r], [0.0, r]]'], {}), '([[0.0, 0.0], [c, 0.0], [c, r], [0.0, r]])\n', (5348, 5390), True, 'import numpy as np\n'), ((15047, 15082), 'shapely.geometry.Polygon.from_bounds', 'Polygon.from_bounds', (['x0', 'y0', 'x1', 'y1'], {}), '(x0, y0, x1, y1)\n', (15066, 15082), False, 'from shapely.geometry import Polygon\n'), ((4228, 4329), 'pvinspect.common.transform.FullTransform', 'FullTransform', (['mc[f]', 'dt[f]'], {'image_width': 'img.shape[1]', 'image_height': 'img.shape[0]', 'n_dist_coeff': '(1)'}), '(mc[f], dt[f], image_width=img.shape[1], image_height=img.\n shape[0], n_dist_coeff=1)\n', (4241, 4329), False, 'from pvinspect.common.transform import FullMultiTransform, FullTransform, HomographyTransform, warp_image\n'), ((14792, 14812), 'numpy.sqrt', 'np.sqrt', (['r.bbox_area'], {}), '(r.bbox_area)\n', (14799, 14812), True, 'import numpy as np\n')] |
import numpy as np
import copy
import bocd
import changefinder
import pandas as pd
from training import TrainHelper, ModelsGaussianProcessRegression
def run_evars_gpr(base_model: ModelsGaussianProcessRegression.GaussianProcessRegression,
                  data: pd.DataFrame, season_length: int, target_column: str, train_ind: int,
                  comparison_partners: bool = False, da: str = 'scaled', cpd: str = 'cf',
                  scale_thr: float = 0.1, scale_seasons: int = 2,
                  scale_window: int = None, scale_window_factor: float = 0.1, scale_window_minimum: int = 2,
                  const_hazard: int = None, const_hazard_factor: int = 2,
                  cf_r: float = 0.4, cf_order: int = 1, cf_smooth: int = 4, cf_thr_perc: int = 90,
                  append: str = 'no', max_samples: int = None, max_samples_factor: int = 10,
                  o_perc: float = 1.1, u_perc: float = 0.1, thr: float = 0.2, under_samp: bool = False,
                  rel_thr: float = 0.5, rel_coef: float = 1.5, verbose: bool = False):
    """
    Run EVARS-GPR algo
    :param base_model: base model fitted during offline phase
    :param data: data to use
    :param season_length: length of one season
    :param target_column: target column
    :param train_ind: index of last train sample
    :param comparison_partners: specify whether to include comparison partners in optimization loop
    :param da: data augmentation method
    :param cpd: change point detection method
    :param scale_thr: threshold for output scaling factor
    :param scale_seasons: number of seasons to consider for calculation of output scaling factor
    :param scale_window: number of samples prior to change point for calculation of output scaling factor
    :param scale_window_factor: scale window as a multiple of the season length
    :param scale_window_minimum: minimum of the scale window
    :param const_hazard: constant hazard value in case of bocpd
    :param const_hazard_factor: constant hazard value as a multiple of the season length
    :param cf_r: r value (forgetting factor) for changefinder
    :param cf_order: order of SDAR models for changefinder
    :param cf_smooth: smoothing constant for changefinder
    :param cf_thr_perc: percentile of offline anomaly scores to use for declaration of a change point
    :param append: specify whether to append original and scaled dataset for da or not
    :param max_samples: maximum samples to consider for data augmentation
    :param max_samples_factor: maximum samples to consider for data augmentation as a multiple of the season length
    :param o_perc: oversampling percentage for GN
    :param u_perc: undersampling percentage for GN
    :param thr: threshold for GN
    :param under_samp: specify whether to undersample for SMOGN
    :param rel_thr: relevance threshold for SMOGN
    :param rel_coef: relevance coefficient for SMOGN
    :param verbose: print debug info
    :return: list of detected change points, evars-gpr predictions, dictionary with predictions of comparison partners,
    number of refits
    """
    # resolve defaults that are expressed as multiples of the season length
    scale_window = max(scale_window_minimum, int(scale_window_factor * season_length)) \
        if scale_window is None else scale_window
    const_hazard = const_hazard_factor * season_length if const_hazard is None else const_hazard
    max_samples = max_samples_factor * season_length if max_samples is None else max_samples
    # work on a copy with a clean positional index so train_ind-based slicing is valid
    data = data.copy()
    data.reset_index(drop=True, inplace=True)
    train = data[:train_ind]
    # setup cpd: change point detection runs on the seasonally differenced target
    # (value minus value one season earlier), warmed up on the training portion
    y_deseas = data[target_column].diff(season_length).dropna().values
    y_train_deseas = y_deseas[:train_ind-season_length]
    if cpd == 'bocd':
        # Bayesian Online CPD on standardized deseasonalized values;
        # mean/std are updated online as new samples arrive below
        mean = np.mean(y_train_deseas)
        std = np.std(y_train_deseas)
        train_std = (y_train_deseas - mean) / std
        bc = bocd.BayesianOnlineChangePointDetection(bocd.ConstantHazard(const_hazard),
                                                    bocd.StudentT(mu=0, kappa=1, alpha=1, beta=1))
        for i, d_bocd_train in enumerate(train_std):
            bc.update(d_bocd_train)
    elif cpd == 'cf':
        # ChangeFinder: collect offline anomaly scores and derive the online
        # change point threshold from their cf_thr_perc percentile
        scores = []
        cf = changefinder.ChangeFinder(r=cf_r, order=cf_order, smooth=cf_smooth)
        for i in y_train_deseas:
            scores.append(cf.update(i))
        cf_threshold = np.percentile(scores, cf_thr_perc)
        if verbose:
            print('CF_Scores_Train: threshold=' + str(cf_threshold)
                  + ', mean=' + str(np.mean(scores)) + ', max=' + str(np.max(scores))
                  + ', 70perc=' + str(np.percentile(scores, 70)) + ', 80perc=' + str(np.percentile(scores, 80))
                  + ', 90perc=' + str(np.percentile(scores, 90)) + ', 95perc=' + str(np.percentile(scores, 95))
                  )
    # online part
    test = data[train_ind:]
    y_train_deseas_manip = y_train_deseas.copy()
    # run lengths (maximum likelihood) per test step, only used for bocd
    rt_mle = np.empty(test[target_column].shape)
    predictions = None
    train_manip = train.copy()
    model = copy.deepcopy(base_model)
    # setup comparison partners (baselines that react to change points differently)
    if comparison_partners:
        model_cpd_retrain_full = copy.deepcopy(base_model)
        predictions_cpd_retrain_full = None
        model_cpd_moving_window_full = copy.deepcopy(base_model)
        predictions_cpd_moving_window_full = None
        predictions_cpd_scaled_full = None
    cp_detected = []
    output_scale_old = 1
    output_scale = 1
    n_refits = 0
    # iterate over whole test set
    for index in test.index:
        sample = test.loc[index]
        # NOTE(review): DataFrame.append is deprecated in pandas >= 1.4 and
        # removed in 2.0 — pd.concat would be the modern equivalent
        train_manip = train_manip.append(sample)
        # predict next target value
        prediction = model.predict(test=sample.to_frame().T, train=train_manip)
        if predictions is None:
            predictions = prediction.copy()
        else:
            predictions = predictions.append(prediction)
        # get predictions of comparison partners if specified
        if comparison_partners:
            prediction_cpd_retrain_full = model_cpd_retrain_full.predict(test=sample.to_frame().T, train=train_manip)
            prediction_cpd_moving_window_full = model_cpd_moving_window_full.predict(test=sample.to_frame().T,
                                                                                    train=train_manip)
            # "scaled" baseline: base prediction multiplied by the last accepted scale factor
            prediction_cpd_scaled_full = prediction.copy()
            prediction_cpd_scaled_full *= output_scale_old
            if predictions_cpd_retrain_full is None:
                predictions_cpd_retrain_full = prediction_cpd_retrain_full.copy()
                predictions_cpd_moving_window_full = prediction_cpd_moving_window_full.copy()
                predictions_cpd_scaled_full = prediction_cpd_scaled_full.copy()
            else:
                predictions_cpd_retrain_full = predictions_cpd_retrain_full.append(prediction_cpd_retrain_full)
                predictions_cpd_moving_window_full = \
                    predictions_cpd_moving_window_full.append(prediction_cpd_moving_window_full)
                predictions_cpd_scaled_full = predictions_cpd_scaled_full.append(prediction_cpd_scaled_full)
        # CPD: feed the new seasonally differenced observation to the detector
        change_point_detected = False
        # note: this rebinds y_deseas from an array to the current scalar difference
        y_deseas = sample[target_column] - data.loc[index-season_length][target_column]
        if cpd == 'bocd':
            d_bocd = (y_deseas - mean) / std
            bc.update(d_bocd)
            rt_mle_index = index-train_ind
            rt_mle[rt_mle_index] = bc.rt
            # update running statistics with the new observation
            y_train_deseas_manip = np.append(y_train_deseas_manip, y_deseas)
            mean = np.mean(y_train_deseas_manip)
            std = np.std(y_train_deseas_manip)
            # a drop in the estimated run length signals a change point
            if rt_mle_index > 0 and (rt_mle[rt_mle_index] - rt_mle[rt_mle_index-1] < 0):
                change_point_detected = True
                curr_ind = rt_mle_index
        elif cpd == 'cf':
            score = cf.update(y_deseas)
            scores.append(score)
            if score >= cf_threshold:
                if verbose:
                    print('Anomaly Score ' + str(score) + ' > ' + 'threshold ' + str(cf_threshold))
                change_point_detected = True
                curr_ind = index - train_ind
        # Trigger remaining EVARS-GPR procedures if a change point is detected
        if change_point_detected:
            if verbose:
                print('CP Detected ' + str(curr_ind + train.shape[0]))
            cp_detected.append(curr_ind)
            try:
                # Calculate output scaling factor: mean of the scale_window samples
                # before the change point vs. the same window one (and two) seasons back
                change_point_index = curr_ind + train.shape[0]
                mean_now = np.mean(data[change_point_index-scale_window+1:change_point_index+1][target_column])
                mean_prev_seas_1 = \
                    np.mean(data[change_point_index-season_length-scale_window+1:change_point_index-season_length+1]
                            [target_column])
                mean_prev_seas_2 = \
                    np.mean(data[change_point_index-2*season_length-scale_window+1:change_point_index-2*season_length+1]
                            [target_column])
                if scale_seasons == 1:
                    output_scale = mean_now / mean_prev_seas_1
                elif scale_seasons == 2:
                    output_scale = np.mean([mean_now / mean_prev_seas_1, mean_now / mean_prev_seas_2])
                # a zero scale factor is unusable; abort this refit attempt
                if output_scale == 0:
                    raise Exception
                if verbose:
                    print('ScaleDiff=' + str(np.abs(output_scale - output_scale_old) / output_scale_old))
                # Check deviation to previous scale factor; only refit if it changed enough
                if np.abs(output_scale - output_scale_old) / output_scale_old > scale_thr:
                    n_refits += 1
                    if verbose:
                        print('try to retrain model: ' + str(change_point_index)
                              + ' , output_scale=' + str(output_scale))
                    if output_scale > 1:
                        focus = 'high'
                    else:
                        focus = 'low'
                    # augment data
                    train_samples = TrainHelper.get_augmented_data(data=data, target_column=target_column, da=da,
                                                                   change_point_index=curr_ind + train.shape[0],
                                                                   output_scale=output_scale,
                                                                   rel_coef=rel_coef, rel_thr=rel_thr,
                                                                   under_samp=under_samp, focus=focus,
                                                                   o_perc=o_perc, u_perc=u_perc, thr=thr,
                                                                   append=append, max_samples=max_samples)
                    # retrain current model (fresh GPR with the base model's hyperparameters)
                    model = ModelsGaussianProcessRegression.GaussianProcessRegression(
                        target_column=base_model.target_column, seasonal_periods=base_model.seasonal_periods,
                        kernel=base_model.model.kernel_, alpha=base_model.model.alpha,
                        n_restarts_optimizer=base_model.model.n_restarts_optimizer,
                        standardize=base_model.standardize, normalize_y=base_model.model.normalize_y,
                        one_step_ahead=base_model.one_step_ahead)
                    model.train(train_samples, cross_val_call=False)
                    if comparison_partners:
                        train_data = data.copy()[:change_point_index+1]
                        # cpd Retrain: baseline retrained on all data up to the change point
                        model_cpd_retrain_full = ModelsGaussianProcessRegression.GaussianProcessRegression(
                            target_column=base_model.target_column, seasonal_periods=base_model.seasonal_periods,
                            kernel=base_model.model.kernel_, alpha=base_model.model.alpha,
                            n_restarts_optimizer=base_model.model.n_restarts_optimizer,
                            standardize=base_model.standardize, normalize_y=base_model.model.normalize_y,
                            one_step_ahead=base_model.one_step_ahead)
                        model_cpd_retrain_full.train(train_data, cross_val_call=False)
                        # Moving Window: baseline retrained on the last season only
                        model_cpd_moving_window_full = ModelsGaussianProcessRegression.GaussianProcessRegression(
                            target_column=base_model.target_column, seasonal_periods=base_model.seasonal_periods,
                            kernel=base_model.model.kernel_, alpha=base_model.model.alpha,
                            n_restarts_optimizer=base_model.model.n_restarts_optimizer,
                            standardize=base_model.standardize, normalize_y=base_model.model.normalize_y,
                            one_step_ahead=base_model.one_step_ahead)
                        model_cpd_moving_window_full.train(train_data[-season_length:], cross_val_call=False)
                    # in case of a successful refit change output_scale_old
                    output_scale_old = output_scale
            except Exception as exc:
                # best-effort: a failed scale calculation or refit keeps the old model
                print(exc)
    if comparison_partners:
        comparison_partners_dict = {'cpd_retrain_full': predictions_cpd_retrain_full,
                                    'cpd_cpd_moving_window_full': predictions_cpd_moving_window_full,
                                    'cpd_scaled_full': predictions_cpd_scaled_full
                                    }
    else:
        comparison_partners_dict = {}
    return cp_detected, predictions, comparison_partners_dict, n_refits
| [
"numpy.mean",
"numpy.abs",
"training.TrainHelper.get_augmented_data",
"bocd.ConstantHazard",
"copy.deepcopy",
"bocd.StudentT",
"numpy.max",
"numpy.append",
"numpy.empty",
"numpy.std",
"training.ModelsGaussianProcessRegression.GaussianProcessRegression",
"numpy.percentile",
"changefinder.Chan... | [((4876, 4911), 'numpy.empty', 'np.empty', (['test[target_column].shape'], {}), '(test[target_column].shape)\n', (4884, 4911), True, 'import numpy as np\n'), ((4978, 5003), 'copy.deepcopy', 'copy.deepcopy', (['base_model'], {}), '(base_model)\n', (4991, 5003), False, 'import copy\n'), ((3708, 3731), 'numpy.mean', 'np.mean', (['y_train_deseas'], {}), '(y_train_deseas)\n', (3715, 3731), True, 'import numpy as np\n'), ((3746, 3768), 'numpy.std', 'np.std', (['y_train_deseas'], {}), '(y_train_deseas)\n', (3752, 3768), True, 'import numpy as np\n'), ((5097, 5122), 'copy.deepcopy', 'copy.deepcopy', (['base_model'], {}), '(base_model)\n', (5110, 5122), False, 'import copy\n'), ((5206, 5231), 'copy.deepcopy', 'copy.deepcopy', (['base_model'], {}), '(base_model)\n', (5219, 5231), False, 'import copy\n'), ((3872, 3905), 'bocd.ConstantHazard', 'bocd.ConstantHazard', (['const_hazard'], {}), '(const_hazard)\n', (3891, 3905), False, 'import bocd\n'), ((3960, 4005), 'bocd.StudentT', 'bocd.StudentT', ([], {'mu': '(0)', 'kappa': '(1)', 'alpha': '(1)', 'beta': '(1)'}), '(mu=0, kappa=1, alpha=1, beta=1)\n', (3973, 4005), False, 'import bocd\n'), ((4151, 4218), 'changefinder.ChangeFinder', 'changefinder.ChangeFinder', ([], {'r': 'cf_r', 'order': 'cf_order', 'smooth': 'cf_smooth'}), '(r=cf_r, order=cf_order, smooth=cf_smooth)\n', (4176, 4218), False, 'import changefinder\n'), ((4315, 4349), 'numpy.percentile', 'np.percentile', (['scores', 'cf_thr_perc'], {}), '(scores, cf_thr_perc)\n', (4328, 4349), True, 'import numpy as np\n'), ((7422, 7463), 'numpy.append', 'np.append', (['y_train_deseas_manip', 'y_deseas'], {}), '(y_train_deseas_manip, y_deseas)\n', (7431, 7463), True, 'import numpy as np\n'), ((7483, 7512), 'numpy.mean', 'np.mean', (['y_train_deseas_manip'], {}), '(y_train_deseas_manip)\n', (7490, 7512), True, 'import numpy as np\n'), ((7531, 7559), 'numpy.std', 'np.std', (['y_train_deseas_manip'], {}), '(y_train_deseas_manip)\n', (7537, 7559), True, 'import 
numpy as np\n'), ((8495, 8590), 'numpy.mean', 'np.mean', (['data[change_point_index - scale_window + 1:change_point_index + 1][\n target_column]'], {}), '(data[change_point_index - scale_window + 1:change_point_index + 1][\n target_column])\n', (8502, 8590), True, 'import numpy as np\n'), ((8637, 8764), 'numpy.mean', 'np.mean', (['data[change_point_index - season_length - scale_window + 1:\n change_point_index - season_length + 1][target_column]'], {}), '(data[change_point_index - season_length - scale_window + 1:\n change_point_index - season_length + 1][target_column])\n', (8644, 8764), True, 'import numpy as np\n'), ((8836, 8971), 'numpy.mean', 'np.mean', (['data[change_point_index - 2 * season_length - scale_window + 1:\n change_point_index - 2 * season_length + 1][target_column]'], {}), '(data[change_point_index - 2 * season_length - scale_window + 1:\n change_point_index - 2 * season_length + 1][target_column])\n', (8843, 8971), True, 'import numpy as np\n'), ((10020, 10339), 'training.TrainHelper.get_augmented_data', 'TrainHelper.get_augmented_data', ([], {'data': 'data', 'target_column': 'target_column', 'da': 'da', 'change_point_index': '(curr_ind + train.shape[0])', 'output_scale': 'output_scale', 'rel_coef': 'rel_coef', 'rel_thr': 'rel_thr', 'under_samp': 'under_samp', 'focus': 'focus', 'o_perc': 'o_perc', 'u_perc': 'u_perc', 'thr': 'thr', 'append': 'append', 'max_samples': 'max_samples'}), '(data=data, target_column=target_column, da=\n da, change_point_index=curr_ind + train.shape[0], output_scale=\n output_scale, rel_coef=rel_coef, rel_thr=rel_thr, under_samp=under_samp,\n focus=focus, o_perc=o_perc, u_perc=u_perc, thr=thr, append=append,\n max_samples=max_samples)\n', (10050, 10339), False, 'from training import TrainHelper, ModelsGaussianProcessRegression\n'), ((10796, 11204), 'training.ModelsGaussianProcessRegression.GaussianProcessRegression', 'ModelsGaussianProcessRegression.GaussianProcessRegression', ([], {'target_column': 
'base_model.target_column', 'seasonal_periods': 'base_model.seasonal_periods', 'kernel': 'base_model.model.kernel_', 'alpha': 'base_model.model.alpha', 'n_restarts_optimizer': 'base_model.model.n_restarts_optimizer', 'standardize': 'base_model.standardize', 'normalize_y': 'base_model.model.normalize_y', 'one_step_ahead': 'base_model.one_step_ahead'}), '(target_column=\n base_model.target_column, seasonal_periods=base_model.seasonal_periods,\n kernel=base_model.model.kernel_, alpha=base_model.model.alpha,\n n_restarts_optimizer=base_model.model.n_restarts_optimizer, standardize\n =base_model.standardize, normalize_y=base_model.model.normalize_y,\n one_step_ahead=base_model.one_step_ahead)\n', (10853, 11204), False, 'from training import TrainHelper, ModelsGaussianProcessRegression\n'), ((9160, 9227), 'numpy.mean', 'np.mean', (['[mean_now / mean_prev_seas_1, mean_now / mean_prev_seas_2]'], {}), '([mean_now / mean_prev_seas_1, mean_now / mean_prev_seas_2])\n', (9167, 9227), True, 'import numpy as np\n'), ((9514, 9553), 'numpy.abs', 'np.abs', (['(output_scale - output_scale_old)'], {}), '(output_scale - output_scale_old)\n', (9520, 9553), True, 'import numpy as np\n'), ((11576, 11984), 'training.ModelsGaussianProcessRegression.GaussianProcessRegression', 'ModelsGaussianProcessRegression.GaussianProcessRegression', ([], {'target_column': 'base_model.target_column', 'seasonal_periods': 'base_model.seasonal_periods', 'kernel': 'base_model.model.kernel_', 'alpha': 'base_model.model.alpha', 'n_restarts_optimizer': 'base_model.model.n_restarts_optimizer', 'standardize': 'base_model.standardize', 'normalize_y': 'base_model.model.normalize_y', 'one_step_ahead': 'base_model.one_step_ahead'}), '(target_column=\n base_model.target_column, seasonal_periods=base_model.seasonal_periods,\n kernel=base_model.model.kernel_, alpha=base_model.model.alpha,\n n_restarts_optimizer=base_model.model.n_restarts_optimizer, standardize\n =base_model.standardize, 
normalize_y=base_model.model.normalize_y,\n one_step_ahead=base_model.one_step_ahead)\n', (11633, 11984), False, 'from training import TrainHelper, ModelsGaussianProcessRegression\n'), ((12286, 12694), 'training.ModelsGaussianProcessRegression.GaussianProcessRegression', 'ModelsGaussianProcessRegression.GaussianProcessRegression', ([], {'target_column': 'base_model.target_column', 'seasonal_periods': 'base_model.seasonal_periods', 'kernel': 'base_model.model.kernel_', 'alpha': 'base_model.model.alpha', 'n_restarts_optimizer': 'base_model.model.n_restarts_optimizer', 'standardize': 'base_model.standardize', 'normalize_y': 'base_model.model.normalize_y', 'one_step_ahead': 'base_model.one_step_ahead'}), '(target_column=\n base_model.target_column, seasonal_periods=base_model.seasonal_periods,\n kernel=base_model.model.kernel_, alpha=base_model.model.alpha,\n n_restarts_optimizer=base_model.model.n_restarts_optimizer, standardize\n =base_model.standardize, normalize_y=base_model.model.normalize_y,\n one_step_ahead=base_model.one_step_ahead)\n', (12343, 12694), False, 'from training import TrainHelper, ModelsGaussianProcessRegression\n'), ((4721, 4746), 'numpy.percentile', 'np.percentile', (['scores', '(95)'], {}), '(scores, 95)\n', (4734, 4746), True, 'import numpy as np\n'), ((4674, 4699), 'numpy.percentile', 'np.percentile', (['scores', '(90)'], {}), '(scores, 90)\n', (4687, 4699), True, 'import numpy as np\n'), ((9375, 9414), 'numpy.abs', 'np.abs', (['(output_scale - output_scale_old)'], {}), '(output_scale - output_scale_old)\n', (9381, 9414), True, 'import numpy as np\n'), ((4609, 4634), 'numpy.percentile', 'np.percentile', (['scores', '(80)'], {}), '(scores, 80)\n', (4622, 4634), True, 'import numpy as np\n'), ((4562, 4587), 'numpy.percentile', 'np.percentile', (['scores', '(70)'], {}), '(scores, 70)\n', (4575, 4587), True, 'import numpy as np\n'), ((4508, 4522), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (4514, 4522), True, 'import numpy as np\n'), 
((4474, 4489), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (4481, 4489), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# @ModuleName: pridict
# @Function: run a trained U-Net segmentation model over a test folder and
#            write the predicted masks to disk
# @Author: <NAME>
# @Time: 2021/9/1 15:22
import os
import cv2
import keras
import numpy as np
import segmentation_models as sm
import albumentations as A
#DATA_DIR = 'C:/Users/ZhangYe/Desktop/core_segmentation0828/pr_imgs_2/'
# NOTE(review): `dir` shadows the builtin of the same name; it is the output
# folder used by the prediction loop at the bottom of this script.
dir = 'C:/Users/.../'
DATA_DIR = "C:/Users/..." + "/"
# test images and their annotation masks live in fixed sub-folders of DATA_DIR
x_test_dir = os.path.join(DATA_DIR, 'test')
y_test_dir = os.path.join(DATA_DIR, 'testannot')
img_width = 640
img_height = 480
BACKBONE = 'efficientnetb5'
img_CLASSES = ['background', 'core'] # list as many class names as are annotated in the masks
#checkpoint = "./best_core_model_res_adam2.h5"
checkpoint = "./model_eb5_adam.h5"
CLASSES = ['core']
#dir = "C:/Users/ZhangYe/Desktop/core_segmentation0828/data/CamVid" + "/"
BATCH_SIZE = 2
n_classes = 1 if len(CLASSES) == 1 else (len(CLASSES) + 1) # case for binary and multiclass segmentation
activation = 'sigmoid' if n_classes == 1 else 'softmax'
model = sm.Unet(BACKBONE, classes=n_classes, activation=activation)
class Dataset:
    """CamVid-style dataset: reads images, applies augmentation and preprocessing.

    Args:
        images_dir (str): folder with input images
        masks_dir (str): folder with segmentation masks (same file names)
        classes (list): class names to extract from the masks
        augmentation (albumentations.Compose): optional transform pipeline
            (e.g. flip, scale, etc.)
        preprocessing (albumentations.Compose): optional preprocessing pipeline
            (e.g. normalization, shape manipulation, etc.)
    """
    CLASSES = img_CLASSES

    def __init__(
            self,
            images_dir,
            masks_dir,
            classes=None,
            augmentation=None,
            preprocessing=None,
    ):
        self.ids = os.listdir(images_dir)
        self.images_fps = [os.path.join(images_dir, name) for name in self.ids]
        self.masks_fps = [os.path.join(masks_dir, name) for name in self.ids]
        # map the requested class names onto their integer values in the mask
        self.class_values = [self.CLASSES.index(c.lower()) for c in classes]
        self.augmentation = augmentation
        self.preprocessing = preprocessing

    def __getitem__(self, i):
        # load the image as RGB and the mask as a single grayscale channel
        bgr = cv2.imread(self.images_fps[i])
        image = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        raw_mask = cv2.imread(self.masks_fps[i], 0)
        # one binary channel per requested class
        layers = [(raw_mask == v) for v in self.class_values]
        mask = np.stack(layers, axis=-1).astype('float')
        # multi-class case: append a background channel
        if mask.shape[-1] != 1:
            background = 1 - mask.sum(axis=-1, keepdims=True)
            mask = np.concatenate((mask, background), axis=-1)
        if self.augmentation:
            out = self.augmentation(image=image, mask=mask)
            image, mask = out['image'], out['mask']
        if self.preprocessing:
            out = self.preprocessing(image=image, mask=mask)
            image, mask = out['image'], out['mask']
        return image, mask

    def __len__(self):
        return len(self.ids)
class Dataloder(keras.utils.Sequence):
    """Assembles dataset samples into batches for Keras.

    Args:
        dataset: Dataset instance providing indexed (image, mask) pairs.
        batch_size: number of samples per batch.
        shuffle: when True, reshuffle the index permutation after each epoch.
    """
    def __init__(self, dataset, batch_size=1, shuffle=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.indexes = np.arange(len(dataset))
        self.on_epoch_end()

    def __getitem__(self, i):
        # NOTE(review): batches index the dataset directly; the shuffled
        # self.indexes is never consulted here — confirm that is intended.
        lo = i * self.batch_size
        hi = lo + self.batch_size
        samples = [self.dataset[j] for j in range(lo, hi)]
        # regroup per-sample tuples into per-component stacked arrays
        return [np.stack(component, axis=0) for component in zip(*samples)]

    def __len__(self):
        """Denotes the number of batches per epoch."""
        return len(self.indexes) // self.batch_size

    def on_epoch_end(self):
        """Reshuffle the sample indexes at epoch end when shuffle is enabled."""
        if self.shuffle:
            self.indexes = np.random.permutation(self.indexes)
def get_preprocessing(preprocessing_fn):
    """Wrap a backbone-specific normalization function in an albumentations pipeline.

    Args:
        preprocessing_fn (callable): data normalization function
            (can be specific for each pretrained neural network)
    Return:
        transform: albumentations.Compose applying the function to the image.
    """
    return A.Compose([A.Lambda(image=preprocessing_fn)])
# backbone-specific input normalization from segmentation_models
preprocess_input = sm.get_preprocessing(BACKBONE)
test_dataset = Dataset(
    x_test_dir,
    y_test_dir,
    classes=CLASSES,
    augmentation=None,
    # augmentation=get_validation_augmentation(),
    preprocessing=get_preprocessing(preprocess_input),
)
test_dataloader = Dataloder(test_dataset, batch_size=1, shuffle=False)
# check shapes for errors
images_size = (BATCH_SIZE, img_height, img_width, 3)
masks_size = (BATCH_SIZE, img_height, img_width, n_classes)
# assert test_dataloader[0][0].shape == images_size
# assert test_dataloader[0][1].shape == masks_size
# load best weights
model.load_weights(checkpoint)
# scores = model.evaluate_generator(test_dataloader)
# metrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)]
# print("Loss: {:.5}".format(scores[0]))
# for metric, value in zip(metrics, scores[1:]):
#     print("mean {}: {:.5}".format(metric.__name__, value))
n = 1
# ids = np.random.choice(np.arange(len(test_dataset)), size=n)
print(len(test_dataset),0)
# predict a mask for every test image and save it as <dir><index>.png
for i in range(len(test_dataset)):
    image, mask = test_dataset[i]  # get some sample
    # the model expects a leading batch axis
    image = np.expand_dims(image, axis=0)
    pr_mask = model.predict(image)
    # drop batch/channel axes to obtain a 2-D mask image
    image = pr_mask.squeeze()
    # dir = "pr_imgs" + "\\"
    filename = dir + str(i) + ".png"
    print(filename)
    cv2.imwrite(filename, image)
| [
"cv2.imwrite",
"os.listdir",
"numpy.random.permutation",
"os.path.join",
"segmentation_models.get_preprocessing",
"numpy.stack",
"albumentations.Compose",
"numpy.expand_dims",
"cv2.cvtColor",
"numpy.concatenate",
"cv2.imread",
"segmentation_models.Unet",
"albumentations.Lambda"
] | [((370, 400), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""test"""'], {}), "(DATA_DIR, 'test')\n", (382, 400), False, 'import os\n'), ((415, 450), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""testannot"""'], {}), "(DATA_DIR, 'testannot')\n", (427, 450), False, 'import os\n'), ((941, 1000), 'segmentation_models.Unet', 'sm.Unet', (['BACKBONE'], {'classes': 'n_classes', 'activation': 'activation'}), '(BACKBONE, classes=n_classes, activation=activation)\n', (948, 1000), True, 'import segmentation_models as sm\n'), ((5154, 5184), 'segmentation_models.get_preprocessing', 'sm.get_preprocessing', (['BACKBONE'], {}), '(BACKBONE)\n', (5174, 5184), True, 'import segmentation_models as sm\n'), ((5108, 5129), 'albumentations.Compose', 'A.Compose', (['_transform'], {}), '(_transform)\n', (5117, 5129), True, 'import albumentations as A\n'), ((6284, 6313), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (6298, 6313), True, 'import numpy as np\n'), ((6475, 6503), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'image'], {}), '(filename, image)\n', (6486, 6503), False, 'import cv2\n'), ((1987, 2009), 'os.listdir', 'os.listdir', (['images_dir'], {}), '(images_dir)\n', (1997, 2009), False, 'import os\n'), ((2485, 2515), 'cv2.imread', 'cv2.imread', (['self.images_fps[i]'], {}), '(self.images_fps[i])\n', (2495, 2515), False, 'import cv2\n'), ((2533, 2571), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (2545, 2571), False, 'import cv2\n'), ((2588, 2620), 'cv2.imread', 'cv2.imread', (['self.masks_fps[i]', '(0)'], {}), '(self.masks_fps[i], 0)\n', (2598, 2620), False, 'import cv2\n'), ((5055, 5087), 'albumentations.Lambda', 'A.Lambda', ([], {'image': 'preprocessing_fn'}), '(image=preprocessing_fn)\n', (5063, 5087), True, 'import albumentations as A\n'), ((2038, 2072), 'os.path.join', 'os.path.join', (['images_dir', 'image_id'], {}), '(images_dir, image_id)\n', (2050, 2072), False, 
'import os\n'), ((2126, 2159), 'os.path.join', 'os.path.join', (['masks_dir', 'image_id'], {}), '(masks_dir, image_id)\n', (2138, 2159), False, 'import os\n'), ((2961, 3004), 'numpy.concatenate', 'np.concatenate', (['(mask, background)'], {'axis': '(-1)'}), '((mask, background), axis=-1)\n', (2975, 3004), True, 'import numpy as np\n'), ((4333, 4358), 'numpy.stack', 'np.stack', (['samples'], {'axis': '(0)'}), '(samples, axis=0)\n', (4341, 4358), True, 'import numpy as np\n'), ((4692, 4727), 'numpy.random.permutation', 'np.random.permutation', (['self.indexes'], {}), '(self.indexes)\n', (4713, 4727), True, 'import numpy as np\n'), ((2754, 2778), 'numpy.stack', 'np.stack', (['masks'], {'axis': '(-1)'}), '(masks, axis=-1)\n', (2762, 2778), True, 'import numpy as np\n')] |
import numpy as np
import subprocess
import fileinput
import argparse
import random
import shutil
import math
import cv2
import os
import sys
global counter
def createNumpyMatrix(geometricVertices):
    """Convert 'v x y z' obj-file strings into a 3 x numPoints float matrix."""
    rows = [[float(tok) for tok in line.split()[1:]] for line in geometricVertices]
    # transpose so that each column holds one vertex (3 x numPoints)
    return np.asarray(rows).T
def getCenterOfMass(geometricVertices):
    """Return the per-axis mean of a 3 x N vertex matrix as a 3x1 column vector."""
    return np.average(geometricVertices, axis=1).reshape(3, 1)
def centerAndScaleObject(geometricVertices, com, resize, meshIsAreaLight):
    """Center the vertices on the origin; scale regular meshes by stdev/resize.

    Area-light meshes are only centered, never rescaled.
    """
    centered = geometricVertices - com
    if meshIsAreaLight:
        # do not scale the area light mesh object
        return centered
    spread = (np.std(centered, axis=1) / float(resize)).reshape(3, 1)
    return centered / spread
def getRotationMatrix(angleX=0.0, angleY=0.0, angleZ=0.0):
    """Build a combined rotation matrix R = Rx @ Ry @ Rz.

    If all three angles are 0.0 (the default), random angles in [0, 2*pi)
    are drawn instead — pass at least one non-zero angle for a
    deterministic rotation.
    """
    if angleX == 0.0 and angleY == 0.0 and angleZ == 0.0:
        angleX = round(random.uniform(0, 2*math.pi), 2)
        angleY = round(random.uniform(0, 2*math.pi), 2)
        angleZ = round(random.uniform(0, 2*math.pi), 2)
    # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24
    # (AttributeError); the builtin float gives the same float64 dtype.
    Rx = np.array([[1, 0, 0],
                   [0, math.cos(angleX), -math.sin(angleX)],
                   [0, math.sin(angleX), math.cos(angleX)]], dtype=float)
    Ry = np.array([[math.cos(angleY), 0, math.sin(angleY)],
                   [0, 1, 0],
                   [-math.sin(angleY), 0, math.cos(angleY)]], dtype=float)
    Rz = np.array([[math.cos(angleZ), -math.sin(angleZ), 0],
                   [math.sin(angleZ), math.cos(angleZ), 0],
                   [0, 0, 1]], dtype=float)
    return np.matmul(np.matmul(Rx, Ry), Rz)
def rotateObject(geometricVertices, rotationMatrix):
    """Apply the rotation to every vertex column: each column becomes R @ v."""
    return rotationMatrix @ geometricVertices
def getAxisAlignedBoundingBox(geometricVertices):
    """Return the axis-aligned bounding box of a 3 x N vertex matrix.

    The result maps 'xmin'/'xmax'/'ymin'/'ymax'/'zmin'/'zmax' to the
    per-axis extrema.
    """
    lo = np.amin(geometricVertices, axis=1)
    hi = np.amax(geometricVertices, axis=1)
    bbox = {}
    for axis, name in enumerate(('x', 'y', 'z')):
        bbox[name + 'min'] = lo[axis]
        bbox[name + 'max'] = hi[axis]
    return bbox
def positionObjectInTheBox(geometricVertices, bbox, com):
    """Translate the object to a uniformly random spot inside the box scene.

    The scene box spans x,y in [-10, 10] and z in [20, 30]; the whole bbox
    must stay inside.  Returns (vertices, True) on success, or the vertices
    unchanged with False when the object cannot fit at all.
    """
    # valid ranges for the object's center so its bbox stays inside the box
    xRange = (-10.0 - bbox['xmin'], 10.0 - bbox['xmax'])
    yRange = (-10.0 - bbox['ymin'], 10.0 - bbox['ymax'])
    zRange = (20.0 - bbox['zmin'], 30.0 - bbox['zmax'])
    # an empty range means the object is too large for the scene
    if xRange[0] > xRange[1] or yRange[0] > yRange[1] or zRange[0] > zRange[1]:
        print("\n\nMisfit\n\n")
        return geometricVertices, False
    # draw a uniform center position and translate the object onto it
    offset = np.array([round(random.uniform(*xRange), 2),
                       round(random.uniform(*yRange), 2),
                       round(random.uniform(*zRange), 2)]).reshape(3, 1)
    return geometricVertices + offset, True
def positionLightInTheBox(geometricVertices, bbox, com):
    """Translate the area light to a random x/z spot on the ceiling (y = 9.5).

    Returns the translated 3 x N vertex matrix.  If the light mesh cannot
    fit inside the box, the vertices are returned unchanged.
    """
    # valid center-of-mass ranges so the light's bbox stays inside the scene
    xComRange = (-10.0 - bbox['xmin'], 10.0 - bbox['xmax'])
    yComRange = (-10.0 - bbox['ymin'], 10.0 - bbox['ymax'])
    zComRange = (20.0 - bbox['zmin'], 30.0 - bbox['zmax'])
    if (xComRange[0] > xComRange[1]) or (yComRange[0] > yComRange[1]) or (zComRange[0] > zComRange[1]):
        # BUGFIX: this branch previously returned (geometricVertices, False)
        # while the success path returns just the vertices; the caller in
        # randomizeObject assigns a single value, so the tuple would have
        # corrupted the mesh downstream.  Return the vertices consistently.
        return geometricVertices
    # generate the position for the com of the light within the valid range
    x = round(random.uniform(xComRange[0], xComRange[1]), 2)
    y = 9.5  # do not change y, the area light has to remain on the ceiling only
    z = round(random.uniform(zComRange[0], zComRange[1]), 2)
    newCom = np.array([x, y, z]).reshape(3, 1)
    return geometricVertices + newCom
def npMatrixToStrings(geometricVertices, dataType):
    """Serialize a 3 x N matrix back into obj-file 'v ' or 'vn ' lines."""
    prefix = 'v ' if dataType == 'geometricVertices' else 'vn '
    lines = []
    for x, y, z in geometricVertices.T:
        lines.append(prefix + str(x) + " " + str(y) + " " + str(z) + "\n")
    return lines
def removeTextureVertices(faces):
    """Drop the last '/'-separated component of every face vertex reference.

    e.g. 'f 1/2/3 4/5/6 7/8/9' -> 'f 1/2 4/5 7/8'
    """
    out = []
    for line in faces:
        refs = line.split()[1:]
        trimmed = [ref[:ref.rfind('/')] for ref in refs]
        # only the first three references are kept (triangles)
        out.append('f ' + trimmed[0] + ' ' + trimmed[1] + ' ' + trimmed[2] + '\n')
    return out
def removeVertexNormals(faces):
    """Strip the trailing '/'-component from each face vertex reference.

    e.g. 'f 1/2/3 4/5/6 7/8/9' -> 'f 1/2 4/5 7/8'
    """
    result = []
    for line in faces:
        refs = line.split()[1:]
        for idx in range(len(refs)):
            cut = refs[idx].rfind('/')
            refs[idx] = refs[idx][:cut]
        result.append('f %s %s %s\n' % (refs[0], refs[1], refs[2]))
    return result
def printFirstThreeVertices(geometricVertices):
    """Debug helper: print the row count and the first six vertex columns.

    Raises IndexError when the matrix has fewer than six columns (kept for
    behavioral parity with the original).
    """
    print(len(geometricVertices))
    for idx in range(6):
        print(geometricVertices.T[idx])
def renderImages(lightType):
    """Invoke nori.exe on every scene file matching the light type.

    For point lights the depth map is not rendered here (that render was
    commented out in favor of the 8-view depth maps).
    """
    if lightType == 'point':
        scenes = ["custom_simple.xml",
                  "custom_light_point.xml",
                  "custom_noShadow_point.xml"]
    else:
        scenes = ["custom_whitted.xml",
                  "custom_light.xml",
                  "custom_depth.xml",
                  "custom_noShadow.xml"]
    for scene in scenes:
        subprocess.run(["nori.exe", scene])
def alignImages(dstFolder, fileName):
    """Concatenate the rendered maps side by side into one training image.

    Writes <dstFolder>/<fileName>_<counter>.png and increments the module
    global `counter`.

    NOTE(review): `lightType` is read from a module global that is only
    assigned in the __main__ block — this function works only when the
    module runs as a script.
    """
    global counter
    # these weird names can be changed if nori.exe is updated :)
    # it was helpful when there was only one xml file
    noShadowImage = cv2.imread('custom_noShadow_point_noShadows.png', cv2.IMREAD_COLOR)
    #depthMapImage = cv2.imread('custom_depth_point_depthMap.png', cv2.IMREAD_COLOR)
    depthMapImage0 = cv2.imread('8viewDepthMap_0.png', cv2.IMREAD_COLOR)
    depthMapImage1 = cv2.imread('8viewDepthMap_1.png', cv2.IMREAD_COLOR)
    depthMapImage2 = cv2.imread('8viewDepthMap_2.png', cv2.IMREAD_COLOR)
    depthMapImage3 = cv2.imread('8viewDepthMap_3.png', cv2.IMREAD_COLOR)
    depthMapImage4 = cv2.imread('8viewDepthMap_4.png', cv2.IMREAD_COLOR)
    depthMapImage5 = cv2.imread('8viewDepthMap_5.png', cv2.IMREAD_COLOR)
    depthMapImage6 = cv2.imread('8viewDepthMap_6.png', cv2.IMREAD_COLOR)
    depthMapImage7 = cv2.imread('8viewDepthMap_7.png', cv2.IMREAD_COLOR)
    lightMapImage = cv2.imread('custom_light_point_lightDepth.png', cv2.IMREAD_COLOR)
    groundTruthImage = cv2.imread('custom_simple_simple.png', cv2.IMREAD_COLOR)
    if lightType == 'area':
        # NOTE(review): depthMapImage is assigned here but never used in the
        # concatenation below (only the eight 8-view depth maps are) — confirm.
        noShadowImage = cv2.imread('custom_noShadow.png', cv2.IMREAD_COLOR)
        depthMapImage = cv2.imread('custom_depth.png', cv2.IMREAD_COLOR)
        lightMapImage = cv2.imread('custom_light.png', cv2.IMREAD_COLOR)
        groundTruthImage = cv2.imread('custom_whitted.png', cv2.IMREAD_COLOR)
    # stack all maps horizontally: noShadow | light | 8 depth views | ground truth
    alignedImage = np.concatenate((noShadowImage, lightMapImage, depthMapImage0, depthMapImage1, depthMapImage2, depthMapImage3, depthMapImage4, depthMapImage5, depthMapImage6, depthMapImage7, groundTruthImage), axis=1)
    cv2.imwrite(os.path.join(dstFolder, fileName + '_' + str(counter).zfill(4) + '.png'), alignedImage)
    counter += 1
    #cv2.imwrite(os.path.join(dstFolder, fileName + '.png'), groundTruthImage)
def randomChooseK(inList, k):
    """Pop k randomly chosen elements out of inList and return them.

    NOTE: inList is mutated in place; splitImages relies on the removal to
    partition the remaining elements.
    """
    picked = []
    for _ in range(k):
        pos = random.choice(range(len(inList)))
        picked.append(inList.pop(pos))
    return picked
def splitImages(dstFolder, valCount, testCount, alignedImages):
    """Partition the aligned images into train/val/test sub-folders.

    Args:
        dstFolder: destination folder; 'train', 'test' and 'val' sub-folders
            are created inside it (must not already exist).
        valCount: number of images for the validation set.
        testCount: number of images for the test set.
        alignedImages: list of image paths; mutated in place by
            randomChooseK — whatever remains goes into the train set.
    """
    os.mkdir(os.path.join(dstFolder, 'train'))
    os.mkdir(os.path.join(dstFolder, 'test'))
    os.mkdir(os.path.join(dstFolder, 'val'))
    # randomly choose images for validation set
    valAlignedImages = randomChooseK(alignedImages, valCount)
    # now randomly choose images for test set
    testAlignedImages = randomChooseK(alignedImages, testCount)
    # remaining images go in train set
    trainAlignedImages = alignedImages
    # move the images to their respective folders, renaming them 0..n-1.png
    for index, imagePath in enumerate(valAlignedImages):
        shutil.move(imagePath, os.path.join(dstFolder, os.path.join('val', str(index) + '.png')))
    for index, imagePath in enumerate(testAlignedImages):
        shutil.move(imagePath, os.path.join(dstFolder, os.path.join('test', str(index) + '.png')))
    for index, imagePath in enumerate(trainAlignedImages):
        shutil.move(imagePath, os.path.join(dstFolder, os.path.join('train', str(index) + '.png')))
def randomizeObject(meshFile, resize, meshIsAreaLight=False):
    """Load an .obj mesh, randomly rotate and place it inside the box scene,
    and write the result to tempMesh.obj (tempLight.obj for area lights).

    Args:
        meshFile: path of the source .obj file.
        resize: scale divisor forwarded to centerAndScaleObject.
        meshIsAreaLight: when True, skip rotation/scaling and position the
            mesh as the ceiling area light instead.

    Returns:
        bool: False when the object could not be placed inside the box
        (no file is written in that case), True otherwise.
    """
    fileName = meshFile
    objFile = open(fileName, 'r')
    # sort all the strings in their corresponding lists
    textureVertices = []
    geometricVertices = []
    vertexNormals = []
    faces = []
    for line in objFile:
        # NOTE: 'vt'/'vn' must be tested before the single-char 'v' prefix
        if line[:2] == 'vt':
            # texture vertices
            textureVertices.append(line)
        elif line[:2] == 'vn':
            # vertex normals
            vertexNormals.append(line)
        elif line[0] == 'v':
            # geometricVertices
            geometricVertices.append(line)
        elif line[0] == 'f':
            # faces
            faces.append(line)
        else:
            continue
    objFile.close()
    # create numpy matrix from the vertices string
    geometricVertices = createNumpyMatrix(geometricVertices)
    # compute the center of mass of the geometric vertices matrix
    com = getCenterOfMass(geometricVertices)
    # arrange the vertices around the center of mass
    # scale the object so that its vertices have 2 units standard deviation from the mean
    geometricVertices = centerAndScaleObject(geometricVertices, com, resize, meshIsAreaLight)
    if not meshIsAreaLight:
        # ROTATION SHOULD HAPPEN AFTER CENTERING AND SCALING THE OBJECT AND BEFORE TRANSLATING IT
        # TO ITS NEW POSITION, IT BECOMES EASIER THAT WAY
        # create rotation matrix if needed (random angles when none given)
        rotationMatrix = getRotationMatrix()
        # rotate the object
        geometricVertices = rotateObject(geometricVertices, rotationMatrix)
        # CAUTION! MIGHT NEED TO CHANGE THE VERTEX NORMALS TOO
        # it probably was causing problems, so also rotating the vertex normals now!
        vertexNormals = createNumpyMatrix(vertexNormals)
        vertexNormals = rotateObject(vertexNormals, rotationMatrix)
    # get axis aligned bounding box of the object
    bbox = getAxisAlignedBoundingBox(geometricVertices)
    # bbox will have 6 elements
    # xLeft, xRight, yTop, yBottom, zNear, zFar
    bRenderImage = True
    if not meshIsAreaLight:
        # translate the object to position it SOMEWHERE in the box scene
        geometricVertices, bRenderImage = positionObjectInTheBox(geometricVertices, bbox, com)
    else:
        # translate the area light to some position outside the box
        geometricVertices = positionLightInTheBox(geometricVertices, bbox, com)
    # if object cannot be positioned in the box, skip it, do not render that image
    if not bRenderImage:
        return bRenderImage
    # convert the modified geometricVertices back to strings
    geometricVertices = npMatrixToStrings(geometricVertices, 'geometricVertices')
    if not meshIsAreaLight:
        # convert the modified vertexNormals back to strings
        vertexNormals = npMatrixToStrings(vertexNormals, 'vertexNormals')
    # remove texture vertices information from faces list
    faces = removeVertexNormals(faces)
    faces = removeTextureVertices(faces)
    # create a temporary obj file for the modified object
    if meshIsAreaLight:
        fileName = 'tempLight.obj'
    else:
        fileName = 'tempMesh.obj'
    objFile = open(fileName, 'w')
    # write the geometric vertices to file first up
    for line in geometricVertices:
        objFile.write(line)
    # next fill up the texture vertices (currently omitted)
    objFile.write("\n")
    #for line in textureVertices:
    #    objFile.write(line)
    # next fill up the vertex normals (currently omitted)
    objFile.write("\n")
    #for line in vertexNormals:
    #    objFile.write(line)
    # next fill up the faces
    objFile.write("\n")
    for line in faces:
        objFile.write(line)
    objFile.close()
    return bRenderImage
def put_rand_light_pos(random_pos_str, xml_file_name):
    """Rewrite xml_file_name in place, replacing any line containing 'position'.

    fileinput with inplace=1 redirects stdout into the file, so every line
    is echoed back unchanged except the matching ones, which are swapped
    for random_pos_str.
    """
    for current in fileinput.input(xml_file_name, inplace=1):
        if 'position' in current:
            current = random_pos_str + '\n'
        sys.stdout.write(current)
def randomizeLight(lightType):
    """Randomize the light for the next render.

    Area lights are repositioned by rewriting the light mesh via
    randomizeObject (which skips rotation for area lights).  Point lights
    get a random position patched into every point-light scene XML.
    """
    if lightType == 'area':
        # path relative to the working directory where nori.exe runs
        randomizeObject('./light.obj', resize=1, meshIsAreaLight=True)
        return
    # point light: sample within the scene x/y extent, but with z in
    # [10, 20] so the light stays outside the box (the box spans z 20..30)
    margin = 1  # don't want light exactly on edge of box
    randX = random.uniform(-10 + margin, 10 - margin)
    randY = random.uniform(-10 + margin, 10 - margin)
    randZ = random.uniform(10 + margin, 20 - margin)
    rand_pos = str(randX) + "," + str(randY) + "," + str(randZ)
    random_pos_str = "<point name=\"position\" value=\"" + rand_pos + "\"/>"
    # patch the position into every scene file used for point-light renders
    for xml_name in ('custom_simple.xml', 'custom_depth_point.xml',
                     'custom_light_point.xml', 'custom_noShadow_point.xml'):
        put_rand_light_pos(random_pos_str, xml_name)
def get8Views(meshFile):
    """Render 8 canonical depth-map views of the mesh into 8viewDepthMap_0..7.png.

    The views are every combination of +-45 degree rotations about X and
    four azimuths about Y.  Each view rewrites tempMesh.obj and renders it
    with nori.exe via custom_8view_depth.xml.

    NOTE(review): reads the module-global `resize` assigned only in the
    __main__ block — this function works only when run as a script.
    """
    # remove stale depth maps from a previous mesh
    if os.path.exists('8viewDepthMap_0.png'):
        os.remove('8viewDepthMap_0.png')
    if os.path.exists('8viewDepthMap_1.png'):
        os.remove('8viewDepthMap_1.png')
    if os.path.exists('8viewDepthMap_2.png'):
        os.remove('8viewDepthMap_2.png')
    if os.path.exists('8viewDepthMap_3.png'):
        os.remove('8viewDepthMap_3.png')
    if os.path.exists('8viewDepthMap_4.png'):
        os.remove('8viewDepthMap_4.png')
    if os.path.exists('8viewDepthMap_5.png'):
        os.remove('8viewDepthMap_5.png')
    if os.path.exists('8viewDepthMap_6.png'):
        os.remove('8viewDepthMap_6.png')
    if os.path.exists('8viewDepthMap_7.png'):
        os.remove('8viewDepthMap_7.png')
    # (angleX, angleY) pairs for the eight viewpoints
    views = [(math.pi/4, math.pi/4), (math.pi/4, -math.pi/4), (math.pi/4, 3*math.pi/4), (math.pi/4, -3*math.pi/4),
            (-math.pi/4, math.pi/4), (-math.pi/4, -math.pi/4), (-math.pi/4, 3*math.pi/4), (-math.pi/4, -3*math.pi/4)]
    for index, view in enumerate(views):
        fileName = meshFile
        objFile = open(fileName, 'r')
        # sort all the strings in their corresponding lists
        textureVertices = []
        geometricVertices = []
        vertexNormals = []
        faces = []
        for line in objFile:
            if line[:2] == 'vt':
                # texture vertices
                textureVertices.append(line)
            elif line[:2] == 'vn':
                # vertex normals
                vertexNormals.append(line)
            elif line[0] == 'v':
                # geometricVertices
                geometricVertices.append(line)
            elif line[0] == 'f':
                # faces
                faces.append(line)
            else:
                continue
        objFile.close()
        # create numpy matrix from the vertices string
        geometricVertices = createNumpyMatrix(geometricVertices)
        # compute the center of mass of the geometric vertices matrix
        com = getCenterOfMass(geometricVertices)
        # arrange the vertices around the center of mass
        # scale the object so that its vertices have 2 units standard deviation from the mean
        geometricVertices = centerAndScaleObject(geometricVertices, com, resize, meshIsAreaLight=False)
        # deterministic rotation for this viewpoint
        rotationMatrix = getRotationMatrix(angleX=view[0], angleY=view[1], angleZ=0)
        # rotate the object
        geometricVertices = rotateObject(geometricVertices, rotationMatrix)
        # CAUTION! MIGHT NEED TO CHANGE THE VERTEX NORMALS TOO
        # it probably was causing problems, so also rotating the vertex normals now!
        vertexNormals = createNumpyMatrix(vertexNormals)
        vertexNormals = rotateObject(vertexNormals, rotationMatrix)
        # position object in center of scene - this scene has no box
        geometricVertices += np.array([0,0,25]).reshape(3,1)
        # convert the modified geometricVertices back to strings
        geometricVertices = npMatrixToStrings(geometricVertices, 'geometricVertices')
        # convert the modified vertexNormals back to strings
        vertexNormals = npMatrixToStrings(vertexNormals, 'vertexNormals')
        # remove texture vertices information from faces list
        faces = removeVertexNormals(faces)
        faces = removeTextureVertices(faces)
        # create a temporary obj file for the modified object
        fileName = 'tempMesh.obj'
        objFile = open(fileName, 'w')
        # write the geometric vertices to file first up
        for line in geometricVertices:
            objFile.write(line)
        # next fill up the texture vertices (currently omitted)
        objFile.write("\n")
        #for line in textureVertices:
        #    objFile.write(line)
        # next fill up the vertex normals (currently omitted)
        objFile.write("\n")
        #for line in vertexNormals:
        #    objFile.write(line)
        # next fill up the faces
        objFile.write("\n")
        for line in faces:
            objFile.write(line)
        objFile.close()
        # render the depthmap and archive it under its view index
        subprocess.run(["nori.exe", "custom_8view_depth.xml"])
        os.rename('custom_8view_depth_depthMap.png', '8viewDepthMap_' + str(index) + '.png')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(prog="Data generator")
    parser.add_argument('-mesh', '--mesh_folder_path', required=True, help="Path to meshes folder")
    parser.add_argument('-light', '--lightType', required=True, help="Light type: 'point' or 'area'")
    parser.add_argument('-dst', '--dst_folder_path', required=True, help="Folder name where aligned images are to be saved")
    parser.add_argument('-val', type=int, required=True, help="Percentage of images in validation set")
    parser.add_argument('-test', type=int, required=True, help="Percentage of images in test set")
    args = parser.parse_args()
    meshFolder = args.mesh_folder_path
    # lightType and resize are read as module globals by alignImages/get8Views
    lightType = args.lightType
    dstFolder = args.dst_folder_path
    valPercentage = args.val
    testPercentage = args.test
    # collect every .obj file directly inside the mesh folder
    meshFiles = [os.path.join(meshFolder, f) for f in sorted(os.listdir(meshFolder)) if os.path.isfile(os.path.join(meshFolder, f)) and os.path.splitext(f)[1] == ".obj"]
    # create directory to store newly generated aligned images
    if os.path.exists(dstFolder):
        shutil.rmtree(dstFolder)
    bRenderImage = False
    resize = 2
    os.mkdir(dstFolder)
    # NOTE(review): `global` at module level is a no-op; kept for clarity
    # that alignImages mutates the shared counter.
    global counter
    for meshFile in meshFiles:
        print(meshFile)
        # strip the folder prefix and the .obj suffix for the output file name
        fileName = meshFile[meshFile.find('\\')+1:meshFile.find('.obj')]
        get8Views(meshFile)
        counter = 0
        for i in range(10):
            # create images for 10 random poses of this mesh
            bRenderImage = randomizeObject(meshFile, resize)
            if not bRenderImage:
                continue
            randomizeLight(lightType)
            # render the images now!
            renderImages(lightType)
            alignImages(dstFolder, fileName)
    # split the images into train, test and validation folders
    alignedImages = [os.path.join(dstFolder, f) for f in os.listdir(dstFolder) if os.path.isfile(os.path.join(dstFolder, f))]
    imgCount = len(alignedImages)
    valCount = int(valPercentage * imgCount / 100)
    testCount = int(testPercentage * imgCount / 100)
    print("valCount: ", valCount)
    print("testCount: ", testCount)
#splitImages(dstFolder, valCount, testCount, alignedImages) | [
"math.cos",
"numpy.array",
"os.remove",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"subprocess.run",
"numpy.asarray",
"numpy.matmul",
"os.mkdir",
"numpy.concatenate",
"fileinput.input",
"random.uniform",
"numpy.amin",
"numpy.average",
"os.path.splitext",
"numpy.std",
... | [((589, 609), 'numpy.asarray', 'np.asarray', (['vertices'], {}), '(vertices)\n', (599, 609), True, 'import numpy as np\n'), ((766, 803), 'numpy.average', 'np.average', (['geometricVertices'], {'axis': '(1)'}), '(geometricVertices, axis=1)\n', (776, 803), True, 'import numpy as np\n'), ((2243, 2287), 'numpy.matmul', 'np.matmul', (['rotationMatrix', 'geometricVertices'], {}), '(rotationMatrix, geometricVertices)\n', (2252, 2287), True, 'import numpy as np\n'), ((2387, 2421), 'numpy.amin', 'np.amin', (['geometricVertices'], {'axis': '(1)'}), '(geometricVertices, axis=1)\n', (2394, 2421), True, 'import numpy as np\n'), ((2433, 2467), 'numpy.amax', 'np.amax', (['geometricVertices'], {'axis': '(1)'}), '(geometricVertices, axis=1)\n', (2440, 2467), True, 'import numpy as np\n'), ((8008, 8075), 'cv2.imread', 'cv2.imread', (['"""custom_noShadow_point_noShadows.png"""', 'cv2.IMREAD_COLOR'], {}), "('custom_noShadow_point_noShadows.png', cv2.IMREAD_COLOR)\n", (8018, 8075), False, 'import cv2\n'), ((8182, 8233), 'cv2.imread', 'cv2.imread', (['"""8viewDepthMap_0.png"""', 'cv2.IMREAD_COLOR'], {}), "('8viewDepthMap_0.png', cv2.IMREAD_COLOR)\n", (8192, 8233), False, 'import cv2\n'), ((8255, 8306), 'cv2.imread', 'cv2.imread', (['"""8viewDepthMap_1.png"""', 'cv2.IMREAD_COLOR'], {}), "('8viewDepthMap_1.png', cv2.IMREAD_COLOR)\n", (8265, 8306), False, 'import cv2\n'), ((8328, 8379), 'cv2.imread', 'cv2.imread', (['"""8viewDepthMap_2.png"""', 'cv2.IMREAD_COLOR'], {}), "('8viewDepthMap_2.png', cv2.IMREAD_COLOR)\n", (8338, 8379), False, 'import cv2\n'), ((8401, 8452), 'cv2.imread', 'cv2.imread', (['"""8viewDepthMap_3.png"""', 'cv2.IMREAD_COLOR'], {}), "('8viewDepthMap_3.png', cv2.IMREAD_COLOR)\n", (8411, 8452), False, 'import cv2\n'), ((8474, 8525), 'cv2.imread', 'cv2.imread', (['"""8viewDepthMap_4.png"""', 'cv2.IMREAD_COLOR'], {}), "('8viewDepthMap_4.png', cv2.IMREAD_COLOR)\n", (8484, 8525), False, 'import cv2\n'), ((8547, 8598), 'cv2.imread', 'cv2.imread', 
(['"""8viewDepthMap_5.png"""', 'cv2.IMREAD_COLOR'], {}), "('8viewDepthMap_5.png', cv2.IMREAD_COLOR)\n", (8557, 8598), False, 'import cv2\n'), ((8620, 8671), 'cv2.imread', 'cv2.imread', (['"""8viewDepthMap_6.png"""', 'cv2.IMREAD_COLOR'], {}), "('8viewDepthMap_6.png', cv2.IMREAD_COLOR)\n", (8630, 8671), False, 'import cv2\n'), ((8693, 8744), 'cv2.imread', 'cv2.imread', (['"""8viewDepthMap_7.png"""', 'cv2.IMREAD_COLOR'], {}), "('8viewDepthMap_7.png', cv2.IMREAD_COLOR)\n", (8703, 8744), False, 'import cv2\n'), ((8766, 8831), 'cv2.imread', 'cv2.imread', (['"""custom_light_point_lightDepth.png"""', 'cv2.IMREAD_COLOR'], {}), "('custom_light_point_lightDepth.png', cv2.IMREAD_COLOR)\n", (8776, 8831), False, 'import cv2\n'), ((8855, 8911), 'cv2.imread', 'cv2.imread', (['"""custom_simple_simple.png"""', 'cv2.IMREAD_COLOR'], {}), "('custom_simple_simple.png', cv2.IMREAD_COLOR)\n", (8865, 8911), False, 'import cv2\n'), ((9261, 9469), 'numpy.concatenate', 'np.concatenate', (['(noShadowImage, lightMapImage, depthMapImage0, depthMapImage1,\n depthMapImage2, depthMapImage3, depthMapImage4, depthMapImage5,\n depthMapImage6, depthMapImage7, groundTruthImage)'], {'axis': '(1)'}), '((noShadowImage, lightMapImage, depthMapImage0,\n depthMapImage1, depthMapImage2, depthMapImage3, depthMapImage4,\n depthMapImage5, depthMapImage6, depthMapImage7, groundTruthImage), axis=1)\n', (9275, 9469), True, 'import numpy as np\n'), ((14641, 14682), 'fileinput.input', 'fileinput.input', (['xml_file_name'], {'inplace': '(1)'}), '(xml_file_name, inplace=1)\n', (14656, 14682), False, 'import fileinput\n'), ((15786, 15834), 'random.uniform', 'random.uniform', (['(scene_min_x + d)', '(scene_max_x - d)'], {}), '(scene_min_x + d, scene_max_x - d)\n', (15800, 15834), False, 'import random\n'), ((15847, 15895), 'random.uniform', 'random.uniform', (['(scene_min_y + d)', '(scene_max_y - d)'], {}), '(scene_min_y + d, scene_max_y - d)\n', (15861, 15895), False, 'import random\n'), ((15908, 15956), 
'random.uniform', 'random.uniform', (['(scene_min_z + d)', '(scene_max_z - d)'], {}), '(scene_min_z + d, scene_max_z - d)\n', (15922, 15956), False, 'import random\n'), ((16391, 16428), 'os.path.exists', 'os.path.exists', (['"""8viewDepthMap_0.png"""'], {}), "('8viewDepthMap_0.png')\n", (16405, 16428), False, 'import os\n'), ((16478, 16515), 'os.path.exists', 'os.path.exists', (['"""8viewDepthMap_1.png"""'], {}), "('8viewDepthMap_1.png')\n", (16492, 16515), False, 'import os\n'), ((16565, 16602), 'os.path.exists', 'os.path.exists', (['"""8viewDepthMap_2.png"""'], {}), "('8viewDepthMap_2.png')\n", (16579, 16602), False, 'import os\n'), ((16652, 16689), 'os.path.exists', 'os.path.exists', (['"""8viewDepthMap_3.png"""'], {}), "('8viewDepthMap_3.png')\n", (16666, 16689), False, 'import os\n'), ((16739, 16776), 'os.path.exists', 'os.path.exists', (['"""8viewDepthMap_4.png"""'], {}), "('8viewDepthMap_4.png')\n", (16753, 16776), False, 'import os\n'), ((16826, 16863), 'os.path.exists', 'os.path.exists', (['"""8viewDepthMap_5.png"""'], {}), "('8viewDepthMap_5.png')\n", (16840, 16863), False, 'import os\n'), ((16913, 16950), 'os.path.exists', 'os.path.exists', (['"""8viewDepthMap_6.png"""'], {}), "('8viewDepthMap_6.png')\n", (16927, 16950), False, 'import os\n'), ((17000, 17037), 'os.path.exists', 'os.path.exists', (['"""8viewDepthMap_7.png"""'], {}), "('8viewDepthMap_7.png')\n", (17014, 17037), False, 'import os\n'), ((20567, 20613), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""Data generator"""'}), "(prog='Data generator')\n", (20590, 20613), False, 'import argparse\n'), ((21597, 21622), 'os.path.exists', 'os.path.exists', (['dstFolder'], {}), '(dstFolder)\n', (21611, 21622), False, 'import os\n'), ((21702, 21721), 'os.mkdir', 'os.mkdir', (['dstFolder'], {}), '(dstFolder)\n', (21710, 21721), False, 'import os\n'), ((1068, 1101), 'numpy.std', 'np.std', (['geometricVertices'], {'axis': '(1)'}), '(geometricVertices, axis=1)\n', (1074, 1101), True, 
'import numpy as np\n'), ((2008, 2025), 'numpy.matmul', 'np.matmul', (['Rx', 'Ry'], {}), '(Rx, Ry)\n', (2017, 2025), True, 'import numpy as np\n'), ((3667, 3709), 'random.uniform', 'random.uniform', (['xComRange[0]', 'xComRange[1]'], {}), '(xComRange[0], xComRange[1])\n', (3681, 3709), False, 'import random\n'), ((3728, 3770), 'random.uniform', 'random.uniform', (['yComRange[0]', 'yComRange[1]'], {}), '(yComRange[0], yComRange[1])\n', (3742, 3770), False, 'import random\n'), ((3789, 3831), 'random.uniform', 'random.uniform', (['zComRange[0]', 'zComRange[1]'], {}), '(zComRange[0], zComRange[1])\n', (3803, 3831), False, 'import random\n'), ((4771, 4813), 'random.uniform', 'random.uniform', (['xComRange[0]', 'xComRange[1]'], {}), '(xComRange[0], xComRange[1])\n', (4785, 4813), False, 'import random\n'), ((4912, 4954), 'random.uniform', 'random.uniform', (['zComRange[0]', 'zComRange[1]'], {}), '(zComRange[0], zComRange[1])\n', (4926, 4954), False, 'import random\n'), ((7324, 7373), 'subprocess.run', 'subprocess.run', (["['nori.exe', 'custom_simple.xml']"], {}), "(['nori.exe', 'custom_simple.xml'])\n", (7338, 7373), False, 'import subprocess\n'), ((7382, 7436), 'subprocess.run', 'subprocess.run', (["['nori.exe', 'custom_light_point.xml']"], {}), "(['nori.exe', 'custom_light_point.xml'])\n", (7396, 7436), False, 'import subprocess\n'), ((7510, 7567), 'subprocess.run', 'subprocess.run', (["['nori.exe', 'custom_noShadow_point.xml']"], {}), "(['nori.exe', 'custom_noShadow_point.xml'])\n", (7524, 7567), False, 'import subprocess\n'), ((7586, 7636), 'subprocess.run', 'subprocess.run', (["['nori.exe', 'custom_whitted.xml']"], {}), "(['nori.exe', 'custom_whitted.xml'])\n", (7600, 7636), False, 'import subprocess\n'), ((7645, 7693), 'subprocess.run', 'subprocess.run', (["['nori.exe', 'custom_light.xml']"], {}), "(['nori.exe', 'custom_light.xml'])\n", (7659, 7693), False, 'import subprocess\n'), ((7702, 7750), 'subprocess.run', 'subprocess.run', (["['nori.exe', 
'custom_depth.xml']"], {}), "(['nori.exe', 'custom_depth.xml'])\n", (7716, 7750), False, 'import subprocess\n'), ((7759, 7810), 'subprocess.run', 'subprocess.run', (["['nori.exe', 'custom_noShadow.xml']"], {}), "(['nori.exe', 'custom_noShadow.xml'])\n", (7773, 7810), False, 'import subprocess\n'), ((8965, 9016), 'cv2.imread', 'cv2.imread', (['"""custom_noShadow.png"""', 'cv2.IMREAD_COLOR'], {}), "('custom_noShadow.png', cv2.IMREAD_COLOR)\n", (8975, 9016), False, 'import cv2\n'), ((9041, 9089), 'cv2.imread', 'cv2.imread', (['"""custom_depth.png"""', 'cv2.IMREAD_COLOR'], {}), "('custom_depth.png', cv2.IMREAD_COLOR)\n", (9051, 9089), False, 'import cv2\n'), ((9114, 9162), 'cv2.imread', 'cv2.imread', (['"""custom_light.png"""', 'cv2.IMREAD_COLOR'], {}), "('custom_light.png', cv2.IMREAD_COLOR)\n", (9124, 9162), False, 'import cv2\n'), ((9190, 9240), 'cv2.imread', 'cv2.imread', (['"""custom_whitted.png"""', 'cv2.IMREAD_COLOR'], {}), "('custom_whitted.png', cv2.IMREAD_COLOR)\n", (9200, 9240), False, 'import cv2\n'), ((9923, 9955), 'os.path.join', 'os.path.join', (['dstFolder', '"""train"""'], {}), "(dstFolder, 'train')\n", (9935, 9955), False, 'import os\n'), ((9970, 10001), 'os.path.join', 'os.path.join', (['dstFolder', '"""test"""'], {}), "(dstFolder, 'test')\n", (9982, 10001), False, 'import os\n'), ((10016, 10046), 'os.path.join', 'os.path.join', (['dstFolder', '"""val"""'], {}), "(dstFolder, 'val')\n", (10028, 10046), False, 'import os\n'), ((14975, 14997), 'sys.stdout.write', 'sys.stdout.write', (['line'], {}), '(line)\n', (14991, 14997), False, 'import sys\n'), ((16438, 16470), 'os.remove', 'os.remove', (['"""8viewDepthMap_0.png"""'], {}), "('8viewDepthMap_0.png')\n", (16447, 16470), False, 'import os\n'), ((16525, 16557), 'os.remove', 'os.remove', (['"""8viewDepthMap_1.png"""'], {}), "('8viewDepthMap_1.png')\n", (16534, 16557), False, 'import os\n'), ((16612, 16644), 'os.remove', 'os.remove', (['"""8viewDepthMap_2.png"""'], {}), "('8viewDepthMap_2.png')\n", 
(16621, 16644), False, 'import os\n'), ((16699, 16731), 'os.remove', 'os.remove', (['"""8viewDepthMap_3.png"""'], {}), "('8viewDepthMap_3.png')\n", (16708, 16731), False, 'import os\n'), ((16786, 16818), 'os.remove', 'os.remove', (['"""8viewDepthMap_4.png"""'], {}), "('8viewDepthMap_4.png')\n", (16795, 16818), False, 'import os\n'), ((16873, 16905), 'os.remove', 'os.remove', (['"""8viewDepthMap_5.png"""'], {}), "('8viewDepthMap_5.png')\n", (16882, 16905), False, 'import os\n'), ((16960, 16992), 'os.remove', 'os.remove', (['"""8viewDepthMap_6.png"""'], {}), "('8viewDepthMap_6.png')\n", (16969, 16992), False, 'import os\n'), ((17047, 17079), 'os.remove', 'os.remove', (['"""8viewDepthMap_7.png"""'], {}), "('8viewDepthMap_7.png')\n", (17056, 17079), False, 'import os\n'), ((20375, 20429), 'subprocess.run', 'subprocess.run', (["['nori.exe', 'custom_8view_depth.xml']"], {}), "(['nori.exe', 'custom_8view_depth.xml'])\n", (20389, 20429), False, 'import subprocess\n'), ((21369, 21396), 'os.path.join', 'os.path.join', (['meshFolder', 'f'], {}), '(meshFolder, f)\n', (21381, 21396), False, 'import os\n'), ((21632, 21656), 'shutil.rmtree', 'shutil.rmtree', (['dstFolder'], {}), '(dstFolder)\n', (21645, 21656), False, 'import shutil\n'), ((22368, 22394), 'os.path.join', 'os.path.join', (['dstFolder', 'f'], {}), '(dstFolder, f)\n', (22380, 22394), False, 'import os\n'), ((1453, 1483), 'random.uniform', 'random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (1467, 1483), False, 'import random\n'), ((1509, 1539), 'random.uniform', 'random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (1523, 1539), False, 'import random\n'), ((1565, 1595), 'random.uniform', 'random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (1579, 1595), False, 'import random\n'), ((3944, 3963), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (3952, 3963), True, 'import numpy as np\n'), ((5067, 5086), 'numpy.array', 'np.array', (['[x, y, z]'], 
{}), '([x, y, z])\n', (5075, 5086), True, 'import numpy as np\n'), ((22404, 22425), 'os.listdir', 'os.listdir', (['dstFolder'], {}), '(dstFolder)\n', (22414, 22425), False, 'import os\n'), ((1633, 1649), 'math.cos', 'math.cos', (['angleX'], {}), '(angleX)\n', (1641, 1649), False, 'import math\n'), ((1675, 1691), 'math.sin', 'math.sin', (['angleX'], {}), '(angleX)\n', (1683, 1691), False, 'import math\n'), ((1693, 1709), 'math.cos', 'math.cos', (['angleX'], {}), '(angleX)\n', (1701, 1709), False, 'import math\n'), ((1749, 1765), 'math.cos', 'math.cos', (['angleY'], {}), '(angleY)\n', (1757, 1765), False, 'import math\n'), ((1770, 1786), 'math.sin', 'math.sin', (['angleY'], {}), '(angleY)\n', (1778, 1786), False, 'import math\n'), ((1823, 1839), 'math.cos', 'math.cos', (['angleY'], {}), '(angleY)\n', (1831, 1839), False, 'import math\n'), ((1879, 1895), 'math.cos', 'math.cos', (['angleZ'], {}), '(angleZ)\n', (1887, 1895), False, 'import math\n'), ((1921, 1937), 'math.sin', 'math.sin', (['angleZ'], {}), '(angleZ)\n', (1929, 1937), False, 'import math\n'), ((1939, 1955), 'math.cos', 'math.cos', (['angleZ'], {}), '(angleZ)\n', (1947, 1955), False, 'import math\n'), ((19175, 19195), 'numpy.array', 'np.array', (['[0, 0, 25]'], {}), '([0, 0, 25])\n', (19183, 19195), True, 'import numpy as np\n'), ((21413, 21435), 'os.listdir', 'os.listdir', (['meshFolder'], {}), '(meshFolder)\n', (21423, 21435), False, 'import os\n'), ((22444, 22470), 'os.path.join', 'os.path.join', (['dstFolder', 'f'], {}), '(dstFolder, f)\n', (22456, 22470), False, 'import os\n'), ((1652, 1668), 'math.sin', 'math.sin', (['angleX'], {}), '(angleX)\n', (1660, 1668), False, 'import math\n'), ((1802, 1818), 'math.sin', 'math.sin', (['angleY'], {}), '(angleY)\n', (1810, 1818), False, 'import math\n'), ((1898, 1914), 'math.sin', 'math.sin', (['angleZ'], {}), '(angleZ)\n', (1906, 1914), False, 'import math\n'), ((21455, 21482), 'os.path.join', 'os.path.join', (['meshFolder', 'f'], {}), '(meshFolder, f)\n', 
(21467, 21482), False, 'import os\n'), ((21488, 21507), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (21504, 21507), False, 'import os\n')] |
from panda_env import PandaEnv
from spinup import ppo_pytorch as ppo
import torch.nn as nn
import gym
import gym_panda
import matplotlib.pyplot as plt
import numpy as np
import cv2
import spinup.algos.pytorch.ppo.core as core
class ProcessFrame84(gym.ObservationWrapper):
    """Observation wrapper that renders the environment and downsamples the
    frame to an 84x84 single-channel (grayscale) uint8 image.
    """
    def __init__(self, env=None):
        super(ProcessFrame84, self).__init__(env)
        self.env = env
        self.observation_space = gym.spaces.Box(
            low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)
    def observation(self, obs):
        # The raw observation is ignored; the frame comes from the renderer.
        return ProcessFrame84.process(self.env.render(mode='rgb_array'))
    @staticmethod
    def process(frame):
        """Convert a 720x960x3 RGB frame into an 84x84x1 uint8 grayscale image.

        Raises:
            AssertionError: If the frame is not 720x960x3.
        """
        if frame.size == 720 * 960 * 3:
            img = np.reshape(frame, [720, 960, 3]).astype(
                np.float32)
        else:
            assert False, "Unknown resolution."
        # ITU-R 601 luma conversion. Fixed the red coefficient from 0.399 to
        # 0.299 so the weights sum to 1.0 and bright pixels cannot overflow.
        img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + \
              img[:, :, 2] * 0.114
        resized_screen = cv2.resize(
            img, (112, 84), interpolation=cv2.INTER_AREA)
        # Crop the central 84 columns to make the image square.
        y_t = resized_screen[:, 14:98]
        y_t = np.reshape(y_t, [84, 84, 1])
        return y_t.astype(np.uint8)
class ImageToPyTorch(gym.ObservationWrapper):
    """Observation wrapper converting HWC image observations to CHW order,
    the channel layout expected by PyTorch convolutions.
    """
    def __init__(self, env):
        super(ImageToPyTorch, self).__init__(env)
        hwc = self.observation_space.shape
        chw = (hwc[-1], hwc[0], hwc[1])
        self.observation_space = gym.spaces.Box(
            low=0.0, high=1.0, shape=chw, dtype=np.float32)
    def observation(self, observation):
        # Move the channel axis to the front: (H, W, C) -> (C, H, W).
        return np.moveaxis(observation, 2, 0)
class MoveTowardZ(gym.ActionWrapper):
    """Action wrapper that pins the z component of every action to a constant
    downward value, forcing the agent to always move toward the table.
    """
    def __init__(self, env):
        super(MoveTowardZ, self).__init__(env)
    def action(self, action):
        # Overwrite the third action component in place with a fixed -0.3.
        action[2] = -.3
        return action
# Build the panda environment and stack the observation/action wrappers:
# render -> 84x84 grayscale -> CHW tensor -> forced downward z motion.
env = gym.make('panda-v0')
env = ProcessFrame84(env)
env = ImageToPyTorch(env)
env = MoveTowardZ(env)
# image = env.reset()
# plt.figure()
# plt.imshow(image.squeeze(),cmap='gray')
# plt.title('Example extracted screen')
# plt.show()
# spinup's ppo expects a zero-argument environment factory.
env_fn = lambda : env
# Actor-critic network sizes and logger configuration for the PPO run.
ac_kwargs = dict(hidden_sizes=[18,64,64], activation=nn.ReLU)
logger_kwargs = dict(output_dir='spinup', exp_name='panda_ppo')
ppo(env_fn=env_fn,actor_critic=core.CNNActorCritic, ac_kwargs=ac_kwargs, steps_per_epoch=5000, epochs=250, logger_kwargs=logger_kwargs) | [
"numpy.reshape",
"gym.spaces.Box",
"spinup.ppo_pytorch",
"numpy.moveaxis",
"cv2.resize",
"gym.make"
] | [((1769, 1789), 'gym.make', 'gym.make', (['"""panda-v0"""'], {}), "('panda-v0')\n", (1777, 1789), False, 'import gym\n'), ((2151, 2291), 'spinup.ppo_pytorch', 'ppo', ([], {'env_fn': 'env_fn', 'actor_critic': 'core.CNNActorCritic', 'ac_kwargs': 'ac_kwargs', 'steps_per_epoch': '(5000)', 'epochs': '(250)', 'logger_kwargs': 'logger_kwargs'}), '(env_fn=env_fn, actor_critic=core.CNNActorCritic, ac_kwargs=ac_kwargs,\n steps_per_epoch=5000, epochs=250, logger_kwargs=logger_kwargs)\n', (2154, 2291), True, 'from spinup import ppo_pytorch as ppo\n'), ((417, 483), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(84, 84, 1)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)\n', (431, 483), False, 'import gym\n'), ((940, 996), 'cv2.resize', 'cv2.resize', (['img', '(112, 84)'], {'interpolation': 'cv2.INTER_AREA'}), '(img, (112, 84), interpolation=cv2.INTER_AREA)\n', (950, 996), False, 'import cv2\n'), ((1063, 1091), 'numpy.reshape', 'np.reshape', (['y_t', '[84, 84, 1]'], {}), '(y_t, [84, 84, 1])\n', (1073, 1091), True, 'import numpy as np\n'), ((1400, 1468), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0.0)', 'high': '(1.0)', 'shape': 'new_shape', 'dtype': 'np.float32'}), '(low=0.0, high=1.0, shape=new_shape, dtype=np.float32)\n', (1414, 1468), False, 'import gym\n'), ((1538, 1568), 'numpy.moveaxis', 'np.moveaxis', (['observation', '(2)', '(0)'], {}), '(observation, 2, 0)\n', (1549, 1568), True, 'import numpy as np\n'), ((686, 718), 'numpy.reshape', 'np.reshape', (['frame', '[720, 960, 3]'], {}), '(frame, [720, 960, 3])\n', (696, 718), True, 'import numpy as np\n')] |
"""
np_rw_buffer.buffer
SeaLandAire Technologies
@author: jengel
Numpy circular buffer to help store audio data.
"""
import numpy as np
import threading
from .utils import make_thread_safe
from .circular_indexes import get_indexes
__all__ = ["UnderflowError", "get_shape_columns", "get_shape", "reshape", "RingBuffer", "RingBufferThreadSafe"]
UnderflowError = ValueError
def get_shape(shape):
    """Return the full ``(rows, columns, ...)`` tuple for the given shape.

    A one-dimensional shape reports zero columns; a bare number is treated
    as a row count.
    """
    try:
        rows, cols = shape[0], shape[1]
    except IndexError:
        # One-dimensional shape: no column entry.
        return (shape[0], 0) + shape[2:]
    except TypeError:
        # A plain number was supplied as the length.
        return int(shape), 0
    return (rows, cols) + shape[2:]
# get_shape
def get_shape_columns(shape):
    """Return the column count encoded in ``shape``, or 0 when it has none."""
    try:
        columns = shape[1]
    except (IndexError, TypeError):
        columns = 0
    return columns
# end get_shape_columns
def reshape(ring_buffer, shape):
    """Safely reshape the data.

    Works on either a RingBuffer (via its ``_data`` array) or a bare numpy
    array.  The buffer's read/write pointers are cleared afterwards, since
    resizing invalidates them.

    Args:
        ring_buffer (RingBuffer/np.ndarray/np.array): Array to reshape
        shape (tuple): New shape
    """
    try:
        buffer = ring_buffer._data
    except AttributeError:
        # A plain numpy array was given instead of a RingBuffer.
        buffer = ring_buffer
    new_shape = get_shape(shape)
    myshape = get_shape(buffer.shape)
    if new_shape[1] == 0:
        # Always keep at least one column.
        new_shape = (new_shape[0], 1) + new_shape[2:]
    if new_shape[0] == -1:
        try:  # Only change the column shape
            buffer.shape = new_shape
        except ValueError:  # Change the entire array shape
            # Keep the element count by deriving rows from the new column count.
            rows = int(np.ceil(myshape[0]/new_shape[1]))
            new_shape = (rows, ) + new_shape[1:]
            buffer.resize(new_shape, refcheck=False)
    else:
        # Force proper sizing
        # refcheck=False allows resizing even while other references exist.
        buffer.resize(new_shape, refcheck=False)
    # Clear the buffer if it did anything but grow in length
    # if not (new_shape[0] > myshape[0] and new_shape[1:] == myshape[1:]):
    try:
        ring_buffer.clear()
    except AttributeError:
        # Bare arrays have no pointers to reset.
        pass
def format_write_data(data, mydtype):
    """Coerce ``data`` into an array suitable for writing and return
    ``(data, shape)``.

    Inputs without a length or without a ``shape`` attribute are converted
    to a numpy array of ``mydtype``.  Arrays lacking a column dimension are
    reshaped to a single column.
    """
    try:
        len(data)            # TypeError when data has no length
        dshape = data.shape  # AttributeError when not a numpy array
    except (TypeError, AttributeError):
        data = np.asarray(data, dtype=mydtype)
        dshape = data.shape
    if get_shape_columns(dshape) == 0:
        # Force at least 1 column
        data = np.reshape(data, (-1, 1))
        dshape = data.shape
    return data, dshape
# end format_write_data
class RingBuffer(object):
"""Numpy circular buffer to help store audio data.
Args:
shape (tuple/int): Length of the buffer.
columns (int)[1]: Columns for the buffer.
dtype (numpy.dtype)[numpy.float32]: Numpy data type for the buffer.
"""
def __init__(self, shape, columns=None, dtype=np.float32):
self._start = 0
self._end = 0
self._length = 0
self.lock = threading.RLock()
# Configure the shape (check if the given length was really the shape)
if isinstance(shape, (tuple, list)):
shape = shape
if columns is not None and columns > 0:
shape = (shape[0], columns) + shape[2:]
else:
if columns is None:
columns = 1
shape = (shape, columns)
# Force columns
if get_shape_columns(shape) == 0:
shape = (shape[0], 1) + shape[2:]
# Create the data buffer
shape = tuple((int(np.ceil(i)) for i in shape))
self._data = np.zeros(shape=shape, dtype=dtype)
# end constructor
def clear(self):
"""Clear the data."""
self._start = 0
self._end = 0
self._length = 0
# end clear
def get_data(self):
"""Return the data in the buffer without moving the start pointer."""
idxs = self.get_indexes(self._start, self._length, self.maxsize)
return self._data[idxs].copy()
# end get_data
def set_data(self, data):
"""Set the data."""
self.dtype = data.dtype
self.shape = data.shape
self.clear()
self.expanding_write(data)
# end set_data
def _write(self, data, length, error, move_start=True):
"""Actually write the data to the numpy array.
Args:
data (np.array/np.ndarray): Numpy array of data to write. This should already be in the correct format.
length (int): Length of data to write. (This argument needs to be here for error purposes).
error (bool): Error on overflow else overrun the start pointer or move the start pointer to prevent
overflow (Makes it circular).
move_start (bool)[True]: If error is false should overrun occur or should the start pointer move.
Raises:
OverflowError: If error is True and more data is being written then there is space available.
"""
idxs = self.get_indexes(self._end, length, self.maxsize)
self.move_end(length, error, move_start)
self._data[idxs] = data
def expanding_write(self, data, error=True):
"""Write data into the buffer. If the data is larger than the buffer expand the buffer.
Args:
data (numpy.array): Data to write into the buffer.
error (bool)[True]: Error on overflow else overrun the start pointer or move the start pointer to prevent
overflow (Makes it circular).
Raises:
ValueError: If data shape does not match this shape. Arrays without a column will be convert to 1 column
Example: (5,) will become (5, 1) and will not error if there is 1 column
OverflowError: If the written data will overflow the buffer.
"""
data, shape = format_write_data(data, self.dtype)
length = shape[0]
if shape[1:] != self.shape[1:]:
msg = "could not broadcast input array from shape {:s} into shape {:s}".format(str(shape), str(self.shape))
raise ValueError(msg)
elif length > self.maxsize:
self.shape = (length, ) + self.shape[1:]
self._write(data, length, error)
# end expanding_write
def growing_write(self, data):
"""Write data into the buffer. If there is not enough available space then grow the buffer.
Args:
data (numpy.array): Data to write into the buffer.
Raises:
ValueError: If data shape does not match this shape. Arrays without a column will be convert to 1 column
Example: (5,) will become (5, 1) and will not error if there is 1 column
OverflowError: If the written data will overflow the buffer.
"""
data, shape = format_write_data(data, self.dtype)
length = shape[0]
available = self.get_available_space()
if shape[1:] != self.shape[1:]:
msg = "could not broadcast input array from shape {:s} into shape {:s}".format(str(shape), str(self.shape))
raise ValueError(msg)
elif length > available:
# Keep the old data and reshape
old_data = self.get_data()
self.shape = (self.maxsize + (length - available),) + self.shape[1:]
if len(old_data) > 0:
self._write(old_data, len(old_data), False)
self._write(data, length, error=True)
# end expanding_write
def write_value(self, value, length, error=True, move_start=True):
"""Write a value into the buffer for the given length. This is more efficient then creating and writing an array of a single value.
Args:
value (int/float/object): Value to put in the buffer
length (int): Number of blank samples
error (bool)[True]: Error on overflow else overrun the start pointer or move the start pointer to prevent
overflow (Makes it circular).
move_start (bool)[True]: If error is false should overrun occur or should the start pointer move.
Raises:
OverflowError: If the written data will overflow the buffer.
"""
if not error and length > self.maxsize:
length = self.maxsize
idxs = self.get_indexes(self._end, length, self.maxsize)
self.move_end(length, error, move_start)
self._data[idxs] = value
def write_zeros(self, length, error=True, move_start=True):
"""Write zeros into the buffer for the specified length.
Args:
length (int): Number of blank samples
error (bool)[True]: Error on overflow else overrun the start pointer or move the start pointer to prevent
overflow (Makes it circular).
move_start (bool)[True]: If error is false should overrun occur or should the start pointer move.
Raises:
OverflowError: If the written data will overflow the buffer.
"""
self.write_value(0, length, error=error, move_start=move_start)
def write(self, data, error=True):
"""Write data into the buffer.
Args:
data (numpy.array): Data to write into the buffer.
error (bool)[True]: Error on overflow else overrun the start pointer or move the start pointer to prevent
overflow (Makes it circular).
Raises:
ValueError: If data shape does not match this shape. Arrays without a column will be convert to 1 column
Example: (5,) will become (5, 1) and will not error if there is 1 column
OverflowError: If the written data will overflow the buffer.
"""
data, shape = format_write_data(data, self.dtype)
length = shape[0]
if shape[1:] != self.shape[1:]:
msg = "could not broadcast input array from shape {:s} into shape {:s}".format(str(shape), str(self.shape))
raise ValueError(msg)
elif not error and length > self.maxsize:
data = data[-self.maxsize:]
length = self.maxsize
self._write(data, length, error)
# end write
def read(self, amount=None):
"""Read the data and move the start/read pointer, so that data is not read again.
This method reads empty if the amount specified is greater than the amount in the buffer.
Args:
amount (int)[None]: Amount of data to read
"""
if amount is None:
amount = self._length
# Check available read size
if amount == 0 or amount > self._length:
return self._data[0:0].copy()
idxs = self.get_indexes(self._start, amount, self.maxsize)
self.move_start(amount)
return self._data[idxs].copy()
# end read
def read_remaining(self, amount=None):
"""Read the data and move the start/read pointer, so that the data is not read again.
This method reads the remaining data if the amount specified is greater than the amount in the buffer.
Args:
amount (int)[None]: Amount of data to read
"""
if amount is None or amount > self._length:
amount = self._length
# Check available read size
if amount == 0:
return self._data[0:0].copy()
idxs = self.get_indexes(self._start, amount, self.maxsize)
self.move_start(amount)
return self._data[idxs].copy()
# end read_remaining
def read_overlap(self, amount=None, increment=None):
"""Read the data and move the start/read pointer.
This method only increments the start/read pointer the given increment amount. This way the same data can be
read multiple times.
This method reads empty if the amount specified is greater than the amount in the buffer.
Args:
amount (int)[None]: Amount of data to read
increment (int)[None]: Amount to move the start/read pointer allowing overlap if increment is less than the
given amount.
"""
if amount is None:
amount = self._length
if increment is None:
increment = amount
# Check available read size
if amount == 0 or amount > self._length:
return self._data[0:0].copy()
idxs = self.get_indexes(self._start, amount, self.maxsize)
self.move_start(increment)
return self._data[idxs].copy()
# end read_overlap
def read_last(self, amount=None, update_rate=None):
"""Read the last amount of data and move the start/read pointer.
This is an odd method for FFT calculations. It reads the newest data moving the start pointer by the
update_rate amount that it was given. The returned skips number is the number of update_rate values.
Example:
.. code-block :: python
>>> buffer = RingBuffer(11, 1)
>>> buffer.write([0, 1, 2, 3, 4, 5, 6, 7 ,8, 9, 10])
>>> buffer.read_last(6, 2))
(array([[4.],
[5.],
[6.],
[7.],
[8.],
[9.]], dtype=float32), 3)
>>> # Note must read in a multiple of the amount and moves by a multiple of the update rate.
Args:
amount (int)[None]: Amount of data to read. NFFT value.
update_rate (int)[None]: The fft update rate value. How many samples to move the pointer by
to cause overlap.
Returns:
data (np.array/np.ndarray) [None]: Data that is of length amount.
updates (int) [0]: Number of updates (Total number of update rates until the end of the data was
found including the data that was returned).
"""
if amount is None:
amount = self._length
if update_rate is None:
update_rate = amount
# Check available read size
if amount == 0 or amount > self._length:
return None, 0
skips = (self._length - amount) // update_rate
if skips > 0:
self.move_start(update_rate * skips)
idxs = self.get_indexes(self._start, amount, self.maxsize)
self.move_start(update_rate)
return self._data[idxs].copy(), skips + 1
# end read_last
def __len__(self):
"""Return the current size of the buffer."""
return self._length
def __str__(self):
return self.get_data().__str__()
get_indexes = staticmethod(get_indexes)
def move_start(self, amount, error=True, limit_amount=True):
"""This is an internal method and should not need to be called by the user.
Move the start pointer the given amount (+/-).
Raises:
UnderflowError: If the amount is > the length.
Args:
amount (int): Amount to move the start pointer by.
error (bool)[True]: Raise a ValueError else sync the end pointer and length.
limit_amount (bool)[True]: If True force the amount to be less than or equal to the amount in the buffer.
"""
if amount == 0:
return
elif amount > self._length:
if error:
raise UnderflowError("Not enough data in the buffer " + repr(self))
if limit_amount:
# You cannot read more than what you have
amount = self._length
# end error
stop = self._start + amount
try:
self._start = stop % self.maxsize
except ZeroDivisionError:
self._start = stop
self.sync_length(False or amount < 0) # Length grows if amount was negative.
# end move_start
def move_end(self, amount, error=True, move_start=True):
"""This is an internal method and should not need to be called by the user.
Move the end pointer the given amount (+/-).
Raises:
OverflowError: If the amount is > the available buffer space.
Args:
amount (int): Amount to move the end pointer by.
error (bool)[True]: Raise an OverflowError else sync the start pointer and length.
move_start (bool)[True]: If True and amount > available move the start pointer with the end pointer.
"""
# Check for overflow
avaliable = self.maxsize - self._length
if amount == 0:
return
elif amount > 0 and amount > avaliable:
if error:
raise OverflowError("Not enough space in the buffer " + repr(self) +
" " + repr(len(self)) + " < " + repr(amount))
if move_start:
# Move the start to make it a circular
make_available = amount - avaliable
self.move_start(make_available, False) # Needs to move for sync_length
if amount > self.maxsize:
self.move_start(-(amount - self.maxsize) - 1, False) # Needs to move for sync_length
stop = self._end + amount
try:
self._end = stop % self.maxsize
except ZeroDivisionError:
self._end = stop
self.sync_length(True and amount >= 0) # Length shrinks if amount was negative.
# end move_end
def sync_length(self, should_grow=True):
    """Recompute ``_length`` from the start and end pointers.
    Args:
        should_grow (int): Disambiguates start == end: when truthy the
            buffer is treated as full, otherwise as empty.
            Writing can make full, reading empty.
    """
    capacity = self.maxsize
    # A zero-capacity buffer cannot hold anything (avoids modulo-by-zero).
    span = (self._end - self._start) % capacity if capacity else 0
    if span == 0 and should_grow:
        span = capacity
    self._length = span
# end sync_length
@property
def maxsize(self):
    """Return the maximum buffer size.

    Equal to the number of rows in the backing ``_data`` array.
    """
    backing = self._data
    return len(backing)
@maxsize.setter
def maxsize(self, value):
    """Resize the buffer to hold ``value`` rows and reset its contents."""
    rows = int(value)
    # Preserve any trailing (column) dimensions; only the row count changes.
    self.shape = (rows,) + self.shape[1:]
    self.clear()
def get_available_space(self):
    """Return how many more rows can be written before the buffer is full."""
    used = len(self)
    return self.maxsize - used
@property
def columns(self):
    """Return the number of columns (1 for flat/1-D data)."""
    try:
        ncols = self._data.shape[1]
    except (AttributeError, IndexError):
        # No `.shape` attribute or a 1-D array: treat as a single column.
        return 1
    return ncols or 1
@columns.setter
def columns(self, value):
    """Reshape the buffer to ``value`` columns and reset its contents."""
    trailing = self.shape[2:]
    self.shape = (self.maxsize, value) + trailing
    self.clear()
@property
def shape(self):
    """Return the shape tuple of the backing data array."""
    data = self._data
    return data.shape
@shape.setter
def shape(self, value):
    """Set the shape by delegating to the module-level ``reshape`` helper."""
    # NOTE(review): `reshape` here is a file-level helper (not numpy.reshape
    # directly) that rebuilds the backing array -- confirm in the full file.
    reshape(self, value)
@property
def dtype(self):
    """Return the numpy dtype of the stored data."""
    data = self._data
    return data.dtype
@dtype.setter
def dtype(self, dtype):
    """Change the element dtype, converting existing data when possible.

    Tries an in-place ``astype`` conversion; if anything at all goes wrong
    the backing array is recreated zero-filled with the requested dtype.
    The buffer pointers are reset either way via ``clear``.
    """
    try:
        self._data = self._data.astype(dtype)
    except Exception:
        # The original caught (AttributeError, ValueError, TypeError,
        # Exception); `Exception` already subsumes the others, so this is
        # the same catch-everything behavior stated plainly.
        self._data = np.zeros(shape=self.shape, dtype=dtype)
    self.clear()
# end class RingBuffer
class RingBufferThreadSafe(RingBuffer):
    """Thread-safe variant of :class:`RingBuffer`.

    Every inherited method is re-bound through ``make_thread_safe`` so that
    calls are serialized on a single reentrant lock (``self.lock``).

    Args:
        shape (tuple/int): Length (or full shape) of the buffer.
        columns (int)[None]: Columns for the buffer.
        dtype (numpy.dtype)[numpy.float32]: Numpy data type for the buffer.
    """
    def __init__(self, shape, columns=None, dtype=np.float32):
        # The RLock must exist before super().__init__ runs, because the
        # wrapped methods invoked during initialization acquire it.
        self.lock = threading.RLock()
        super().__init__(shape=shape, columns=columns, dtype=dtype)
    # end constructor
    # Re-bind the RingBuffer API through the locking wrapper.
    clear = make_thread_safe(RingBuffer.clear)
    get_data = make_thread_safe(RingBuffer.get_data)
    set_data = make_thread_safe(RingBuffer.set_data)
    expanding_write = make_thread_safe(RingBuffer.expanding_write)
    growing_write = make_thread_safe(RingBuffer.growing_write)
    write_value = make_thread_safe(RingBuffer.write_value)
    write_zeros = make_thread_safe(RingBuffer.write_zeros)
    write = make_thread_safe(RingBuffer.write)
    read = make_thread_safe(RingBuffer.read)
    read_remaining = make_thread_safe(RingBuffer.read_remaining)
    read_overlap = make_thread_safe(RingBuffer.read_overlap)
    read_last = make_thread_safe(RingBuffer.read_last)
    __len__ = make_thread_safe(RingBuffer.__len__)
    __str__ = make_thread_safe(RingBuffer.__str__)
    move_start = make_thread_safe(RingBuffer.move_start)
    move_end = make_thread_safe(RingBuffer.move_end)
    sync_length = make_thread_safe(RingBuffer.sync_length)
    get_available_space = make_thread_safe(RingBuffer.get_available_space)
    # NOTE(review): the four names below are *properties* on RingBuffer, so
    # make_thread_safe receives a property object rather than a plain
    # function -- confirm make_thread_safe handles descriptors, otherwise
    # these attributes lose their property behavior in this subclass.
    maxsize = make_thread_safe(RingBuffer.maxsize)
    columns = make_thread_safe(RingBuffer.columns)
    shape = make_thread_safe(RingBuffer.shape)
    dtype = make_thread_safe(RingBuffer.dtype)
| [
"numpy.ceil",
"numpy.reshape",
"numpy.asarray",
"threading.RLock",
"numpy.zeros"
] | [((2504, 2529), 'numpy.reshape', 'np.reshape', (['data', '(-1, 1)'], {}), '(data, (-1, 1))\n', (2514, 2529), True, 'import numpy as np\n'), ((3035, 3052), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (3050, 3052), False, 'import threading\n'), ((3647, 3681), 'numpy.zeros', 'np.zeros', ([], {'shape': 'shape', 'dtype': 'dtype'}), '(shape=shape, dtype=dtype)\n', (3655, 3681), True, 'import numpy as np\n'), ((19717, 19734), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (19732, 19734), False, 'import threading\n'), ((2220, 2251), 'numpy.asarray', 'np.asarray', (['data'], {'dtype': 'mydtype'}), '(data, dtype=mydtype)\n', (2230, 2251), True, 'import numpy as np\n'), ((2359, 2390), 'numpy.asarray', 'np.asarray', (['data'], {'dtype': 'mydtype'}), '(data, dtype=mydtype)\n', (2369, 2390), True, 'import numpy as np\n'), ((19270, 19309), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.shape', 'dtype': 'dtype'}), '(shape=self.shape, dtype=dtype)\n', (19278, 19309), True, 'import numpy as np\n'), ((1476, 1510), 'numpy.ceil', 'np.ceil', (['(myshape[0] / new_shape[1])'], {}), '(myshape[0] / new_shape[1])\n', (1483, 1510), True, 'import numpy as np\n'), ((3597, 3607), 'numpy.ceil', 'np.ceil', (['i'], {}), '(i)\n', (3604, 3607), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import numpy as np
import torch
from utils.utils import get_array_affine_header, get_largest_connected_region, get_2_largest_connected_region
from torch.utils.data import DataLoader
import tqdm
import distutils.dir_util
import nibabel
from skimage.transform import resize, rotate
from datasets.dataset_flare21 import tiny_dataset_flare21
from nets.whichnet import whichnet
import os
def listdir_nohidden(path):
    """Return the sorted, non-hidden entries of a directory.

    Entries whose name starts with '.' (Unix hidden files) are skipped.
    ``np.sort`` is kept so the returned elements stay numpy string
    scalars, exactly as in the original implementation.

    Args:
        path (str): Directory to list.
    Returns:
        list: Sorted file/directory names, hidden entries excluded.
    """
    # `x.startswith('.') == False` replaced by the idiomatic `not x.startswith('.')`,
    # and the manual append loop by a comprehension.
    return [f for f in np.sort(os.listdir(path)) if not f.startswith('.')]
def infer_flare21(net,
                  net_id,
                  anatomy,
                  output,
                  device,
                  vgg,
                  size):
    """Segment every CT volume found in ./inputs/ and save organ label maps.

    For each exam id found in ./inputs/, predicts per-slice class maps with
    *net*, splits them into per-organ binary masks (1=liver, 2=kidneys,
    3=spleen, 4=pancreas), resizes/rotates each mask back to the native CT
    slice geometry, keeps only the largest connected component(s) per organ,
    and writes the merged label volume to ``output`` as test_###.nii.gz.

    Args:
        net: Trained segmentation network, already moved to *device*.
        net_id: Network identifier (unused here; kept for interface parity).
        anatomy: Anatomy selector forwarded to the dataset.
        output (str): Output directory for the .nii.gz predictions.
        device: torch device used for inference.
        vgg: Flag forwarded to the dataset (input formatting).
        size (int): Slice size expected by the dataset/network.
    """
    list_ = listdir_nohidden('./inputs/')
    test_ids = []
    for elem_ in list_:
        # Filenames are expected to look like "<prefix>_<id>.nii[.gz]".
        if elem_.split('.')[1] == 'nii':
            test_ids.append(elem_.split('_')[1])
    # np.int was removed in NumPy 1.24; the builtin `int` is the documented
    # drop-in replacement (np.int was merely an alias for it).
    test_ids = np.array(test_ids, dtype=int)
    for index, id_ in enumerate(tqdm.tqdm(test_ids)):
        test_dataset = tiny_dataset_flare21(id_, size, anatomy, vgg)
        test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
        array, affine, header = get_array_affine_header(test_dataset, 'CT')
        # One full-size volume per organ, filled slice by slice below.
        array_liver = np.copy(array)
        array_kidneys = np.copy(array)
        array_spleen = np.copy(array)
        array_pancreas = np.copy(array)
        with torch.no_grad():
            for idx, data in enumerate(test_loader):
                image = data
                image = image.to(device=device, dtype=torch.float32)
                net.training = False
                prob = torch.softmax(net(image), dim=1)
                pred = torch.argmax(prob, dim=1).float()
                full_mask = pred.squeeze().cpu().numpy().swapaxes(0,1).astype(np.uint8)
                # Split the multi-class mask into per-organ binary masks.
                mask_liver = np.zeros(shape=full_mask.shape, dtype=np.uint8)
                mask_kidneys = np.zeros(shape=full_mask.shape, dtype=np.uint8)
                mask_spleen = np.zeros(shape=full_mask.shape, dtype=np.uint8)
                mask_pancreas = np.zeros(shape=full_mask.shape, dtype=np.uint8)
                mask_liver[np.where(full_mask==1)] = 1
                mask_kidneys[np.where(full_mask==2)] = 1
                mask_spleen[np.where(full_mask==3)] = 1
                mask_pancreas[np.where(full_mask==4)] = 1
                # Undo the dataset's rotation and resize back to the native
                # in-plane CT resolution.
                mask_liver = resize(rotate(mask_liver, -90, preserve_range=True), output_shape=(test_dataset.exam.CT.shape[0],test_dataset.exam.CT.shape[1]), preserve_range=True)
                mask_kidneys = resize(rotate(mask_kidneys, -90, preserve_range=True), output_shape=(test_dataset.exam.CT.shape[0],test_dataset.exam.CT.shape[1]), preserve_range=True)
                mask_spleen = resize(rotate(mask_spleen, -90, preserve_range=True), output_shape=(test_dataset.exam.CT.shape[0],test_dataset.exam.CT.shape[1]), preserve_range=True)
                mask_pancreas = resize(rotate(mask_pancreas, -90, preserve_range=True), output_shape=(test_dataset.exam.CT.shape[0],test_dataset.exam.CT.shape[1]), preserve_range=True)
                # Re-binarize after interpolation (resize produces floats).
                mask_liver[np.where(mask_liver>0.95)] = 1
                mask_liver[np.where(mask_liver!=1)] = 0
                mask_kidneys[np.where(mask_kidneys>0.95)] = 1
                mask_kidneys[np.where(mask_kidneys!=1)] = 0
                mask_spleen[np.where(mask_spleen>0.95)] = 1
                mask_spleen[np.where(mask_spleen!=1)] = 0
                mask_pancreas[np.where(mask_pancreas>0.95)] = 1
                mask_pancreas[np.where(mask_pancreas!=1)] = 0
                # Slice `idx` goes into the output volume, flipped vertically.
                array_liver[0:test_dataset.exam.CT.shape[0],0:test_dataset.exam.CT.shape[1],idx] = mask_liver[::-1,::]
                array_kidneys[0:test_dataset.exam.CT.shape[0],0:test_dataset.exam.CT.shape[1],idx] = mask_kidneys[::-1,::]
                array_spleen[0:test_dataset.exam.CT.shape[0],0:test_dataset.exam.CT.shape[1],idx] = mask_spleen[::-1,::]
                array_pancreas[0:test_dataset.exam.CT.shape[0],0:test_dataset.exam.CT.shape[1],idx] = mask_pancreas[::-1,::]
        # Remove spurious islands: single largest component per organ,
        # two components for the (paired) kidneys.
        array_liver = get_largest_connected_region(array_liver)
        array_kidneys = get_2_largest_connected_region(array_kidneys)
        array_spleen = get_largest_connected_region(array_spleen)
        array_pancreas = get_largest_connected_region(array_pancreas)
        # Merge the organ masks back into a single label volume.
        array[np.where(array_liver==1)] = 1
        array[np.where(array_kidneys==1)] = 2
        array[np.where(array_spleen==1)] = 3
        array[np.where(array_pancreas==1)] = 4
        prediction = nibabel.Nifti1Image(array.astype(np.uint16), affine=affine, header=header)
        nibabel.save(prediction, output+'test_%0*d'%(3,id_)+'.nii.gz')
        del prediction, test_dataset, array, array_liver, array_kidneys, array_spleen, array_pancreas
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
    model = './weights/epoch.pth'
    output = './outputs/'
    # distutils is deprecated (PEP 632) and removed in Python 3.12;
    # os.makedirs(..., exist_ok=True) is the stdlib replacement for
    # distutils.dir_util.mkpath (creates intermediate dirs, no error if present).
    os.makedirs(output, exist_ok=True)
    net_id = 1
    n_classes = 5 # 4 organs + background
    size = 512
    net, vgg = whichnet(net_id, n_classes)
    # Lazy %-style args avoid formatting when the level is disabled.
    logging.info("loading model %s", model)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logging.info(f'using device {device}')
    net.to(device=device)
    net.load_state_dict(torch.load(model, map_location=device))
    logging.info("model loaded !")
    infer_flare21(net = net,
                  net_id = net_id,
                  anatomy = 'all',
                  output = output,
                  device = device,
                  vgg = vgg,
                  size = size)
"datasets.dataset_flare21.tiny_dataset_flare21",
"numpy.array",
"torch.cuda.is_available",
"logging.info",
"utils.utils.get_2_largest_connected_region",
"os.listdir",
"numpy.where",
"skimage.transform.rotate",
"utils.utils.get_largest_connected_region",
"torch.argmax",
"nibabel.save",
"nets.wh... | [((975, 1007), 'numpy.array', 'np.array', (['test_ids'], {'dtype': 'np.int'}), '(test_ids, dtype=np.int)\n', (983, 1007), True, 'import numpy as np\n'), ((5157, 5233), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(levelname)s: %(message)s"""'}), "(level=logging.INFO, format='%(levelname)s: %(message)s')\n", (5176, 5233), False, 'import logging\n'), ((5460, 5487), 'nets.whichnet.whichnet', 'whichnet', (['net_id', 'n_classes'], {}), '(net_id, n_classes)\n', (5468, 5487), False, 'from nets.whichnet import whichnet\n'), ((5624, 5662), 'logging.info', 'logging.info', (['f"""using device {device}"""'], {}), "(f'using device {device}')\n", (5636, 5662), False, 'import logging\n'), ((5763, 5793), 'logging.info', 'logging.info', (['"""model loaded !"""'], {}), "('model loaded !')\n", (5775, 5793), False, 'import logging\n'), ((511, 527), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (521, 527), False, 'import os\n'), ((1047, 1066), 'tqdm.tqdm', 'tqdm.tqdm', (['test_ids'], {}), '(test_ids)\n', (1056, 1066), False, 'import tqdm\n'), ((1101, 1146), 'datasets.dataset_flare21.tiny_dataset_flare21', 'tiny_dataset_flare21', (['id_', 'size', 'anatomy', 'vgg'], {}), '(id_, size, anatomy, vgg)\n', (1121, 1146), False, 'from datasets.dataset_flare21 import tiny_dataset_flare21\n'), ((1178, 1231), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': '(1)', 'shuffle': '(False)'}), '(test_dataset, batch_size=1, shuffle=False)\n', (1188, 1231), False, 'from torch.utils.data import DataLoader\n'), ((1273, 1316), 'utils.utils.get_array_affine_header', 'get_array_affine_header', (['test_dataset', '"""CT"""'], {}), "(test_dataset, 'CT')\n", (1296, 1316), False, 'from utils.utils import get_array_affine_header, get_largest_connected_region, get_2_largest_connected_region\n'), ((1348, 1362), 'numpy.copy', 'np.copy', (['array'], {}), '(array)\n', (1355, 1362), True, 'import numpy as np\n'), ((1387, 
1401), 'numpy.copy', 'np.copy', (['array'], {}), '(array)\n', (1394, 1401), True, 'import numpy as np\n'), ((1425, 1439), 'numpy.copy', 'np.copy', (['array'], {}), '(array)\n', (1432, 1439), True, 'import numpy as np\n'), ((1465, 1479), 'numpy.copy', 'np.copy', (['array'], {}), '(array)\n', (1472, 1479), True, 'import numpy as np\n'), ((4403, 4444), 'utils.utils.get_largest_connected_region', 'get_largest_connected_region', (['array_liver'], {}), '(array_liver)\n', (4431, 4444), False, 'from utils.utils import get_array_affine_header, get_largest_connected_region, get_2_largest_connected_region\n'), ((4469, 4514), 'utils.utils.get_2_largest_connected_region', 'get_2_largest_connected_region', (['array_kidneys'], {}), '(array_kidneys)\n', (4499, 4514), False, 'from utils.utils import get_array_affine_header, get_largest_connected_region, get_2_largest_connected_region\n'), ((4538, 4580), 'utils.utils.get_largest_connected_region', 'get_largest_connected_region', (['array_spleen'], {}), '(array_spleen)\n', (4566, 4580), False, 'from utils.utils import get_array_affine_header, get_largest_connected_region, get_2_largest_connected_region\n'), ((4606, 4650), 'utils.utils.get_largest_connected_region', 'get_largest_connected_region', (['array_pancreas'], {}), '(array_pancreas)\n', (4634, 4650), False, 'from utils.utils import get_array_affine_header, get_largest_connected_region, get_2_largest_connected_region\n'), ((4946, 5015), 'nibabel.save', 'nibabel.save', (['prediction', "(output + 'test_%0*d' % (3, id_) + '.nii.gz')"], {}), "(prediction, output + 'test_%0*d' % (3, id_) + '.nii.gz')\n", (4958, 5015), False, 'import nibabel\n'), ((5718, 5756), 'torch.load', 'torch.load', (['model'], {'map_location': 'device'}), '(model, map_location=device)\n', (5728, 5756), False, 'import torch\n'), ((1502, 1517), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1515, 1517), False, 'import torch\n'), ((4665, 4691), 'numpy.where', 'np.where', (['(array_liver == 1)'], {}), 
'(array_liver == 1)\n', (4673, 4691), True, 'import numpy as np\n'), ((4709, 4737), 'numpy.where', 'np.where', (['(array_kidneys == 1)'], {}), '(array_kidneys == 1)\n', (4717, 4737), True, 'import numpy as np\n'), ((4755, 4782), 'numpy.where', 'np.where', (['(array_spleen == 1)'], {}), '(array_spleen == 1)\n', (4763, 4782), True, 'import numpy as np\n'), ((4800, 4829), 'numpy.where', 'np.where', (['(array_pancreas == 1)'], {}), '(array_pancreas == 1)\n', (4808, 4829), True, 'import numpy as np\n'), ((5577, 5602), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5600, 5602), False, 'import torch\n'), ((2018, 2065), 'numpy.zeros', 'np.zeros', ([], {'shape': 'full_mask.shape', 'dtype': 'np.uint8'}), '(shape=full_mask.shape, dtype=np.uint8)\n', (2026, 2065), True, 'import numpy as np\n'), ((2097, 2144), 'numpy.zeros', 'np.zeros', ([], {'shape': 'full_mask.shape', 'dtype': 'np.uint8'}), '(shape=full_mask.shape, dtype=np.uint8)\n', (2105, 2144), True, 'import numpy as np\n'), ((2175, 2222), 'numpy.zeros', 'np.zeros', ([], {'shape': 'full_mask.shape', 'dtype': 'np.uint8'}), '(shape=full_mask.shape, dtype=np.uint8)\n', (2183, 2222), True, 'import numpy as np\n'), ((2255, 2302), 'numpy.zeros', 'np.zeros', ([], {'shape': 'full_mask.shape', 'dtype': 'np.uint8'}), '(shape=full_mask.shape, dtype=np.uint8)\n', (2263, 2302), True, 'import numpy as np\n'), ((2347, 2371), 'numpy.where', 'np.where', (['(full_mask == 1)'], {}), '(full_mask == 1)\n', (2355, 2371), True, 'import numpy as np\n'), ((2404, 2428), 'numpy.where', 'np.where', (['(full_mask == 2)'], {}), '(full_mask == 2)\n', (2412, 2428), True, 'import numpy as np\n'), ((2460, 2484), 'numpy.where', 'np.where', (['(full_mask == 3)'], {}), '(full_mask == 3)\n', (2468, 2484), True, 'import numpy as np\n'), ((2518, 2542), 'numpy.where', 'np.where', (['(full_mask == 4)'], {}), '(full_mask == 4)\n', (2526, 2542), True, 'import numpy as np\n'), ((2615, 2659), 'skimage.transform.rotate', 'rotate', 
(['mask_liver', '(-90)'], {'preserve_range': '(True)'}), '(mask_liver, -90, preserve_range=True)\n', (2621, 2659), False, 'from skimage.transform import resize, rotate\n'), ((2796, 2842), 'skimage.transform.rotate', 'rotate', (['mask_kidneys', '(-90)'], {'preserve_range': '(True)'}), '(mask_kidneys, -90, preserve_range=True)\n', (2802, 2842), False, 'from skimage.transform import resize, rotate\n'), ((2978, 3023), 'skimage.transform.rotate', 'rotate', (['mask_spleen', '(-90)'], {'preserve_range': '(True)'}), '(mask_spleen, -90, preserve_range=True)\n', (2984, 3023), False, 'from skimage.transform import resize, rotate\n'), ((3161, 3208), 'skimage.transform.rotate', 'rotate', (['mask_pancreas', '(-90)'], {'preserve_range': '(True)'}), '(mask_pancreas, -90, preserve_range=True)\n', (3167, 3208), False, 'from skimage.transform import resize, rotate\n'), ((3355, 3382), 'numpy.where', 'np.where', (['(mask_liver > 0.95)'], {}), '(mask_liver > 0.95)\n', (3363, 3382), True, 'import numpy as np\n'), ((3413, 3438), 'numpy.where', 'np.where', (['(mask_liver != 1)'], {}), '(mask_liver != 1)\n', (3421, 3438), True, 'import numpy as np\n'), ((3488, 3517), 'numpy.where', 'np.where', (['(mask_kidneys > 0.95)'], {}), '(mask_kidneys > 0.95)\n', (3496, 3517), True, 'import numpy as np\n'), ((3550, 3577), 'numpy.where', 'np.where', (['(mask_kidneys != 1)'], {}), '(mask_kidneys != 1)\n', (3558, 3577), True, 'import numpy as np\n'), ((3626, 3654), 'numpy.where', 'np.where', (['(mask_spleen > 0.95)'], {}), '(mask_spleen > 0.95)\n', (3634, 3654), True, 'import numpy as np\n'), ((3686, 3712), 'numpy.where', 'np.where', (['(mask_spleen != 1)'], {}), '(mask_spleen != 1)\n', (3694, 3712), True, 'import numpy as np\n'), ((3763, 3793), 'numpy.where', 'np.where', (['(mask_pancreas > 0.95)'], {}), '(mask_pancreas > 0.95)\n', (3771, 3793), True, 'import numpy as np\n'), ((3827, 3855), 'numpy.where', 'np.where', (['(mask_pancreas != 1)'], {}), '(mask_pancreas != 1)\n', (3835, 3855), True, 'import 
numpy as np\n'), ((1850, 1875), 'torch.argmax', 'torch.argmax', (['prob'], {'dim': '(1)'}), '(prob, dim=1)\n', (1862, 1875), False, 'import torch\n')] |
#!/usr/bin/env python3
# This script will load Qs# chunk pickles according to the omnipickle and
# combine them into complete Qs pickles
#
# Qs_omnipickle = {period_path: Qs_pickle_paths}
#
# Qs# pickles are panda dataframes directly translated from the raw txt files
import matplotlib
import matplotlib.pyplot as plt
import os
from os.path import join as pjoin
import numpy as np
import pandas as pd
from time import asctime
from time import sleep
# From Helpyr
from helpyr import data_loading
from helpyr import logger
from helpyr import crawler as helpyr_crawler
from helpyr import helpyr_misc as hm
from omnipickle_manager import OmnipickleManager
import global_settings as settings
# Primary Pickle Processor takes raw Qs and Qsn pickles and condenses them into
# one Qs pickle for each period. ie. Does processing within each period.
# Secondary Pickle Processor figures out the relationship between period
# pickles. Finds missing periods and accumulated time.
# Tertiary Pickle Processor checks the lighttable data relative to other data.
# eg. scale the data using the sieve data. Requires running other programs
# first.
# To do:
# Create way to delete bad sections in combined Qs
class PrimaryPickleProcessor:
# Outline:
# 1) load Qs_metapickle
# 2) load Qs pickles for a period
# 3) error check raw qs dataframes
# - conflict between qs and qs#?
# 4) combine qs dataframes
# 5) error check combined qs dataframe
# Note: The primary processor uses attribute data (eg. self.variable) to
# get information into functions rather than repetitively pass a list of
# arguments. In retrospect, kwargs may have been a better choice because
# using attribute variables that update every iteration of a for loop makes
# it hard to follow in such a large class.
# Short codes attached to log warnings and to `lingering_errors` so that
# later checks can recognize what went wrong earlier in the pipeline.
error_codes = {
        'CQF' : "Conflicting Qs files",
        'NDF' : "No Data Found",
        'MMD' : "Mismatched Data",
        }
def __init__(self, output_txt=False):
    """Set up paths, the logger, and the pickle loader.

    Args:
        output_txt (bool)[False]: If True, also write combined txt files
            alongside the output pickles.
    """
    # File locations
    self.root_dir = settings.root_dir
    self.pickle_source = settings.Qs_raw_pickles
    self.pickle_destination = settings.Qs_primary_pickles
    self.txt_destination = f"{self.root_dir}/combined-txts"
    self.log_filepath = "./log-files/Qs_primary_processor.txt"
    self.metapickle_name = "Qs_metapickle"
    self.statspickle_name = "Qs_summary_stats"
    self.output_txt = output_txt
    # tolerance for difference between files
    # This value is more to highlight very different dataframes than have
    # any physical meaning.
    self.difference_tolerance = 0.02
    # Start up logger
    self.logger = logger.Logger(self.log_filepath, default_verbose=True)
    hm.ensure_dir_exists(self.pickle_destination, self.logger)
    self.logger.write(["Begin Primary Pickle Processor output", asctime()])
    # Start up loader (reads raw pickles, writes primary pickles)
    self.loader = data_loading.DataLoader(self.pickle_source,
            self.pickle_destination, self.logger)
def run(self):
    """Process every period listed in the metapickle.

    Loads the Qs metapickle, runs the per-period pipeline
    (``process_period``) for each entry, then builds and saves the
    summary-statistics dataframe. Counters track how many raw pickles
    were read and how many combined pickles were produced.
    """
    self.logger.write(["Running pickle processor..."])
    indent_function = self.logger.run_indented_function
    # Load Qs_metapickle
    self.metapickle = self.loader.load_pickle(self.metapickle_name)
    self.raw_file_counter = 0
    self.combined_file_counter = 0
    self.summary_stats = {} # (pkl_name, stat_type) : stat_row}
    self.pd_summary_stats = None
    for period_path in self.metapickle:
        # attribute data to be reset every period
        self.lingering_errors = [] # error for secondary check to look at
        self.Qs_path_list = [] # list of Qs#.txt file paths
        self.Qs0_data = None # data for Qs.txt
        self.Qsn_data = [] # data for Qs#.txt
        self.Qsn_names = [] # Names of Qs# files
        self.current_period_path = period_path # is also the metapickle key
        self.combined_Qs = None
        self.accumulating_overlap = None
        # Get meta info
        _, experiment, step, rperiod = hm.nsplit(self.current_period_path, 3)
        # rperiod carries an 8-character prefix before the period name --
        # presumably "results-"; confirm against the directory layout.
        period = rperiod[8:]
        msg = f"Processing {experiment} {step} {period}..."
        self.pkl_name = '_'.join(['Qs', experiment, step, period])
        indent_function(self.process_period, before_msg=msg)
    # Make a summary stats dataframe
    self.pd_summary_stats = pd.DataFrame.from_dict(
            self.summary_stats, orient='index')
    self.update_summary_stats()
    # Save summary stats pickle/txt files
    indent_function(self.produce_stats_pickle,
            before_msg="Producing statistics pickle",
            after_msg="Statistics pickle produced!")
    if self.output_txt:
        indent_function(self.write_stats_txt,
                before_msg="Writing statistics txt",
                after_msg="Done!")
    self.logger.write([f"{self.raw_file_counter} raw pickles processed",
            f"{self.combined_file_counter} combined pickles produced"])
    self.logger.end_output()
def process_period(self):
    """Run the full per-period pipeline unless the output pickle exists."""
    if self.loader.is_pickled(self.pkl_name):
        self.logger.write(["Nothing to do"])
        return
    run_step = self.logger.run_indented_function
    # Pipeline stages, in execution order: (function, start msg, end msg).
    stages = [
        (self.load_data,
         "Loading data...", "Finished loading data!"),
        (self.primary_error_check,
         "Running primary error checks...", "Finished primary error checks!"),
        (self.combine_Qsn_chunks,
         "Combining Qs chunks...", "Finished combining Qs chunks!"),
        (self.secondary_error_check,
         "Running secondary error checks...", "Finished secondary error checks!"),
        (self.calculate_stats,
         "Calculating summary stats...", "Summary stats calculated!"),
        (self.produce_processed_pickle,
         "Producing processed pickles...", "Processed pickles produced!"),
    ]
    # Optionally also dump a combined txt file at the end.
    if self.output_txt:
        stages.append((self.write_combined_txt,
                       "Writing combined txt file...", "Done writing file!"))
    for stage, start_msg, end_msg in stages:
        run_step(stage, before_msg=start_msg, after_msg=end_msg)
def load_data(self):
    """Load every raw Qs pickle for the current period.

    Populates ``Qs0_data`` (the single Qs.txt frame, if present) and the
    parallel ``Qsn_data`` / ``Qsn_names`` lists (the Qs#.txt chunk frames).
    """
    # The path list was pre-sorted when the metapickle was built.
    self.Qs_path_list = self.metapickle[self.current_period_path]
    period_data = self.loader.load_pickles(self.Qs_path_list, add_path=False)
    for path in self.Qs_path_list:
        # e.g. ".../some_dir/..._Qs3.pkl" -> filename "..._Qs3.pkl" -> "Qs3"
        filename = hm.nsplit(path, 1)[1]
        base_name = filename.split('.')[0]
        chunk_name = base_name.split('_')[-1]
        frame = period_data[path]
        self.raw_file_counter += 1
        if chunk_name == 'Qs':
            # Only one bare Qs.txt is allowed per period.
            assert(self.Qs0_data is None)
            self.Qs0_data = frame
        else:
            assert(chunk_name[2:].isdigit())
            self.Qsn_data.append(frame)
            self.Qsn_names.append(chunk_name)
def primary_error_check(self):
    """Flag periods where both Qs.txt and Qs#.txt data were found (CQF)."""
    # 3) error check raw qs dataframes: conflict between Qs and Qs#?
    has_conflict = self.Qs0_data is not None and bool(self.Qsn_data)
    if not has_conflict:
        return
    error_msg = PrimaryPickleProcessor.error_codes['CQF']
    names = ', '.join(self.Qsn_names)
    self.logger.warning([error_msg,
                         "Qs.txt and Qs#.txt files both exist",
                         f"Qs#.txt: {names}"])
    # Recorded so the secondary checks can inspect it later.
    self.lingering_errors.append(error_msg)
def combine_Qsn_chunks(self):
    """Assemble the Qs#.txt chunk dataframes into one combined dataframe.

    Each Qs# file covers only part of the period (zeros elsewhere).
    Rows claimed by more than one chunk are set to NaN, since there is no
    way to choose which chunk's values to keep; the boolean mask of such
    rows is saved in ``accumulating_overlap``.
    Sets ``combined_Qs`` (or returns early if there are no chunks).
    """
    # 4) combine qs dataframes
    # The data is split up into multiple chunks (each Qs# file is a chunk).
    # This functions assembles them into a complete Qs dataframe.
    # Overlapping rows are converted to nan because I can't see any way to
    # choose which one to keep.
    if not self.Qsn_data:
        self.logger.write("No chunks to combine.")
        return
    # Start from a NaN-filled frame shaped like a chunk, keeping timestamps.
    combined = self._make_like_df(self.Qsn_data[0], ['timestamp'])
    accumulating_overlap = None
    exclude_cols = ['timestamp', 'missing ratio', 'vel', 'sd vel', 'number vel']
    target_cols = hm.exclude_df_cols(combined, exclude_cols)
    # Set up a few lambda functions
    get_num = lambda s: int(s[2:]) # get the file number from the name
    get_target_subset = lambda c: c.loc[:, target_cols]
    # Find rows with data. Should remove meta columns beforehand
    # Will select rows with non-null values (selects zero rows)
    find_data_rows = lambda df: df.notnull().all(axis=1)
    for raw_chunk, name in zip(self.Qsn_data, self.Qsn_names):
        # Each raw dataframe contains only a chunk of the overall data.
        # However they contain zero values for all times outside of the
        # valid chunk time. Some chunks overlap too.
        ch_num, max_num = get_num(name), get_num(self.Qsn_names[-1])
        self.logger.write(f"Processing chunk {ch_num} of {max_num}")
        # Get bedload subsets
        bedload_chunk = get_target_subset(raw_chunk)
        bedload_combined = get_target_subset(combined)
        # Find rows with data
        chunk_rows = find_data_rows(bedload_chunk)
        combined_rows = find_data_rows(bedload_combined)
        # Find overlap: rows where both this chunk and an earlier chunk
        # have complete data.
        overlap_rows = chunk_rows & combined_rows
        # Add chunk to combined array
        # NOTE(review): the `1:` column slice presumably skips the
        # timestamp column (already copied) -- confirm column order.
        combined.loc[chunk_rows, 1:] = raw_chunk[chunk_rows]
        combined.loc[overlap_rows, 1:] = np.nan
        # Keep track of overlap rows
        if accumulating_overlap is None:
            accumulating_overlap = overlap_rows
        else:
            accumulating_overlap = accumulating_overlap | overlap_rows
    self.combined_Qs = combined
    self.accumulating_overlap = accumulating_overlap
def _make_like_df(self, like_df, columns_to_copy=None, fill_val=np.nan):
    """Build a new DataFrame shaped like *like_df*, filled with *fill_val*.

    Args:
        like_df (pd.DataFrame): Template providing shape, columns, index.
        columns_to_copy (iterable): Column names whose values are copied
            from *like_df* instead of being filled. Defaults to none.
            (Was a mutable default ``[]``; ``None`` sentinel avoids the
            shared-mutable-default pitfall with identical behavior.)
        fill_val: Value written to every non-copied cell (NaN by default).
    Returns:
        pd.DataFrame: The new frame; *like_df* is not modified.
    """
    if columns_to_copy is None:
        columns_to_copy = ()
    np_like = np.empty_like(like_df.values)
    np_like.fill(fill_val)
    pd_like = pd.DataFrame(np_like,
            columns=like_df.columns, index=like_df.index)
    for column in columns_to_copy:
        pd_like.loc[:, column] = like_df.loc[:, column]
    return pd_like
def secondary_error_check(self):
    """Clean the combined dataframe and run the remaining sanity checks.

    Chooses ``final_output`` (raw vs combined), NaNs out partially-missing
    rows and outlier rows above the lighttable cutoff, then applies the
    period-specific special cases, overlap check, and row-count fix.
    """
    # 5) error check combined qs dataframe
    self.final_output = None
    ## Check for diff between raw_Qs and Qs_combined
    # NOTE(review): if neither raw nor combined data exists (the NDF case)
    # final_output stays None and the .isnull() below would raise --
    # presumably that situation never survives this far; confirm.
    self._check_diff_raw_combined()
    ## Set rows with any Nan values to entirely Nan values
    nan_rows = self.final_output.isnull().any(axis=1)
    self.final_output.loc[nan_rows, 'missing ratio':] = np.nan
    ## Set outliers to Nan
    max_threshold = settings.lighttable_bedload_cutoff
    trim_rows = self.final_output['Bedload all'] > max_threshold
    trim_count = np.sum(trim_rows)
    if trim_count > 0:
        trim_vals = self.final_output.loc[trim_rows, 'Bedload all']
        str_trim_vals = [f'{v:0.2f}' for v in np.sort(trim_vals.values)]
        trim_sum = np.sum(trim_vals)
        total_sum = np.sum(self.final_output['Bedload all'])
        self.final_output.loc[trim_rows, 'missing ratio':] = np.nan
        self.logger.write([
            f"{trim_count} points are above the cutoff value of {max_threshold}" +
            f" ({trim_sum/1000:0.3f} kg of {total_sum/1000:0.3f} kg; " +
            f"{trim_sum/total_sum:0.2%} )",
            f"{list(str_trim_vals)}"])
        #self.final_output.hist(column='Bedload all', bins=50)
        #plt.show()
    else:
        self.logger.write("No values needed to be trimmed")
    ## Deal with special cases
    self._check_special_cases()
    ## Check for accumulated overlap
    self._check_accumulated_overlap()
    ## Check for total number of rows
    self._fix_n_rows()
def _check_diff_raw_combined(self):
    """Select ``final_output`` from raw vs combined Qs (warn if neither)."""
    raw = self.Qs0_data
    combined = self.combined_Qs
    if raw is not None and combined is not None:
        # Both sources present: compare them in detail (sets final_output).
        self._difference_check()
    elif raw is None and combined is None:
        msg = PrimaryPickleProcessor.error_codes['NDF']
        self.logger.warning([msg,
            "Both the raw Qs pickle and combined Qs df are missing."])
    else:
        # Exactly one source exists; use it as-is.
        if raw is not None:
            source_name, self.final_output = "raw Qs", raw
        else:
            source_name, self.final_output = "combined Qs", combined
        self.logger.write(f"Only {source_name} found." +
                "No difference check needed.")
def _difference_check(self):
    """Compare the Qs.txt frame against the combined Qs#.txt frame.

    Counts rows that genuinely differ (rows NaN in both frames and the
    velocity/missing-ratio columns are ignored), warns with an MMD code
    if any are found, and in every case selects the combined data as
    ``final_output``.
    """
    # Look at the difference between the Qs.txt and Qs combined data.
    raw_Qs = self.Qs0_data
    combined_Qs = self.combined_Qs
    # Element-wise bool difference between dataframes
    Qs_diff = (combined_Qs != raw_Qs)
    # Rows that have Nan values in both dataframes will be thrown out and
    # should not count towards the difference.
    # Rows that started with a value and ended with Nan should count. (such
    # as overlap rows)
    # (NaN != NaN is True, so both-NaN rows must be masked explicitly.)
    Qs_both_nan = combined_Qs.isnull() & raw_Qs.isnull()
    both_nan_rows = Qs_both_nan.any(axis=1)
    Qs_diff.loc[both_nan_rows, :] = False
    # Ignore columns that are likely to be different and don't seem to have
    # any practical value. (I think....?)
    exclude_cols = ['missing ratio', 'vel', 'sd vel', 'number vel']
    Qs_diff.loc[:, exclude_cols] = False
    # Isolate the rows and columns where values are different
    #Qs_diff.loc[0,:] = False # ignore first row
    diff_rows = Qs_diff.any(axis=1)
    diff_cols = Qs_diff.any(axis=0)
    any_diff = diff_rows.any()
    if any_diff:
        # Get some metrics on difference
        diff_rows_count = diff_rows.sum()
        rows_count = diff_rows.shape[0]
        diff_ratio = diff_rows_count / rows_count
        tolerance = self.difference_tolerance
        is_tolerant = '' if diff_ratio < tolerance else ' NOT'
        error_msg = PrimaryPickleProcessor.error_codes['MMD']
        msgs = [error_msg,
                f"Difference ratio of {diff_ratio:.3f} is{is_tolerant} within tolerance of {tolerance}.",
                f"{diff_rows_count} conflicting rows found out of {rows_count}",
                f"Using combined Qs data",
                ]
        self.logger.warning(msgs)
        # Write differing rows/cols to log
        diff_raw_Qs = raw_Qs.loc[diff_rows, diff_cols]
        diff_combined = combined_Qs.loc[diff_rows, diff_cols]
        self.logger.write_dataframe(diff_raw_Qs, "Raw Qs")
        self.logger.write_dataframe(diff_combined, "Combined Qs")
        #if diff_ratio < diff_tolerance:
        #    raise NotImplementedError
        self.final_output = combined_Qs
        # default to using the combined output
    else:
        self.logger.write(["Qs.txt matches combined Qs chunk data",
                "(Excluding velocity columns and missing ratio)"])
        self.final_output = combined_Qs
def _check_accumulated_overlap(self):
    """Log the timestamps of rows that were overlapped between chunks."""
    overlap = self.accumulating_overlap
    # Nothing to report without combined data or without any overlap.
    # (Short-circuit keeps `.any()` off a None overlap, as before.)
    if self.combined_Qs is None or not overlap.any():
        return
    overlapped_times = self.combined_Qs.loc[overlap, 'timestamp']
    formatted = overlapped_times.to_string(float_format="%f")
    self.logger.write(["The following timestamps were overlapped: "])
    self.logger.write(formatted.split('\n'), local_indent=1)
def _check_special_cases(self):
_, experiment, step, rperiod = hm.nsplit(self.current_period_path, 3)
period = rperiod[8:]
# Deal with special cases
# See analysis_notes.txt for more info
special_case = ('1A', 'rising-62L', 't40-t60')
if special_case == (experiment, step, period):
self.logger.write(f"Addressing special case {special_case}")
# delete several chunks of bad data from the text file
# Longer chunk may be from pausing the lighttable program, shorter
# chunks may be due to low frame rate
#
# Delete in reverse order (lastest first) so line numbers don't
# shift for the next deletion.
self.final_output = self._delete_chunk(1631, 1638, self.final_output)
self.final_output = self._delete_chunk(726, 1076, self.final_output)
self.final_output = self._delete_chunk(709, 714, self.final_output)
self.final_output = self._delete_chunk(622, 703, self.final_output)
self.final_output = self._delete_chunk(548, 561, self.final_output)
self.final_output = self._delete_chunk(494, 526, self.final_output)
self.final_output = self._delete_chunk(412, 417, self.final_output)
self.final_output = self._delete_chunk(390, 397, self.final_output)
special_case = ('2A', 'rising-50L', 't00-t60')
if special_case == (experiment, step, period):
self.logger.write(f"Addressing special case {special_case}")
# delete lines 1343 to 3916 from the text file
# Appears that I forgot to stop the lighttable program when pausing
# the flow to clean out the trap
self.final_output = self._delete_chunk(1343, 3916, self.final_output)
special_case = ('3A', 'rising-75L', 't00-t20')
if special_case == (experiment, step, period):
print(f"Addressing special case {special_case}")
# delete lines 925 to 2368 from the text file
# Appears that I forgot to stop the lighttable program when pausing
# the flow to clean out the trap
self.final_output = self._delete_chunk(925, 2368, self.final_output)
special_cases = [
# These special cases appear to have high variability from the
# graphs
('1B', 'rising-62L', 't20-t40'),
('1B', 'rising-62L', 't40-t60'),
('2A', 'rising-87L', 't00-t20'),
('2A', 'rising-87L', 't20-t40'),
('2B', 'falling-87L', 't00-t20'),
('2B', 'falling-87L', 't20-t40'),
('2B', 'falling-87L', 't40-t60'),
('2B', 'falling-75L', 't00-t20'),
('2B', 'falling-75L', 't20-t40'),
('2B', 'falling-75L', 't40-t60'),
('3A', 'falling-87L', 't00-t20'),
('3A', 'falling-87L', 't20-t40'),
('3A', 'falling-87L', 't40-t60'),
('5A', 'rising-62L', 't00-t20'),
('5A', 'rising-62L', 't20-t40'),
('5A', 'rising-75', 't00-t20'),
]
if (experiment, step, period) in special_cases:
self.logger.write_blankline(2)
self.logger.write(f"Suspicious case {(experiment, step, period)}")
self.logger.write_blankline(2)
sleep(3)
#assert(False)
    def _delete_chunk(self, fstart, fend, data):
        # For ease of use, fstart and fend are the line numbers in the
        # combined Qs file. The data between those lines (inclusive) will
        # be deleted and the timestamps reset to remove any gap
        #
        # Args:
        #     fstart (int): first file line to delete (inclusive).
        #     fend (int): last file line to delete (inclusive).
        #     data (pd.DataFrame): combined Qs data with a 'timestamp' column.
        # Returns:
        #     pd.DataFrame: `data` without the chunk, timestamps contiguous.
        #
        # The -2 presumably converts file line numbers to dataframe row
        # positions (header line plus one-based counting) -- TODO confirm.
        start, end = fstart-2, fend-2
        self.logger.write(f"Deleting data lines {start} through {end}.")
        # Delete the lines
        index = data.index
        output = data[(index < start) | (end < index)]
        # Fix timestamps and indices
        # Note, using the values method gets references to the underlying
        # data locations. Therefore I can change them without setting of a
        # copy warnings.
        # Shift every timestamp after the deleted chunk back by its length
        # so the remaining timestamps stay contiguous.
        output.loc[:, 'timestamp'].values[start:] -= end - start + 1
        output.set_index('timestamp', inplace=True)
        output.reset_index(inplace=True)
        return output
    def _fix_n_rows(self):
        # Force the final output to exactly one row per second of the period.
        # eg. trim to 1200 rows for a 20 minute period (1 row / sec)
        _, experiment, step, rperiod = hm.nsplit(self.current_period_path, 3)
        period = rperiod[8:]
        # Period label looks like 't00-t20'; parse start/end minutes.
        start, end = [int(t[1:]) for t in period.split(sep='-')]
        target_n_rows = abs(end - start) * 60 # one row per second
        n_rows, ncols = self.final_output.shape
        def check_if_extra(row_idx):
            # A row is "extra" if it is entirely empty or records zero
            # bedload and zero particle count.
            row = self.final_output.iloc[row_idx, :]
            empty = row.isnull().any()
            zero = (row[['Bedload all', 'Count all']] == 0).all()
            return empty or zero
        # Delete extra lines from end
        while n_rows > target_n_rows and check_if_extra(-1):
            self.final_output = self.final_output.iloc[:-1, :]
            n_rows, ncols = self.final_output.shape
            self.logger.write(f"Dropping last row (new n_rows = {n_rows})")
        # Delete extra lines from beginning
        while n_rows > target_n_rows and check_if_extra(0):
            self.final_output = self.final_output.iloc[1:, :]
            n_rows, ncols = self.final_output.shape
            self.logger.write(f"Dropping first row (new n_rows = {n_rows})")
        # Add or delete rows
        n_rows, ncols = self.final_output.shape
        if n_rows > target_n_rows:
            # Delete data from head (~25%) and tail (~75%) to reach desired #
            # of rows
            n_drop = n_rows - target_n_rows
            self.logger.write(f"Need to drop {n_drop} rows containing data")
            head_drop = n_drop // 4
            tail_drop = n_drop - head_drop
            total_mass = self.final_output.loc[:, 'Bedload all'].sum()
            head = self.final_output.iloc[0:head_drop, :]
            tail = self.final_output.iloc[-tail_drop:, :]
            self.final_output = self.final_output.iloc[head_drop:-tail_drop, :]
            self.previous_tail = tail
            head_mass = head.loc[:, 'Bedload all'].sum()
            tail_mass = tail.loc[:, 'Bedload all'].sum()
            # Fraction of the period's total bedload mass being trimmed away.
            loss_ratio = (head_mass + tail_mass) / total_mass
            self.logger.write([
                f"Dropping {head_drop}s ({head_mass:0.2f}g) from head.",
                f"Dropping {tail_drop}s ({tail_mass:0.2f}g) from tail",
                f"Original total mass = {total_mass:0.2f}g ({loss_ratio:0.2%} trimmed)"])
            if 0.04 < loss_ratio <= 0.05:
                # 4-5% trimmed: warn loudly and pause, but continue.
                self.logger.write_blankline(2)
                self.logger.write(f"### {loss_ratio:0.2%} is high! ###")
                self.logger.write_blankline(2)
                sleep(4)
            elif 0.05 < loss_ratio:
                # More than 5% trimmed: halt for manual inspection.
                self.logger.write_blankline(2)
                self.logger.write(f"### {loss_ratio:0.2%} is above tolerance! ###")
                self.logger.write_blankline(2)
                assert(False)
        elif n_rows < target_n_rows:
            # Too few rows: pad the end with NaN rows whose timestamps
            # continue on from the last real row.
            n_needed = target_n_rows - n_rows
            self.logger.write(f"Too few rows. Adding {n_needed} blank rows.")
            last_row = self.final_output.iloc[-1, :]
            last_timestamp = last_row['timestamp']
            n_cols = last_row.shape[0]
            needed_range = np.arange(n_needed)
            np_empty = np.empty((n_needed, n_cols))
            np_empty.fill(np.nan)
            # First column is the timestamp; continue it past the last row.
            np_empty[:,0] = (needed_range + 1 + last_timestamp).T
            pd_empty = pd.DataFrame(np_empty,
                    columns=last_row.index, index=needed_range + n_rows)
            self.final_output = pd.concat([self.final_output, pd_empty])
        n_rows, ncols = self.final_output.shape
        self.logger.write(f"Final number of rows = {n_rows}")
def calculate_stats(self):
# Calc column averages and sums
name = self.pkl_name
data = self.final_output
av = data.mean(axis=0)
sum = data.sum(axis=0)
nans = data.isnull().sum(axis=0)
for stat, row in zip(['av', 'sum', 'nans'],[av, sum, nans]):
key = (name, stat)
# Add the series data to the summary stats dict
# The dict will be converted into a multiindexed dataframe later
self.summary_stats[key] = row
#row_str = row.to_string()
#msg = f"Stats for {key} : {row_str}"
#self.logger.write(msg)
def produce_processed_pickle(self):
if self.final_output is not None:
prepickles = {self.pkl_name : self.final_output}
# prepickles is a dictionary of {'pickle name':data}
self.loader.produce_pickles(prepickles)
self.combined_file_counter += 1
else:
error_msg = PrimaryPickleProcessor.error_codes['NDF']
self.logger.warning([error_msg,
f"Pickle not created for {self.pkl_name}"])
def write_combined_txt(self):
filename = f"{self.pkl_name}.txt"
filepath = os.path.join(self.txt_destination, filename)
data = self.final_output
self.loader.save_txt(data, filepath, is_path=True)
    def update_summary_stats(self):
        """Merge the newly computed summary stats into any existing stats
        pickle, keeping old rows whose index is not being replaced."""
        summary_stats = self.pd_summary_stats
        pkl_name = self.statspickle_name
        if summary_stats.empty:
            self.logger.write(["No new stats. Nothing to do."])
            return
        if self.loader.is_pickled(pkl_name):
            self.logger.write(["Stats pickle already exists. Updating..."])
            old_stats = self.loader.load_pickle(pkl_name, use_source=False)
            # NOTE(review): isin() is passed the DataFrame itself, so
            # membership is tested against whatever iterating the frame
            # yields -- presumably summary_stats.index was intended; confirm.
            unchanged_indices = ~old_stats.index.isin(summary_stats)
            new_indices_strs = summary_stats.index.levels[0].__str__().split('\n')
            summary_stats = pd.concat([old_stats[unchanged_indices],
                    summary_stats])
            self.logger.write(["Updated index values are:"] + new_indices_strs)
        else:
            self.logger.write(["Making new stats pickle. Updating..."])
        # Store the merged stats; produce_stats_pickle will pickle them later.
        self.pd_summary_stats = summary_stats
def produce_stats_pickle(self):
summary_stats = self.pd_summary_stats
pkl_name = self.statspickle_name
prepickles = {pkl_name : summary_stats}
self.loader.produce_pickles(prepickles)
self.combined_file_counter += 1
def write_stats_txt(self):
filename = f"{self.statspickle_name}.txt"
filepath = os.path.join(self.txt_destination, filename)
data = self.pd_summary_stats
kwargs = {'index' : True,
'header' : True,
}
self.loader.save_txt(data, filepath, kwargs=kwargs, is_path=True)
class SecondaryPickleProcessor:
    """Second processing stage for the Qs light-table data.

    Restores the primary Qs pickles, adds accumulated experiment time
    (and discharge) columns to each period's dataframe, and stores the
    results as secondary pickles via the omnipickle manager.
    """
    def __init__(self):
        # File locations
        self.root_dir = settings.root_dir
        self.pickle_source = settings.Qs_primary_pickles
        self.pickle_destination = settings.Qs_secondary_pickles
        self.log_filepath = "./log-files/Qs_secondary_processor.txt"
        # Start up logger
        self.logger = logger.Logger(self.log_filepath, default_verbose=True)
        self.logger.write(["Begin Secondary Pickle Processor output", asctime()])
        # Start up loader
        self.loader = data_loading.DataLoader(self.pickle_source,
                self.pickle_destination, logger=self.logger)
    def run(self):
        """Execute the secondary pipeline: find pickles, load data,
        accumulate time, then save the updated pickles."""
        self.omnimanager = OmnipickleManager(self.logger)
        #self.experiments = {}
        indent_function = self.logger.run_indented_function
        indent_function(self.load_pickle_info,
                before_msg="Getting pickle info", after_msg="Finished!")
        indent_function(self.load_data,
                before_msg="Loading data", after_msg="Data Loaded!")
        indent_function(self.accumulate_time,
                before_msg="Accumulating time", after_msg="Time accumulated!")
        indent_function(self.save_pickles,
                before_msg="Updating pickles", after_msg="Pickles updated!")
        self.logger.end_output()
    def load_pickle_info(self):
        # Fill out the experiments dict with {experiment code : Experiment}
        # Create PeriodData and Experiment objects
        # Find files
        crawler = helpyr_crawler.Crawler(logger=self.logger)
        crawler.set_root(self.pickle_source)
        pkl_filepaths = crawler.get_target_files("Qs_??_*L_t??-t??.pkl", verbose_file_list=False)
        crawler.end()
        self.omnimanager.Qs_build_experiment_tree(pkl_filepaths)
        # NOTE(review): one period is added by hand here -- presumably it
        # is missing from the crawl; confirm it is still required.
        self.omnimanager.manually_add_period('3B', 'rising', '62L', 't20-t40')
    def load_data(self):
        # Reload the primary Qs dataframes for every period in the tree.
        self.omnimanager.reload_Qs_data()
    def accumulate_time(self):
        # Apply exp_accumulate_time to each experiment in the tree.
        self.omnimanager.Qs_accumulate_time(self.exp_accumulate_time)
    def save_pickles(self):
        #self.omnimanager.Qs_finish_secondary_pickles(self.pickle_destination)
        self.omnimanager.store(overwrite={'Qs-secondary' : True})
    # Functions that operate "within" an Experiment or PeriodData object
    # Could be in part of those class defs, but I want them to be more like
    # containers (so reduce clutter functions).
    def exp_accumulate_time(self, experiment):
        # This function makes a sequential timestamp for a period's Qs data.
        # Saves it as a new column in the data
        exp_code = experiment.code
        self.logger.write(f"Accumulating time for experiment {exp_code}")
        self.logger.increase_global_indent()
        accumulate = 0
        prev_period_data = None
        for rank in experiment.sorted_ranks:
            period_data = experiment.periods[rank]
            #self.logger.write(f"Accumulating time for {period_data.Qs_pkl_name}")
            if not period_data.has_Qs:
                continue
            # Account for gaps in the periods
            if prev_period_data is not None:
                gap = self.get_gap(period_data, prev_period_data)
                accumulate += gap
                #accumulate += gap + 1
                ## +1 is so the last row of prev does not overlap with the first
                ## row of next (index 0)
            # Calculate the new seconds column
            #seconds = period_data.Qs_data.index.values
            #start = seconds[0]
            #if seconds[-1] - start + 1 != seconds.size:
            #    # Something weird is happening with the timestamps
            #    print(period_data.name)
            #    assert(False)
            #seconds += accumulate - start + 1
            # One entry per row, starting at 1, offset by everything
            # accumulated from earlier periods (plus any gap).
            seconds = np.arange(period_data.Qs_data.index.values.size) + 1
            seconds += accumulate
            accumulate = seconds [-1]
            # Save the experiment time as new columns in the dataframe
            Qs_primary = period_data.data_dict['Qs-primary']
            Qs_data = Qs_primary.data
            Qs_data['exp_time'] = seconds
            Qs_data['exp_time_hrs'] = seconds / 3600
            # Bad programming form here... Easiest place to add a discharge
            # column too
            discharge = period_data.discharge_int
            Qs_data['discharge'] = discharge * np.ones_like(seconds)
            pickle_path = self.omnimanager.generate_Qs_secondary_picklepath(
                    period_data, self.pickle_destination)
            misc = {'Qs_pkl_name' : Qs_primary.misc['Qs_pkl_name']}
            period_data.add_Qs_secondary_data(pickle_path,
                    specific_data=Qs_data, misc=misc)
            prev_period_data = period_data
        self.logger.decrease_global_indent()
        self.logger.write("Final accumulations times (start (hrs), end (hrs), name):")
        self.logger.increase_global_indent()
        pkl_name_fu = lambda p: p._Qs_dataset.misc['Qs_pkl_name']
        for rank in experiment.sorted_ranks:
            period_data = experiment.periods[rank]
            #period_data.Qs_data.set_index('exp_time', inplace=True)
            # log some stuff
            if not period_data.has_Qs:
                continue
            #new_index = np.round(period_data.Qs_data.index.values / 360)/10 #hrs
            #first, last = [new_index[i] for i in (0, -1)]
            exp_time_hrs = period_data.Qs_data['exp_time_hrs']
            first, last = [exp_time_hrs.iloc[i] for i in (0, -1)]
            self.logger.write(f"{first:0.1f}, {last:0.1f}, {pkl_name_fu(period_data)}")
        self.logger.decrease_global_indent()
    def get_gap(self, curr, prev):
        # Calculate the gap between current and previous periods.
        # returns the expected seconds between the two periods
        # Build step order
        limbs = ['r']*5 + ['f']*3
        discharges = [50, 62, 75, 87, 100, 87, 75, 62]
        step_order = [f"{l}{d}L" for l, d in zip(limbs, discharges)]
        curr_step = curr.step
        prev_step = prev.step
        # Find index of steps
        curr_index = step_order.index(curr_step)
        prev_index = step_order.index(prev_step)
        # Difference of 0 or 1 is okay.
        index_diff = curr_index - prev_index
        # Get period start and end info
        period_ints = lambda p: [int(t[1:]) for t in p.period_range.split('-')]
        curr_start, curr_end = period_ints(curr)
        prev_start, prev_end = period_ints(prev)
        # Calculate gap in minutes
        step_duration = 60 # 1 hour in minutes
        gap = step_duration * index_diff + curr_start - prev_end
        pkl_name_fu = lambda p: p._Qs_dataset.misc['Qs_pkl_name']
        if gap > 0:
            # Error, missing data
            self.logger.warning(["Missing data",
                f"Missing {gap} minutes of data " +
                f"between {pkl_name_fu(prev)} and {pkl_name_fu(curr)}"])
        else:
            # Data is okay.
            self.logger.write(
                f"No gap from {pkl_name_fu(prev)} to {pkl_name_fu(curr)}")
        return gap * 60 # return gap in seconds
class TertiaryPickleProcessor:
    """Third processing stage (skeleton).

    Sets up the logger, loader, and omnipickle manager; the actual
    processing steps in run() are still commented out.
    """
    def __init__(self, root_dir):
        # NOTE(review): the root_dir parameter is unused -- the root
        # directory is taken from settings instead. Confirm whether the
        # parameter can be removed without breaking callers.
        # File locations
        self.root_dir = settings.lighttable_data_dir
        self.pickle_source = settings.Qs_secondary_pickles
        self.pickle_destination = settings.Qs_tertiary_pickles
        self.log_filepath = "./log-files/Qs_tertiary_processor.txt"
        omnipickle_path = settings.omnipickle_path
        # Start up logger
        self.logger = logger.Logger(self.log_filepath, default_verbose=True)
        hm.ensure_dir_exists(self.pickle_destination, self.logger)
        self.logger.write(["Begin Tertiary Pickle Processor output", asctime()])
        # Start up loader
        self.loader = data_loading.DataLoader(self.pickle_source,
                self.pickle_destination, self.logger)
        # Reload omnimanager
        self.omnimanager = OmnipickleManager(self.logger)
        self.omnimanager.restore()
        self.omnimanager.update_tree_definitions()
    def run(self):
        self.logger.write(["Running tertiary pickle processor..."])
        indent_function = self.logger.run_indented_function
        # Planned steps (not yet implemented):
        # Remove large values based on frame rates
        # Scale data from the sieve data
        # indent_function(self.write_stats_txt,
        #         before_msg="Writing statistics txt",
        #         after_msg="Done!")
        self.logger.end_output()
if __name__ == "__main__":
    # Entry point: run the primary stage, then the secondary stage.
    primary_stage = PrimaryPickleProcessor(output_txt=True)
    primary_stage.run()
    secondary_stage = SecondaryPickleProcessor()
    secondary_stage.run()
| [
"time.sleep",
"helpyr.helpyr_misc.exclude_df_cols",
"numpy.arange",
"numpy.sort",
"pandas.DataFrame.from_dict",
"numpy.empty",
"pandas.DataFrame",
"helpyr.crawler.Crawler",
"helpyr.helpyr_misc.nsplit",
"numpy.ones_like",
"helpyr.helpyr_misc.ensure_dir_exists",
"time.asctime",
"os.path.join",... | [((2738, 2792), 'helpyr.logger.Logger', 'logger.Logger', (['self.log_filepath'], {'default_verbose': '(True)'}), '(self.log_filepath, default_verbose=True)\n', (2751, 2792), False, 'from helpyr import logger\n'), ((2801, 2859), 'helpyr.helpyr_misc.ensure_dir_exists', 'hm.ensure_dir_exists', (['self.pickle_destination', 'self.logger'], {}), '(self.pickle_destination, self.logger)\n', (2821, 2859), True, 'from helpyr import helpyr_misc as hm\n'), ((2989, 3075), 'helpyr.data_loading.DataLoader', 'data_loading.DataLoader', (['self.pickle_source', 'self.pickle_destination', 'self.logger'], {}), '(self.pickle_source, self.pickle_destination, self.\n logger)\n', (3012, 3075), False, 'from helpyr import data_loading\n'), ((4482, 4540), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['self.summary_stats'], {'orient': '"""index"""'}), "(self.summary_stats, orient='index')\n", (4504, 4540), True, 'import pandas as pd\n'), ((8885, 8927), 'helpyr.helpyr_misc.exclude_df_cols', 'hm.exclude_df_cols', (['combined', 'exclude_cols'], {}), '(combined, exclude_cols)\n', (8903, 8927), True, 'from helpyr import helpyr_misc as hm\n'), ((10816, 10845), 'numpy.empty_like', 'np.empty_like', (['like_df.values'], {}), '(like_df.values)\n', (10829, 10845), True, 'import numpy as np\n'), ((10895, 10962), 'pandas.DataFrame', 'pd.DataFrame', (['np_like'], {'columns': 'like_df.columns', 'index': 'like_df.index'}), '(np_like, columns=like_df.columns, index=like_df.index)\n', (10907, 10962), True, 'import pandas as pd\n'), ((11698, 11715), 'numpy.sum', 'np.sum', (['trim_rows'], {}), '(trim_rows)\n', (11704, 11715), True, 'import numpy as np\n'), ((16792, 16830), 'helpyr.helpyr_misc.nsplit', 'hm.nsplit', (['self.current_period_path', '(3)'], {}), '(self.current_period_path, 3)\n', (16801, 16830), True, 'from helpyr import helpyr_misc as hm\n'), ((21256, 21294), 'helpyr.helpyr_misc.nsplit', 'hm.nsplit', (['self.current_period_path', '(3)'], {}), 
'(self.current_period_path, 3)\n', (21265, 21294), True, 'from helpyr import helpyr_misc as hm\n'), ((26001, 26045), 'os.path.join', 'os.path.join', (['self.txt_destination', 'filename'], {}), '(self.txt_destination, filename)\n', (26013, 26045), False, 'import os\n'), ((27504, 27548), 'os.path.join', 'os.path.join', (['self.txt_destination', 'filename'], {}), '(self.txt_destination, filename)\n', (27516, 27548), False, 'import os\n'), ((28132, 28186), 'helpyr.logger.Logger', 'logger.Logger', (['self.log_filepath'], {'default_verbose': '(True)'}), '(self.log_filepath, default_verbose=True)\n', (28145, 28186), False, 'from helpyr import logger\n'), ((28318, 28411), 'helpyr.data_loading.DataLoader', 'data_loading.DataLoader', (['self.pickle_source', 'self.pickle_destination'], {'logger': 'self.logger'}), '(self.pickle_source, self.pickle_destination, logger\n =self.logger)\n', (28341, 28411), False, 'from helpyr import data_loading\n'), ((28470, 28500), 'omnipickle_manager.OmnipickleManager', 'OmnipickleManager', (['self.logger'], {}), '(self.logger)\n', (28487, 28500), False, 'from omnipickle_manager import OmnipickleManager\n'), ((29305, 29347), 'helpyr.crawler.Crawler', 'helpyr_crawler.Crawler', ([], {'logger': 'self.logger'}), '(logger=self.logger)\n', (29327, 29347), True, 'from helpyr import crawler as helpyr_crawler\n'), ((35405, 35459), 'helpyr.logger.Logger', 'logger.Logger', (['self.log_filepath'], {'default_verbose': '(True)'}), '(self.log_filepath, default_verbose=True)\n', (35418, 35459), False, 'from helpyr import logger\n'), ((35468, 35526), 'helpyr.helpyr_misc.ensure_dir_exists', 'hm.ensure_dir_exists', (['self.pickle_destination', 'self.logger'], {}), '(self.pickle_destination, self.logger)\n', (35488, 35526), True, 'from helpyr import helpyr_misc as hm\n'), ((35657, 35743), 'helpyr.data_loading.DataLoader', 'data_loading.DataLoader', (['self.pickle_source', 'self.pickle_destination', 'self.logger'], {}), '(self.pickle_source, 
self.pickle_destination, self.\n logger)\n', (35680, 35743), False, 'from helpyr import data_loading\n'), ((35821, 35851), 'omnipickle_manager.OmnipickleManager', 'OmnipickleManager', (['self.logger'], {}), '(self.logger)\n', (35838, 35851), False, 'from omnipickle_manager import OmnipickleManager\n'), ((4135, 4173), 'helpyr.helpyr_misc.nsplit', 'hm.nsplit', (['self.current_period_path', '(3)'], {}), '(self.current_period_path, 3)\n', (4144, 4173), True, 'from helpyr import helpyr_misc as hm\n'), ((11915, 11932), 'numpy.sum', 'np.sum', (['trim_vals'], {}), '(trim_vals)\n', (11921, 11932), True, 'import numpy as np\n'), ((11957, 11997), 'numpy.sum', 'np.sum', (["self.final_output['Bedload all']"], {}), "(self.final_output['Bedload all'])\n", (11963, 11997), True, 'import numpy as np\n'), ((20162, 20170), 'time.sleep', 'sleep', (['(3)'], {}), '(3)\n', (20167, 20170), False, 'from time import sleep\n'), ((26767, 26823), 'pandas.concat', 'pd.concat', (['[old_stats[unchanged_indices], summary_stats]'], {}), '([old_stats[unchanged_indices], summary_stats])\n', (26776, 26823), True, 'import pandas as pd\n'), ((2928, 2937), 'time.asctime', 'asctime', ([], {}), '()\n', (2935, 2937), False, 'from time import asctime\n'), ((7233, 7254), 'helpyr.helpyr_misc.nsplit', 'hm.nsplit', (['Qs_path', '(1)'], {}), '(Qs_path, 1)\n', (7242, 7254), True, 'from helpyr import helpyr_misc as hm\n'), ((23712, 23720), 'time.sleep', 'sleep', (['(4)'], {}), '(4)\n', (23717, 23720), False, 'from time import sleep\n'), ((24301, 24320), 'numpy.arange', 'np.arange', (['n_needed'], {}), '(n_needed)\n', (24310, 24320), True, 'import numpy as np\n'), ((24345, 24373), 'numpy.empty', 'np.empty', (['(n_needed, n_cols)'], {}), '((n_needed, n_cols))\n', (24353, 24373), True, 'import numpy as np\n'), ((24497, 24572), 'pandas.DataFrame', 'pd.DataFrame', (['np_empty'], {'columns': 'last_row.index', 'index': '(needed_range + n_rows)'}), '(np_empty, columns=last_row.index, index=needed_range + n_rows)\n', (24509, 
24572), True, 'import pandas as pd\n'), ((24621, 24661), 'pandas.concat', 'pd.concat', (['[self.final_output, pd_empty]'], {}), '([self.final_output, pd_empty])\n', (24630, 24661), True, 'import pandas as pd\n'), ((28257, 28266), 'time.asctime', 'asctime', ([], {}), '()\n', (28264, 28266), False, 'from time import asctime\n'), ((31580, 31628), 'numpy.arange', 'np.arange', (['period_data.Qs_data.index.values.size'], {}), '(period_data.Qs_data.index.values.size)\n', (31589, 31628), True, 'import numpy as np\n'), ((32170, 32191), 'numpy.ones_like', 'np.ones_like', (['seconds'], {}), '(seconds)\n', (32182, 32191), True, 'import numpy as np\n'), ((35596, 35605), 'time.asctime', 'asctime', ([], {}), '()\n', (35603, 35605), False, 'from time import asctime\n'), ((11865, 11890), 'numpy.sort', 'np.sort', (['trim_vals.values'], {}), '(trim_vals.values)\n', (11872, 11890), True, 'import numpy as np\n')] |
import logging
import torch.utils.data
import dtoolcore
import numpy as np
from PIL import Image
class WrappedDataSet(torch.utils.data.Dataset):
    """Adapter exposing a dtool DataSet through the pytorch Dataset API.

    Every method and property simply delegates to the wrapped
    dtoolcore.DataSet instance, so consumers can treat this object as
    either kind of dataset.

    Args:
        uri: URI for enclosed dtool DataSet.
    """

    def __init__(self, uri):
        self.dataset = dtoolcore.DataSet.from_uri(uri)

    def put_overlay(self, overlay_name, overlay):
        """Store an overlay on the wrapped dataset."""
        self.dataset.put_overlay(overlay_name, overlay)

    def get_annotation(self, annotation_name):
        """Fetch an annotation from the wrapped dataset."""
        return self.dataset.get_annotation(annotation_name)

    @property
    def name(self):
        """Name of the wrapped dataset."""
        return self.dataset.name

    @property
    def uri(self):
        """URI of the wrapped dataset."""
        return self.dataset.uri

    @property
    def uuid(self):
        """UUID of the wrapped dataset."""
        return self.dataset.uuid
def scaled_float_array_to_pil_image(array):
    """Convert an array of floats to a PIL image.

    Args:
        array (np.ndarray): Array representing an image. Expected to be float
            and normalised between 0 and 1.

    Returns:
        A PIL Image object created from the array

    Raises:
        ValueError: If the array rank or channel count is unsupported.
    """
    ndim = len(array.shape)
    if ndim > 3:
        raise ValueError(f"Can't handle array of shape {array.shape}")
    # Scale [0, 1] floats up to 8-bit integer pixel values.
    intarray = (255 * array).astype(np.uint8)
    if ndim == 2:
        return Image.fromarray(intarray)
    if ndim == 3:
        # Channels-first -> channels-last, as PIL expects.
        hwc = np.transpose(intarray, (1, 2, 0))
        channels = hwc.shape[2]
        if channels == 1:
            return Image.fromarray(hwc.squeeze())
        if channels == 3:
            return Image.fromarray(hwc)
        raise ValueError(f"Can't handle image with {channels} channels")
    raise ValueError(f"Can't handle array with shape {array.shape}")
def coerce_to_fixed_size_rgb(im, target_dim):
    """Convert a PIL image to a fixed size and 3 channel RGB format.

    Args:
        im: PIL Image in 'RGB' or 'L' (grayscale) mode.
        target_dim (tuple): (width, height) to resize to.

    Returns:
        A resized PIL Image in RGB mode.

    Raises:
        Exception: If the image mode is neither 'RGB' nor 'L'.
    """
    if im.mode not in ['RGB', 'L']:
        raise Exception(f"Unknown image mode: {im.mode}")
    resized_im = im.resize(target_dim)
    # Fixed: was `im.mode is 'RGB'` -- identity comparison of strings
    # relies on CPython interning and raises SyntaxWarning; use equality.
    if im.mode == 'RGB':
        return resized_im
    return resized_im.convert('RGB')
class ImageDataSet(WrappedDataSet):
    """Class allowing a collection of images annotated with categories to be
    used as both a Pytorch Dataset and a dtool DataSet."""

    def __init__(self, uri, usetype='train'):
        super().__init__(uri)
        self.loaded_images = {}
        self.cat_lookup = self.dataset.get_overlay('category')
        self.cat_encoding = self.dataset.get_annotation('category_encoding')
        self.image_dim = 256, 256
        try:
            # Restrict to items tagged with the requested usetype, when
            # that overlay exists; otherwise use every item.
            usetype_overlay = self.dataset.get_overlay('usetype')
            self.identifiers = [
                identifier for identifier in self.dataset.identifiers
                if usetype_overlay[identifier] == usetype
            ]
        except dtoolcore.DtoolCoreKeyError:
            self.identifiers = self.dataset.identifiers
        # Map a dense integer index onto each dataset identifier.
        self.idn_lookup = dict(enumerate(self.identifiers))

    def __len__(self):
        return len(self.identifiers)

    def __getitem__(self, index):
        idn = self.idn_lookup[index]
        if idn not in self.loaded_images:
            abspath = self.dataset.item_content_abspath(idn)
            logging.debug(f"Loading {abspath}")
            image = Image.open(abspath)
            rgb_image = coerce_to_fixed_size_rgb(image, self.image_dim)
            # Channels-last PIL layout -> channels-first float tensor.
            channels_first = np.moveaxis(np.array(rgb_image), 2, 0)
            self.loaded_images[idn] = channels_first.astype(np.float32) / 255
        return self.loaded_images[idn], self.cat_encoding[self.cat_lookup[idn]]

    @property
    def input_channels(self):
        return 3

    @property
    def dim(self):
        return self.image_dim[0]
class TensorDataSet(WrappedDataSet):
    """Class that allows numpy arrays to be accessed as both a pytorch
    Dataset and a dtool DataSet."""

    def __init__(self, uri):
        super().__init__(uri)
        # The data tensor's item id is stored as an annotation; the labels
        # file has a deterministic identifier derived from its name.
        data_idn = self.dataset.get_annotation("tensor_file_idn")
        self.X = np.load(
            self.dataset.item_content_abspath(data_idn), mmap_mode=None)
        labels_idn = dtoolcore.utils.generate_identifier("labels.npy")
        self.y = np.load(
            self.dataset.item_content_abspath(labels_idn), mmap_mode=None)
        self.image_dim = self.dataset.get_annotation("image_dimensions")
        self.tensor = self.X
        self.labels = self.y

    def __len__(self):
        return self.tensor.shape[0]

    def __getitem__(self, index):
        label = self.labels[index]
        # Scale raw 8-bit values into [0, 1] floats.
        scaled = self.tensor[index].astype(np.float32) / 255
        return scaled.reshape(*self.image_dim), int(label)

    @property
    def input_channels(self):
        """The number of channels each tensor provides."""
        return self.image_dim[0]

    @property
    def dim(self):
        """The linear dimensions of the tensor, e.g. it is dim x dim in
        shape."""
        return self.image_dim[1]
def create_tensor_dataset_from_arrays(
    output_base_uri, output_name, data_array, label_array, image_dim, readme_content
):
    """Create a dtool DataSet with the necessary annotations to be used as a
    TensorDataSet.

    Args:
        output_base_uri: The base URI where the dataset will be created.
        output_name: The name for the output dataset.
        data_array (ndarray): The numpy array holding data.
        label_array (ndarray): The numpy array holding labels.
        image_dim (tuple): Dimensions to which input images should be reshaped.
        readme_content (string): Content that will be used to create README.yml
            in the created dataset.

    Returns:
        URI: The URI of the created dataset
    """
    with dtoolcore.DataSetCreator(output_name, output_base_uri) as creator:
        # Stage both numpy arrays as dataset items.
        for item_name, array in (('data.npy', data_array),
                                 ('labels.npy', label_array)):
            staging_path = creator.prepare_staging_abspath_promise(item_name)
            np.save(staging_path, array)
        # Record which item holds the data tensor plus the metadata a
        # TensorDataSet needs to interpret it.
        data_idn = dtoolcore.utils.generate_identifier('data.npy')
        creator.put_annotation("tensor_file_idn", data_idn)
        creator.put_annotation("image_dimensions", image_dim)
        creator.put_annotation("dtoolAI.inputtype", "TensorDataSet")
        creator.put_readme(readme_content)
        return creator.uri
| [
"PIL.Image.fromarray",
"numpy.transpose",
"dtoolcore.DataSet.from_uri",
"dtoolcore.utils.generate_identifier",
"numpy.array",
"numpy.save",
"numpy.load",
"dtoolcore.DataSetCreator"
] | [((6277, 6324), 'dtoolcore.utils.generate_identifier', 'dtoolcore.utils.generate_identifier', (['"""data.npy"""'], {}), "('data.npy')\n", (6312, 6324), False, 'import dtoolcore\n'), ((479, 510), 'dtoolcore.DataSet.from_uri', 'dtoolcore.DataSet.from_uri', (['uri'], {}), '(uri)\n', (505, 510), False, 'import dtoolcore\n'), ((1433, 1458), 'PIL.Image.fromarray', 'Image.fromarray', (['intarray'], {}), '(intarray)\n', (1448, 1458), False, 'from PIL import Image\n'), ((4271, 4305), 'numpy.load', 'np.load', (['npy_fpath'], {'mmap_mode': 'None'}), '(npy_fpath, mmap_mode=None)\n', (4278, 4305), True, 'import numpy as np\n'), ((4328, 4377), 'dtoolcore.utils.generate_identifier', 'dtoolcore.utils.generate_identifier', (['"""labels.npy"""'], {}), "('labels.npy')\n", (4363, 4377), False, 'import dtoolcore\n'), ((4464, 4501), 'numpy.load', 'np.load', (['labels_fpath'], {'mmap_mode': 'None'}), '(labels_fpath, mmap_mode=None)\n', (4471, 4501), True, 'import numpy as np\n'), ((5973, 6027), 'dtoolcore.DataSetCreator', 'dtoolcore.DataSetCreator', (['output_name', 'output_base_uri'], {}), '(output_name, output_base_uri)\n', (5997, 6027), False, 'import dtoolcore\n'), ((6113, 6144), 'numpy.save', 'np.save', (['data_fpath', 'data_array'], {}), '(data_fpath, data_array)\n', (6120, 6144), True, 'import numpy as np\n'), ((6226, 6260), 'numpy.save', 'np.save', (['labels_fpath', 'label_array'], {}), '(labels_fpath, label_array)\n', (6233, 6260), True, 'import numpy as np\n'), ((1510, 1543), 'numpy.transpose', 'np.transpose', (['intarray', '(1, 2, 0)'], {}), '(intarray, (1, 2, 0))\n', (1522, 1543), True, 'import numpy as np\n'), ((3580, 3607), 'numpy.array', 'np.array', (['resized_converted'], {}), '(resized_converted)\n', (3588, 3607), True, 'import numpy as np\n'), ((1709, 1734), 'PIL.Image.fromarray', 'Image.fromarray', (['intarray'], {}), '(intarray)\n', (1724, 1734), False, 'from PIL import Image\n')] |
import math
import numpy as np
from numerical_differentiation import gradient_approximation, \
Hessian_approximation
from line_search import line_search_by_backtracking
from utils import compute_error
def gradient_descent(f, x_0, tol,
                     tol_backtracking, x_ast=None, p_ast=None, maxiter=30,
                     gf_symbolic=None):
    '''
    Method of gradient descent to numerically approximate solution of min f.
    Args:
        f (fun): definition of function f as lambda expression or function definition.
        x_0 (numpy ndarray): initial point for gradient descent method.
        tol (float): tolerance that will halt method. Controls norm of gradient of f.
        tol_backtracking (float): tolerance that will halt method. Controls value of line search by backtracking.
        x_ast (numpy ndarray): solution of min f, now it's required that user knows the solution...
        p_ast (float): value of f(x_ast), now it's required that user knows the solution...
        maxiter (int): maximum number of iterations.
        gf_symbolic (fun): definition of gradient of f. If given, no approximation is
                           performed via finite differences.
    Returns:
        x (numpy ndarray): numpy array, approximation of x_ast.
        iteration (int): number of iterations.
        Err_plot (numpy ndarray): numpy array of absolute error between p_ast and f(x) with x approximation
                                  of x_ast. Useful for plotting.
        x_plot (numpy ndarray): numpy array that containts in columns vector of approximations. Last column
                                contains x, approximation of solution. Useful for plotting.
    '''
    iteration = 0
    x = x_0
    feval = f(x)
    # Prefer the analytic gradient when supplied; otherwise approximate it
    # via finite differences.
    if gf_symbolic:
        gfeval = gf_symbolic(x)
    else:
        gfeval = gradient_approximation(f,x)
    normgf = np.linalg.norm(gfeval)
    # History buffers: per-iteration objective error and the iterates
    # themselves stored column-wise.
    Err_plot_aux = np.zeros(maxiter)
    Err_plot_aux[iteration]=compute_error(p_ast,feval)
    Err = compute_error(x_ast,x)
    n = x.size
    x_plot = np.zeros((n,maxiter))
    x_plot[:,iteration] = x
    # Progress table: iteration, gradient norm, errors vs the known optimum,
    # and the line-search step size.
    # NOTE(review): header prints 'Normagf' (sic) -- runtime string left as-is.
    print('I\tNormagf\t\tError x_ast\tError p_ast\tline search')
    print('{}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{}'.format(iteration,normgf,Err,Err_plot_aux[iteration],"---"))
    iteration+=1
    # Iterate until the gradient norm is small enough or maxiter is reached.
    while(normgf>tol and iteration < maxiter):
        # Steepest-descent direction and its directional derivative.
        dir_desc = -gfeval
        der_direct = gfeval.dot(dir_desc)
        # Step length chosen by backtracking line search.
        t = line_search_by_backtracking(f,dir_desc,x,der_direct)
        x = x + t*dir_desc
        feval = f(x)
        if gf_symbolic:
            gfeval = gf_symbolic(x)
        else:
            gfeval = gradient_approximation(f,x)
        normgf = np.linalg.norm(gfeval)
        Err_plot_aux[iteration]=compute_error(p_ast,feval)
        x_plot[:,iteration] = x
        Err = compute_error(x_ast,x)
        print('{}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}'.format(iteration,normgf,Err,
                                                              Err_plot_aux[iteration],t))
        if t<tol_backtracking: #if t is less than tol_backtracking then we need to check the reason
            # Force the loop to terminate on the next check while remembering
            # the true iteration count in iter_salida.
            iter_salida=iteration
            iteration = maxiter - 1
        iteration+=1
    print('{} {:0.2e}'.format("Error of x with respect to x_ast:",Err))
    print('{} {}'.format("Approximate solution:", x))
    # Keep only error entries meaningfully above machine precision for plotting.
    cond = Err_plot_aux > np.finfo(float).eps*10**(-2)
    Err_plot = Err_plot_aux[cond]
    # Distinguish "line search collapsed" from "ran out of iterations".
    if iteration == maxiter and t < tol_backtracking:
        print("Backtracking value less than tol_backtracking, check approximation")
        iteration=iter_salida
    else:
        if iteration == maxiter:
            print("Reached maximum of iterations, check approximation")
    x_plot = x_plot[:,:iteration]
    return [x,iteration,Err_plot,x_plot]
def Newtons_method(f, x_0, tol,
                   tol_backtracking, x_ast=None, p_ast=None, maxiter=30,
                   gf_symbolic=None,
                   Hf_symbolic=None):
    '''
    Newton's method to numerically approximate solution of min f.
    Args:
        f (fun): definition of function f as lambda expression or function definition.
        x_0 (numpy ndarray): initial point for Newton's method.
        tol (float): tolerance that will halt method. Controls stopping criteria.
        tol_backtracking (float): tolerance that will halt method. Controls value of line search by backtracking.
        x_ast (numpy ndarray): solution of min f, now it's required that user knows the solution...
        p_ast (float): value of f(x_ast), now it's required that user knows the solution...
        maxiter (int): maximum number of iterations
        gf_symbolic (fun): definition of gradient of f. If given, no approximation is
                           performed via finite differences.
        Hf_symbolic (fun): definition of Hessian of f. If given, no approximation is
                           performed via finite differences.
    Returns:
        x (numpy ndarray): numpy array, approximation of x_ast.
        iteration (int): number of iterations.
        Err_plot (numpy ndarray): numpy array of absolute error between p_ast and f(x) with x approximation
                                  of x_ast. Useful for plotting.
        x_plot (numpy ndarray): numpy array that containts in columns vector of approximations. Last column
                                contains x, approximation of solution. Useful for plotting.
    '''
    iteration = 0
    x = x_0
    feval = f(x)
    # Analytic derivatives when available; finite differences otherwise.
    if gf_symbolic:
        gfeval = gf_symbolic(x)
    else:
        gfeval = gradient_approximation(f,x)
    if Hf_symbolic:
        Hfeval = Hf_symbolic(x)
    else:
        Hfeval = Hessian_approximation(f,x)
    normgf = np.linalg.norm(gfeval)
    # Condition number of the Hessian, reported for diagnostics.
    condHf= np.linalg.cond(Hfeval)
    # History buffers: per-iteration objective error and iterates (columns).
    Err_plot_aux = np.zeros(maxiter)
    Err_plot_aux[iteration]=compute_error(p_ast,feval)
    Err = compute_error(x_ast,x)
    n = x.size
    x_plot = np.zeros((n,maxiter))
    x_plot[:,iteration] = x
    #Newton's direction and Newton's decrement
    dir_Newton = np.linalg.solve(Hfeval, -gfeval)
    dec_Newton = -gfeval.dot(dir_Newton)
    print('I\tNormgf \tNewton Decrement\tError x_ast\tError p_ast\tline search\tCondHf')
    print('{}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{}\t\t{:0.2e}'.format(iteration,normgf,
                                                                         dec_Newton,Err,
                                                                         Err_plot_aux[iteration],"---",
                                                                         condHf))
    # Half the Newton decrement estimates f(x) - p_ast (standard stopping rule).
    stopping_criteria = dec_Newton/2
    iteration+=1
    while(stopping_criteria>tol and iteration < maxiter):
        der_direct = -dec_Newton
        # Step length chosen by backtracking line search along Newton's direction.
        t = line_search_by_backtracking(f,dir_Newton,x,der_direct)
        x = x + t*dir_Newton
        feval = f(x)
        if gf_symbolic:
            gfeval = gf_symbolic(x)
        else:
            gfeval = gradient_approximation(f,x)
        if Hf_symbolic:
            Hfeval = Hf_symbolic(x)
        else:
            Hfeval = Hessian_approximation(f,x)
        normgf = np.linalg.norm(gfeval)
        condHf= np.linalg.cond(Hfeval)
        #Newton's direction and Newton's decrement
        dir_Newton = np.linalg.solve(Hfeval, -gfeval)
        dec_Newton = -gfeval.dot(dir_Newton)
        Err_plot_aux[iteration]=compute_error(p_ast,feval)
        x_plot[:,iteration] = x
        Err = compute_error(x_ast,x)
        print('{}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}'.format(iteration,normgf,
                                                                                dec_Newton,Err,
                                                                                Err_plot_aux[iteration],t,
                                                                                condHf))
        stopping_criteria = dec_Newton/2
        if t<tol_backtracking: #if t is less than tol_backtracking then we need to check the reason
            # Force termination on the next check while remembering the true
            # iteration count in iter_salida.
            iter_salida=iteration
            iteration = maxiter - 1
        iteration+=1
    print('{} {:0.2e}'.format("Error of x with respect to x_ast:",Err))
    print('{} {}'.format("Approximate solution:", x))
    # Keep only error entries meaningfully above machine precision for plotting.
    cond = Err_plot_aux > np.finfo(float).eps*10**(-2)
    Err_plot = Err_plot_aux[cond]
    # Distinguish "line search collapsed" from "ran out of iterations".
    if iteration == maxiter and t < tol_backtracking:
        print("Backtracking value less than tol_backtracking, check approximation")
        iteration=iter_salida
    else:
        if iteration == maxiter:
            print("Reached maximum of iterations, check approximation")
    x_plot = x_plot[:,:iteration]
    return [x,iteration,Err_plot,x_plot] | [
"numerical_differentiation.Hessian_approximation",
"numpy.linalg.solve",
"numpy.linalg.cond",
"utils.compute_error",
"line_search.line_search_by_backtracking",
"numpy.zeros",
"numpy.linalg.norm",
"numerical_differentiation.gradient_approximation",
"numpy.finfo"
] | [((1928, 1950), 'numpy.linalg.norm', 'np.linalg.norm', (['gfeval'], {}), '(gfeval)\n', (1942, 1950), True, 'import numpy as np\n'), ((1975, 1992), 'numpy.zeros', 'np.zeros', (['maxiter'], {}), '(maxiter)\n', (1983, 1992), True, 'import numpy as np\n'), ((2021, 2048), 'utils.compute_error', 'compute_error', (['p_ast', 'feval'], {}), '(p_ast, feval)\n', (2034, 2048), False, 'from utils import compute_error\n'), ((2063, 2086), 'utils.compute_error', 'compute_error', (['x_ast', 'x'], {}), '(x_ast, x)\n', (2076, 2086), False, 'from utils import compute_error\n'), ((2114, 2136), 'numpy.zeros', 'np.zeros', (['(n, maxiter)'], {}), '((n, maxiter))\n', (2122, 2136), True, 'import numpy as np\n'), ((5788, 5810), 'numpy.linalg.norm', 'np.linalg.norm', (['gfeval'], {}), '(gfeval)\n', (5802, 5810), True, 'import numpy as np\n'), ((5823, 5845), 'numpy.linalg.cond', 'np.linalg.cond', (['Hfeval'], {}), '(Hfeval)\n', (5837, 5845), True, 'import numpy as np\n'), ((5870, 5887), 'numpy.zeros', 'np.zeros', (['maxiter'], {}), '(maxiter)\n', (5878, 5887), True, 'import numpy as np\n'), ((5916, 5943), 'utils.compute_error', 'compute_error', (['p_ast', 'feval'], {}), '(p_ast, feval)\n', (5929, 5943), False, 'from utils import compute_error\n'), ((5958, 5981), 'utils.compute_error', 'compute_error', (['x_ast', 'x'], {}), '(x_ast, x)\n', (5971, 5981), False, 'from utils import compute_error\n'), ((6009, 6031), 'numpy.zeros', 'np.zeros', (['(n, maxiter)'], {}), '((n, maxiter))\n', (6017, 6031), True, 'import numpy as np\n'), ((6133, 6165), 'numpy.linalg.solve', 'np.linalg.solve', (['Hfeval', '(-gfeval)'], {}), '(Hfeval, -gfeval)\n', (6148, 6165), True, 'import numpy as np\n'), ((1886, 1914), 'numerical_differentiation.gradient_approximation', 'gradient_approximation', (['f', 'x'], {}), '(f, x)\n', (1908, 1914), False, 'from numerical_differentiation import gradient_approximation, Hessian_approximation\n'), ((2485, 2540), 'line_search.line_search_by_backtracking', 
'line_search_by_backtracking', (['f', 'dir_desc', 'x', 'der_direct'], {}), '(f, dir_desc, x, der_direct)\n', (2512, 2540), False, 'from line_search import line_search_by_backtracking\n'), ((2726, 2748), 'numpy.linalg.norm', 'np.linalg.norm', (['gfeval'], {}), '(gfeval)\n', (2740, 2748), True, 'import numpy as np\n'), ((2781, 2808), 'utils.compute_error', 'compute_error', (['p_ast', 'feval'], {}), '(p_ast, feval)\n', (2794, 2808), False, 'from utils import compute_error\n'), ((2854, 2877), 'utils.compute_error', 'compute_error', (['x_ast', 'x'], {}), '(x_ast, x)\n', (2867, 2877), False, 'from utils import compute_error\n'), ((5626, 5654), 'numerical_differentiation.gradient_approximation', 'gradient_approximation', (['f', 'x'], {}), '(f, x)\n', (5648, 5654), False, 'from numerical_differentiation import gradient_approximation, Hessian_approximation\n'), ((5734, 5761), 'numerical_differentiation.Hessian_approximation', 'Hessian_approximation', (['f', 'x'], {}), '(f, x)\n', (5755, 5761), False, 'from numerical_differentiation import gradient_approximation, Hessian_approximation\n'), ((6824, 6881), 'line_search.line_search_by_backtracking', 'line_search_by_backtracking', (['f', 'dir_Newton', 'x', 'der_direct'], {}), '(f, dir_Newton, x, der_direct)\n', (6851, 6881), False, 'from line_search import line_search_by_backtracking\n'), ((7218, 7240), 'numpy.linalg.norm', 'np.linalg.norm', (['gfeval'], {}), '(gfeval)\n', (7232, 7240), True, 'import numpy as np\n'), ((7257, 7279), 'numpy.linalg.cond', 'np.linalg.cond', (['Hfeval'], {}), '(Hfeval)\n', (7271, 7279), True, 'import numpy as np\n'), ((7353, 7385), 'numpy.linalg.solve', 'np.linalg.solve', (['Hfeval', '(-gfeval)'], {}), '(Hfeval, -gfeval)\n', (7368, 7385), True, 'import numpy as np\n'), ((7463, 7490), 'utils.compute_error', 'compute_error', (['p_ast', 'feval'], {}), '(p_ast, feval)\n', (7476, 7490), False, 'from utils import compute_error\n'), ((7536, 7559), 'utils.compute_error', 'compute_error', (['x_ast', 'x'], 
{}), '(x_ast, x)\n', (7549, 7559), False, 'from utils import compute_error\n'), ((2681, 2709), 'numerical_differentiation.gradient_approximation', 'gradient_approximation', (['f', 'x'], {}), '(f, x)\n', (2703, 2709), False, 'from numerical_differentiation import gradient_approximation, Hessian_approximation\n'), ((7033, 7061), 'numerical_differentiation.gradient_approximation', 'gradient_approximation', (['f', 'x'], {}), '(f, x)\n', (7055, 7061), False, 'from numerical_differentiation import gradient_approximation, Hessian_approximation\n'), ((7165, 7192), 'numerical_differentiation.Hessian_approximation', 'Hessian_approximation', (['f', 'x'], {}), '(f, x)\n', (7186, 7192), False, 'from numerical_differentiation import gradient_approximation, Hessian_approximation\n'), ((3402, 3417), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (3410, 3417), True, 'import numpy as np\n'), ((8333, 8348), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (8341, 8348), True, 'import numpy as np\n')] |
"""
Sugeno fuzzy model
==============================================================================
"""
import matplotlib.pyplot as plt
import numpy as np
from .core import plot_crisp_input
# #############################################################################
#
#
# Fuzzy Variable
#
#
# #############################################################################
class FuzzyVariable:
    """A named fuzzy variable defined over a universe of discourse.

    Spaces in ``name`` are replaced with underscores so the variable can be
    addressed as a keyword argument elsewhere in the system. ``sets`` maps a
    fuzzy-set name to a ``(membership_fn, params)`` pair.
    """

    def __init__(self, name, universe, sets):
        self.name = name.replace(" ", "_")
        self.universe = universe
        self.sets = sets

    def compute_membership(self, value, fuzzyset):
        """Evaluate the membership of ``value`` in the named fuzzy set."""
        membership_fn, fn_params = self.sets[fuzzyset]
        return membership_fn(value, **fn_params)
class SugenoRule:
    """A rule of a Sugeno (TSK) fuzzy system.

    Holds the fuzzy premises (joined by AND/OR connectives) and a linear
    consequent of the form ``sum(coef[var] * var) + intercept``.
    """

    def __init__(self, premises, coef, intercept):
        # premises: list whose first element is (FuzzyVariable, setname) and
        # whose remaining elements are (connective, FuzzyVariable, setname).
        self.premises = premises
        # coef: dict mapping crisp-variable name -> linear coefficient, or None.
        self.coef = coef
        # intercept: constant term of the consequent, or None.
        self.intercept = intercept

    def __repr__(self):
        # Render the rule as human-readable "IF ... THEN ..." text.
        text = "IF\n"
        space = " " * 4
        # --- premises ---------------------------------------------------
        for i_premise, premise in enumerate(self.premises):
            if i_premise == 0:
                # First premise carries no leading connective.
                text += space + premise[0].name + " IS"
                for t in premise[1:]:
                    text += " " + t
                text += "\n"
            else:
                # premise = (connective, fuzzyvar, fuzzyset, ...)
                text += space + premise[0] + " " + premise[1].name + " IS"
                for t in premise[2:]:
                    text += " " + t
                text += "\n"
        text += "THEN\n"
        # --- consequent -------------------------------------------------
        if self.coef is not None:
            for i_key, key in enumerate(self.coef.keys()):
                if i_key == 0:
                    # First linear term: printed without a leading "+".
                    text += space
                    if self.coef[key] > 0:
                        if self.coef[key] == 1:
                            text += key + "\n"
                        else:
                            text += str(self.coef[key]) + " " + key + "\n"
                    if self.coef[key] < 0:
                        if self.coef[key] == -1:
                            text += "- " + key + "\n"
                        else:
                            text += "- " + str(abs(self.coef[key])) + " " + key + "\n"
                else:
                    # Subsequent terms are prefixed with their sign; a unit
                    # coefficient is omitted from the printout.
                    if self.coef[key] > 0:
                        if self.coef[key] == 1:
                            text += space + "+ " + key + "\n"
                        else:
                            text += (
                                space + "+ " + str(self.coef[key]) + " " + key + "\n"
                            )
                    if self.coef[key] < 0:
                        if self.coef[key] == -1:
                            text += space + "- " + key + "\n"
                        else:
                            text += (
                                space
                                + "- "
                                + str(abs(self.coef[key]))
                                + " "
                                + key
                                + "\n"
                            )
            # Constant term rendered after all linear terms (skipped if None).
            if self.intercept is not None:
                if self.intercept > 0:
                    text += space + "+ " + str(self.intercept)
                if self.intercept < 0:
                    text += space + "- " + str(abs(self.intercept))
        else:
            # Zero-order rule: the consequent is just the constant.
            if self.intercept is not None:
                if self.intercept >= 0:
                    text += space + str(self.intercept)
                if self.intercept < 0:
                    text += space + "- " + str(abs(self.intercept))
        return text
def probor(a, b):
    """Probabilistic OR (algebraic sum) of two memberships, clipped to [0, 1]."""
    combined = a + b - a * b
    return np.maximum(0, np.minimum(1, combined))
class Sugeno:
    """Sugeno (TSK) fuzzy inference system.

    Args:
        and_operator: 'min' or 'prod', or a two-argument callable used to
            combine memberships joined by AND.
        or_operator: 'max' or 'probor', or a two-argument callable used to
            combine memberships joined by OR.
        defuzzification_operator: 'wtaver' normalizes rule activations to a
            weighted average; any other value yields the weighted sum.
    """

    def __init__(self, and_operator, or_operator, defuzzification_operator):
        self.and_operator = and_operator
        self.or_operator = or_operator
        self.defuzzification_operator = defuzzification_operator
        #
        self.implication_operator = "prod"

    def __call__(self, rules, **values):
        """Run the full inference pipeline and return the crisp output."""
        self.rules = rules
        self.fuzzificate(**values)
        self.aggregate_premises()
        self.build_infered_consequence(**values)
        self.aggregate_productions()
        return self.infered_value

    def fuzzificate(self, **values):
        """Computes the memberships of the antecedents.

        Args:
            values: crisp values for the antecedentes in the rule.
        """
        for rule in self.rules:
            rule.fuzzificated_values = {}
            for i_premise, premise in enumerate(rule.premises):
                # First premise is (fuzzyvar, fuzzyset); later premises carry
                # a leading connective: (op, fuzzyvar, fuzzyset).
                if i_premise == 0:
                    fuzzyvar, fuzzyset = premise
                else:
                    _, fuzzyvar, fuzzyset = premise
                crisp_value = values[fuzzyvar.name]
                fn, params = fuzzyvar.sets[fuzzyset]
                rule.fuzzificated_values[fuzzyvar.name] = fn(
                    crisp_value, **params
                ).tolist()

    def aggregate_premises(self):
        """Combine per-variable memberships into one activation per rule."""
        # Resolve the connective operators once (loop-invariant).
        if isinstance(self.or_operator, str):
            or_operator = {
                "max": np.maximum,
                "probor": probor,
            }[self.or_operator]
        else:
            # User-supplied callable. (The original left the local unbound in
            # this case, raising UnboundLocalError on first use.)
            or_operator = self.or_operator
        if isinstance(self.and_operator, str):
            and_operator = {
                "min": np.minimum,
                "prod": np.multiply,
            }[self.and_operator]
        else:
            and_operator = self.and_operator
        for rule in self.rules:
            rule.aggregated_membership = None
            for premise in rule.premises:
                if rule.aggregated_membership is None:
                    # Seed with the first premise's membership.
                    rule.aggregated_membership = rule.fuzzificated_values[
                        premise[0].name
                    ]
                else:
                    name = premise[1].name
                    if premise[0] == "OR":
                        rule.aggregated_membership = or_operator(
                            rule.aggregated_membership, rule.fuzzificated_values[name]
                        )
                    if premise[0] == "AND":
                        rule.aggregated_membership = and_operator(
                            rule.aggregated_membership, rule.fuzzificated_values[name]
                        )

    def build_infered_consequence(self, **values):
        """Evaluate each rule's linear consequent at the crisp input values."""
        for rule in self.rules:
            result = 0
            for key in rule.coef.keys():
                crisp_value = values[key]
                coef = rule.coef[key]
                result += coef * crisp_value
            if rule.intercept is not None:
                # Bug fix: the original added ``result.intercept`` (a number
                # has no such attribute), raising AttributeError whenever a
                # rule defined an intercept.
                result += rule.intercept
            rule.infered_consequence = result

    def aggregate_productions(self):
        """Defuzzify: weighted combination of the rule consequents."""
        if self.defuzzification_operator == "wtaver":
            # Weighted average: normalize activations so they sum to one.
            s = sum([rule.aggregated_membership for rule in self.rules])
            for rule in self.rules:
                rule.aggregated_membership = rule.aggregated_membership / s
        infered_value = 0
        for rule in self.rules:
            infered_value += rule.aggregated_membership * rule.infered_consequence
        self.infered_value = infered_value

    def plot(self, rules, **values):
        """Run inference and draw a grid of premise plots plus consequents.

        One row per rule; one column per variable plus a final column for the
        rule's consequent and (activation). The bottom-right cell shows the
        crisp system output.
        """

        def get_position():
            # Map each variable name (premise or consequent) to its column.
            names = []
            for rule in rules:
                for i_premise, premise in enumerate(rule.premises):
                    if i_premise == 0:
                        names.append(premise[0].name)
                    else:
                        names.append(premise[1].name)
                if rule.coef is not None:
                    for key in rule.coef.keys():
                        names.append(key)
            names = sorted(set(names))
            position = {name: i_name for i_name, name in enumerate(names)}
            return position

        # Run the inference so per-rule results are available for annotation.
        self.__call__(rules, **values)
        n_rows = len(self.rules) + 1
        position = get_position()
        n_variables = len(position.keys())
        universes = {}
        for i_rule, rule in enumerate(rules):
            #
            # Plot premises
            #
            for i_premise, premise in enumerate(rule.premises):
                if i_premise == 0:
                    fuzzyvar = premise[0]
                    varname = premise[0].name
                    fuzzyset = premise[1]
                else:
                    fuzzyvar = premise[1]
                    varname = premise[1].name
                    fuzzyset = premise[2]
                i_col = position[varname]
                # Only the leftmost column shows the y-axis.
                if i_col == 0:
                    view_yaxis = "left"
                else:
                    view_yaxis = False
                plt.subplot(
                    n_rows,
                    n_variables + 1,
                    i_rule * (n_variables + 1) + i_col + 1,
                )
                # x-axis labels only on the last rule row; titles on the first.
                view_xaxis = True if i_rule + 1 == len(rules) else False
                title = varname if i_rule == 0 else None
                fn, params = fuzzyvar.sets[fuzzyset]
                universe = fuzzyvar.universe
                membership = fn(universe, **params)
                universes[varname] = universe
                plot_crisp_input(
                    value=values[varname],
                    universe=universe,
                    membership=membership,
                    name=title,
                    view_xaxis=view_xaxis,
                    view_yaxis=view_yaxis,
                )
            #
            # Plot consequence: the rule's crisp output and its activation.
            #
            plt.subplot(
                n_rows,
                n_variables + 1,
                i_rule * (n_variables + 1) + n_variables + 1,
            )
            text_kwargs = dict(ha="center", va="bottom", fontsize=18, color="black")
            plt.gca().text(
                0.5, 0.5, str(round(rule.infered_consequence, 2)), **text_kwargs
            )
            text_kwargs = dict(ha="center", va="top", fontsize=18, color="black")
            plt.gca().text(
                0.5,
                0.5,
                "(" + str(round(rule.aggregated_membership, 2)) + ")",
                **text_kwargs
            )
            plt.gca().get_yaxis().set_visible(False)
            plt.gca().get_xaxis().set_visible(False)
        # Bottom-right cell: the aggregated crisp output of the system.
        plt.subplot(
            n_rows,
            n_variables + 1,
            n_rows * (n_variables + 1),
        )
        text_kwargs = dict(ha="center", va="center", fontsize=18, color="black")
        plt.gca().text(0.5, 0.5, str(round(self.infered_value, 2)), **text_kwargs)
        plt.gca().get_yaxis().set_visible(False)
        plt.gca().get_xaxis().set_visible(False)
        #
        # format first column
        #
        for i_row in range(len(self.rules)):
            plt.subplot(
                n_rows,
                n_variables + 1,
                i_row * (n_variables + 1) + 1,
            )
            plt.gca().set_ylim(-0.05, 1.05)
            plt.gca().spines["bottom"].set_visible(True)
            plt.gca().spines["left"].set_visible(False)
            plt.gca().spines["right"].set_visible(False)
            plt.gca().spines["top"].set_visible(False)
        # Align every column's x-limits with its variable's universe.
        for i_col in range(len(position.keys())):
            plt.subplot(
                n_rows,
                n_variables + 1,
                (n_rows - 2) * (n_variables + 1) + i_col + 1,
            )
            varname = [k for k in position.keys() if position[k] == i_col][0]
            xmin = min(universes[varname])
            xmax = max(universes[varname])
            plt.gca().set_xlim(xmin, xmax)
            plt.gca().spines["bottom"].set_visible(True)
            plt.gca().spines["left"].set_visible(False)
            plt.gca().spines["right"].set_visible(False)
            plt.gca().spines["top"].set_visible(False)
# from mf import trimf
# x1 = FuzzyVariable(
# name="x1",
# universe=np.linspace(start=0, stop=20, num=50),
# sets={
# "small_1": (trimf, {"a": 0, "b": 0, "c": 16}),
# "big_1": (trimf, {"a": 10, "b": 20, "c": 20}),
# },
# )
# # print(x1.compute_membership(0, "small_1"))
# x2 = FuzzyVariable(
# name="x2",
# universe=np.linspace(start=0, stop=20, num=50),
# sets={
# "small_2": (trimf, {"a": 0, "b": 0, "c": 8}),
# "big_2": (trimf, {"a": 2, "b": 10, "c": 20}),
# },
# )
# rule_1 = SugenoRule(
# premises=[
# (x1, "small_1"),
# ("AND", x2, "small_2"),
# ],
# coef={"x1": 1, "x2": 1},
# intercept=None,
# )
# rule_2 = SugenoRule(
# premises=[
# (x1, "big_1"),
# ],
# coef={"x1": 2},
# intercept=None,
# )
# rule_3 = SugenoRule(
# premises=[
# (x2, "big_2"),
# ],
# coef={"x2": 3},
# intercept=None,
# )
# # print(rule_1)
# # print()
# # print(rule_2)
# # print()
# # print(rule_3)
# model = Sugeno(
# and_operator="min",
# or_operator="max",
# defuzzification_operator="wtaver",
# )
# plt.figure(figsize=(12, 9))
# model.plot(
# rules=[rule_1, rule_2, rule_3],
# x1=12,
# x2=5,
# )
| [
"matplotlib.pyplot.gca",
"matplotlib.pyplot.subplot",
"numpy.minimum"
] | [((3530, 3558), 'numpy.minimum', 'np.minimum', (['(1)', '(a + b - a * b)'], {}), '(1, a + b - a * b)\n', (3540, 3558), True, 'import numpy as np\n'), ((10174, 10238), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_rows', '(n_variables + 1)', '(n_rows * (n_variables + 1))'], {}), '(n_rows, n_variables + 1, n_rows * (n_variables + 1))\n', (10185, 10238), True, 'import matplotlib.pyplot as plt\n'), ((9436, 9522), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_rows', '(n_variables + 1)', '(i_rule * (n_variables + 1) + n_variables + 1)'], {}), '(n_rows, n_variables + 1, i_rule * (n_variables + 1) +\n n_variables + 1)\n', (9447, 9522), True, 'import matplotlib.pyplot as plt\n'), ((10657, 10724), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_rows', '(n_variables + 1)', '(i_row * (n_variables + 1) + 1)'], {}), '(n_rows, n_variables + 1, i_row * (n_variables + 1) + 1)\n', (10668, 10724), True, 'import matplotlib.pyplot as plt\n'), ((11121, 11207), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_rows', '(n_variables + 1)', '((n_rows - 2) * (n_variables + 1) + i_col + 1)'], {}), '(n_rows, n_variables + 1, (n_rows - 2) * (n_variables + 1) +\n i_col + 1)\n', (11132, 11207), True, 'import matplotlib.pyplot as plt\n'), ((8529, 8605), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_rows', '(n_variables + 1)', '(i_rule * (n_variables + 1) + i_col + 1)'], {}), '(n_rows, n_variables + 1, i_rule * (n_variables + 1) + i_col + 1)\n', (8540, 8605), True, 'import matplotlib.pyplot as plt\n'), ((10376, 10385), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10383, 10385), True, 'import matplotlib.pyplot as plt\n'), ((9680, 9689), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9687, 9689), True, 'import matplotlib.pyplot as plt\n'), ((9885, 9894), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9892, 9894), True, 'import matplotlib.pyplot as plt\n'), ((10801, 10810), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10808, 10810), True, 'import 
matplotlib.pyplot as plt\n'), ((11445, 11454), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11452, 11454), True, 'import matplotlib.pyplot as plt\n'), ((10459, 10468), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10466, 10468), True, 'import matplotlib.pyplot as plt\n'), ((10508, 10517), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10515, 10517), True, 'import matplotlib.pyplot as plt\n'), ((10071, 10080), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10078, 10080), True, 'import matplotlib.pyplot as plt\n'), ((10124, 10133), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10131, 10133), True, 'import matplotlib.pyplot as plt\n'), ((10845, 10854), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10852, 10854), True, 'import matplotlib.pyplot as plt\n'), ((10902, 10911), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10909, 10911), True, 'import matplotlib.pyplot as plt\n'), ((10958, 10967), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10965, 10967), True, 'import matplotlib.pyplot as plt\n'), ((11015, 11024), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11022, 11024), True, 'import matplotlib.pyplot as plt\n'), ((11488, 11497), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11495, 11497), True, 'import matplotlib.pyplot as plt\n'), ((11545, 11554), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11552, 11554), True, 'import matplotlib.pyplot as plt\n'), ((11601, 11610), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11608, 11610), True, 'import matplotlib.pyplot as plt\n'), ((11658, 11667), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11665, 11667), True, 'import matplotlib.pyplot as plt\n')] |
import warnings
import numpy as np
import pandas as pd
from scipy import optimize
from autocnet.camera import camera
from autocnet.camera import utils as camera_utils
from autocnet.utils.utils import make_homogeneous, normalize_vector
# Optional OpenCV dependency: flag availability so callers can degrade
# gracefully when cv2 is not installed.
try:
    import cv2
    cv2_avail = True
except ImportError:  # pragma: no cover
    # Bug fix: the original assigned ``cv_avail`` here, leaving the
    # ``cv2_avail`` flag undefined whenever OpenCV is missing.
    cv2_avail = False
def compute_epipolar_lines(F, x, index=None):
    """
    Given a fundamental matrix and a set of homogeneous points, compute the
    corresponding epipolar lines ``l = F x``, normalized so that the line
    direction ``(a, b)`` is a unit vector.

    Parameters
    ----------
    F : ndarray
        of shape (3,3) that represents the fundamental matrix
    x : ndarray or pd.DataFrame
        of shape (n, 3) of homogeneous coordinates
    index : iterable, optional
        If provided, the result is returned as a pd.DataFrame with
        columns ('a', 'b', 'c') and this index.

    Returns
    -------
    lines : ndarray or pd.DataFrame
        of shape (n,3) of epipolar lines in standard form

    Raises
    ------
    ValueError
        If ``x`` is not (n, 3) homogeneous.
    """
    if isinstance(x, pd.DataFrame):
        x = x.values
    if not x.shape[1] == 3:
        raise ValueError('The input points must be homogenous with shape (n,3)')
    # Unnormalized lines: column i holds F @ x_i.
    lines = np.inner(F, x)
    # Scale each line so (a, b) has unit norm.
    nu = lines[0] ** 2 + lines[1] ** 2
    try:
        nu = 1 / np.sqrt(nu)
    except ZeroDivisionError:
        # Defensive fallback: ndarray division by zero yields inf (with a
        # warning) rather than raising, so this only fires for scalars.
        nu = 1
    lines *= nu
    # Inner transposes the result, so transpose back into the 3 column form.
    lines = lines.T
    if index is not None:
        lines = pd.DataFrame(lines, columns=['a', 'b', 'c'], index=index)
    return lines
def epipolar_distance(lines, pts):
    """
    Euclidean point-to-line distance for each (epipolar line, point) pair.

    Parameters
    ----------
    lines : ndarray
        of shape (n,3) of epipolar lines in standard form (a, b, c)
    pts : ndarray
        of shape (n, 3) of homogeneous coordinates

    Returns
    -------
    ndarray
        of shape (n,) with distances |a*x + b*y + c| / sqrt(a**2 + b**2)
    """
    a = lines[:, 0]
    b = lines[:, 1]
    c = lines[:, 2]
    numerator = np.abs(a * pts[:, 0] + b * pts[:, 1] + c)
    denominator = np.sqrt(a ** 2 + b ** 2)
    return numerator / denominator
def compute_reprojection_error(F, x, x1, index=None):
    r"""
    Given a set of matches and a known fundamental matrix, compute the
    distance between the match points and their associated epipolar lines.

    The distance between a point and its epipolar line is
    $d = \frac{\lvert ax_{0} + by_{0} + c \rvert}{\sqrt{a^{2} + b^{2}}}$;
    the larger of the two per-image distances is reported per match.

    Parameters
    ----------
    F : ndarray
        (3,3) Fundamental matrix
    x : arraylike
        (n,2) or (n,3) array of homogeneous coordinates
    x1 : arraylike
        (n,2) or (n,3) array of homogeneous coordinates with the same
        length as argument x
    index : ndarray, optional
        If given, the errors are returned as a pd.Series with this index.

    Returns
    -------
    err : ndarray or pd.Series
        n,1 vector of reprojection errors
    """
    if isinstance(x, (pd.Series, pd.DataFrame)):
        x = x.values
    if isinstance(x1, (pd.Series, pd.DataFrame)):
        x1 = x1.values
    if x.shape[1] != 3:
        x = make_homogeneous(x)
    if x1.shape[1] != 3:
        x1 = make_homogeneous(x1)
    # Epipolar lines induced by each point set.
    lines_from_x = compute_epipolar_lines(F, x)
    lines_from_x1 = compute_epipolar_lines(F.T, x1)
    # Distance from each point to the line induced by its correspondence.
    dist_x = epipolar_distance(lines_from_x1, x)
    dist_x1 = epipolar_distance(lines_from_x, x1)
    # Report the worse of the two reprojections per correspondence.
    err = np.max(np.column_stack((dist_x, dist_x1)), axis=1)
    if index is not None:
        err = pd.Series(err, index=index)
    return err
def compute_fundamental_error(F, x, x1):
    r"""
    Compute the fundamental error using the idealized error metric.

    Ideal error is defined by $x^{\intercal}Fx = 0$, where $x$ are all
    matchpoints in a given image and $x^{\intercal}F$ defines the standard
    form of the epipolar line in the second image.

    This method assumes that x and x1 are ordered such that x[0]
    corresponds to x1[0].

    Parameters
    ----------
    F : ndarray
        (3,3) Fundamental matrix
    x : arraylike
        (n,2) or (n,3) array of homogeneous coordinates
    x1 : arraylike
        (n,2) or (n,3) array of homogeneous coordinates with the same
        length as argument x

    Returns
    -------
    F_error : ndarray
        n,1 vector of reprojection errors
    """
    # Convert DataFrames to plain arrays *before* the shape checks so that
    # make_homogeneous always receives an ndarray.
    if isinstance(x, pd.DataFrame):
        x = x.values
    if isinstance(x1, pd.DataFrame):
        x1 = x1.values
    if x.shape[1] != 3:
        x = make_homogeneous(x)
    if x1.shape[1] != 3:
        x1 = make_homogeneous(x1)
    # Vectorized form of err[i] = x1[i].T @ F @ x[i] (resolves the old TODO);
    # casting to float preserves the original float output dtype.
    err = np.einsum('ij,jk,ik->i',
                    np.asarray(x1, dtype=float),
                    F,
                    np.asarray(x, dtype=float))
    return err
def update_fundamental_mask(F, x1, x2, threshold=1.0, index=None, method='reprojection'):
    """
    Given a Fundamental matrix and two sets of points, compute the
    error between x1 and x2 under F. A mask is returned with all
    errors greater than ``threshold`` set to False.

    Parameters
    ----------
    F : ndarray
        (3,3) Fundamental matrix
    x1 : arraylike
        (n,2) or (n,3) array of homogeneous coordinates
    x2 : arraylike
        (n,2) or (n,3) array of homogeneous coordinates
    threshold : float
        The new upper limit for error. If using
        reprojection this is measured in pixels (the default). If
        using fundamental, the idealized error is 0. Values +- 0.05
        should be good.
    index : ndarray
        Optional index for mapping between reprojective error
        and an associated dataframe (e.g., an indexed matches dataframe).
    method : {'reprojection', 'fundamental'}
        Error metric used to score each correspondence.

    Returns
    -------
    mask : pd.DataFrame
        Single boolean column 'fundamental', True where the absolute
        error is within ``threshold``.

    Raises
    ------
    ValueError
        If ``method`` is not one of the supported options.
    """
    if method == 'reprojection':
        error = compute_reprojection_error(F, x1, x2)
    elif method == 'fundamental':
        error = compute_fundamental_error(F, x1, x2)
    else:
        # Fail fast: the original only warned here and then crashed with a
        # NameError because ``error`` was never assigned.
        raise ValueError('Unknown error method. Options are "reprojection" or "fundamental".')
    # Inliers are correspondences whose absolute error is within threshold;
    # the index is applied directly by the DataFrame constructor.
    mask = pd.DataFrame(np.abs(error) <= threshold, index=index, columns=['fundamental'])
    return mask
def enforce_singularity_constraint(F):
    """
    Project F onto the set of rank-2 matrices when it is not already rank 2.

    The fundamental matrix should be rank 2; zeroing the smallest singular
    value forces epipolar lines to be coincident.

    Parameters
    ----------
    F : ndarray
        (3,3) Fundamental Matrix

    Returns
    -------
    F : ndarray
        (3,3) Singular Fundamental Matrix

    References
    ----------
    .. [Hartley2003]
    """
    if np.linalg.matrix_rank(F) == 2:
        return F
    # Rebuild F from its SVD with the smallest singular value zeroed;
    # scaling the columns of u is equivalent to u @ diag(d0, d1, 0).
    u, d, vt = np.linalg.svd(F)
    return (u * np.array([d[0], d[1], 0])).dot(vt)
def compute_fundamental_matrix(kp1, kp2, method='mle', reproj_threshold=2.0,
                               confidence=0.99, mle_reproj_threshold=0.5):
    """
    Given two arrays of keypoints compute the fundamental matrix.

    Parameters
    ----------
    kp1 : arraylike
          (n, 2) of coordinates from the source image
    kp2 : ndarray
          (n, 2) of coordinates from the destination image
    method : {'mle', 'ransac', 'lmeds', 'normal', '8point'}
             The openCV algorithm to use for outlier detection; 'mle' first
             estimates F with RANSAC and then refines it with a non-linear
             least-squares (gold standard) step.
    reproj_threshold : float
                       The maximum distances in pixels a reprojected points
                       can be from the epipolar line to be considered an inlier
    confidence : float
                 [0, 1] that the estimated matrix is correct
    mle_reproj_threshold : float
                           Pixel threshold used to rebuild the inlier mask
                           after the MLE refinement step.

    Returns
    -------
    F : ndarray
        A 3x3 fundamental matrix
    mask : ndarray
           A boolean mask identifying those points that are valid.

    Notes
    -----
    While the method is user definable, if the number of input points
    is < 7, normal outlier detection is automatically used, if 7 > n > 15,
    least medians is used, and if 7 > 15, ransac can be used.
    """
    # Map the requested method to the OpenCV flag; 'mle' starts from RANSAC.
    if method == 'mle':
        # Grab an initial estimate using RANSAC, then apply MLE
        method_ = cv2.FM_RANSAC
    elif method == 'ransac':
        method_ = cv2.FM_RANSAC
    elif method == 'lmeds':
        method_ = cv2.FM_LMEDS
    elif method == 'normal':
        method_ = cv2.FM_7POINT
    elif method == '8point':
        method_ = cv2.FM_8POINT
    else:
        raise ValueError("Unknown estimation method. Choices are: 'lme', 'ransac', 'lmeds', '8point', or 'normal'.")
    if len(kp1) == 0 or len(kp2) == 0:
        warnings.warn("F-matix computation failed. One of the keypoint args is empty. kp1:{}, kp2:{}.".format(len(kp1), len(kp2)))
        return None, None
    # OpenCV wants arrays
    # The keyword names of findFundamentalMat changed in OpenCV 3.4.1;
    # try the old signature first and fall back to the new one.
    # NOTE(review): the bare excepts here would also mask unrelated errors
    # raised by OpenCV -- left as-is in this documentation pass.
    try: # OpenCV < 3.4.1
        F, mask = cv2.findFundamentalMat(np.asarray(kp1),
                                        np.asarray(kp2),
                                        method_,
                                        param1=reproj_threshold,
                                        param2=confidence)
    except: # OpenCV >= 3.4.1
        F, mask = cv2.findFundamentalMat(np.asarray(kp1),
                                        np.asarray(kp2),
                                        method_,
                                        ransacReprojThreshold=reproj_threshold,
                                        confidence=confidence)
    if F is None:
        warnings.warn("F computation failed with no result. Returning None.")
        return None, None
    if F.shape != (3,3):
        # The 7-point algorithm can return up to 3 stacked solutions (9x3).
        warnings.warn('F computation fell back to 7-point algorithm, not setting F.')
        return None, None
    # Ensure that the singularity constraint is met
    F = enforce_singularity_constraint(F)
    try:
        mask = mask.astype(bool).ravel() # Enforce dimensionality
    except:
        return # pragma: no cover
    if method == 'mle':
        # Now apply the gold standard algorithm to refine F
        # Work in homogeneous coordinates for the camera model.
        if kp1.shape[1] != 3:
            kp1 = make_homogeneous(kp1)
        if kp2.shape[1] != 3:
            kp2 = make_homogeneous(kp2)
        # Generate an idealized and to be updated camera model
        p1 = camera.camera_from_f(F)
        p = camera.idealized_camera()
        if kp1[mask].shape[0] <=12 or kp2[mask].shape[0] <=12:
            warnings.warn("Unable to apply MLE. Not enough correspondences. Returning with a RANSAC computed F matrix.")
            return F, mask
        # Apply Levenber-Marquardt to perform a non-linear lst. squares fit
        #  to minimize triangulation error (this is a local bundle)
        result = optimize.least_squares(camera.projection_error, p1.ravel(),
                                        args=(p, kp1[mask].T, kp2[mask].T),
                                        method='lm')
        gold_standard_p = result.x.reshape(3, 4) # SciPy Lst. Sq. requires a vector, camera is 3x4
        # NOTE(review): 'optimality' is assigned but never used -- kept as-is.
        optimality = result.optimality
        # F = [t]_x M for P = [M | t] (cross-product form of the epipole).
        gold_standard_f = camera_utils.crossform(gold_standard_p[:,3]).dot(gold_standard_p[:,:3])
        F = gold_standard_f
        # Rebuild the inlier mask against the refined F.
        mask = update_fundamental_mask(F, kp1, kp2,
                                      threshold=mle_reproj_threshold).values
    return F, mask
| [
"pandas.Series",
"numpy.abs",
"numpy.linalg.matrix_rank",
"numpy.sqrt",
"autocnet.camera.camera.idealized_camera",
"autocnet.camera.camera.camera_from_f",
"autocnet.camera.utils.crossform",
"numpy.asarray",
"numpy.column_stack",
"numpy.inner",
"numpy.linalg.svd",
"numpy.diag",
"pandas.DataFr... | [((962, 976), 'numpy.inner', 'np.inner', (['F', 'x'], {}), '(F, x)\n', (970, 976), True, 'import numpy as np\n'), ((1732, 1803), 'numpy.abs', 'np.abs', (['(lines[:, 0] * pts[:, 0] + lines[:, 1] * pts[:, 1] + lines[:, 2])'], {}), '(lines[:, 0] * pts[:, 0] + lines[:, 1] * pts[:, 1] + lines[:, 2])\n', (1738, 1803), True, 'import numpy as np\n'), ((1811, 1855), 'numpy.sqrt', 'np.sqrt', (['(lines[:, 0] ** 2 + lines[:, 1] ** 2)'], {}), '(lines[:, 0] ** 2 + lines[:, 1] ** 2)\n', (1818, 1855), True, 'import numpy as np\n'), ((1188, 1245), 'pandas.DataFrame', 'pd.DataFrame', (['lines'], {'columns': "['a', 'b', 'c']", 'index': 'index'}), "(lines, columns=['a', 'b', 'c'], index=index)\n", (1200, 1245), True, 'import pandas as pd\n'), ((2795, 2814), 'autocnet.utils.utils.make_homogeneous', 'make_homogeneous', (['x'], {}), '(x)\n', (2811, 2814), False, 'from autocnet.utils.utils import make_homogeneous, normalize_vector\n'), ((2853, 2873), 'autocnet.utils.utils.make_homogeneous', 'make_homogeneous', (['x1'], {}), '(x1)\n', (2869, 2873), False, 'from autocnet.utils.utils import make_homogeneous, normalize_vector\n'), ((3199, 3224), 'numpy.column_stack', 'np.column_stack', (['(d1, d2)'], {}), '((d1, d2))\n', (3214, 3224), True, 'import numpy as np\n'), ((3274, 3301), 'pandas.Series', 'pd.Series', (['err'], {'index': 'index'}), '(err, index=index)\n', (3283, 3301), True, 'import pandas as pd\n'), ((4197, 4216), 'autocnet.utils.utils.make_homogeneous', 'make_homogeneous', (['x'], {}), '(x)\n', (4213, 4216), False, 'from autocnet.utils.utils import make_homogeneous, normalize_vector\n'), ((4255, 4275), 'autocnet.utils.utils.make_homogeneous', 'make_homogeneous', (['x1'], {}), '(x1)\n', (4271, 4275), False, 'from autocnet.utils.utils import make_homogeneous, normalize_vector\n'), ((6418, 6442), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['F'], {}), '(F)\n', (6439, 6442), True, 'import numpy as np\n'), ((6468, 6484), 'numpy.linalg.svd', 'np.linalg.svd', 
(['F'], {}), '(F)\n', (6481, 6484), True, 'import numpy as np\n'), ((9212, 9281), 'warnings.warn', 'warnings.warn', (['"""F computation failed with no result. Returning None."""'], {}), "('F computation failed with no result. Returning None.')\n", (9225, 9281), False, 'import warnings\n'), ((9341, 9418), 'warnings.warn', 'warnings.warn', (['"""F computation fell back to 7-point algorithm, not setting F."""'], {}), "('F computation fell back to 7-point algorithm, not setting F.')\n", (9354, 9418), False, 'import warnings\n'), ((9966, 9989), 'autocnet.camera.camera.camera_from_f', 'camera.camera_from_f', (['F'], {}), '(F)\n', (9986, 9989), False, 'from autocnet.camera import camera\n'), ((10002, 10027), 'autocnet.camera.camera.idealized_camera', 'camera.idealized_camera', ([], {}), '()\n', (10025, 10027), False, 'from autocnet.camera import camera\n'), ((1069, 1080), 'numpy.sqrt', 'np.sqrt', (['nu'], {}), '(nu)\n', (1076, 1080), True, 'import numpy as np\n'), ((5714, 5803), 'warnings.warn', 'warnings.warn', (['"""Unknown error method. Options are "reprojection" or "fundamental"."""'], {}), '(\n \'Unknown error method. 
Options are "reprojection" or "fundamental".\')\n', (5727, 5803), False, 'import warnings\n'), ((5823, 5836), 'numpy.abs', 'np.abs', (['error'], {}), '(error)\n', (5829, 5836), True, 'import numpy as np\n'), ((8594, 8609), 'numpy.asarray', 'np.asarray', (['kp1'], {}), '(kp1)\n', (8604, 8609), True, 'import numpy as np\n'), ((8652, 8667), 'numpy.asarray', 'np.asarray', (['kp2'], {}), '(kp2)\n', (8662, 8667), True, 'import numpy as np\n'), ((9797, 9818), 'autocnet.utils.utils.make_homogeneous', 'make_homogeneous', (['kp1'], {}), '(kp1)\n', (9813, 9818), False, 'from autocnet.utils.utils import make_homogeneous, normalize_vector\n'), ((9867, 9888), 'autocnet.utils.utils.make_homogeneous', 'make_homogeneous', (['kp2'], {}), '(kp2)\n', (9883, 9888), False, 'from autocnet.utils.utils import make_homogeneous, normalize_vector\n'), ((10103, 10223), 'warnings.warn', 'warnings.warn', (['"""Unable to apply MLE. Not enough correspondences. Returning with a RANSAC computed F matrix."""'], {}), "(\n 'Unable to apply MLE. Not enough correspondences. Returning with a RANSAC computed F matrix.'\n )\n", (10116, 10223), False, 'import warnings\n'), ((8916, 8931), 'numpy.asarray', 'np.asarray', (['kp1'], {}), '(kp1)\n', (8926, 8931), True, 'import numpy as np\n'), ((8974, 8989), 'numpy.asarray', 'np.asarray', (['kp2'], {}), '(kp2)\n', (8984, 8989), True, 'import numpy as np\n'), ((10757, 10802), 'autocnet.camera.utils.crossform', 'camera_utils.crossform', (['gold_standard_p[:, 3]'], {}), '(gold_standard_p[:, 3])\n', (10779, 10802), True, 'from autocnet.camera import utils as camera_utils\n'), ((6503, 6527), 'numpy.diag', 'np.diag', (['[d[0], d[1], 0]'], {}), '([d[0], d[1], 0])\n', (6510, 6527), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 9 14:42:15 2018
@author: <NAME> et <NAME>
"""
import time
import math
import numpy as np
import matplotlib.pyplot as plt
import random
import csv
#création d'arbres et d'instances :
def CalculArbreComplet(hauteur):
    """Build the parent array of a complete binary tree of the given height.

    Entry i holds the index of node i's parent; node 0 is the root (its
    entry is 0 by convention).
    """
    taille = 2 ** hauteur - 1
    parents = [0]
    for noeud in range(1, taille):
        parents.append((noeud + 1) // 2 - 1)
    return parents
def InstanceAleatoire(n, M):
    """Draw a random instance: four lists of n integers in [0, M).

    The draw order (tA, then tB, then cAB, then cBA) is preserved so a
    seeded RNG reproduces the original sequence; the root transfer costs
    cAB[0] and cBA[0] are forced to 0.
    """
    def _tirage():
        return [random.randrange(0, M) for _ in range(n)]
    tA = _tirage()
    tB = _tirage()
    cAB = _tirage()
    cBA = _tirage()
    cAB[0] = 0
    cBA[0] = 0
    return (tA, tB, cAB, cBA)
#permet de simplifier la génération d'arbre et l'appel des fonctions CalculSigmaOptimum...
def InstanciationComplet(h):
    """Build a complete tree of height h plus a random instance (M = 50).

    Returns (tree, tA, tB, cAB, cBA); a convenience wrapper around
    CalculArbreComplet and InstanceAleatoire.
    """
    arbre = CalculArbreComplet(h)
    return (arbre,) + InstanceAleatoire(2 ** h - 1, 50)
#pour affiner test de performances
def CalculArbreDroit(n):
    """Build a degenerate "straight" tree of n nodes (one child per node
    except the leaf); entry i holds the parent index of node i.
    """
    return [0] + list(range(n - 1))
#fonction de test sur algo 1:
def testCalculSigma1(t, n):
    """Time CalculSigmaOptimum on t random straight trees of size n.

    Running t repetitions and taking the median smooths out run-to-run
    timing noise. Returns the median elapsed time in nanoseconds.
    """
    durees = []
    for _ in range(t):
        arbre = CalculArbreDroit(n)
        tA, tB, cAB, cBA = InstanceAleatoire(n, 50)
        debut = time.perf_counter_ns()
        CalculSigmaOptimum(arbre, tA, tB, cAB, cBA)
        durees.append(time.perf_counter_ns() - debut)
    return np.median(durees)
def testMasseCalculSigma1(n, t):
    """Run testCalculSigma1 for every tree size 1..n.

    Returns (sizes, median times) ready for plotting.
    """
    tailles = list(range(1, n + 1))
    temps = [testCalculSigma1(t, taille) for taille in tailles]
    return (tailles, temps)
#tests de CalculBorneInf
def testCalculBorneInf(h):
    """Time a single CalculBorneinf run on a random complete tree of
    height h; returns the elapsed time in nanoseconds.
    """
    arbre = CalculArbreComplet(h)
    n = len(arbre)
    tA, tB, cAB, cBA = InstanceAleatoire(n, 50)
    dA = [0] * n
    dB = [0] * n
    debut = time.perf_counter_ns()
    CalculBorneinf(arbre, arbre, set(), tA, tB, cAB, cBA, dA, dB, 0)
    return time.perf_counter_ns() - debut
def testMasseCalculBorneInfh(h):
    """Median of 40 timed CalculBorneinf runs for each height 2..h.

    Returns (tree sizes 2**i - 1, median times in nanoseconds).
    """
    tailles = []
    medianes = []
    for hauteur in range(2, h + 1):
        mesures = [testCalculBorneInf(hauteur) for _ in range(40)]
        medianes.append(np.median(mesures))
        tailles.append(2 ** hauteur - 1)
    return (tailles, medianes)
def testCalculSigma2(h, t):
    """Time CalculBorneinf followed by CalculSigmaOptimum2 over t random
    complete trees of height h (size must be a power of 2 minus 1).

    Returns the median elapsed time in nanoseconds.
    """
    durees = []
    for _ in range(t):
        (arbre, tA, tB, cAB, cBA) = InstanciationComplet(h)
        n = len(arbre)
        dA, dB = [0] * n, [0] * n
        Sigma, Marques = [0] * n, [0] * n
        debut = time.perf_counter_ns()
        CalculBorneinf(arbre, arbre, set(), tA, tB, cAB, cBA, dA, dB, 0)
        CalculSigmaOptimum2(arbre, tA, tB, cAB, cBA, dA, dB, Sigma, Marques, 0)
        durees.append(time.perf_counter_ns() - debut)
    return np.median(durees)
def testMasseCalculSigma2(h, t):
    """Run testCalculSigma2 for every height 2..h, printing each tree size
    as a progress indicator; returns (sizes, median times).
    """
    tailles = []
    temps = []
    for hauteur in range(2, h + 1):
        taille = 2 ** hauteur - 1
        print(taille)
        tailles.append(taille)
        temps.append(testCalculSigma2(hauteur, t))
    return (tailles, temps)
def testSigma2(h, t):
    """Time CalculBorneInf2 followed by CalculSigmaOptimum2 over t random
    complete trees of height h, using the coarser time.perf_counter
    (seconds). Returns the median elapsed time.
    """
    durees = []
    for _ in range(t):
        (arbre, tA, tB, cAB, cBA) = InstanciationComplet(h)
        n = len(arbre)
        dA, dB = [0] * n, [0] * n
        Sigma, Marques = [0] * n, [0] * n
        debut = time.perf_counter()
        CalculBorneInf2(arbre, tA, tB, cAB, cBA, dA, dB)
        CalculSigmaOptimum2(arbre, tA, tB, cAB, cBA, dA, dB, Sigma, Marques, 0)
        durees.append(time.perf_counter() - debut)
    return np.median(durees)
def testMasseSigma2(h, t):
    """Run testSigma2 for every height 2..h, printing each tree size as a
    progress indicator; returns (sizes, median times).
    """
    tailles = []
    temps = []
    for hauteur in range(2, h + 1):
        taille = 2 ** hauteur - 1
        print(taille)
        tailles.append(taille)
        temps.append(testSigma2(hauteur, t))
    return (tailles, temps)
#test de Calcul
#fonctions traçage de graphe
def graphe(L1, L2):
    """Plot the measured curve (L1 = x values, L2 = y values) and save it
    to 'CalculSigma2.png'; the x axis spans exactly the measured range.
    """
    abscisses = np.array(L1)
    ordonnees = np.array(L2)
    fig = plt.figure()
    plt.xlim(L1[0], L1[-1])
    plt.plot(abscisses, ordonnees)
    fig.savefig('CalculSigma2.png')
    #plt.show()
#plt.show()
def CalculPente(Lx, Ly):
    """Return the median slope between consecutive measurement points.

    Lx holds the abscissas (here: tested tree sizes) and Ly the ordinates
    (here: execution times).

    Bug fix: the original iterated over range(len(Lx) - 2), which skipped
    the last consecutive pair and returned NaN for two-point inputs; a
    list of n points has n - 1 consecutive segments.
    """
    pentes = []
    for i in range(len(Lx) - 1):
        pentes.append((Ly[i + 1] - Ly[i]) / (Lx[i + 1] - Lx[i]))
    return np.median(pentes)
| [
"numpy.median",
"random.randrange",
"matplotlib.pyplot.plot",
"time.perf_counter",
"numpy.array",
"matplotlib.pyplot.figure",
"time.perf_counter_ns"
] | [((1729, 1743), 'numpy.median', 'np.median', (['res'], {}), '(res)\n', (1738, 1743), True, 'import numpy as np\n'), ((2180, 2202), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (2200, 2202), False, 'import time\n'), ((3198, 3212), 'numpy.median', 'np.median', (['res'], {}), '(res)\n', (3207, 3212), True, 'import numpy as np\n'), ((3930, 3944), 'numpy.median', 'np.median', (['res'], {}), '(res)\n', (3939, 3944), True, 'import numpy as np\n'), ((4279, 4291), 'numpy.array', 'np.array', (['L1'], {}), '(L1)\n', (4287, 4291), True, 'import numpy as np\n'), ((4301, 4313), 'numpy.array', 'np.array', (['L2'], {}), '(L2)\n', (4309, 4313), True, 'import numpy as np\n'), ((4325, 4337), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4335, 4337), True, 'import matplotlib.pyplot as plt\n'), ((4378, 4392), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (4386, 4392), True, 'import matplotlib.pyplot as plt\n'), ((4744, 4758), 'numpy.median', 'np.median', (['res'], {}), '(res)\n', (4753, 4758), True, 'import numpy as np\n'), ((491, 513), 'random.randrange', 'random.randrange', (['(0)', 'M'], {}), '(0, M)\n', (507, 513), False, 'import random\n'), ((544, 566), 'random.randrange', 'random.randrange', (['(0)', 'M'], {}), '(0, M)\n', (560, 566), False, 'import random\n'), ((598, 620), 'random.randrange', 'random.randrange', (['(0)', 'M'], {}), '(0, M)\n', (614, 620), False, 'import random\n'), ((652, 674), 'random.randrange', 'random.randrange', (['(0)', 'M'], {}), '(0, M)\n', (668, 674), False, 'import random\n'), ((1575, 1597), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (1595, 1597), False, 'import time\n'), ((2276, 2298), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (2296, 2298), False, 'import time\n'), ((2539, 2554), 'numpy.median', 'np.median', (['temp'], {}), '(temp)\n', (2548, 2554), True, 'import numpy as np\n'), ((2956, 2978), 'time.perf_counter_ns', 'time.perf_counter_ns', 
([], {}), '()\n', (2976, 2978), False, 'import time\n'), ((3707, 3726), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3724, 3726), False, 'import time\n'), ((1663, 1685), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (1683, 1685), False, 'import time\n'), ((3132, 3154), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (3152, 3154), False, 'import time\n'), ((3867, 3886), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3884, 3886), False, 'import time\n')] |
"""
:copyright: 2017 H2O.ai, Inc.
:license: Apache License Version 2.0 (see LICENSE for details)
"""
import time
import copy
import sys
import os
import numpy as np
import pandas as pd
import logging
print(sys.path)
from h2o4gpu.util.testing_utils import find_file, run_glm
logging.basicConfig(level=logging.DEBUG)
def fun(nGPUs=1, nFolds=1, nLambdas=100, nAlphas=8, classification=False, use_seed=True, validFraction=0.0):
    """Fit a GLM on the hyatt csv data and assert the train/test RMSE is sane.

    Bug fixes: the fitting helper is imported as ``run_glm`` but was called
    as ``runglm`` (NameError at runtime), and the final timing print used
    the literal '/n' instead of a newline.

    :param nGPUs: number of GPUs to use; 0 selects the CPU path
    :param nFolds: number of cross-validation folds
    :param nLambdas: number of lambdas on the regularization path
    :param nAlphas: number of alphas in the elastic-net mix
    :param classification: labels the run; the data here is regression
    :param use_seed: use the fixed seed 1034753 for reproducibility
    :param validFraction: fraction of data held out for validation
    """
    name = str(sys._getframe().f_code.co_name)
    name = str(sys._getframe(1).f_code.co_name)  # overwritten on purpose: label outputs with the caller's name
    t = time.time()
    print("cwd: %s" % (os.getcwd()))
    sys.stdout.flush()
    use_gpu = nGPUs > 0
    display = 1
    write = 1
    # seed = np.random.randint(0, 2 ** 31 - 1)
    seed = 1034753 if use_seed else None
    print("Reading Data")
    if 1 == 0:  # not yet: datatable-based path kept for reference
        t1 = time.time()
        target = None
        import datatable as dt  # omp problem in pycharm
        train = find_file("./testsbig/data/xtrain.txt")
        test = find_file("./testsbig/data/xtest.txt")
        train = os.path.normpath(os.path.join(os.getcwd(), train))
        train_df = dt.fread(train).topandas()
        train_df = train_df[pd.notnull(train_df[target])].reset_index(drop=True)  # drop rows with NA response
        test = os.path.normpath(os.path.join(os.getcwd(), test))
        test_df = dt.fread(test).topandas()
        test_df = test_df[pd.notnull(test_df[target])].reset_index(drop=True)  # drop rows with NA response
        y = train_df[target]
        df_before = copy.deepcopy(train_df)
        classes = 1 if not classification else len(y.unique())
        print("Testing GLM for " + ((str(classes) + "-class classification") if classes >= 2 else "regression"))
    else:
        if 1 == 1:  # avoid for now so get info
            # should all be explicitly np.float32 or all np.float64
            xtrain = np.loadtxt("./data/xtrainhyatt.csv", delimiter=',', dtype=np.float32)
            ytrain = np.loadtxt("./data/ytrainhyatt.csv", delimiter=',', dtype=np.float32)
            xtest = np.loadtxt("./data/xtesthyatt.csv", delimiter=',', dtype=np.float32)
            ytest = np.loadtxt("./data/ytesthyatt.csv", delimiter=',', dtype=np.float32)
            wtrain = np.ones((xtrain.shape[0], 1), dtype=np.float32)
            t1 = time.time()
            # BUG FIX: was `runglm`, but the helper is imported as `run_glm`.
            pred_val, rmse_train, rmse_test = run_glm(nFolds, nAlphas, nLambdas, xtrain, ytrain, xtest, ytest, wtrain,
                                                      write, display, use_gpu, name=name)
        else:
            xfull = np.loadtxt("./data/xtrainhyatt.csv", delimiter=',', dtype=np.float32)
            yfull = np.loadtxt("./data/ytrainhyatt.csv", delimiter=',', dtype=np.float32)
            t1 = time.time()
            # NOTE(review): elastic_net is not imported in this module; this
            # branch is currently unreachable (guarded by `if 1 == 1` above) --
            # verify the intended helper before enabling it.
            rmse_train, rmse_test = elastic_net(xfull, yfull, nGPUs=nGPUs, nlambda=nLambdas, nfolds=nFolds,
                                                nalpha=nAlphas,
                                                validFraction=validFraction, verbose=0, name=name)
        print("Testing GLM")
        # check rmse
        print(rmse_train[0, 0])
        print(rmse_train[0, 1])
        print(rmse_train[0, 2])
        print(rmse_test[0, 2])
        sys.stdout.flush()
        # FIXME: But these below should really be order 1 to 1.5 according to Wamsi!
        assert rmse_train[0, 0] < 20
        assert rmse_train[0, 1] < 20
        assert rmse_train[0, 2] < 31
        assert rmse_test[0, 2] < 31
        # BUG FIX: was '/n' (literal slash-n) instead of a newline.
        print('\n Total execution time:%d' % (time.time() - t1))
        print("TEST PASSED")
        sys.stdout.flush()
    print("Time taken: {}".format(time.time() - t))
    # endfunnel(pipes)
    print("DONE.")
    sys.stdout.flush()
# GPU test variants of fun(nGPUs, nFolds, nLambdas, nAlphas); each holds out 20% for validation.
def test_glm_hyatt_gpu_fold1_quick(): fun(True, 1, 5, 3, classification=False, validFraction=0.2)  # quick smoke test: few lambdas/alphas
def test_glm_hyatt_gpu_fold1(): fun(True, 1, 100, 8, classification=False, validFraction=0.2)
def test_glm_hyatt_gpu_fold5(): fun(True, 5, 100, 3, classification=False, validFraction=0.2)
# CPU variants kept disabled:
# def test_glm_hyatt_cpu_fold1_quick(): fun(False, 1, 5, 3, classification=False, validFraction=0.2)
# def test_glm_hyatt_cpu_fold1(): fun(False, 1, 100, 8, classification=False, validFraction=0.2)
# def test_glm_hyatt_cpu_fold5(): fun(False, 5, 100, 3, classification=False, validFraction=0.2)
# Allow running the GPU tests directly as a script (pytest also picks them up by name).
if __name__ == '__main__':
    test_glm_hyatt_gpu_fold1_quick()
    test_glm_hyatt_gpu_fold1()
    test_glm_hyatt_gpu_fold5()
    # test_glm_hyatt_cpu_fold1_quick()
    # test_glm_hyatt_cpu_fold1()
    # test_glm_hyatt_cpu_fold5()
| [
"logging.basicConfig",
"numpy.ones",
"datatable.fread",
"sys._getframe",
"os.getcwd",
"numpy.loadtxt",
"h2o4gpu.util.testing_utils.find_file",
"copy.deepcopy",
"sys.stdout.flush",
"pandas.notnull",
"time.time"
] | [((280, 320), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (299, 320), False, 'import logging\n'), ((535, 546), 'time.time', 'time.time', ([], {}), '()\n', (544, 546), False, 'import time\n'), ((589, 607), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (605, 607), False, 'import sys\n'), ((3215, 3233), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3231, 3233), False, 'import sys\n'), ((3539, 3557), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3555, 3557), False, 'import sys\n'), ((3660, 3678), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3676, 3678), False, 'import sys\n'), ((870, 881), 'time.time', 'time.time', ([], {}), '()\n', (879, 881), False, 'import time\n'), ((977, 1016), 'h2o4gpu.util.testing_utils.find_file', 'find_file', (['"""./testsbig/data/xtrain.txt"""'], {}), "('./testsbig/data/xtrain.txt')\n", (986, 1016), False, 'from h2o4gpu.util.testing_utils import find_file, run_glm\n'), ((1032, 1070), 'h2o4gpu.util.testing_utils.find_file', 'find_file', (['"""./testsbig/data/xtest.txt"""'], {}), "('./testsbig/data/xtest.txt')\n", (1041, 1070), False, 'from h2o4gpu.util.testing_utils import find_file, run_glm\n'), ((1565, 1588), 'copy.deepcopy', 'copy.deepcopy', (['train_df'], {}), '(train_df)\n', (1578, 1588), False, 'import copy\n'), ((571, 582), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (580, 582), False, 'import os\n'), ((1913, 1982), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/xtrainhyatt.csv"""'], {'delimiter': '""","""', 'dtype': 'np.float32'}), "('./data/xtrainhyatt.csv', delimiter=',', dtype=np.float32)\n", (1923, 1982), True, 'import numpy as np\n'), ((2004, 2073), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/ytrainhyatt.csv"""'], {'delimiter': '""","""', 'dtype': 'np.float32'}), "('./data/ytrainhyatt.csv', delimiter=',', dtype=np.float32)\n", (2014, 2073), True, 'import numpy as np\n'), ((2094, 2162), 'numpy.loadtxt', 'np.loadtxt', 
(['"""./data/xtesthyatt.csv"""'], {'delimiter': '""","""', 'dtype': 'np.float32'}), "('./data/xtesthyatt.csv', delimiter=',', dtype=np.float32)\n", (2104, 2162), True, 'import numpy as np\n'), ((2183, 2251), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/ytesthyatt.csv"""'], {'delimiter': '""","""', 'dtype': 'np.float32'}), "('./data/ytesthyatt.csv', delimiter=',', dtype=np.float32)\n", (2193, 2251), True, 'import numpy as np\n'), ((2273, 2320), 'numpy.ones', 'np.ones', (['(xtrain.shape[0], 1)'], {'dtype': 'np.float32'}), '((xtrain.shape[0], 1), dtype=np.float32)\n', (2280, 2320), True, 'import numpy as np\n'), ((2339, 2350), 'time.time', 'time.time', ([], {}), '()\n', (2348, 2350), False, 'import time\n'), ((2592, 2661), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/xtrainhyatt.csv"""'], {'delimiter': '""","""', 'dtype': 'np.float32'}), "('./data/xtrainhyatt.csv', delimiter=',', dtype=np.float32)\n", (2602, 2661), True, 'import numpy as np\n'), ((2682, 2751), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/ytrainhyatt.csv"""'], {'delimiter': '""","""', 'dtype': 'np.float32'}), "('./data/ytrainhyatt.csv', delimiter=',', dtype=np.float32)\n", (2692, 2751), True, 'import numpy as np\n'), ((2770, 2781), 'time.time', 'time.time', ([], {}), '()\n', (2779, 2781), False, 'import time\n'), ((447, 462), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (460, 462), False, 'import sys\n'), ((494, 510), 'sys._getframe', 'sys._getframe', (['(1)'], {}), '(1)\n', (507, 510), False, 'import sys\n'), ((1118, 1129), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1127, 1129), False, 'import os\n'), ((1158, 1173), 'datatable.fread', 'dt.fread', (['train'], {}), '(train)\n', (1166, 1173), True, 'import datatable as dt\n'), ((1342, 1353), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1351, 1353), False, 'import os\n'), ((1380, 1394), 'datatable.fread', 'dt.fread', (['test'], {}), '(test)\n', (1388, 1394), True, 'import datatable as dt\n'), ((3490, 3501), 'time.time', 'time.time', ([], {}), 
'()\n', (3499, 3501), False, 'import time\n'), ((3593, 3604), 'time.time', 'time.time', ([], {}), '()\n', (3602, 3604), False, 'import time\n'), ((1213, 1241), 'pandas.notnull', 'pd.notnull', (['train_df[target]'], {}), '(train_df[target])\n', (1223, 1241), True, 'import pandas as pd\n'), ((1432, 1459), 'pandas.notnull', 'pd.notnull', (['test_df[target]'], {}), '(test_df[target])\n', (1442, 1459), True, 'import pandas as pd\n')] |
import time
print(time.strftime('%Y-%m-%d %H:%M'))  # stamp the run so logs can be matched to saved models
import sys
import numpy as np
# CLI: X is a scipy-sparse .npz feature matrix, y a numpy .npz archive, seed optional.
if len(sys.argv) < 3:
    print('Usage: python3 regression.py X y [seed]')
    print('Example: python3 regression.py X_9_18.npz y_9_18.npz 10')
    exit()
from scipy.sparse import load_npz
X = load_npz(sys.argv[1])  # sparse feature matrix
print(X.shape)
from numpy import load
y = load(sys.argv[2])  # npz archive; targets stored under key 'y'
y = y['y']
print(y.shape)
# Optional integer seed for reproducible model initialization.
if len(sys.argv) == 4:
    seed = int(sys.argv[3])
else:
    seed = None
# data input and preprocessing done
from sklearn.ensemble import BaggingRegressor
from sklearn.neural_network import MLPRegressor
# Deep MLP regressor: 7 hidden layers, Adam with adaptive learning rate,
# early stopping disabled (validation_fraction is then unused by sklearn).
clf = MLPRegressor(hidden_layer_sizes=(768,128,64,32,16,16,8), activation='relu',
        alpha=0.1, batch_size=256, early_stopping=False,
        learning_rate_init=0.00075, solver='adam', learning_rate='adaptive', nesterovs_momentum=True,
        max_iter=200, tol=1e-8, verbose=True, validation_fraction=0.1, random_state=seed)
print(clf)
#clf = BaggingRegressor(clf, n_estimators=5,
#        max_samples=0.8, verbose=5, n_jobs=2)
from sklearn.model_selection import train_test_split
# Fixed split seed (42) so train/test membership is reproducible across runs.
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, random_state=42)
clf.fit(X_train, y_train)
# there is sudden spike in memory consumption here
import gc
gc.collect()
y_test_pred = clf.predict(X_test)
y_train_pred = clf.predict(X_train)
#gc.collect()
from sklearn.metrics import mean_squared_error
from scipy.stats import pearsonr
def p(y_pred, y_true):
    """Pearson correlation coefficient between predictions and targets."""
    coefficient, _pvalue = pearsonr(y_pred, y_true)
    return coefficient
print('Train Pearson Score: {}'.format(p(y_train, y_train_pred)))
print('Train Error: {}'.format(mean_squared_error(y_train, y_train_pred) / 2.0))  # halved MSE
print('Train R2 Score: {}'.format(clf.score(X_train, y_train)))
print('Train Data Mean: {} Standard Deviation: {}'.format(np.mean(y_train), np.std(y_train)))
print('Train Predicted Mean: {} Standard Deviation: {}'.format(np.mean(y_train_pred), np.std(y_train_pred)))
print('Test Pearson Score: {}'.format(p(y_test, y_test_pred)))
print('Test Error: {}'.format(mean_squared_error(y_test, y_test_pred) / 2.0))  # halved MSE
print('Test R2 Score:: {}'.format(clf.score(X_test, y_test)))
print('Test Data Mean: {} Standard Deviation: {}'.format(np.mean(y_test), np.std(y_test)))
print('Test Predicted Mean: {} Standard Deviation: {}'.format(np.mean(y_test_pred), np.std(y_test_pred)))
# Interactive save: any answer containing 'y' persists the fitted model.
will_save = input('Do you want to save this classifier? [y/N]')
if 'y' in will_save:
    fname = input('Name of classifier')
    if len(fname) == 0:
        fname = 'classifier-' + time.strftime('%Y-%m-%d %H:%M')  # default: timestamped name
    # NOTE(review): sklearn.externals.joblib was removed in modern scikit-learn -- verify the import still resolves.
    from sklearn.externals import joblib
    joblib.dump(clf, fname + '.pkl', compress=9)
'''
from sklearn.metrics import make_scorer
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_validate
mse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
pcc_scorer = make_scorer(p)
scoring = {'mse_scorer':mse_scorer, 'pcc_scorer':pcc_scorer}
#score = cross_val_score(clf, X, y, cv=5, verbose=1, scoring=pcc_scorer)
scores = cross_validate(clf, X, y, cv=10, verbose=5, scoring=scoring)
print(scores)
#print(score)
'''
# grid search
'''
from sklearn.model_selection import GridSearchCV
p_grid = {'learning_rate_init': [0.001],
'alpha': [0.1, 0.01],
'batch_size': [256, 512],
'hidden_layer_sizes': [(8,), (16,), (32,), (64,)]}
gscv = GridSearchCV(clf, param_grid=p_grid, scoring={'Pearson':pcc_scorer,'MSE':mse_scorer},
verbose=5, cv=3, refit='Pearson', n_jobs=1)
gscv.fit(X_train,y_train)
print(gscv.best_params_)
print(gscv.best_score_)
print(gscv.score(X_test, y_test))
import pandas as pd
with open('grid_search_one_layer_ws_twelve.csv', 'a') as f:
pd.DataFrame(gscv.cv_results_).to_csv(f, header=True)
print('Grid Search CV Done')
'''
| [
"scipy.stats.pearsonr",
"numpy.mean",
"sklearn.neural_network.MLPRegressor",
"sklearn.model_selection.train_test_split",
"scipy.sparse.load_npz",
"numpy.std",
"time.strftime",
"sklearn.metrics.mean_squared_error",
"gc.collect",
"sklearn.externals.joblib.dump",
"numpy.load"
] | [((276, 297), 'scipy.sparse.load_npz', 'load_npz', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (284, 297), False, 'from scipy.sparse import load_npz\n'), ((340, 357), 'numpy.load', 'load', (['sys.argv[2]'], {}), '(sys.argv[2])\n', (344, 357), False, 'from numpy import load\n'), ((594, 918), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': '(768, 128, 64, 32, 16, 16, 8)', 'activation': '"""relu"""', 'alpha': '(0.1)', 'batch_size': '(256)', 'early_stopping': '(False)', 'learning_rate_init': '(0.00075)', 'solver': '"""adam"""', 'learning_rate': '"""adaptive"""', 'nesterovs_momentum': '(True)', 'max_iter': '(200)', 'tol': '(1e-08)', 'verbose': '(True)', 'validation_fraction': '(0.1)', 'random_state': 'seed'}), "(hidden_layer_sizes=(768, 128, 64, 32, 16, 16, 8), activation=\n 'relu', alpha=0.1, batch_size=256, early_stopping=False,\n learning_rate_init=0.00075, solver='adam', learning_rate='adaptive',\n nesterovs_momentum=True, max_iter=200, tol=1e-08, verbose=True,\n validation_fraction=0.1, random_state=seed)\n", (606, 918), False, 'from sklearn.neural_network import MLPRegressor\n'), ((1167, 1222), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'train_size': '(0.8)', 'random_state': '(42)'}), '(X, y, train_size=0.8, random_state=42)\n', (1183, 1222), False, 'from sklearn.model_selection import train_test_split\n'), ((1311, 1323), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1321, 1323), False, 'import gc\n'), ((18, 49), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M"""'], {}), "('%Y-%m-%d %H:%M')\n", (31, 49), False, 'import time\n'), ((2628, 2672), 'sklearn.externals.joblib.dump', 'joblib.dump', (['clf', "(fname + '.pkl')"], {'compress': '(9)'}), "(clf, fname + '.pkl', compress=9)\n", (2639, 2672), False, 'from sklearn.externals import joblib\n'), ((1525, 1549), 'scipy.stats.pearsonr', 'pearsonr', (['y_pred', 'y_true'], {}), '(y_pred, y_true)\n', (1533, 1549), False, 'from scipy.stats 
import pearsonr\n'), ((1822, 1838), 'numpy.mean', 'np.mean', (['y_train'], {}), '(y_train)\n', (1829, 1838), True, 'import numpy as np\n'), ((1840, 1855), 'numpy.std', 'np.std', (['y_train'], {}), '(y_train)\n', (1846, 1855), True, 'import numpy as np\n'), ((1921, 1942), 'numpy.mean', 'np.mean', (['y_train_pred'], {}), '(y_train_pred)\n', (1928, 1942), True, 'import numpy as np\n'), ((1944, 1964), 'numpy.std', 'np.std', (['y_train_pred'], {}), '(y_train_pred)\n', (1950, 1964), True, 'import numpy as np\n'), ((2228, 2243), 'numpy.mean', 'np.mean', (['y_test'], {}), '(y_test)\n', (2235, 2243), True, 'import numpy as np\n'), ((2245, 2259), 'numpy.std', 'np.std', (['y_test'], {}), '(y_test)\n', (2251, 2259), True, 'import numpy as np\n'), ((2324, 2344), 'numpy.mean', 'np.mean', (['y_test_pred'], {}), '(y_test_pred)\n', (2331, 2344), True, 'import numpy as np\n'), ((2346, 2365), 'numpy.std', 'np.std', (['y_test_pred'], {}), '(y_test_pred)\n', (2352, 2365), True, 'import numpy as np\n'), ((1650, 1691), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_train', 'y_train_pred'], {}), '(y_train, y_train_pred)\n', (1668, 1691), False, 'from sklearn.metrics import mean_squared_error\n'), ((2061, 2100), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_test_pred'], {}), '(y_test, y_test_pred)\n', (2079, 2100), False, 'from sklearn.metrics import mean_squared_error\n'), ((2551, 2582), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M"""'], {}), "('%Y-%m-%d %H:%M')\n", (2564, 2582), False, 'import time\n')] |
#!/usr/bin/env python
"""
Copyright (c) UChicago Argonne, LLC. All rights reserved.
See LICENSE file.
#C In some methods LFit or L refer to the Lattice Constant not RLU
"""
# ---------------------------------------------------------------------------------------------------------------------#
from __future__ import unicode_literals
from PyQt5.QtWidgets import *
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from lmfit.models import LorentzianModel, GaussianModel, LinearModel, VoigtModel
from pylab import *
import numpy as np
from xPlotUtil.Source.AlgebraicExpressions import AlgebraicExpress
# ---------------------------------------------------------------------------------------------------------------------#
class GaussianFitting:
"""Contains Gaussian fit and lattice fit.
"""
    def __init__ (self, parent=None):
        """Set up empty fit-result containers and wire up the parent widgets.

        :param parent: the spec-file reader; its docked options and main
            window are reached through it.
        """
        # Fitted-parameter tables, filled by the two-peak / one-peak fits.
        self.TwoPkGausFitData = []
        self.OnePkFitData = []
        # Percent-change series for the L position (per the module note, L
        # here refers to the lattice constant, not RLU).
        self.LPosPrcChangeData = []
        self.LPos1PrcChangeData = []
        self.LPos2PrcChangeData = []
        self.readSpec = parent
        self.dockedOpt = self.readSpec.dockedOpt
        self.myMainWindow = self.dockedOpt.myMainWindow
        # AlgebraicExpress is handed this instance, so it may read the
        # attributes initialized above.
        self.algebraExp = AlgebraicExpress(parent=self)
        self.lorentFit = self.algebraExp.lorentFit
        self.continueGraphingEachFit = True  # Boolean to stop Each fit graphing
# --------------------------------Gaussian Fit---------------------------------------------------------------------#
def OnePeakGaussianFit(self):
"""Calls on the gaussian fit function for one peak and saves fitted data in array.
"""
error = self.onePeakGaussianFit()
if error is False:
self.dockedOpt.onePeakStat = True
self.dockedOpt.fitStat = True
self.dockedOpt.GraphingFitOptionsTree("G")
def onePeakGaussianFit(self):
"""Gaussian Fit for one Peak.
:param xx: x-value
:param yy: y-values
:return: fitted data and error
"""
try:
nRow, nCol = self.dockedOpt.fileInfo()
self.binFitData = zeros((nRow, 0))
self.OnePkFitData = zeros((nCol, 6)) # Creates the empty 2D List
QMessageBox.warning(self.myMainWindow, "Hi", "Hola")
for j in range(nCol):
yy = self.dockedOpt.TT[:, j]
xx = arange(0, len(yy))
x1 = xx[0]
x2 = xx[-1]
y1 = yy[0]
y2 = yy[-1]
m = (y2 - y1) / (x2 - x1)
b = y2 - m * x2
mod = GaussianModel()
pars = mod.guess(yy, x=xx)
mod = mod + LinearModel()
pars.add('intercept', value=b, vary=True)
pars.add('slope', value=m, vary=True)
out = mod.fit(yy, pars, x=xx)
self.OnePkFitData[j, :] = (out.best_values['amplitude'], 0, out.best_values['center'], 0,
out.best_values['sigma'], 0)
# Saves fitted data of each fit
fitData = out.best_fit
binFit = np.reshape(fitData, (len(fitData), 1))
self.binFitData = np.concatenate((self.binFitData, binFit), axis=1)
if self.continueGraphingEachFit == True:
self.graphEachFitRawData(xx, yy, out.best_fit, 'G')
return False
except:
QMessageBox.warning(self.myMainWindow, "Error", "Please make sure the guesses are realistic when fitting.")
return True
def TwoPeakGaussianFit(self):
try:
error = self.twoPeakGaussianFit()
if error == False:
self.dockedOpt.twoPeakStat = True
self.dockedOpt.fitStat = True
self.dockedOpt.GraphingFitOptionsTree("G")
except Exception as ex:
QMessageBox.warning(self.myMainWindow, "Error", "Please make sure the guesses are realistic when fitting."
"\n\nException: " + str(ex))
def twoPeakGaussianFit(self):
try:
nRow, nCol = self.dockedOpt.fileInfo()
self.binFitData = zeros((nRow, 0))
self.TwoPkGausFitData = zeros((nCol, 12)) # Creates the empty 2D List
for j in range(nCol):
yy1 = []
yy2 = []
yy = self.dockedOpt.TT[:, j]
i = 0
for y in yy:
if i < len(yy) / 2:
yy1.append(y)
else:
yy2.append(y)
i += 1
xx = arange(0, len(yy))
xx1 = arange(0, len(yy) / 2)
xx2 = arange(len(yy) / 2, len(yy))
x1 = xx[0]
x2 = xx[-1]
y1 = yy[0]
y2 = yy[-1]
m = (y2 - y1) / (x2 - x1)
b = y2 - m * x2
mod1 = GaussianModel(prefix='p1_')
mod2 = GaussianModel(prefix='p2_')
pars1 = mod1.guess(yy1, x=xx1)
pars2 = mod2.guess(yy2, x=xx2)
mod = mod1 + mod2 + LinearModel()
pars = pars1 + pars2
pars.add('intercept', value=b, vary=True)
pars.add('slope', value=m, vary=True)
out = mod.fit(yy, pars, x=xx, slope=m)
QMessageBox.warning(self.myMainWindow, "Hi", "About to start fitting")
self.TwoPkGausFitData[j, :] = (out.best_values['p1_amplitude'], 0, out.best_values['p1_center'],
0, out.best_values['p1_sigma'], 0,
out.best_values['p2_amplitude'], 0, out.best_values['p2_center'],
0, out.best_values['p2_sigma'], 0)
# Saves fitted data of each fit
fitData = out.best_fit
binFit = np.reshape(fitData, (len(fitData), 1))
self.binFitData = np.concatenate((self.binFitData, binFit), axis=1)
if self.continueGraphingEachFit == True:
self.graphEachFitRawData(xx, yy, out.best_fit, 'G')
return False
except Exception as ex:
QMessageBox.warning(self.myMainWindow, "Error", "Please make sure the guesses are realistic when fitting."
"\n\nException: " + str(ex))
return True
    def graphEachFitRawData(self, xx, yy, fitData, whichFit):
        """This method graphs the raw data and the fitted data for each column.

        Opens a modal dialog with the raw column ('b+:') and the fit ('ro:'),
        plus Skip/Next buttons; execution blocks until the dialog closes.

        :param xx: bins (x-values)
        :param yy: raw data column (y-values)
        :param fitData: fitted y-values of the model
        :param whichFit: 'G' (Gaussian), 'L' (Lorentzian) or 'V' (Voigt) — selects the title
        """
        try:
            # Modal dialog owned by the main window.
            self.mainGraph = QDialog(self.myMainWindow)
            self.mainGraph.resize(600, 600)
            dpi = 100
            fig = Figure((3.0, 3.0), dpi=dpi)
            canvas = FigureCanvas(fig)
            canvas.setParent(self.mainGraph)
            axes = fig.add_subplot(111)
            axes.plot(xx, yy, 'b+:', label='data')
            # Title depends on which model produced the fit.
            if whichFit == 'G':
                axes.plot(xx, fitData, 'ro:', label='fit')
                axes.set_title('Gaussian Fit')
            elif whichFit == 'L':
                axes.plot(xx, fitData, 'ro:', label='fit')
                axes.set_title("Lorentzian Fit")
            elif whichFit == 'V':
                axes.plot(xx, fitData, 'ro:', label='fit')
                axes.set_title("Voigt Fit")
            axes.legend()
            axes.set_xlabel('Bins')
            axes.set_ylabel('Intensity')
            canvas.draw()
            # Skip/Next buttons sit above the navigation toolbar and canvas.
            vbox = QVBoxLayout()
            hbox = QHBoxLayout()
            self.skipEachFitGraphButton()
            self.nextFitGraphButton()
            hbox.addWidget(self.skipEachFitGraphBtn)
            hbox.addStretch(1)
            hbox.addWidget(self.nextFitGraphBtn)
            graphNavigationBar = NavigationToolbar(canvas, self.mainGraph)
            vbox.addLayout(hbox)
            vbox.addWidget(graphNavigationBar)
            vbox.addWidget(canvas)
            self.mainGraph.setLayout(vbox)
            # Blocks here until Skip or Next closes the dialog.
            self.mainGraph.exec_()
        except Exception as e:
            QMessageBox.warning(self.myMainWindow, "Error", "Please make sure the guesses are realistic when fitting. \n\n" + str(e))
def skipEachFitGraphButton(self):
"""Button that allows the user to skip each fit graph.
"""
self.skipEachFitGraphBtn = QPushButton('Skip')
self.skipEachFitGraphBtn.setStatusTip("Skip the graphing of each fit")
self.skipEachFitGraphBtn.clicked.connect(self.skipEachFit)
def nextFitGraphButton(self):
"""Button that shows the next fit graph.
"""
self.nextFitGraphBtn = QPushButton('Next')
self.nextFitGraphBtn.clicked.connect(self.nextFitGraph)
self.nextFitGraphBtn.setStatusTip("Graphs the next fit and the original data")
    def nextFitGraph(self):
        """Closes the current fit graph to show the next.

        The fit loop opens one modal dialog per column; closing it lets the
        loop continue to the next column.
        """
        self.mainGraph.close()
    def skipEachFit(self):
        """Closes the current fit graph and sets continueGraphingEachFit to false
        so that other graphs are not showed.
        """
        # The fit loops check this flag before opening the next dialog.
        self.continueGraphingEachFit = False
        self.mainGraph.close()
    def GraphUtilGaussianFitGraphs(self, name, x, y, error, xLabel, yLabel, whichGraph):
        """Generic plotting method that plots depending on which graph is being plotted.

        Builds a matplotlib canvas inside a Qt widget and hands it to the main
        window as a new tab via savingCanvasTabs.

        :param name: name of tab (also used as the plot title)
        :param x: x-values
        :param y: y-values
        :param error: error values for gaussian fit graphs (None for lattice graphs)
        :param xLabel: x-axis label
        :param yLabel: y-axis label
        :param whichGraph: 'G' for gaussian fit graphs, 'L' for lattice fit graphs
        """
        mainGraph = QWidget()
        fig = Figure((5.0, 4.0), dpi=100)
        canvas = FigureCanvas(fig)
        canvas.setParent(mainGraph)
        axes = fig.add_subplot(111)
        axes.plot(x, y)
        # 'G': error bars on top of the line; 'L': green markers with 4-decimal y ticks.
        if whichGraph == 'G':
            axes.errorbar(x, y, yerr=error, fmt='o')
        elif whichGraph == 'L':
            axes.plot(x, y, 'go')
            axes.yaxis.set_major_formatter(FormatStrFormatter('%.4f'))
        axes.set_title(name)
        axes.set_xlabel(xLabel)
        axes.set_ylabel(yLabel)
        canvas.draw()
        tab = QWidget()
        tab.setStatusTip(name)
        vbox = QVBoxLayout()
        graphNavigationBar = NavigationToolbar(canvas, mainGraph)
        vbox.addWidget(graphNavigationBar)
        vbox.addWidget(canvas)
        tab.setLayout(vbox)
        self.myMainWindow.savingCanvasTabs(tab, name, canvas, fig)
def graphOnePeakAmplitude(self):
"""This method graphs the Amplitude for one peak.
"""
x = self.getVoltage()
y = self.OnePkFitData[:, 0]
error = self.OnePkFitData[:, 1]
xLabel = 'Voltage'
yLabel = 'Intensity'
name = 'Amplitude (Scan#: ' + self.readSpec.scan + ')'
self.GraphUtilGaussianFitGraphs(name, x, y, error, xLabel, yLabel, 'G')
def graphOnePeakPosition(self):
"""This method graphs the peak position for one peak.
"""
x = self.getVoltage()
y = self.OnePkFitData[:, 2]
error = self.OnePkFitData[:, 3]
xLabel = 'Voltage'
yLabel = 'Position'
name = 'Position (Scan#: ' + self.readSpec.scan + ')'
self.GraphUtilGaussianFitGraphs(name, x, y, error, xLabel, yLabel, 'G')
def graphOnePeakWidth(self):
"""This method graphs the Peak width for one peak.
"""
x = self.getVoltage()
y = self.OnePkFitData[:, 4]
error = self.OnePkFitData[:, 5]
xLabel = 'Voltage'
yLabel = 'Width'
name = 'Width (Scan#: ' + self.readSpec.scan + ')'
self.GraphUtilGaussianFitGraphs(name, x, y, error, xLabel, yLabel, 'G')
def graphOnePeakAmplitudeXWidth(self):
"""This method graphs the amplitude x width for one peak.
"""
x = self.getVoltage()
yA = self.OnePkFitData[:, 0]
yW = self.OnePkFitData[:, 4]
a_err = self.OnePkFitData[:, 1]
w_err = self.OnePkFitData[:, 5]
y = yA * yW
error = ((y * a_err) + (y * w_err)) / y
xLabel = 'Voltage'
yLabel = 'A x W'
name = 'Amplitude X Width (Scan#: ' + self.readSpec.scan + ')'
self.GraphUtilGaussianFitGraphs(name, x, y, error, xLabel, yLabel, 'G')
def graphTwoPeakAmplitude1(self):
"""This method graphs the peak one amplitude for two peak.
"""
x = self.getVoltage()
y = self.TwoPkGausFitData[:, 0]
error = self.TwoPkGausFitData[:, 1]
xLabel = 'Voltage'
yLabel = 'Intensity'
name = 'Peak #1 Amplitude (Scan#: ' + self.readSpec.scan + ')'
self.GraphUtilGaussianFitGraphs(name, x, y, error, xLabel, yLabel, 'G')
def graphTwoPeakPosition1(self):
"""This method graphs the peak one position for two peak.
"""
x = self.getVoltage()
y = self.TwoPkGausFitData[:, 2]
error = self.TwoPkGausFitData[:, 3]
xLabel = 'Voltage'
yLabel = 'Position'
name = 'Peak #1 Position (Scan#: ' + self.readSpec.scan + ')'
self.GraphUtilGaussianFitGraphs(name, x, y, error, xLabel, yLabel, 'G')
def graphTwoPeakWidth1(self):
"""This method graphs the peak one width for two peak.
"""
x = self.getVoltage()
y = self.TwoPkGausFitData[:, 4]
error = self.TwoPkGausFitData[:, 5]
xLabel = 'Voltage'
yLabel = 'Width'
name = 'Peak #1 Width (Scan#: ' + self.readSpec.scan + ')'
self.GraphUtilGaussianFitGraphs(name, x, y, error, xLabel, yLabel, 'G')
def graphTwoPeakAmplitudeXWidth1(self):
"""This method graphs the peak one amplitude x width for two peak.
"""
x = self.getVoltage()
yA = self.TwoPkGausFitData[:, 0]
yW = self.TwoPkGausFitData[:, 4]
a_err = self.TwoPkGausFitData[:, 1]
w_err = self.TwoPkGausFitData[:, 5]
y = yA * yW
error = ((y * a_err) + (y * w_err))/y
xLabel = 'Voltage'
yLabel = 'A x W'
name = 'Peak #1 Amplitude X Width (Scan#: ' + self.readSpec.scan + ')'
self.GraphUtilGaussianFitGraphs(name, x, y, error, xLabel, yLabel, 'G')
def graphTwoPeakAmplitude2(self):
"""This method graphs the peak two Amplitude for two peak.
"""
x = self.getVoltage()
y = self.TwoPkGausFitData[:, 6]
error = self.TwoPkGausFitData[:, 7]
xLabel = 'Voltage'
yLabel = 'Intensity'
name = 'Peak #2 Amplitude (Scan#: ' + self.readSpec.scan + ')'
self.GraphUtilGaussianFitGraphs(
name, x, y, error, xLabel, yLabel, 'G')
def graphTwoPeakPosition2(self):
"""This method graphs the peak two position for two peak.
"""
x = self.getVoltage()
y = self.TwoPkGausFitData[:, 8]
error = self.TwoPkGausFitData[:, 9]
xLabel = 'Voltage'
yLabel = 'Position'
name = 'Peak #2 Position (Scan#: ' + self.readSpec.scan + ')'
self.GraphUtilGaussianFitGraphs(name, x, y, error, xLabel, yLabel, 'G')
def graphTwoPeakWidth2(self):
"""This method graphs the peak two width for two peak.
"""
x = self.getVoltage()
y = self.TwoPkGausFitData[:, 10]
error = self.TwoPkGausFitData[:, 11]
xLabel = 'Voltage'
yLabel = 'Width'
name = 'Peak #2 Width (Scan#: ' + self.readSpec.scan + ')'
self.GraphUtilGaussianFitGraphs(name, x, y, error, xLabel, yLabel, 'G')
def graphTwoPeakAmplitudeXWidth2(self):
"""This method graphs the peak two amplitude x width for the two peak.
"""
x = self.getVoltage()
yA = self.TwoPkGausFitData[:, 6]
yW = self.TwoPkGausFitData[:, 10]
a_err = self.TwoPkGausFitData[:, 7]
w_err = self.TwoPkGausFitData[:, 11]
y = yA * yW
error = ((y * a_err) + (y * w_err)) / y
xLabel = 'Voltage'
yLabel = 'A x W'
name = 'Peak #2 Amplitude X Width (Scan#: ' + self.readSpec.scan + ')'
self.GraphUtilGaussianFitGraphs(name, x, y, error, xLabel, yLabel, 'G')
def getVoltage(self):
"""This method gets the voltage of the bins.
:return: the voltage
"""
try:
x = [] # X array initialized
PVInfo = {}
# Gets the amplitude
inF = open(self.dockedOpt.fileName, 'r')
lines = inF.readlines()
header = ''
for (iL, line) in enumerate(lines):
if line.startswith('#'):
header = line
break
inF.close()
headerParts = header.split('(')
titles = headerParts[1].split(',')
values = headerParts[2].split(',')
for i in range(len(titles)):
if i == (len(titles) - 1):
lastT = titles[i].split(')')
lastV = values[i].split(')')
PVInfo.update({str(lastT[0].strip()): float(lastV[0])})
else:
PVInfo.update({str(titles[i].strip()): float(values[i])})
bins = PVInfo['N_bins']
amp = PVInfo['amplitude']
print ("Amplitude: ", amp)
print ("Bins: ", bins)
# Uses the data to find the x axis
ampStart = 0
rate = (amp/2)/(bins/4)
for j in range(int(bins/4)):
ampStart = ampStart + rate
x.append(ampStart)
for j in range(int(bins/2)):
ampStart = ampStart - rate
x.append(ampStart)
for j in range(int(bins / 4)):
ampStart = ampStart + rate
x.append(ampStart)
return x
except Exception as e:
QMessageBox.warning(self.myMainWindow, "Error", "Unable to detect voltage. Please make sure the PVvalue "
"contains the voltage in the comments.\n\n"
"Exception: " + str(e))
# -----------------------------------------Lattice Fit-------------------------------------------------------------#
def PositionLFit(self, pos, rows):
"""This method calculates the lattice based on the passed paramaters.
:param pos: position of the peak
:param rows: number of total points
"""
l = (1/(((pos/rows)*(self.readSpec.lMax-self.readSpec.lMin)+self.readSpec.lMin)/2))*self.readSpec.lElement
return l
def doLFit(self):
"""This function stores the lattice in arrays depending on the peak.
"""
try:
nRow, nCol = self.dockedOpt.fileInfo()
if self.dockedOpt.onePeakStat == True :
self.LPosData = [] # L Constant for One Peak
for i in range(nCol):
self.LPosData.append(self.PositionLFit(self.OnePkFitData[i, 2], nRow))
elif self.dockedOpt.twoPeakStat == True:
self.LPos1Data = [] # L Constant for Two Peak [#1]
self.LPos2Data = [] # L Constant for Two Peak [#2]
# Position 1
for i in range(nCol):
self.LPos1Data.append(self.PositionLFit(self.TwoPkGausFitData[i, 4], nCol))
# Position 2
for i in range(nCol):
self.LPos2Data.append(self.PositionLFit(self.TwoPkGausFitData[i, 6], nCol))
except:
QMessageBox.warning(self.myMainWindow, "Error", "Please make sure the gaussian fit was done correctly.")
def graphOnePeakLFitPos(self):
"""This method graphs the Lattice fit position for one peak.
"""
x = self.getVoltage()
y = self.LPosData
xLabel = 'Voltage'
yLabel = 'L Constant (\u00c5)'
name = 'Lattice - Position (Scan#: ' + self.readSpec.scan + ')'
self.GraphUtilGaussianFitGraphs(name, x, y, None, xLabel, yLabel, 'L')
def graphTwoPeakLFitPos1(self):
"""This method graphs the peak one Lattice fit position for two peak.
"""
x = self.getVoltage()
y = self.LPos1Data
xLabel = 'Voltage'
yLabel = 'L Constant'
name = 'Lattice - Position #1 (Scan#: ' + self.readSpec.scan + ')'
self.GraphUtilGaussianFitGraphs(name, x, y, None, xLabel, yLabel, 'L')
def graphTwoPeakLFitPos2(self):
"""This method graphs the peak two Lattice fit position for two peak.
"""
x = self.getVoltage()
y = self.LPos2Data
xLabel = 'Voltage'
yLabel = 'L Constant'
name = 'Lattice - Position #2 (Scan#: ' + self.readSpec.scan + ')'
self.GraphUtilGaussianFitGraphs(name, x, y, None, xLabel, yLabel, 'L')
def doLFitPercentChange(self):
"""This function finds the percentage change of the lattice, depending on the peak.
"""
try:
self.LPosPrcChangeData = []
if self.dockedOpt.onePeakStat == True:
for i in range(0, len(self.LPosData)):
pctChangeData = ((self.LPosData[i] - self.LPosData[0]) / self.LPosData[0]) * 100
self.LPosPrcChangeData.append(pctChangeData)
elif self.dockedOpt.twoPeakStat == True:
self.LPos1PrcChangeData = []
self.LPos2PrcChangeData = []
for i in range(0, len(self.LPos1Data)):
pctChangeData = ((self.LPos1Data[i] - self.LPos1Data[0]) / self.LPos1Data[0]) * 100
self.LPos1PrcChangeData.append(pctChangeData)
for i in range(0, len(self.LPos2Data)):
pctChangeData = ((self.LPos2Data[i] - self.LPos2Data[0]) / self.LPos2Data[0]) * 100
self.LPos2PrcChangeData.append(pctChangeData)
except:
QMessageBox.warning(self.myMainWindow, "Error", "Something went wrong while doing the percentage change"
"lattice fit. Make sure the lattice fit was "
"done correctly.")
def percentageChangeLConstantOnePeak(self):
"""This method graphs the lattice %-change for one peak.
"""
x = self.getVoltage()
y = self.LPosPrcChangeData
xLabel = 'Voltage'
yLabel = '%-Change'
name = 'Lattice %-Change (Scan#: ' + self.readSpec.scan + ')'
self.GraphUtilGaussianFitGraphs(name, x, y, None, xLabel, yLabel, 'L')
def percentageChangeLConstantPeakOne(self):
"""This method graphs the peak one lattice %-change for two peak.
"""
x = self.getVoltage()
y = self.LPos1PrcChangeData
xLabel = 'Voltage'
yLabel = '%-Change'
name = 'Lattice %-Change #1 (Scan#: ' + self.readSpec.scan + ')'
self.GraphUtilGaussianFitGraphs(name, x, y, None, xLabel, yLabel, 'L')
def percentageChangeLConstantPeakTwo(self):
"""This method graphs the peak two lattice %-change for two peak.
"""
x = self.getVoltage()
y = self.LPos2PrcChangeData
xLabel = 'Voltage'
yLabel = '%-Change'
name = 'Lattice %-Change #2 (Scan#: ' + self.readSpec.scan + ')'
self.GraphUtilGaussianFitGraphs(name, x, y, None, xLabel, yLabel, 'L')
def EachFitDataReport(self):
try:
if self.dockedOpt.fitStat == True:
selectedFilters = ".txt"
reportFile, reportFileFilter = QFileDialog.getSaveFileName(self.myMainWindow, "Save Report", None, selectedFilters)
if reportFile != "":
reportFile += reportFileFilter
_, nCol = self.dockedOpt.fileInfo()
header = "#Bin "
i = 1
while i <= nCol:
header += str(i)+" "
i += 1
scanNum = self.readSpec.scan
comment = "#C PVvalue #" + scanNum + "\n"
if self.dockedOpt.onePeakStat == True:
np.savetxt(reportFile, self.binFitData, fmt=str('%f'), header=header, comments=comment)
elif self.dockedOpt.twoPeakStat == True:
np.savetxt(reportFile, self.binFitData, fmt=str('%-14.6f'), delimiter=" ", header=header, comments=comment)
except:
QMessageBox.warning(self.myMainWindow, "Error", "Make sure the gaussian fit was done properly, before "
"exporting the report again.")
| [
"xPlotUtil.Source.AlgebraicExpressions.AlgebraicExpress",
"lmfit.models.GaussianModel",
"numpy.concatenate",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"matplotlib.backends.backend_qt5.NavigationToolbar2QT",
"lmfit.models.LinearModel"
] | [((1304, 1333), 'xPlotUtil.Source.AlgebraicExpressions.AlgebraicExpress', 'AlgebraicExpress', ([], {'parent': 'self'}), '(parent=self)\n', (1320, 1333), False, 'from xPlotUtil.Source.AlgebraicExpressions import AlgebraicExpress\n'), ((10292, 10309), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (10304, 10309), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((10859, 10895), 'matplotlib.backends.backend_qt5.NavigationToolbar2QT', 'NavigationToolbar', (['canvas', 'mainGraph'], {}), '(canvas, mainGraph)\n', (10876, 10895), True, 'from matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar\n'), ((7234, 7251), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (7246, 7251), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((8249, 8290), 'matplotlib.backends.backend_qt5.NavigationToolbar2QT', 'NavigationToolbar', (['canvas', 'self.mainGraph'], {}), '(canvas, self.mainGraph)\n', (8266, 8290), True, 'from matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar\n'), ((2700, 2715), 'lmfit.models.GaussianModel', 'GaussianModel', ([], {}), '()\n', (2713, 2715), False, 'from lmfit.models import LorentzianModel, GaussianModel, LinearModel, VoigtModel\n'), ((3333, 3382), 'numpy.concatenate', 'np.concatenate', (['(self.binFitData, binFit)'], {'axis': '(1)'}), '((self.binFitData, binFit), axis=1)\n', (3347, 3382), True, 'import numpy as np\n'), ((5144, 5171), 'lmfit.models.GaussianModel', 'GaussianModel', ([], {'prefix': '"""p1_"""'}), "(prefix='p1_')\n", (5157, 5171), False, 'from lmfit.models import LorentzianModel, GaussianModel, LinearModel, VoigtModel\n'), ((5196, 5223), 'lmfit.models.GaussianModel', 'GaussianModel', ([], {'prefix': '"""p2_"""'}), "(prefix='p2_')\n", (5209, 5223), False, 'from lmfit.models import LorentzianModel, 
GaussianModel, LinearModel, VoigtModel\n'), ((6263, 6312), 'numpy.concatenate', 'np.concatenate', (['(self.binFitData, binFit)'], {'axis': '(1)'}), '((self.binFitData, binFit), axis=1)\n', (6277, 6312), True, 'import numpy as np\n'), ((2787, 2800), 'lmfit.models.LinearModel', 'LinearModel', ([], {}), '()\n', (2798, 2800), False, 'from lmfit.models import LorentzianModel, GaussianModel, LinearModel, VoigtModel\n'), ((5356, 5369), 'lmfit.models.LinearModel', 'LinearModel', ([], {}), '()\n', (5367, 5369), False, 'from lmfit.models import LorentzianModel, GaussianModel, LinearModel, VoigtModel\n')] |
import os
import numpy as np
import torch
from PIL import Image
from tqdm import trange
from plusseg.data.base_dataset import BaseDataset
class PascalContextSegDataset(BaseDataset):
    """Pascal-Context semantic segmentation dataset (59 foreground classes).

    Images are accessed through the `detail` API; per-image masks are
    computed once, cached to ``mask_file`` with ``torch.save`` and reloaded
    on later runs.
    """
    NUM_CLASS = 59

    def __init__(self,
                 img_dir,
                 ann_file,
                 mask_file,
                 split='train',
                 mode=None,
                 transform=None,
                 target_transform=None,
                 base_size=520,
                 crop_size=480):
        super(PascalContextSegDataset, self).__init__(
            split, mode, transform, target_transform, base_size, crop_size)
        from detail import Detail
        self.detail = Detail(ann_file, img_dir, split)
        self.transform = transform
        # Bug fix: was "self.target_transform = self.target_transform"
        # (a self-assignment that ignored the constructor argument).
        self.target_transform = target_transform
        self.ids = self.detail.getImgs()
        # The 59 Pascal-Context category ids that are kept, sorted ascending.
        self.label_mapping = np.sort(np.array([
            0, 2, 259, 260, 415, 324, 9, 258, 144, 18, 19, 22,
            23, 397, 25, 284, 158, 159, 416, 33, 162, 420, 454, 295, 296,
            427, 44, 45, 46, 308, 59, 440, 445, 31, 232, 65, 354, 424,
            68, 326, 72, 458, 34, 207, 80, 355, 85, 347, 220, 349, 360,
            98, 187, 104, 105, 366, 189, 368, 113, 115
        ]))
        # Contiguous class indices 0..58, one per mapped category id.
        self.keys = np.array(range(len(self.label_mapping))).astype('uint8')
        print('Pascal Context Dataset, Mask File:{}'.format(mask_file))
        if os.path.exists(mask_file):
            self.masks = torch.load(mask_file)
        else:
            self.masks = self.preprocess_mask(mask_file)

    def class2index(self, mask):
        """Map raw category ids in ``mask`` to contiguous indices 0..58."""
        values = np.unique(mask)
        for value in values:
            # Every id present must be one of the 59 kept categories.
            assert value in self.label_mapping
        index = np.digitize(mask.ravel(), self.label_mapping, right=True)
        return self.keys[index].reshape(mask.shape)

    def preprocess_mask(self, mask_file):
        """Generate and cache the segmentation masks for the whole split.

        :param mask_file: (str) path the image_id -> mask dict is saved to
        :return: dict mapping image_id to a PIL.Image mask
        """
        masks = {}
        tbar = trange(len(self.ids))
        print('Preprocess the segmentation masks files for the first time running, this will take a while')
        for i in tbar:
            img_id = self.ids[i]
            mask = Image.fromarray(self.class2index(self.detail.getMask(img_id)))
            masks[img_id['image_id']] = mask
            tbar.set_description("Preprocess {}".format(img_id['image_id']))
        torch.save(masks, mask_file)
        return masks

    def __getitem__(self, index):
        img_id = self.ids[index]
        path = img_id['file_name']
        iid = img_id['image_id']
        img = Image.open(os.path.join(self.detail.img_folder, path)).convert('RGB')
        if self.mode == 'test':
            if self.transform is not None:
                img = self.transform(img)
            return img, os.path.basename(path)
        # Pre-computed mask with contiguous 59-class labels.
        mask = self.masks[iid]
        # Synchronized (image, mask) transform depends on the mode.
        if self.mode == 'train':
            img, mask = self.sync_transform(img, mask)
        elif self.mode == 'val':
            img, mask = self.val_sync_transform(img, mask)
        else:
            assert self.mode == 'testval'
            mask = self.mask_transform(mask)
        # General Resize, Normalize and ToTensor
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            mask = self.target_transform(mask)
        return img, mask

    def mask_transform(self, mask):
        """Shift labels down by one (0 becomes -1) and return a LongTensor."""
        target = np.array(mask).astype('int32') - 1
        return torch.from_numpy(target).long()

    def __len__(self):
        return len(self.ids)

    @property
    def pred_offset(self):
        # Predictions are shifted by one relative to the stored labels.
        return 1
"os.path.exists",
"numpy.unique",
"torch.load",
"os.path.join",
"torch.from_numpy",
"numpy.array",
"os.path.basename",
"torch.save",
"detail.Detail"
] | [((855, 887), 'detail.Detail', 'Detail', (['ann_file', 'img_dir', 'split'], {}), '(ann_file, img_dir, split)\n', (861, 887), False, 'from detail import Detail\n'), ((1665, 1690), 'os.path.exists', 'os.path.exists', (['mask_file'], {}), '(mask_file)\n', (1679, 1690), False, 'import os\n'), ((1865, 1880), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (1874, 1880), True, 'import numpy as np\n'), ((2829, 2857), 'torch.save', 'torch.save', (['masks', 'mask_file'], {}), '(masks, mask_file)\n', (2839, 2857), False, 'import torch\n'), ((1082, 1382), 'numpy.array', 'np.array', (['[0, 2, 259, 260, 415, 324, 9, 258, 144, 18, 19, 22, 23, 397, 25, 284, 158, \n 159, 416, 33, 162, 420, 454, 295, 296, 427, 44, 45, 46, 308, 59, 440, \n 445, 31, 232, 65, 354, 424, 68, 326, 72, 458, 34, 207, 80, 355, 85, 347,\n 220, 349, 360, 98, 187, 104, 105, 366, 189, 368, 113, 115]'], {}), '([0, 2, 259, 260, 415, 324, 9, 258, 144, 18, 19, 22, 23, 397, 25, \n 284, 158, 159, 416, 33, 162, 420, 454, 295, 296, 427, 44, 45, 46, 308, \n 59, 440, 445, 31, 232, 65, 354, 424, 68, 326, 72, 458, 34, 207, 80, 355,\n 85, 347, 220, 349, 360, 98, 187, 104, 105, 366, 189, 368, 113, 115])\n', (1090, 1382), True, 'import numpy as np\n'), ((1717, 1738), 'torch.load', 'torch.load', (['mask_file'], {}), '(mask_file)\n', (1727, 1738), False, 'import torch\n'), ((3241, 3263), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (3257, 3263), False, 'import os\n'), ((4020, 4044), 'torch.from_numpy', 'torch.from_numpy', (['target'], {}), '(target)\n', (4036, 4044), False, 'import torch\n'), ((3040, 3082), 'os.path.join', 'os.path.join', (['self.detail.img_folder', 'path'], {}), '(self.detail.img_folder, path)\n', (3052, 3082), False, 'import os\n'), ((3970, 3984), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (3978, 3984), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import sklearn.datasets as skdata
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import sklearn
numeros = skdata.load_digits()
target = numeros['target']
imagenes = numeros['images']
n_imagenes = len(target)
data = imagenes.reshape((n_imagenes, -1))

from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split


def pca_vectores(x):
    """Eigenvectors of the covariance of x (rows = samples), sorted by
    decreasing (real) eigenvalue."""
    cov = np.cov(x.T)
    valores, vectores = np.linalg.eig(cov)
    valores = np.real(valores)
    vectores = np.real(vectores)
    orden = np.argsort(-valores)
    return vectores[:, orden]


def lda_precision_recall(vectores, n_comp=10):
    """Project train/test data on the first n_comp PCA components, fit an LDA
    classifier and return precision, recall, thresholds and the F1 curve."""
    clf = LinearDiscriminantAnalysis()
    clf.fit(np.dot(x_train, vectores)[:, :n_comp], y_train.T)
    probabilidades = clf.predict_proba(np.dot(x_test, vectores)[:, :n_comp])
    precision, recall, treshold = sklearn.metrics.precision_recall_curve(y_test, probabilidades[:, 1])
    f1 = 2 * precision * recall / (precision + recall)
    return precision, recall, treshold, f1


def graficar(treshold, precision, recall, f1, etiqueta):
    """Add one F1-vs-threshold curve (left panel) and one precision-recall
    curve (right panel), marking the threshold with the best F1 in red."""
    indice = np.where(f1[:-1] == np.max(f1[:-1]))
    print(indice)
    plt.subplot(1, 2, 1)
    plt.plot(treshold, f1[:-1], label=etiqueta)
    plt.scatter(treshold[indice], f1[:-1][indice], color='r')
    plt.legend()
    plt.xlabel('Probabilidad')
    plt.ylabel('F1')
    plt.subplot(1, 2, 2)
    plt.plot(recall, precision, label=etiqueta)
    plt.scatter(recall[indice], precision[indice], color='r')
    plt.xlabel('Recall')
    plt.ylabel('Precisión')
    plt.legend()


scaler = StandardScaler()
x_train, x_test, y_train, y_test = train_test_split(data, target, train_size=0.5)
# Binarize: 1 = digit "1", 0 = everything else.
y_train[y_train != 1] = 0
y_test[y_test != 1] = 0
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)

plt.figure(figsize=(10, 5))

# The same pipeline was copy-pasted three times before; now one call per
# choice of PCA basis.  Basis 1: digit-1 training samples only.
prec1, rec1, tre1, f1_1 = lda_precision_recall(pca_vectores(x_train[y_train == 1]))
graficar(tre1, prec1, rec1, f1_1, 'Solo 1')

# Basis 2: all training samples.
prec_t, rec_t, tre_t, f1_t = lda_precision_recall(pca_vectores(x_train))
graficar(tre_t, prec_t, rec_t, f1_t, 'Todos')

# Basis 3: non-1 ("0") training samples only.
prec0, rec0, tre0, f1_0 = lda_precision_recall(pca_vectores(x_train[y_train == 0]))
graficar(tre0, prec0, rec0, f1_0, 'Solo 0')

plt.savefig('F1_prec_recall.png')
"matplotlib.pyplot.ylabel",
"numpy.argsort",
"numpy.cov",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.real",
"numpy.dot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.savefig",
"numpy.linalg.eig",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.p... | [((180, 200), 'sklearn.datasets.load_digits', 'skdata.load_digits', ([], {}), '()\n', (198, 200), True, 'import sklearn.datasets as skdata\n'), ((440, 456), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (454, 456), False, 'from sklearn.preprocessing import StandardScaler\n'), ((492, 538), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'target'], {'train_size': '(0.5)'}), '(data, target, train_size=0.5)\n', (508, 538), False, 'from sklearn.model_selection import train_test_split\n'), ((697, 718), 'numpy.cov', 'np.cov', (['x_train[dd].T'], {}), '(x_train[dd].T)\n', (703, 718), True, 'import numpy as np\n'), ((739, 757), 'numpy.linalg.eig', 'np.linalg.eig', (['cov'], {}), '(cov)\n', (752, 757), True, 'import numpy as np\n'), ((768, 784), 'numpy.real', 'np.real', (['valores'], {}), '(valores)\n', (775, 784), True, 'import numpy as np\n'), ((796, 813), 'numpy.real', 'np.real', (['vectores'], {}), '(vectores)\n', (803, 813), True, 'import numpy as np\n'), ((819, 839), 'numpy.argsort', 'np.argsort', (['(-valores)'], {}), '(-valores)\n', (829, 839), True, 'import numpy as np\n'), ((895, 923), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {}), '()\n', (921, 923), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n'), ((945, 970), 'numpy.dot', 'np.dot', (['x_train', 'vectores'], {}), '(x_train, vectores)\n', (951, 970), True, 'import numpy as np\n'), ((988, 1012), 'numpy.dot', 'np.dot', (['x_test', 'vectores'], {}), '(x_test, vectores)\n', (994, 1012), True, 'import numpy as np\n'), ((1154, 1222), 'sklearn.metrics.precision_recall_curve', 'sklearn.metrics.precision_recall_curve', (['y_test', 'probabilidades[:, 1]'], {}), '(y_test, probabilidades[:, 1])\n', (1192, 1222), False, 'import sklearn\n'), ((1284, 1301), 'numpy.cov', 'np.cov', (['x_train.T'], {}), '(x_train.T)\n', (1290, 1301), True, 'import numpy as np\n'), 
((1322, 1340), 'numpy.linalg.eig', 'np.linalg.eig', (['cov'], {}), '(cov)\n', (1335, 1340), True, 'import numpy as np\n'), ((1351, 1367), 'numpy.real', 'np.real', (['valores'], {}), '(valores)\n', (1358, 1367), True, 'import numpy as np\n'), ((1379, 1396), 'numpy.real', 'np.real', (['vectores'], {}), '(vectores)\n', (1386, 1396), True, 'import numpy as np\n'), ((1402, 1422), 'numpy.argsort', 'np.argsort', (['(-valores)'], {}), '(-valores)\n', (1412, 1422), True, 'import numpy as np\n'), ((1478, 1506), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {}), '()\n', (1504, 1506), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n'), ((1528, 1553), 'numpy.dot', 'np.dot', (['x_train', 'vectores'], {}), '(x_train, vectores)\n', (1534, 1553), True, 'import numpy as np\n'), ((1571, 1595), 'numpy.dot', 'np.dot', (['x_test', 'vectores'], {}), '(x_test, vectores)\n', (1577, 1595), True, 'import numpy as np\n'), ((1758, 1832), 'sklearn.metrics.precision_recall_curve', 'sklearn.metrics.precision_recall_curve', (['y_test', 'probabilidades_todos[:, 1]'], {}), '(y_test, probabilidades_todos[:, 1])\n', (1796, 1832), False, 'import sklearn\n'), ((1960, 1981), 'numpy.cov', 'np.cov', (['x_train[dd].T'], {}), '(x_train[dd].T)\n', (1966, 1981), True, 'import numpy as np\n'), ((2002, 2020), 'numpy.linalg.eig', 'np.linalg.eig', (['cov'], {}), '(cov)\n', (2015, 2020), True, 'import numpy as np\n'), ((2031, 2047), 'numpy.real', 'np.real', (['valores'], {}), '(valores)\n', (2038, 2047), True, 'import numpy as np\n'), ((2059, 2076), 'numpy.real', 'np.real', (['vectores'], {}), '(vectores)\n', (2066, 2076), True, 'import numpy as np\n'), ((2082, 2102), 'numpy.argsort', 'np.argsort', (['(-valores)'], {}), '(-valores)\n', (2092, 2102), True, 'import numpy as np\n'), ((2158, 2186), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {}), '()\n', (2184, 2186), False, 'from 
sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n'), ((2208, 2233), 'numpy.dot', 'np.dot', (['x_train', 'vectores'], {}), '(x_train, vectores)\n', (2214, 2233), True, 'import numpy as np\n'), ((2251, 2275), 'numpy.dot', 'np.dot', (['x_test', 'vectores'], {}), '(x_test, vectores)\n', (2257, 2275), True, 'import numpy as np\n'), ((2417, 2485), 'sklearn.metrics.precision_recall_curve', 'sklearn.metrics.precision_recall_curve', (['y_test', 'probabilidades[:, 1]'], {}), '(y_test, probabilidades[:, 1])\n', (2455, 2485), False, 'import sklearn\n'), ((2546, 2573), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (2556, 2573), True, 'import matplotlib.pyplot as plt\n'), ((2575, 2595), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (2586, 2595), True, 'import matplotlib.pyplot as plt\n'), ((2594, 2645), 'matplotlib.pyplot.plot', 'plt.plot', (['treshold1', 'f1_score1[:-1]'], {'label': '"""Solo 1"""'}), "(treshold1, f1_score1[:-1], label='Solo 1')\n", (2602, 2645), True, 'import matplotlib.pyplot as plt\n'), ((2721, 2786), 'matplotlib.pyplot.scatter', 'plt.scatter', (['treshold1[indice]', 'f1_score1[:-1][indice]'], {'color': '"""r"""'}), "(treshold1[indice], f1_score1[:-1][indice], color='r')\n", (2732, 2786), True, 'import matplotlib.pyplot as plt\n'), ((2789, 2801), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2799, 2801), True, 'import matplotlib.pyplot as plt\n'), ((2802, 2828), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Probabilidad"""'], {}), "('Probabilidad')\n", (2812, 2828), True, 'import matplotlib.pyplot as plt\n'), ((2829, 2845), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""F1"""'], {}), "('F1')\n", (2839, 2845), True, 'import matplotlib.pyplot as plt\n'), ((2846, 2866), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (2857, 2866), True, 'import matplotlib.pyplot as plt\n'), ((2865, 2909), 
'matplotlib.pyplot.plot', 'plt.plot', (['recall1', 'precision1'], {'label': '"""solo1"""'}), "(recall1, precision1, label='solo1')\n", (2873, 2909), True, 'import matplotlib.pyplot as plt\n'), ((2911, 2923), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2921, 2923), True, 'import matplotlib.pyplot as plt\n'), ((2924, 2983), 'matplotlib.pyplot.scatter', 'plt.scatter', (['recall1[indice]', 'precision1[indice]'], {'color': '"""r"""'}), "(recall1[indice], precision1[indice], color='r')\n", (2935, 2983), True, 'import matplotlib.pyplot as plt\n'), ((2986, 3006), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (2996, 3006), True, 'import matplotlib.pyplot as plt\n'), ((3007, 3030), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precisión"""'], {}), "('Precisión')\n", (3017, 3030), True, 'import matplotlib.pyplot as plt\n'), ((3033, 3053), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (3044, 3053), True, 'import matplotlib.pyplot as plt\n'), ((3052, 3112), 'matplotlib.pyplot.plot', 'plt.plot', (['treshold_todos', 'f1_score_todos[:-1]'], {'label': '"""Todos"""'}), "(treshold_todos, f1_score_todos[:-1], label='Todos')\n", (3060, 3112), True, 'import matplotlib.pyplot as plt\n'), ((3114, 3126), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3124, 3126), True, 'import matplotlib.pyplot as plt\n'), ((3211, 3286), 'matplotlib.pyplot.scatter', 'plt.scatter', (['treshold_todos[indice]', 'f1_score_todos[:-1][indice]'], {'color': '"""r"""'}), "(treshold_todos[indice], f1_score_todos[:-1][indice], color='r')\n", (3222, 3286), True, 'import matplotlib.pyplot as plt\n'), ((3289, 3315), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Probabilidad"""'], {}), "('Probabilidad')\n", (3299, 3315), True, 'import matplotlib.pyplot as plt\n'), ((3316, 3332), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""F1"""'], {}), "('F1')\n", (3326, 3332), True, 'import matplotlib.pyplot as plt\n'), 
((3333, 3353), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (3344, 3353), True, 'import matplotlib.pyplot as plt\n'), ((3352, 3406), 'matplotlib.pyplot.plot', 'plt.plot', (['recall_todos', 'precision_todos'], {'label': '"""Todos"""'}), "(recall_todos, precision_todos, label='Todos')\n", (3360, 3406), True, 'import matplotlib.pyplot as plt\n'), ((3408, 3477), 'matplotlib.pyplot.scatter', 'plt.scatter', (['recall_todos[indice]', 'precision_todos[indice]'], {'color': '"""r"""'}), "(recall_todos[indice], precision_todos[indice], color='r')\n", (3419, 3477), True, 'import matplotlib.pyplot as plt\n'), ((3480, 3500), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (3490, 3500), True, 'import matplotlib.pyplot as plt\n'), ((3501, 3524), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precisión"""'], {}), "('Precisión')\n", (3511, 3524), True, 'import matplotlib.pyplot as plt\n'), ((3525, 3537), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3535, 3537), True, 'import matplotlib.pyplot as plt\n'), ((3540, 3560), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (3551, 3560), True, 'import matplotlib.pyplot as plt\n'), ((3559, 3610), 'matplotlib.pyplot.plot', 'plt.plot', (['treshold0', 'f1_score0[:-1]'], {'label': '"""Solo 0"""'}), "(treshold0, f1_score0[:-1], label='Solo 0')\n", (3567, 3610), True, 'import matplotlib.pyplot as plt\n'), ((3612, 3624), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3622, 3624), True, 'import matplotlib.pyplot as plt\n'), ((3686, 3751), 'matplotlib.pyplot.scatter', 'plt.scatter', (['treshold0[indice]', 'f1_score0[:-1][indice]'], {'color': '"""r"""'}), "(treshold0[indice], f1_score0[:-1][indice], color='r')\n", (3697, 3751), True, 'import matplotlib.pyplot as plt\n'), ((3768, 3794), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Probabilidad"""'], {}), "('Probabilidad')\n", (3778, 3794), True, 'import 
matplotlib.pyplot as plt\n'), ((3795, 3811), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""F1"""'], {}), "('F1')\n", (3805, 3811), True, 'import matplotlib.pyplot as plt\n'), ((3812, 3832), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (3823, 3832), True, 'import matplotlib.pyplot as plt\n'), ((3831, 3876), 'matplotlib.pyplot.plot', 'plt.plot', (['recall0', 'precision0'], {'label': '"""Solo 0"""'}), "(recall0, precision0, label='Solo 0')\n", (3839, 3876), True, 'import matplotlib.pyplot as plt\n'), ((3878, 3937), 'matplotlib.pyplot.scatter', 'plt.scatter', (['recall0[indice]', 'precision0[indice]'], {'color': '"""r"""'}), "(recall0[indice], precision0[indice], color='r')\n", (3889, 3937), True, 'import matplotlib.pyplot as plt\n'), ((3940, 3960), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (3950, 3960), True, 'import matplotlib.pyplot as plt\n'), ((3961, 3984), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precisión"""'], {}), "('Precisión')\n", (3971, 3984), True, 'import matplotlib.pyplot as plt\n'), ((3985, 3997), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3995, 3997), True, 'import matplotlib.pyplot as plt\n'), ((4001, 4034), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""F1_prec_recall.png"""'], {}), "('F1_prec_recall.png')\n", (4012, 4034), True, 'import matplotlib.pyplot as plt\n'), ((2683, 2705), 'numpy.max', 'np.max', (['f1_score1[:-1]'], {}), '(f1_score1[:-1])\n', (2689, 2705), True, 'import numpy as np\n'), ((3168, 3195), 'numpy.max', 'np.max', (['f1_score_todos[:-1]'], {}), '(f1_score_todos[:-1])\n', (3174, 3195), True, 'import numpy as np\n'), ((3662, 3684), 'numpy.max', 'np.max', (['f1_score0[:-1]'], {}), '(f1_score0[:-1])\n', (3668, 3684), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import math
import os
# Project root used to locate result CSV files.
# NOTE(review): '__file__' is passed as a string literal (not the __file__
# variable), so this resolves relative to the current working directory,
# not the script's location — confirm this is intentional.
root_path = os.path.dirname(os.path.abspath('__file__'))
def mean_absolute_percentage_error(y_true, y_pred):
    """Return the mean absolute percentage error (MAPE), in percent.

    Assumes y_true contains no zeros (division by the observed values).
    """
    relative_errors = np.abs((y_true - y_pred) / y_true)
    return 100 * np.mean(relative_errors)
def PPTS(y_true, y_pred, gamma):
    """
    Compute the peak percentage threshold statistic (PPTS).

    The statistic is the mean absolute percentage error restricted to the
    top ``gamma`` percent of the observed records (the "peaks").

    args:
        y_true: the observed records (1-D array-like)
        y_pred: the predictions (1-D array-like, aligned with y_true)
        gamma: lower value percentage (e.g. 5 means "top 5% of records")
    returns:
        float: PPTS value, in percent
    """
    # Wrap both series so they can be sorted together by observed value.
    r = pd.DataFrame(y_true, columns=['r'])
    p = pd.DataFrame(y_pred, columns=['p'])
    # The number of samples
    N = r['r'].size
    print('series size={}'.format(N))
    # The number of top (peak) records to keep
    G = round((gamma / 100) * N)
    rp = pd.concat([r, p], axis=1)
    # Sort descending by the observed values and keep the top G rows.
    rps = rp.sort_values(by=['r'], ascending=False)
    rps_g = rps.iloc[:G]
    y_true = (rps_g['r']).values
    y_pred = (rps_g['p']).values
    # Per-record absolute percentage error over the peak subset.
    abss = np.abs((y_true - y_pred) / y_true * 100)
    print('abs error={}'.format(abss))
    sums = np.sum(abss)
    # Bug fix: previously printed the per-element array `abss` here
    # instead of the scalar sum `sums`.
    print('sum of abs error={}'.format(sums))
    ppts = sums * (1 / ((gamma / 100) * N))
    print('ppts(' + str(gamma) + '%)={}'.format(ppts))
    return ppts
def r2_score(y_true, y_pred):
    """Correlation between observed and predicted series.

    NOTE(review): despite the name, the formula computed here is the Pearson
    correlation coefficient r (the cross term is not squared), not r squared.
    Kept as-is to preserve existing caller behavior.
    """
    mean_true = sum(y_true) / len(y_true)
    mean_pred = sum(y_pred) / len(y_pred)
    dev_true = y_true - mean_true
    dev_pred = y_pred - mean_pred
    numerator = sum(dev_true * dev_pred)
    denominator = math.sqrt(sum(dev_true ** 2) * sum(dev_pred ** 2))
    return numerator / denominator
if __name__ == '__main__':
    # Load the summed multi-step ESVR test predictions for the Huaxian VMD
    # project and report the correlation between observed and predicted flow.
    data=pd.read_csv(root_path+"/Huaxian_vmd/projects/esvr/multi_step_1_month_forecast/esvr_Huaxian_vmd_sum_test_result.csv")
    print(data)
    y_true = data['orig']
    y_pred = data['pred']
    r2 = r2_score(y_true=y_true,y_pred=y_pred)
    print(r2)
    # --- Legacy experiments below, kept commented out for reference ---
    # data = pd.read_excel(root_path+'\\e-svr-models\\gbr_pred_e_svr.xlsx')
    # # test_data_size=4325
    # y_test = data['y_train'][1:test_data_size+1]
    # test_predictions=data['train_pred'][1:test_data_size+1]
    # # print(y_test)
    # ppts = PPTS(y_test.values,test_predictions.values,5)
    # # print(ppts)
    # data = pd.read_excel(root_path+'\\bpnn-models\\gbr_pred_bpnn.xlsx')
    # # test_data_size=4325
    # y_test = data['y_train'][1:test_data_size+1]
    # test_predictions=data['train_pred'][1:test_data_size+1]
    # # print(y_test)
    # ppts = PPTS(y_test.values,test_predictions.values,5)
    # # print(ppts)
    # print('='*100)
    # data = pd.read_csv(root_path+'\\lstm-models\\lstm_ensemble_test_result.csv')
    # # test_data_size=4325
    # y_test = data['orig']
    # test_predictions=data['pred']
    # # print(y_test)
    # ppts5 = PPTS(y_test.values,test_predictions.values,5)
    # # print('ppts5={}'.format(ppts5))
    # ppts15 = PPTS(y_test.values,test_predictions.values,15)
    # # print('ppts15={}'.format(ppts15))
    # ppts20 = PPTS(y_test.values,test_predictions.values,20)
    # # print('ppts20={}'.format(ppts20))
    # ppts25 = PPTS(y_test.values,test_predictions.values,25)
    # # print('ppts25={}'.format(ppts25))
| [
"numpy.abs",
"pandas.DataFrame",
"pandas.read_csv",
"numpy.sum",
"os.path.abspath",
"pandas.concat"
] | [((89, 116), 'os.path.abspath', 'os.path.abspath', (['"""__file__"""'], {}), "('__file__')\n", (104, 116), False, 'import os\n'), ((464, 499), 'pandas.DataFrame', 'pd.DataFrame', (['y_true'], {'columns': "['r']"}), "(y_true, columns=['r'])\n", (476, 499), True, 'import pandas as pd\n'), ((576, 611), 'pandas.DataFrame', 'pd.DataFrame', (['y_pred'], {'columns': "['p']"}), "(y_pred, columns=['p'])\n", (588, 611), True, 'import pandas as pd\n'), ((816, 841), 'pandas.concat', 'pd.concat', (['[r, p]'], {'axis': '(1)'}), '([r, p], axis=1)\n', (825, 841), True, 'import pandas as pd\n'), ((987, 1027), 'numpy.abs', 'np.abs', (['((y_true - y_pred) / y_true * 100)'], {}), '((y_true - y_pred) / y_true * 100)\n', (993, 1027), True, 'import numpy as np\n'), ((1072, 1084), 'numpy.sum', 'np.sum', (['abss'], {}), '(abss)\n', (1078, 1084), True, 'import numpy as np\n'), ((1512, 1639), 'pandas.read_csv', 'pd.read_csv', (["(root_path +\n '/Huaxian_vmd/projects/esvr/multi_step_1_month_forecast/esvr_Huaxian_vmd_sum_test_result.csv'\n )"], {}), "(root_path +\n '/Huaxian_vmd/projects/esvr/multi_step_1_month_forecast/esvr_Huaxian_vmd_sum_test_result.csv'\n )\n", (1523, 1639), True, 'import pandas as pd\n'), ((189, 223), 'numpy.abs', 'np.abs', (['((y_true - y_pred) / y_true)'], {}), '((y_true - y_pred) / y_true)\n', (195, 223), True, 'import numpy as np\n')] |
"""
given set of observations, fit irt model and write outputs to disk
subject: train_noise
item: imageID
y: response
"""
import argparse
import csv
import numpy as np
import pyro
import torch
import torch.nn as nn
from py_irt.models.one_param_logistic import OneParamLog
from py_irt.models.two_param_logistic import TwoParamLog
from scipy.special import expit
# Command-line options: epoch budget, optional GPU, verbose parameter dumps.
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--num-epochs", default=1000, type=int)
parser.add_argument("--gpu", action="store_true")
parser.add_argument("-v", "--verbose", action="store_true")
args = parser.parse_args()
device = torch.device("cpu")
if args.gpu:
    device = torch.device("cuda")
# 1. load data from file
# 2. combine into obs 3-tuples
models = []
items = []
responses = []
num_subjects = 2000
num_items = 100
# Ground-truth IRT parameters drawn from standard normals:
# subject abilities (theta), item difficulties, and item slopes.
real_theta = np.random.normal(size=[num_subjects])
real_diff = np.random.normal(size=[num_items])
real_slope = np.random.normal(size=[num_items])
print(real_slope)
obs = []
# Simulate one Bernoulli response per (subject, item) pair using the
# two-parameter logistic model: P(correct) = sigmoid(slope * (theta - diff)).
for i in range(len(real_theta)):
    for j in range(len(real_diff)):
        y = np.random.binomial(1, expit(real_slope[j] * (real_theta[i] - real_diff[j])))
        models.append(i)
        items.append(j)
        responses.append(y)
# Persist the full response matrix and the known generating parameters so
# recovered estimates can be compared against ground truth later.
with open("test_data_params.csv", "w") as outfile:
    D = np.zeros((len(real_theta), len(real_diff)))
    for i in range(len(responses)):
        model = models[i]
        item = items[i]
        response = responses[i]
        D[model, item] = response
    np.savetxt(outfile, D, delimiter=",", fmt="%.1f")
with open("test_data_params.knowndiffs.csv", "w") as outfile:
    np.savetxt(outfile, real_diff, delimiter="\n", fmt="%.5f")
with open("test_data_params.knownthetas.csv", "w") as outfile:
    np.savetxt(outfile, real_theta, delimiter="\n", fmt="%.5f")
with open("test_data_params.knownslopes.csv", "w") as outfile:
    np.savetxt(outfile, real_slope, delimiter="\n", fmt="%.5f")
num_subjects = len(set(models))
num_items = len(set(items))
# Convert the observation triples to tensors on the chosen device.
models = torch.tensor(models, dtype=torch.long, device=device)
items = torch.tensor(items, dtype=torch.long, device=device)
responses = torch.tensor(responses, dtype=torch.float, device=device)
# 3. define model and guide accordingly
# Four model variants: 1- and 2-parameter logistic, each with vague and
# hierarchical priors.
m1v = OneParamLog("vague", device, num_items, num_subjects, args.verbose)
m1h = OneParamLog("hierarchical", device, num_items, num_subjects, args.verbose)
m2v = TwoParamLog("vague", device, num_items, num_subjects, args.verbose)
m2h = TwoParamLog("hierarchical", device, num_items, num_subjects, args.verbose)
for m in [m1v, m2v, m1h, m2h]:
    # 4. fit irt model with svi, trace-elbo loss
    m.fit(models, items, responses, args.num_epochs)
    # 5. once model is fit, write outputs (diffs and thetas) to disk,
    # retaining original modelIDs and itemIDs so we can use them
    # Report RMSE of each recovered parameter vector against ground truth.
    for name in pyro.get_param_store().get_all_param_names():
        if name not in ["loc_diff", "loc_ability", "loc_slope"]:
            continue
        print(name)
        val = pyro.param(name).data.numpy()
        if args.verbose:
            print(val)
        if name == "loc_diff":
            print("rmse: {}".format(np.sqrt(np.mean((val - real_diff) ** 2))))
        elif name == "loc_ability":
            print("rmse: {}".format(np.sqrt(np.mean((val - real_theta) ** 2))))
        elif name == "loc_slope":
            print("rmse: {}".format(np.sqrt(np.mean((val - real_slope) ** 2))))
        if args.verbose:
            print(val)
| [
"numpy.random.normal",
"numpy.mean",
"py_irt.models.two_param_logistic.TwoParamLog",
"argparse.ArgumentParser",
"pyro.get_param_store",
"pyro.param",
"scipy.special.expit",
"torch.tensor",
"numpy.savetxt",
"py_irt.models.one_param_logistic.OneParamLog",
"torch.device"
] | [((375, 400), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (398, 400), False, 'import argparse\n'), ((614, 633), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (626, 633), False, 'import torch\n'), ((828, 865), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[num_subjects]'}), '(size=[num_subjects])\n', (844, 865), True, 'import numpy as np\n'), ((878, 912), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[num_items]'}), '(size=[num_items])\n', (894, 912), True, 'import numpy as np\n'), ((926, 960), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[num_items]'}), '(size=[num_items])\n', (942, 960), True, 'import numpy as np\n'), ((1986, 2039), 'torch.tensor', 'torch.tensor', (['models'], {'dtype': 'torch.long', 'device': 'device'}), '(models, dtype=torch.long, device=device)\n', (1998, 2039), False, 'import torch\n'), ((2048, 2100), 'torch.tensor', 'torch.tensor', (['items'], {'dtype': 'torch.long', 'device': 'device'}), '(items, dtype=torch.long, device=device)\n', (2060, 2100), False, 'import torch\n'), ((2113, 2170), 'torch.tensor', 'torch.tensor', (['responses'], {'dtype': 'torch.float', 'device': 'device'}), '(responses, dtype=torch.float, device=device)\n', (2125, 2170), False, 'import torch\n'), ((2219, 2286), 'py_irt.models.one_param_logistic.OneParamLog', 'OneParamLog', (['"""vague"""', 'device', 'num_items', 'num_subjects', 'args.verbose'], {}), "('vague', device, num_items, num_subjects, args.verbose)\n", (2230, 2286), False, 'from py_irt.models.one_param_logistic import OneParamLog\n'), ((2293, 2367), 'py_irt.models.one_param_logistic.OneParamLog', 'OneParamLog', (['"""hierarchical"""', 'device', 'num_items', 'num_subjects', 'args.verbose'], {}), "('hierarchical', device, num_items, num_subjects, args.verbose)\n", (2304, 2367), False, 'from py_irt.models.one_param_logistic import OneParamLog\n'), ((2374, 2441), 'py_irt.models.two_param_logistic.TwoParamLog', 'TwoParamLog', 
(['"""vague"""', 'device', 'num_items', 'num_subjects', 'args.verbose'], {}), "('vague', device, num_items, num_subjects, args.verbose)\n", (2385, 2441), False, 'from py_irt.models.two_param_logistic import TwoParamLog\n'), ((2448, 2522), 'py_irt.models.two_param_logistic.TwoParamLog', 'TwoParamLog', (['"""hierarchical"""', 'device', 'num_items', 'num_subjects', 'args.verbose'], {}), "('hierarchical', device, num_items, num_subjects, args.verbose)\n", (2459, 2522), False, 'from py_irt.models.two_param_logistic import TwoParamLog\n'), ((660, 680), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (672, 680), False, 'import torch\n'), ((1484, 1533), 'numpy.savetxt', 'np.savetxt', (['outfile', 'D'], {'delimiter': '""","""', 'fmt': '"""%.1f"""'}), "(outfile, D, delimiter=',', fmt='%.1f')\n", (1494, 1533), True, 'import numpy as np\n'), ((1601, 1659), 'numpy.savetxt', 'np.savetxt', (['outfile', 'real_diff'], {'delimiter': '"""\n"""', 'fmt': '"""%.5f"""'}), "(outfile, real_diff, delimiter='\\n', fmt='%.5f')\n", (1611, 1659), True, 'import numpy as np\n'), ((1727, 1786), 'numpy.savetxt', 'np.savetxt', (['outfile', 'real_theta'], {'delimiter': '"""\n"""', 'fmt': '"""%.5f"""'}), "(outfile, real_theta, delimiter='\\n', fmt='%.5f')\n", (1737, 1786), True, 'import numpy as np\n'), ((1854, 1913), 'numpy.savetxt', 'np.savetxt', (['outfile', 'real_slope'], {'delimiter': '"""\n"""', 'fmt': '"""%.5f"""'}), "(outfile, real_slope, delimiter='\\n', fmt='%.5f')\n", (1864, 1913), True, 'import numpy as np\n'), ((1092, 1145), 'scipy.special.expit', 'expit', (['(real_slope[j] * (real_theta[i] - real_diff[j]))'], {}), '(real_slope[j] * (real_theta[i] - real_diff[j]))\n', (1097, 1145), False, 'from scipy.special import expit\n'), ((2816, 2838), 'pyro.get_param_store', 'pyro.get_param_store', ([], {}), '()\n', (2836, 2838), False, 'import pyro\n'), ((2982, 2998), 'pyro.param', 'pyro.param', (['name'], {}), '(name)\n', (2992, 2998), False, 'import pyro\n'), ((3135, 3166), 
'numpy.mean', 'np.mean', (['((val - real_diff) ** 2)'], {}), '((val - real_diff) ** 2)\n', (3142, 3166), True, 'import numpy as np\n'), ((3250, 3282), 'numpy.mean', 'np.mean', (['((val - real_theta) ** 2)'], {}), '((val - real_theta) ** 2)\n', (3257, 3282), True, 'import numpy as np\n'), ((3364, 3396), 'numpy.mean', 'np.mean', (['((val - real_slope) ** 2)'], {}), '((val - real_slope) ** 2)\n', (3371, 3396), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import itertools
from typing import Callable, Sequence, Tuple
import numpy as np
import torch
import torch.nn as nn
from torch.nn.utils.clip_grad import clip_grad_norm_
from torch.optim import Optimizer
from torch.utils.data import DataLoader, TensorDataset, SubsetRandomSampler
from molecular_cross_validation.train.cosine_scheduler import CosineWithRestarts
Transform = Callable[[torch.Tensor], torch.Tensor]
def split_dataset(
*xs: torch.Tensor, batch_size: int, indices: np.ndarray = None, n_train: int = None
):
if indices is None:
indices = np.random.permutation(xs[0].shape[0])
if n_train is None:
n_train = int(0.875 * xs[0].shape[0])
ds = TensorDataset(*xs)
training_dl = DataLoader(
dataset=ds,
batch_size=batch_size,
sampler=SubsetRandomSampler(indices[:n_train]),
)
validation_dl = DataLoader(
dataset=ds,
batch_size=batch_size,
sampler=SubsetRandomSampler(indices[n_train:]),
)
return training_dl, validation_dl
def train_epoch(
model: nn.Module,
criterion: nn.Module,
optim: Optimizer,
data_loader: DataLoader,
input_t: Transform,
clip_norm: float = None,
):
"""Iterate through training data, compute losses and take gradient steps
:param model: a torch Module that can take input data and return the prediction
:param criterion: a loss function
:param optim: a torch Optimizer
:param data_loader: training dataset. Should produce a tuple of tensors: the first
is used as input and the last is the target. If the tuple has
only one element then it's used for both
:param input_t: Transformation to apply to the input
:param clip_norm: clip gradient norm to a given absolute value
:return: total loss for the epoch, averaged over the number of batches
"""
total_epoch_loss = 0.0
for data in data_loader:
y = model(input_t(data[0]))
loss = criterion(y, data[0])
total_epoch_loss += loss.data.item()
optim.zero_grad()
loss.backward()
if clip_norm is not None:
clip_grad_norm_(model.parameters(), clip_norm)
optim.step()
return total_epoch_loss / len(data_loader)
def evaluate_epoch(
model: nn.Module,
criterion: nn.Module,
data_loader: DataLoader,
input_t: Transform,
eval_i: Sequence[int],
):
"""Iterate through test data and compute losses
:param model: a torch Module that can take input data and return the prediction
:param criterion: a loss function
:param data_loader: validation dataset. Should produce a tuple of tensors: the first
is used as input and the last is the target. If the tuple has
only one element then it's used for both
:param input_t: Transformation to apply to the input
:param eval_i: Index into the DataLoader tuple for evaluation
:return: total loss for the epoch, averaged over the number of batches
"""
total_epoch_loss = 0.0
for data in data_loader:
y = model(input_t(data[0]))
loss = criterion(y, *(data[i] for i in eval_i))
total_epoch_loss += loss.data.item()
return total_epoch_loss / len(data_loader)
def train_until_plateau(
model: nn.Module,
training_loss: nn.Module,
optim: Optimizer,
training_data: DataLoader,
validation_data: DataLoader,
input_t: Transform,
min_cycles: int = 3,
threshold: float = 0.01,
scheduler_kw: dict = None,
verbose: bool = False,
) -> Tuple[list, list]:
"""Train a model with cosine scheduling until validation loss stabilizes. This
function uses CosineWithRestarts to train until the learning rate stops improving.
:param model: torch Module that can take input data and return the prediction
:param training_loss: The loss function used for training the model
:param optim: torch Optimizer (will zero the gradient after testing)
:param training_data: Training dataset. Should produce tuples of Tensors, all but
the last are considered to be input and the last is the target
:param validation_data: Validation dataset in the same format
:param input_t: Function to apply to the input
:param min_cycles: Minimum number of cycles to run before checking for convergence
:param threshold: Tolerance threshold for calling convergence
:param scheduler_kw: dictionary of keyword arguments for CosineWithRestarts
:param verbose: Print training progress to stdout
:return: Lists of training and validation loss and correlation values
"""
assert 0.0 <= threshold < 1.0
if scheduler_kw is None:
scheduler_kw = dict()
train_loss = []
val_loss = []
scheduler = CosineWithRestarts(optim, **scheduler_kw)
best = np.inf
rel_epsilon = 1.0 - threshold
neg_epsilon = 1.0 + threshold
cycle = 0
for epoch in itertools.count():
optim.zero_grad() # just make sure things are zeroed before train loop
model.train()
train_loss.append(
train_epoch(
model=model,
criterion=training_loss,
optim=optim,
data_loader=training_data,
input_t=input_t,
clip_norm=100.0,
)
)
model.eval()
val_loss.append(
evaluate_epoch(
model=model,
criterion=training_loss,
data_loader=validation_data,
input_t=input_t,
eval_i=[0],
)
)
scheduler.step()
if scheduler.starting_cycle:
if verbose:
print(
f"[epoch {epoch:03d}] average training loss: {train_loss[-1]:.5f}"
)
cycle += 1
if 0 <= val_loss[-1] < best * rel_epsilon:
best = val_loss[-1]
elif 0 > val_loss[-1] and val_loss[-1] < best * neg_epsilon:
best = val_loss[-1]
elif cycle >= min_cycles:
break
return train_loss, val_loss
| [
"torch.utils.data.SubsetRandomSampler",
"torch.utils.data.TensorDataset",
"itertools.count",
"molecular_cross_validation.train.cosine_scheduler.CosineWithRestarts",
"numpy.random.permutation"
] | [((713, 731), 'torch.utils.data.TensorDataset', 'TensorDataset', (['*xs'], {}), '(*xs)\n', (726, 731), False, 'from torch.utils.data import DataLoader, TensorDataset, SubsetRandomSampler\n'), ((4857, 4898), 'molecular_cross_validation.train.cosine_scheduler.CosineWithRestarts', 'CosineWithRestarts', (['optim'], {}), '(optim, **scheduler_kw)\n', (4875, 4898), False, 'from molecular_cross_validation.train.cosine_scheduler import CosineWithRestarts\n'), ((5017, 5034), 'itertools.count', 'itertools.count', ([], {}), '()\n', (5032, 5034), False, 'import itertools\n'), ((594, 631), 'numpy.random.permutation', 'np.random.permutation', (['xs[0].shape[0]'], {}), '(xs[0].shape[0])\n', (615, 631), True, 'import numpy as np\n'), ((830, 868), 'torch.utils.data.SubsetRandomSampler', 'SubsetRandomSampler', (['indices[:n_train]'], {}), '(indices[:n_train])\n', (849, 868), False, 'from torch.utils.data import DataLoader, TensorDataset, SubsetRandomSampler\n'), ((976, 1014), 'torch.utils.data.SubsetRandomSampler', 'SubsetRandomSampler', (['indices[n_train:]'], {}), '(indices[n_train:])\n', (995, 1014), False, 'from torch.utils.data import DataLoader, TensorDataset, SubsetRandomSampler\n')] |
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import os
import glob
import skimage.io as io
from skimage import img_as_ubyte
import skimage.transform as trans
import matplotlib.pyplot as plt
def train_preprocessing(img, mask):
    """Rescale and binarize an (image, mask) pair before training.

    When the image appears to be in [0, 255] (max value > 1), both image and
    mask are rescaled to [0, 1]. The mask is then thresholded at 0.5 to a
    hard 0/1 segmentation target. Note: thresholding writes into the mask
    array in place when no rescaling occurred.

    Args:
        img: array
        mask: array
    Returns:
        tuple of array
    """
    needs_rescale = np.max(img) > 1
    if needs_rescale:
        img = img / 255
        mask = mask / 255
    foreground = mask > 0.5
    mask[foreground] = 1
    mask[~foreground] = 0
    return img, mask
def create_train_generator(
    batch_size: int,
    train_dataset_path: str,
    aug_dict: dict,
    image_folder="images",
    mask_folder="masks",
    image_color_mode="grayscale",
    mask_color_mode="grayscale",
    image_save_prefix="augmented_image",
    mask_save_prefix="augmented_mask",
    save_to_dir=None,
    target_size=(256, 256),
    seed=1,
):
    """Yield augmented (image, mask) batches for segmentation training.

    Two ImageDataGenerator streams are built from the same augmentation
    dictionary and the same seed, so every augmented image batch stays
    spatially aligned with its corresponding mask batch. Each pair is
    passed through ``train_preprocessing`` before being yielded.

    Arg:
        batch_size: int
            batch size for training
        train_dataset_path: str
            path of the training dataset
        aug_dict: dict
            keyword arguments forwarded to ImageDataGenerator
        image_folder: str
            name of the train images folder
        mask_folder: str
            name of the mask images folder
        image_color_mode: one of "grayscale", "rgb", "rgba"
        mask_color_mode: one of "grayscale", "rgb", "rgba"
        image_save_prefix: str
            filename prefix for saved augmented images (if save_to_dir set)
        mask_save_prefix: str
            filename prefix for saved augmented masks (if save_to_dir set)
        save_to_dir: None or str
            optional directory for dumping the augmented pictures
        target_size: tuple of integers
            dimensions to which all images found will be resized
        seed: int
            random seed shared by both streams

    Returns:
        iterator of (image, mask) batch tuples
    """
    image_stream = ImageDataGenerator(**aug_dict).flow_from_directory(
        train_dataset_path,
        classes=[image_folder],
        class_mode=None,
        color_mode=image_color_mode,
        target_size=target_size,
        batch_size=batch_size,
        save_to_dir=save_to_dir,
        save_prefix=image_save_prefix,
        seed=seed,
    )
    mask_stream = ImageDataGenerator(**aug_dict).flow_from_directory(
        train_dataset_path,
        classes=[mask_folder],
        class_mode=None,
        color_mode=mask_color_mode,
        target_size=target_size,
        batch_size=batch_size,
        save_to_dir=save_to_dir,
        save_prefix=mask_save_prefix,
        seed=seed,
    )
    # Identical seeds keep the two streams synchronized batch-for-batch.
    for image_batch, mask_batch in zip(image_stream, mask_stream):
        yield train_preprocessing(image_batch, mask_batch)
def create_test_generator(
    test_path: str, num_image: int, target_size=(256, 256), as_gray=True
):
    """Yield preprocessed test images, one at a time, for model prediction.

    Images are expected to be named ``0.png`` .. ``num_image-1.png`` inside
    ``test_path``. Each image is scaled to [0, 1], resized, and reshaped to
    a (1, H, W, 1) batch.

    Args:
        test_path: str
            path to the test dataset
        num_image: int
            number of images in the dataset
        target_size: tuple of integers
            dimensions to which every image is resized
        as_gray: bool
            whether to load images as grayscale
    Returns:
        iterator of (1, H, W, 1) arrays
    """
    for idx in range(num_image):
        frame = io.imread(os.path.join(test_path, "%d.png" % idx), as_gray=as_gray)
        frame = trans.resize(frame / 255, target_size)
        # Add batch and channel axes: (H, W) -> (1, H, W, 1).
        yield frame[np.newaxis, ..., np.newaxis]
def label_visualize(num_class, color_dict, img):
    """Map integer class labels to RGB colors for multi-class visualization.

    Args:
        num_class: number of classes (labels 0 .. num_class-1)
        color_dict: mapping from class index to an RGB triplet in [0, 255]
        img: 2-D label map, or a 3-D array whose first channel is the labels
    Returns:
        float RGB array of shape ``labels.shape + (3,)`` scaled to [0, 1]
    """
    labels = img[:, :, 0] if len(img.shape) == 3 else img
    colored = np.zeros(labels.shape + (3,))
    for cls in range(num_class):
        colored[labels == cls, :] = color_dict[cls]
    return colored / 255
def save_result(color_dict, save_path, results, flag_multi_class=False, num_class=2):
    """Save prediction maps to disk as PNG images.

    Args:
        color_dict: dict
            dict of color parameters (class index -> RGB triplet), used only
            in the multi-class case
        save_path: str
            directory where the images are written
        results: numpy array
            array of the prediction maps
        flag_multi_class: bool
            whether the prediction is multi class; kept for legacy as this
            repo is not focused on multi class
        num_class: int
            number of classes (multi-class case only)
    """
    for i, item in enumerate(results):
        if flag_multi_class:
            # Bug fix: this previously called the undefined name
            # `labelVisualize`; the helper defined in this module is
            # `label_visualize`, so the multi-class path raised NameError.
            img = label_visualize(num_class, color_dict, item)
        else:
            # Binary case: drop the trailing channel axis.
            img = item[:, :, 0]
        io.imsave(os.path.join(save_path, "%d_predict.png" % i), img_as_ubyte(img))
| [
"numpy.reshape",
"os.path.join",
"keras.preprocessing.image.ImageDataGenerator",
"numpy.max",
"numpy.zeros",
"skimage.img_as_ubyte",
"skimage.transform.resize"
] | [((2339, 2369), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**aug_dict)\n', (2357, 2369), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((2396, 2426), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**aug_dict)\n', (2414, 2426), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((4442, 4468), 'numpy.zeros', 'np.zeros', (['(img.shape + (3,))'], {}), '(img.shape + (3,))\n', (4450, 4468), True, 'import numpy as np\n'), ((425, 436), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (431, 436), True, 'import numpy as np\n'), ((4091, 4121), 'skimage.transform.resize', 'trans.resize', (['img', 'target_size'], {}), '(img, target_size)\n', (4103, 4121), True, 'import skimage.transform as trans\n'), ((4136, 4169), 'numpy.reshape', 'np.reshape', (['img', '(img.shape + (1,))'], {}), '(img, img.shape + (1,))\n', (4146, 4169), True, 'import numpy as np\n'), ((4184, 4217), 'numpy.reshape', 'np.reshape', (['img', '((1,) + img.shape)'], {}), '(img, (1,) + img.shape)\n', (4194, 4217), True, 'import numpy as np\n'), ((3997, 4034), 'os.path.join', 'os.path.join', (['test_path', "('%d.png' % i)"], {}), "(test_path, '%d.png' % i)\n", (4009, 4034), False, 'import os\n'), ((5310, 5355), 'os.path.join', 'os.path.join', (['save_path', "('%d_predict.png' % i)"], {}), "(save_path, '%d_predict.png' % i)\n", (5322, 5355), False, 'import os\n'), ((5357, 5374), 'skimage.img_as_ubyte', 'img_as_ubyte', (['img'], {}), '(img)\n', (5369, 5374), False, 'from skimage import img_as_ubyte\n')] |
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from constants import constants
from tools import generate_detections as gdet
from tools import chart
from deep_sort.tracker import Tracker
from deep_sort.detection import Detection
from deep_sort import preprocessing, nn_matching
from tensorflow.compat.v1 import InteractiveSession
from tensorflow.compat.v1 import ConfigProto
from PIL import Image
from core.config import cfg
from tensorflow.python.saved_model import tag_constants
from core.yolov4 import filter_boxes
from absl.flags import FLAGS
from absl import app, flags, logging
import core.utils as utils
import tensorflow as tf
import time
import datetime
import matplotlib.pyplot as plt
import numpy as np
import cv2
from tools import functions as func
# Let TensorFlow grow GPU memory on demand instead of pre-allocating it all.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
# Command-line flags (absl) for model selection, I/O paths and thresholds.
flags.DEFINE_string('weights', './checkpoints/yolov4-416',
                    'path to weights file')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_string('video', './data/video/test.mp4',
                    'path to input video or set to 0 for webcam')
flags.DEFINE_string('output', None, 'path to output video')
flags.DEFINE_string('output_format', 'XVID',
                    'codec used in VideoWriter when saving video to file')
flags.DEFINE_float('iou', 0.45, 'iou threshold')
flags.DEFINE_float('score', 0.50, 'score threshold')
flags.DEFINE_boolean('dont_show', False, 'dont show video output')
flags.DEFINE_boolean('tiny', False, 'yolo or yolo-tiny')
flags.DEFINE_string('model', 'yolov4', 'yolov3 or yolov4')
flags.DEFINE_string('name', None, 'name of crossroad')
flags.DEFINE_string('direction', None, 'direction of crossroad')
def main(_argv):
    """Run YOLOv4 + Deep SORT vehicle tracking over a video stream.

    Reads frames from ``--video`` (file path or all-digit webcam index),
    detects objects with the saved YOLOv4 model, tracks the allowed vehicle
    classes with Deep SORT, records each track's entry/exit direction in
    ``constants.MovingObjects``, optionally writes an annotated output video
    and finally draws a summary chart.

    Fixes over the previous revision: pressing 'q' now actually breaks out
    of the capture loop (previously only the windows were destroyed and the
    loop kept running), the webcam-index probe catches only the conversion
    errors it can raise, and several dead locals were removed.
    """
    # Deep SORT association parameters.
    max_cosine_distance = 0.4
    nn_budget = None
    nms_max_overlap = 1.0

    # Appearance-feature encoder and tracker.
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)
    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    # TF1-style session with on-demand GPU memory growth; the session object
    # must stay alive for the lifetime of this function.
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    session = InteractiveSession(config=config)

    STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
    input_size = FLAGS.size
    video_path = FLAGS.video
    saved_model_loaded = tf.saved_model.load(FLAGS.weights,
                                            tags=[tag_constants.SERVING])
    infer = saved_model_loaded.signatures['serving_default']

    # An all-digit --video value selects a webcam index, otherwise a file path.
    try:
        vid = cv2.VideoCapture(int(video_path))
    except (ValueError, TypeError):
        vid = cv2.VideoCapture(video_path)

    out = None
    if FLAGS.output:
        # Mirror the input stream's geometry and frame rate for the writer.
        width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = int(vid.get(cv2.CAP_PROP_FPS))
        codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
        out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))
    # NOTE(review): width/height are only defined when --output is set, but
    # func.get_entry_direction below uses them unconditionally — confirm.

    while True:
        return_value, frame = vid.read()
        if return_value:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        else:
            print('Done!')
            break
        # Resize + [0, 1] normalisation expected by the model.
        image_data = cv2.resize(frame, (input_size, input_size))
        image_data = image_data / 255.
        image_data = image_data[np.newaxis, ...].astype(np.float32)

        batch_data = tf.constant(image_data)
        pred_bbox = infer(batch_data)
        # The serving signature yields one output tensor; only the last
        # (only) entry is used below.
        for key, value in pred_bbox.items():
            boxes = value[:, :, 0:4]
            pred_conf = value[:, :, 4:]

        boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
            boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
            scores=tf.reshape(
                pred_conf,
                (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
            max_output_size_per_class=50,
            max_total_size=50,
            iou_threshold=FLAGS.iou,
            score_threshold=FLAGS.score)
        num_objects = valid_detections.numpy()[0]
        bboxes = boxes.numpy()[0]
        bboxes = bboxes[0:int(num_objects)]
        scores = scores.numpy()[0]
        scores = scores[0:int(num_objects)]
        classes = classes.numpy()[0]
        classes = classes[0:int(num_objects)]
        # Convert normalised box coordinates to pixel coordinates.
        original_h, original_w, _ = frame.shape
        bboxes = utils.format_boxes(bboxes, original_h, original_w)
        class_names = utils.read_class_names(cfg.YOLO.CLASSES)

        # Keep only vehicle-like classes.
        allowed_classes = ['car', 'bicycle', 'motorbike', 'bus', 'truck']
        names = []
        deleted_indx = []
        for i in range(num_objects):
            class_indx = int(classes[i])
            class_name = class_names[class_indx]
            if class_name not in allowed_classes:
                deleted_indx.append(i)
            else:
                names.append(class_name)
        names = np.array(names)
        bboxes = np.delete(bboxes, deleted_indx, axis=0)
        scores = np.delete(scores, deleted_indx, axis=0)

        # Encode YOLO detections into appearance features for the tracker.
        features = encoder(frame, bboxes)
        detections = [
            Detection(bbox, score, class_name, feature)
            for bbox, score, class_name, feature in zip(
                bboxes, scores, names, features)
        ]

        # Colour map giving each track id a stable colour.
        cmap = plt.get_cmap('tab20b')
        colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]

        # Non-maxima suppression over the remaining detections.
        boxs = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        classes = np.array([d.class_name for d in detections])
        indices = preprocessing.non_max_suppression(boxs, classes,
                                                      nms_max_overlap, scores)
        detections = [detections[i] for i in indices]

        # Advance the tracker with this frame's detections.
        tracker.predict()
        tracker.update(detections)

        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()
            class_name = track.get_class()
            # Centroid of the current bounding box.
            cX = int((int(bbox[0]) + int(bbox[2])) / 2.0)
            cY = int((int(bbox[1]) + int(bbox[3])) / 2.0)
            centroid = (cX, cY)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
            color = colors[int(track.track_id) % len(colors)]
            color = [i * 255 for i in color]
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (color), 2)
            cv2.putText(frame, class_name + " " + str(track.track_id),
                        (int(bbox[0]), int(bbox[1] - 10)), 0, 0.4,
                        (255, 255, 255), 1)
            # Record entry/exit direction per track id.
            # MovingObjects[id] = [first_centroid, entry_dir, exit_dir,
            #                      entry_time, exit_time]
            if track.track_id in constants.MovingObjects.keys():
                directionx = centroid[0] - \
                    constants.MovingObjects[track.track_id][0][0]
                directiony = centroid[1] - \
                    constants.MovingObjects[track.track_id][0][1]
                if constants.MovingObjects[track.track_id][1] == "":
                    entry_direction = func.get_entry_direction(
                        constants.MovingObjects[track.track_id][0][0],
                        constants.MovingObjects[track.track_id][0][1], width,
                        height)
                    entry_time = datetime.datetime.now()
                    constants.MovingObjects[
                        track.track_id][1] = entry_direction
                    constants.MovingObjects[
                        track.track_id][3] = entry_time.strftime("%X %x")
                # Only call a movement an exit once it is clearly non-noise.
                if np.abs(directionx) > 5 and np.abs(directiony) > 5:
                    exit_direction = func.get_direction(directionx, directiony)
                    exit_time = datetime.datetime.now()
                    constants.MovingObjects[track.track_id][2] = exit_direction
                    constants.MovingObjects[
                        track.track_id][4] = exit_time.strftime("%X %x")
            else:
                constants.MovingObjects[track.track_id] = [
                    centroid, "", "", "", ""
                ]

        # Compass overlay, then display / persist the annotated frame.
        direction = FLAGS.direction
        result = func.compass(frame, direction)
        result = np.asarray(frame)
        result = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        if not FLAGS.dont_show:
            cv2.imshow("Output Video", result)
        if FLAGS.output:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break  # bug fix: previously the loop kept running after 'q'

    # Summary chart over the collected counters.
    name = FLAGS.name
    chart.draw_chart(func.counters(name))
    func.get_time()
if __name__ == '__main__':
    try:
        # absl's app.run parses the flags and invokes main; it raises
        # SystemExit on completion or flag errors — swallow it so the
        # script exits quietly.
        app.run(main)
    except SystemExit:
        pass
| [
"core.utils.read_class_names",
"tensorflow.shape",
"tools.functions.get_time",
"core.utils.format_boxes",
"tools.functions.compass",
"cv2.imshow",
"numpy.array",
"tools.generate_detections.create_box_encoder",
"cv2.destroyAllWindows",
"core.utils.load_config",
"tools.functions.get_direction",
... | [((786, 837), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (830, 837), True, 'import tensorflow as tf\n'), ((941, 1027), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""weights"""', '"""./checkpoints/yolov4-416"""', '"""path to weights file"""'], {}), "('weights', './checkpoints/yolov4-416',\n 'path to weights file')\n", (960, 1027), False, 'from absl import app, flags, logging\n'), ((1044, 1097), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""size"""', '(416)', '"""resize images to"""'], {}), "('size', 416, 'resize images to')\n", (1064, 1097), False, 'from absl import app, flags, logging\n'), ((1098, 1201), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""video"""', '"""./data/video/test.mp4"""', '"""path to input video or set to 0 for webcam"""'], {}), "('video', './data/video/test.mp4',\n 'path to input video or set to 0 for webcam')\n", (1117, 1201), False, 'from absl import app, flags, logging\n'), ((1218, 1277), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output"""', 'None', '"""path to output video"""'], {}), "('output', None, 'path to output video')\n", (1237, 1277), False, 'from absl import app, flags, logging\n'), ((1278, 1381), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output_format"""', '"""XVID"""', '"""codec used in VideoWriter when saving video to file"""'], {}), "('output_format', 'XVID',\n 'codec used in VideoWriter when saving video to file')\n", (1297, 1381), False, 'from absl import app, flags, logging\n'), ((1398, 1446), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""iou"""', '(0.45)', '"""iou threshold"""'], {}), "('iou', 0.45, 'iou threshold')\n", (1416, 1446), False, 'from absl import app, flags, logging\n'), ((1447, 1498), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""score"""', '(0.5)', '"""score threshold"""'], {}), "('score', 0.5, 'score threshold')\n", (1465, 1498), 
False, 'from absl import app, flags, logging\n'), ((1500, 1566), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""dont_show"""', '(False)', '"""dont show video output"""'], {}), "('dont_show', False, 'dont show video output')\n", (1520, 1566), False, 'from absl import app, flags, logging\n'), ((1567, 1623), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""tiny"""', '(False)', '"""yolo or yolo-tiny"""'], {}), "('tiny', False, 'yolo or yolo-tiny')\n", (1587, 1623), False, 'from absl import app, flags, logging\n'), ((1624, 1682), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""model"""', '"""yolov4"""', '"""yolov3 or yolov4"""'], {}), "('model', 'yolov4', 'yolov3 or yolov4')\n", (1643, 1682), False, 'from absl import app, flags, logging\n'), ((1683, 1737), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""name"""', 'None', '"""name of crossroad"""'], {}), "('name', None, 'name of crossroad')\n", (1702, 1737), False, 'from absl import app, flags, logging\n'), ((1738, 1802), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""direction"""', 'None', '"""direction of crossroad"""'], {}), "('direction', None, 'direction of crossroad')\n", (1757, 1802), False, 'from absl import app, flags, logging\n'), ((872, 939), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[0]', '(True)'], {}), '(physical_devices[0], True)\n', (912, 939), True, 'import tensorflow as tf\n'), ((1964, 2017), 'tools.generate_detections.create_box_encoder', 'gdet.create_box_encoder', (['model_filename'], {'batch_size': '(1)'}), '(model_filename, batch_size=1)\n', (1987, 2017), True, 'from tools import generate_detections as gdet\n'), ((2031, 2118), 'deep_sort.nn_matching.NearestNeighborDistanceMetric', 'nn_matching.NearestNeighborDistanceMetric', (['"""cosine"""', 'max_cosine_distance', 'nn_budget'], {}), "('cosine', max_cosine_distance,\n nn_budget)\n", (2072, 2118), False, 'from deep_sort import 
preprocessing, nn_matching\n'), ((2239, 2254), 'deep_sort.tracker.Tracker', 'Tracker', (['metric'], {}), '(metric)\n', (2246, 2254), False, 'from deep_sort.tracker import Tracker\n'), ((2269, 2282), 'tensorflow.compat.v1.ConfigProto', 'ConfigProto', ([], {}), '()\n', (2280, 2282), False, 'from tensorflow.compat.v1 import ConfigProto\n'), ((2340, 2373), 'tensorflow.compat.v1.InteractiveSession', 'InteractiveSession', ([], {'config': 'config'}), '(config=config)\n', (2358, 2373), False, 'from tensorflow.compat.v1 import InteractiveSession\n'), ((2417, 2441), 'core.utils.load_config', 'utils.load_config', (['FLAGS'], {}), '(FLAGS)\n', (2434, 2441), True, 'import core.utils as utils\n'), ((2525, 2589), 'tensorflow.saved_model.load', 'tf.saved_model.load', (['FLAGS.weights'], {'tags': '[tag_constants.SERVING]'}), '(FLAGS.weights, tags=[tag_constants.SERVING])\n', (2544, 2589), True, 'import tensorflow as tf\n'), ((9391, 9406), 'tools.functions.get_time', 'func.get_time', ([], {}), '()\n', (9404, 9406), True, 'from tools import functions as func\n'), ((3020, 3064), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['*FLAGS.output_format'], {}), '(*FLAGS.output_format)\n', (3042, 3064), False, 'import cv2\n'), ((3079, 3137), 'cv2.VideoWriter', 'cv2.VideoWriter', (['FLAGS.output', 'codec', 'fps', '(width, height)'], {}), '(FLAGS.output, codec, fps, (width, height))\n', (3094, 3137), False, 'import cv2\n'), ((3442, 3485), 'cv2.resize', 'cv2.resize', (['frame', '(input_size, input_size)'], {}), '(frame, (input_size, input_size))\n', (3452, 3485), False, 'import cv2\n'), ((3614, 3625), 'time.time', 'time.time', ([], {}), '()\n', (3623, 3625), False, 'import time\n'), ((3648, 3671), 'tensorflow.constant', 'tf.constant', (['image_data'], {}), '(image_data)\n', (3659, 3671), True, 'import tensorflow as tf\n'), ((4630, 4680), 'core.utils.format_boxes', 'utils.format_boxes', (['bboxes', 'original_h', 'original_w'], {}), '(bboxes, original_h, original_w)\n', (4648, 4680), True, 
'import core.utils as utils\n'), ((4762, 4802), 'core.utils.read_class_names', 'utils.read_class_names', (['cfg.YOLO.CLASSES'], {}), '(cfg.YOLO.CLASSES)\n', (4784, 4802), True, 'import core.utils as utils\n'), ((5215, 5230), 'numpy.array', 'np.array', (['names'], {}), '(names)\n', (5223, 5230), True, 'import numpy as np\n'), ((5276, 5315), 'numpy.delete', 'np.delete', (['bboxes', 'deleted_indx'], {'axis': '(0)'}), '(bboxes, deleted_indx, axis=0)\n', (5285, 5315), True, 'import numpy as np\n'), ((5333, 5372), 'numpy.delete', 'np.delete', (['scores', 'deleted_indx'], {'axis': '(0)'}), '(scores, deleted_indx, axis=0)\n', (5342, 5372), True, 'import numpy as np\n'), ((5700, 5722), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab20b"""'], {}), "('tab20b')\n", (5712, 5722), True, 'import matplotlib.pyplot as plt\n'), ((5834, 5872), 'numpy.array', 'np.array', (['[d.tlwh for d in detections]'], {}), '([d.tlwh for d in detections])\n', (5842, 5872), True, 'import numpy as np\n'), ((5890, 5934), 'numpy.array', 'np.array', (['[d.confidence for d in detections]'], {}), '([d.confidence for d in detections])\n', (5898, 5934), True, 'import numpy as np\n'), ((5953, 5997), 'numpy.array', 'np.array', (['[d.class_name for d in detections]'], {}), '([d.class_name for d in detections])\n', (5961, 5997), True, 'import numpy as np\n'), ((6016, 6089), 'deep_sort.preprocessing.non_max_suppression', 'preprocessing.non_max_suppression', (['boxs', 'classes', 'nms_max_overlap', 'scores'], {}), '(boxs, classes, nms_max_overlap, scores)\n', (6049, 6089), False, 'from deep_sort import preprocessing, nn_matching\n'), ((8938, 8968), 'tools.functions.compass', 'func.compass', (['frame', 'direction'], {}), '(frame, direction)\n', (8950, 8968), True, 'from tools import functions as func\n'), ((8991, 9008), 'numpy.asarray', 'np.asarray', (['frame'], {}), '(frame)\n', (9001, 9008), True, 'import numpy as np\n'), ((9026, 9064), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2BGR'], {}), 
'(frame, cv2.COLOR_RGB2BGR)\n', (9038, 9064), False, 'import cv2\n'), ((9351, 9370), 'tools.functions.counters', 'func.counters', (['name'], {}), '(name)\n', (9364, 9370), True, 'from tools import functions as func\n'), ((9453, 9466), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (9460, 9466), False, 'from absl import app, flags, logging\n'), ((2780, 2808), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (2796, 2808), False, 'import cv2\n'), ((3242, 3280), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (3254, 3280), False, 'import cv2\n'), ((3301, 3323), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (3316, 3323), False, 'from PIL import Image\n'), ((5504, 5547), 'deep_sort.detection.Detection', 'Detection', (['bbox', 'score', 'class_name', 'feature'], {}), '(bbox, score, class_name, feature)\n', (5513, 5547), False, 'from deep_sort.detection import Detection\n'), ((6949, 7014), 'cv2.circle', 'cv2.circle', (['frame', '(centroid[0], centroid[1])', '(4)', '(0, 255, 0)', '(-1)'], {}), '(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)\n', (6959, 7014), False, 'import cv2\n'), ((9110, 9144), 'cv2.imshow', 'cv2.imshow', (['"""Output Video"""', 'result'], {}), "('Output Video', result)\n", (9120, 9144), False, 'import cv2\n'), ((9272, 9295), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (9293, 9295), False, 'import cv2\n'), ((5762, 5783), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (5773, 5783), True, 'import numpy as np\n'), ((7469, 7499), 'constants.constants.MovingObjects.keys', 'constants.MovingObjects.keys', ([], {}), '()\n', (7497, 7499), False, 'from constants import constants\n'), ((9225, 9239), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (9236, 9239), False, 'import cv2\n'), ((7831, 7968), 'tools.functions.get_entry_direction', 'func.get_entry_direction', 
(['constants.MovingObjects[track.track_id][0][0]', 'constants.MovingObjects[track.track_id][0][1]', 'width', 'height'], {}), '(constants.MovingObjects[track.track_id][0][0],\n constants.MovingObjects[track.track_id][0][1], width, height)\n', (7855, 7968), True, 'from tools import functions as func\n'), ((8071, 8094), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8092, 8094), False, 'import datetime\n'), ((8428, 8470), 'tools.functions.get_direction', 'func.get_direction', (['directionx', 'directiony'], {}), '(directionx, directiony)\n', (8446, 8470), True, 'from tools import functions as func\n'), ((8503, 8526), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8524, 8526), False, 'import datetime\n'), ((8340, 8358), 'numpy.abs', 'np.abs', (['directionx'], {}), '(directionx)\n', (8346, 8358), True, 'import numpy as np\n'), ((8367, 8385), 'numpy.abs', 'np.abs', (['directiony'], {}), '(directiony)\n', (8373, 8385), True, 'import numpy as np\n'), ((3960, 3975), 'tensorflow.shape', 'tf.shape', (['boxes'], {}), '(boxes)\n', (3968, 3975), True, 'import tensorflow as tf\n'), ((4067, 4086), 'tensorflow.shape', 'tf.shape', (['pred_conf'], {}), '(pred_conf)\n', (4075, 4086), True, 'import tensorflow as tf\n'), ((4095, 4114), 'tensorflow.shape', 'tf.shape', (['pred_conf'], {}), '(pred_conf)\n', (4103, 4114), True, 'import tensorflow as tf\n')] |
import numpy as np
global d
def do_something():
n = 10
m = 45
if n or m:
d = np.array([[1, 2, 3], [4, 5, 6]])
elif n and m:
return n, m
elif n - m > 0:
n = n - m
if m + 1 >= 1:
s = 'do_something'
elif n * 0 > 0:
r = np.array([[1, 1, 1, 1]])
else:
r = 'skip'
return n, m
| [
"numpy.array"
] | [((100, 132), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (108, 132), True, 'import numpy as np\n'), ((288, 312), 'numpy.array', 'np.array', (['[[1, 1, 1, 1]]'], {}), '([[1, 1, 1, 1]])\n', (296, 312), True, 'import numpy as np\n')] |
#! /usr/bin/env python
#
# USAGE
# python util_TestXMLParameterStorage.py ; ligolw_print -t sim_inspiral -c alpha5 output.xml.gz
import RIFT.lalsimutils as lalsimutils
import numpy as np
import lal
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--output-file", default="output")
opts = parser.parse_args()

# Build five parameter sets with increasing masses and tidal parameters,
# printing each as it is constructed.
P_list_in = []
for idx in np.arange(5):
    params = lalsimutils.ChooseWaveformParams(m1=lal.MSUN_SI * (idx + 1))
    params.lambda1 = 3 * idx + 1
    params.lambda2 = 3 * idx + 2
    params.approx = 3 * idx + 2
    params.print_params()
    P_list_in.append(params)

# Round-trip the list through the XML file and print what comes back.
lalsimutils.ChooseWaveformParams_array_to_xml(P_list_in, fname=opts.output_file)
print(" ------ ")
P_list_out = lalsimutils.xml_to_ChooseWaveformParams_array(opts.output_file + ".xml.gz")
for P in P_list_out:
    P.print_params()
| [
"RIFT.lalsimutils.ChooseWaveformParams_array_to_xml",
"argparse.ArgumentParser",
"RIFT.lalsimutils.xml_to_ChooseWaveformParams_array",
"RIFT.lalsimutils.ChooseWaveformParams",
"numpy.arange"
] | [((225, 250), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (248, 250), False, 'import argparse\n'), ((357, 369), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (366, 369), True, 'import numpy as np\n'), ((584, 669), 'RIFT.lalsimutils.ChooseWaveformParams_array_to_xml', 'lalsimutils.ChooseWaveformParams_array_to_xml', (['P_list_in'], {'fname': 'opts.output_file'}), '(P_list_in, fname=opts.output_file\n )\n', (629, 669), True, 'import RIFT.lalsimutils as lalsimutils\n'), ((697, 772), 'RIFT.lalsimutils.xml_to_ChooseWaveformParams_array', 'lalsimutils.xml_to_ChooseWaveformParams_array', (["(opts.output_file + '.xml.gz')"], {}), "(opts.output_file + '.xml.gz')\n", (742, 772), True, 'import RIFT.lalsimutils as lalsimutils\n'), ((393, 451), 'RIFT.lalsimutils.ChooseWaveformParams', 'lalsimutils.ChooseWaveformParams', ([], {'m1': '(lal.MSUN_SI * (i + 1))'}), '(m1=lal.MSUN_SI * (i + 1))\n', (425, 451), True, 'import RIFT.lalsimutils as lalsimutils\n')] |
import numpy as np
import cv2 as cv
if __name__ == '__main__':
    # OpenCV's sample digits sheet: a 100x50 grid of 20x20 handwritten digits.
    img = cv.imread('/root/gitdata/zhou_test/KNN_test/digits.png')
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    # Split the sheet into 5000 cells, each 20x20 pixels.
    cells = [np.hsplit(row, 100) for row in np.vsplit(gray, 50)]
    # Numpy array of shape (50, 100, 20, 20).
    x = np.array(cells)
    # First 50 columns for training, last 50 for testing; flatten each
    # 20x20 cell into a 400-dimensional float vector.
    train = x[:, :50].reshape(-1, 400).astype(np.float32)  # (2500, 400)
    test = x[:, 50:100].reshape(-1, 400).astype(np.float32)  # (2500, 400)
    # Labels: digits 0-9, 250 samples per digit in each split.
    k = np.arange(10)
    train_labels = np.repeat(k, 250)[:, np.newaxis]  # shape (2500, 1)
    test_labels = train_labels.copy()
    ml_traindata = cv.ml.TrainData_create(train, cv.ml.ROW_SAMPLE, train_labels)
    # Train k-nearest-neighbours and classify the test split with k=5.
    # (Cleanup: removed unused train return value, getTestResponses probe,
    # and unused neighbours/dist outputs.)
    knn = cv.ml.KNearest_create()
    knn.train(ml_traindata)
    _retval, result, _neighbours, _dist = knn.findNearest(test, k=5)
    # Accuracy: percentage of predictions matching the known labels.
    matches = result == test_labels
    correct = np.count_nonzero(matches)
    accuracy = correct * 100.0 / result.size
    print(accuracy)
| [
"numpy.hsplit",
"numpy.repeat",
"cv2.ml.KNearest_create",
"numpy.count_nonzero",
"numpy.array",
"numpy.vsplit",
"cv2.cvtColor",
"cv2.ml.TrainData_create",
"cv2.imread",
"numpy.arange"
] | [((80, 136), 'cv2.imread', 'cv.imread', (['"""/root/gitdata/zhou_test/KNN_test/digits.png"""'], {}), "('/root/gitdata/zhou_test/KNN_test/digits.png')\n", (89, 136), True, 'import cv2 as cv\n'), ((149, 184), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (160, 184), True, 'import cv2 as cv\n'), ((386, 401), 'numpy.array', 'np.array', (['cells'], {}), '(cells)\n', (394, 401), True, 'import numpy as np\n'), ((667, 680), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (676, 680), True, 'import numpy as np\n'), ((824, 885), 'cv2.ml.TrainData_create', 'cv.ml.TrainData_create', (['train', 'cv.ml.ROW_SAMPLE', 'train_labels'], {}), '(train, cv.ml.ROW_SAMPLE, train_labels)\n', (846, 885), True, 'import cv2 as cv\n'), ((1002, 1025), 'cv2.ml.KNearest_create', 'cv.ml.KNearest_create', ([], {}), '()\n', (1023, 1025), True, 'import cv2 as cv\n'), ((1761, 1786), 'numpy.count_nonzero', 'np.count_nonzero', (['matches'], {}), '(matches)\n', (1777, 1786), True, 'import numpy as np\n'), ((258, 277), 'numpy.hsplit', 'np.hsplit', (['row', '(100)'], {}), '(row, 100)\n', (267, 277), True, 'import numpy as np\n'), ((700, 717), 'numpy.repeat', 'np.repeat', (['k', '(250)'], {}), '(k, 250)\n', (709, 717), True, 'import numpy as np\n'), ((289, 308), 'numpy.vsplit', 'np.vsplit', (['gray', '(50)'], {}), '(gray, 50)\n', (298, 308), True, 'import numpy as np\n')] |
import numpy as np
import generator as gen
import multiclass_logistic_regression as mlr
############################################################################
# GEN and Multi-class Logistic Regression
############################################################################
# Experiment setup: n samples, d features, k classes.
n = 1000
d = 2
k = 6
bias = 1
# Draw a synthetic k-component Gaussian mixture, then standardize it.
x, y, mu, sig = gen.gengaussianmixture(n, d, k, scale=.05)
x = gen.standardize(x)
# Append a constant bias column so the model can learn an intercept.
x = np.concatenate([x, bias * np.ones([n, 1])], axis=1)
# Regularisation strength for the logistic-regression model.
reg = 1
model = mlr.MulticlassLogisticRegression(reg, x, y)
# Uniform initial dual variables: one row of 1/k per sample.
alpha0 = 1 / k * np.ones([n, k])
# Run SDCA for up to 14 passes; presumably returns the objective trace and
# per-pass timings — verify against mlr's sdca contract.
obj_sdca, time_sdca = model.sdca(x, y, alpha0, npass=14, precision=1e-5)
| [
"multiclass_logistic_regression.MulticlassLogisticRegression",
"generator.gengaussianmixture",
"generator.standardize",
"numpy.ones"
] | [((332, 375), 'generator.gengaussianmixture', 'gen.gengaussianmixture', (['n', 'd', 'k'], {'scale': '(0.05)'}), '(n, d, k, scale=0.05)\n', (354, 375), True, 'import generator as gen\n'), ((379, 397), 'generator.standardize', 'gen.standardize', (['x'], {}), '(x)\n', (394, 397), True, 'import generator as gen\n'), ((471, 514), 'multiclass_logistic_regression.MulticlassLogisticRegression', 'mlr.MulticlassLogisticRegression', (['reg', 'x', 'y'], {}), '(reg, x, y)\n', (503, 514), True, 'import multiclass_logistic_regression as mlr\n'), ((533, 548), 'numpy.ones', 'np.ones', (['[n, k]'], {}), '([n, k])\n', (540, 548), True, 'import numpy as np\n'), ((428, 443), 'numpy.ones', 'np.ones', (['[n, 1]'], {}), '([n, 1])\n', (435, 443), True, 'import numpy as np\n')] |
"""Utilities collection.
"""
from datetime import datetime
import math
import time
import astropy.coordinates as coord
from astropy.time import Time
import astropy.units as u
import numpy as np
class Announce(object):
    """Context manager that prints a start message on entry and an end
    message with the elapsed wall-clock seconds on exit.

    Set ``disable=True`` to suppress all printing (timing is still taken).
    """

    def __init__(self, action, end, disable=False):
        self.action = action    # message printed when the context is entered
        self.end = end          # message printed when the context exits
        self.disable = disable  # suppress printing when True

    def __enter__(self):
        if not self.disable:
            print(self.action)
        self.start_time = time.time()

    def __exit__(self, exc_type, exc_value, tb):
        elapsed = time.time() - self.start_time
        if not self.disable:
            print(self.end, "(%.2fs)" % elapsed)
def R_x(alpha):
    """Rotation matrix about the x-axis by angle *alpha* (radians)."""
    c, s = np.cos(alpha), np.sin(alpha)
    return np.asarray([[1, 0, 0],
                       [0, c, -s],
                       [0, s, c]])
def R_y(alpha):
    """Rotation matrix about the y-axis by angle *alpha* (radians)."""
    c, s = np.cos(alpha), np.sin(alpha)
    return np.asarray([[c, 0, s],
                       [0, 1, 0],
                       [-s, 0, c]])
def R_z(alpha):
    """Rotation matrix about the z-axis by angle *alpha* (radians)."""
    c, s = np.cos(alpha), np.sin(alpha)
    return np.asarray([[c, -s, 0],
                       [s, c, 0],
                       [0, 0, 1]])
def get_sun_state(lat: float, lon: float, t: datetime):
    """Given a time and location get the sun position relative to this location.

    Args:
        lat (float): latitude in degrees
        lon (float): longitude in degrees
        t (datetime): query time (interpreted on the UTC scale)

    Returns:
        tuple: zenith and azimuth angles in degrees
    """
    location = coord.EarthLocation(lon=lon * u.deg,
                                   lat=lat * u.deg)
    obstime = Time(t.strftime("%Y-%m-%dT%H:%M:%S.%f"), format="isot", scale="utc")
    altaz_frame = coord.AltAz(location=location, obstime=obstime)
    sun_altaz = coord.get_sun(obstime).transform_to(altaz_frame)
    return sun_altaz.zen.degree, sun_altaz.az.degree
def project_sunray(P, zenith, azimut):
    """Rotate points *P* into the sun-ray frame.

    *zenith* and *azimut* are the solar angles in degrees: the points are
    rotated by minus the azimuth about z, then by the solar elevation
    (90 deg minus zenith) about y.
    """
    az_radian = azimut / 360 * 2 * math.pi
    zen_radian = zenith / 360 * 2 * math.pi
    elev_radian = math.pi / 2 - zen_radian
    projected = P @ R_z(-az_radian).T @ R_y(elev_radian).T
    return projected
| [
"astropy.coordinates.get_sun",
"astropy.coordinates.EarthLocation",
"astropy.time.Time",
"numpy.cos",
"numpy.sin",
"time.time",
"astropy.coordinates.AltAz"
] | [((1487, 1540), 'astropy.coordinates.EarthLocation', 'coord.EarthLocation', ([], {'lon': '(lon * u.deg)', 'lat': '(lat * u.deg)'}), '(lon=lon * u.deg, lat=lat * u.deg)\n', (1506, 1540), True, 'import astropy.coordinates as coord\n'), ((1639, 1690), 'astropy.time.Time', 'Time', (['query_time_string'], {'format': '"""isot"""', 'scale': '"""utc"""'}), "(query_time_string, format='isot', scale='utc')\n", (1643, 1690), False, 'from astropy.time import Time\n'), ((1704, 1740), 'astropy.coordinates.AltAz', 'coord.AltAz', ([], {'location': 'loc', 'obstime': 't'}), '(location=loc, obstime=t)\n', (1715, 1740), True, 'import astropy.coordinates as coord\n'), ((1751, 1767), 'astropy.coordinates.get_sun', 'coord.get_sun', (['t'], {}), '(t)\n', (1764, 1767), True, 'import astropy.coordinates as coord\n'), ((470, 481), 'time.time', 'time.time', ([], {}), '()\n', (479, 481), False, 'import time\n'), ((550, 561), 'time.time', 'time.time', ([], {}), '()\n', (559, 561), False, 'import time\n'), ((738, 751), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (744, 751), True, 'import numpy as np\n'), ((797, 810), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (803, 810), True, 'import numpy as np\n'), ((812, 825), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (818, 825), True, 'import numpy as np\n'), ((871, 884), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (877, 884), True, 'import numpy as np\n'), ((889, 902), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (895, 902), True, 'import numpy as np\n'), ((982, 995), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (988, 995), True, 'import numpy as np\n'), ((1041, 1054), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (1047, 1054), True, 'import numpy as np\n'), ((1100, 1113), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (1106, 1113), True, 'import numpy as np\n'), ((1115, 1128), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (1121, 1128), True, 'import numpy as np\n'), ((754, 
767), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (760, 767), True, 'import numpy as np\n'), ((964, 977), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (970, 977), True, 'import numpy as np\n'), ((1057, 1070), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (1063, 1070), True, 'import numpy as np\n')] |
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import nltk
nltk.download('punkt')  # ensure the Punkt tokenizer data is available
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()  # module-level stemmer shared by Model's methods
# things we need for Tensorflow
import numpy as np
#from tensorflow.keras.models import Sequential
#from tensorflow.keras.layers import Dense, Activation, Dropout
#from tensorflow.keras.optimizers import SGD
import pandas as pd
import random
import json
from tensorflow.keras.models import load_model
from os.path import dirname, join
from nltk.tokenize import word_tokenize
class Model:
    """Intent classifier: bag-of-words features fed to a saved Keras model."""

    def clean_up_sentence(self, sentence):
        """Tokenize *sentence* and Porter-stem each lower-cased token."""
        # tokenize the pattern - split words into array
        sentence_words = word_tokenize(sentence)
        #print(sentence_words)
        # stem each word - create short form for word
        sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]
        return sentence_words

    # return bag of words array: 0 or 1 for each word in the bag that exists in the sentence
    def bow(self, sentence, words, show_details=True):
        """Return a 0/1 numpy vector marking which vocabulary *words*
        occur in *sentence* (after stemming)."""
        # tokenize the pattern
        sentence_words = self.clean_up_sentence(sentence)
        # bag of words - matrix of N words, vocabulary matrix
        bag = [0] * len(words)
        for s in sentence_words:
            for i, w in enumerate(words):
                if w == s:
                    # assign 1 if current word is in the vocabulary position
                    bag[i] = 1
                    if show_details:
                        pass
                        #print(np.array(bag))
        return (np.array(bag))

    def classify_local(self, sentence, intents):
        """Predict the intent tag(s) for *sentence*.

        Rebuilds the stemmed vocabulary and class list from *intents* and
        reloads the Keras model from mod.keras on every call — NOTE(review):
        both are candidates for caching.  Returns a list of
        (tag, probability-string) tuples sorted by descending probability,
        keeping only predictions above ERROR_THRESHOLD.
        """
        ERROR_THRESHOLD = 0.25
        filename = join(dirname(__file__), "mod.keras")
        # filename2 = join(dirname(__file__), "intents.json")
        model = load_model(filename, compile=True)
        words = []
        classes = []
        documents = []
        responses = []
        ignore_words = ['?', ',', '.']
        # loop through each sentence in our intents patterns
        for intent in intents['intents']:
            for pattern in intent['patterns']:
                # tokenize each word in the sentence
                w = nltk.word_tokenize(pattern)
                # add to our words list
                words.extend(w)
                # add to documents in our corpus
                documents.append((w, intent['tag']))
                # add to our classes list
                if intent['tag'] not in classes:
                    classes.append(intent['tag'])
        # stem and lower each word and remove duplicates
        words = [stemmer.stem(w.lower()) for w in words if w not in ignore_words]
        words = sorted(list(set(words)))
        # sort classes
        classes = sorted(list(set(classes)))
        # generate probabilities from the model
        input_data = pd.DataFrame([self.bow(sentence, words)], dtype=float, index=['input'])
        #print(input_data)
        results = model.predict([input_data])[0]
        # filter out predictions below a threshold, and provide intent index
        results = [[i, r] for i, r in enumerate(results) if r > ERROR_THRESHOLD]
        # sort by strength of probability
        results.sort(key=lambda x: x[1], reverse=True)
        return_list = []
        for r in results:
            return_list.append((classes[r[0]], str(r[1])))
        # return tuple of intent and probability
        return return_list
class FrenBotApp:
    """Thin front-end that maps a user sentence to a canned response."""

    def show_data(self, sentence):
        """Classify *sentence* and return a random response for the winning tag.

        Reads intents.json next to this file, asks Model.classify_local for the
        top intent tag, then picks one of that tag's responses at random.

        Raises
        ------
        ValueError
            If the predicted tag has no entry in intents.json. (The original
            used a module-level ``global responses`` which was left undefined
            -- NameError -- or stale when no tag matched.)
        """
        mod = Model()
        filename = join(dirname(__file__), "intents.json")
        with open(filename) as file:
            intents = json.load(file)
        tag = mod.classify_local(sentence, intents)[0][0]
        for entry in intents["intents"]:
            if entry['tag'] == tag:
                return random.choice(entry['responses'])
        raise ValueError("no responses defined for intent tag %r" % tag)
def getResults(sente):
    """Route *sente* through a fresh FrenBotApp instance and return its reply."""
    return FrenBotApp().show_data(sente)
#getResults("hi there")
| [
"random.choice",
"nltk.download",
"nltk.word_tokenize",
"nltk.tokenize.word_tokenize",
"numpy.array",
"nltk.stem.porter.PorterStemmer",
"os.path.dirname",
"tensorflow.keras.models.load_model",
"json.load"
] | [((209, 231), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (222, 231), False, 'import nltk\n'), ((287, 302), 'nltk.stem.porter.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (300, 302), False, 'from nltk.stem.porter import PorterStemmer\n'), ((821, 844), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['sentence'], {}), '(sentence)\n', (834, 844), False, 'from nltk.tokenize import word_tokenize\n'), ((1693, 1706), 'numpy.array', 'np.array', (['bag'], {}), '(bag)\n', (1701, 1706), True, 'import numpy as np\n'), ((1922, 1956), 'tensorflow.keras.models.load_model', 'load_model', (['filename'], {'compile': '(True)'}), '(filename, compile=True)\n', (1932, 1956), False, 'from tensorflow.keras.models import load_model\n'), ((4045, 4069), 'random.choice', 'random.choice', (['responses'], {}), '(responses)\n', (4058, 4069), False, 'import random\n'), ((1813, 1830), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (1820, 1830), False, 'from os.path import dirname, join\n'), ((3722, 3739), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (3729, 3739), False, 'from os.path import dirname, join\n'), ((3818, 3833), 'json.load', 'json.load', (['file'], {}), '(file)\n', (3827, 3833), False, 'import json\n'), ((2308, 2335), 'nltk.word_tokenize', 'nltk.word_tokenize', (['pattern'], {}), '(pattern)\n', (2326, 2335), False, 'import nltk\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from CountingGridsPy.models import CountingGridModel
from CountingGridsPy.EngineToBrowseCloudPipeline import SlidingWindowTrainer
from scipy import io
import pandas as pd
import numpy as np
import scipy as sp
import os
from sklearn.feature_extraction.text import CountVectorizer
class CGEngineWrapper(object):
    """Fit CountingGrids models over cleaned text via the numpy or matlab engine."""

    def __init__(self, extent_size=32, window_size=5, layers=2, heartBeaters=None):
        """Store grid geometry (extent/window sizes), layer count and progress callbacks.

        Parameters
        ----------
        extent_size : int
            Side length of the (square) counting grid.
        window_size : int
            Side length of the (square) averaging window.
        layers : int
            Number of model layers.
        heartBeaters
            Optional progress-callback objects forwarded to the numpy engine.
        """
        self.cg_size = extent_size
        self.wd_size = window_size
        self.no_layers = layers
        self.heartBeaters = heartBeaters

    def ready_the_matlab_engine(self, X, labels_filtered, DIRECTORY_DATA):
        """Dump the sparse doc-term matrix (and optional labels) to .mat files for the matlab engine."""
        m, n = X.shape
        s = X.data
        i = X.tocoo().row
        j = X.indices
        io.savemat(DIRECTORY_DATA + '/counts.mat',
                   {'m': m, 'n': n, 's': s, 'i': i, 'j': j})
        if labels_filtered is not None:
            io.savemat(DIRECTORY_DATA + '/labels.mat',
                       {'labels': labels_filtered})

    def __fitCountVectorizor(self, DIRECTORY_DATA, CLEAN_DATA_FILE_NAME, labels, MIN_FREQUENCY):
        """Read the cleaned text table, drop short documents, fit a CountVectorizer.

        Returns
        -------
        (vect, X, addl_keep)
            The fitted vectorizer, the sparse doc-term matrix, and a boolean
            mask over the original rows marking documents with > 3 tokens.
            Also sets ``self.labelsS`` to the filtered labels (or None).
        """
        df = pd.read_table(DIRECTORY_DATA + CLEAN_DATA_FILE_NAME)
        df = df[df.columns[1:]]
        TEXT = df['pos_filtered'].tolist()
        # indices of documents with more than three whitespace-separated tokens
        id_grid = np.array(
            [i for i, t in enumerate(TEXT) if len(str(t).split(" ")) > 3]
        )
        addl_keep = np.zeros(len(TEXT))
        addl_keep[id_grid] += 1
        addl_keep = addl_keep.astype(bool)
        # BUG FIX: DataFrame.ix was removed in pandas 1.0; iloc performs the
        # same positional selection for the integer index array id_grid.
        df = df.iloc[id_grid].reset_index(drop=True)
        self.labelsS = np.array(
            labels)[id_grid] if labels is not None else None
        vect = CountVectorizer(decode_error="ignore", min_df=MIN_FREQUENCY)
        X = vect.fit_transform(df['pos_filtered'].tolist())
        return (vect, X, addl_keep)

    def get_vocab(self, DIRECTORY_DATA, CLEAN_DATA_FILE_NAME, labels, MIN_FREQUENCY, keep):
        """Return (vocabulary, combined keep-mask) without fitting a model."""
        vect, _, addl_keep = self.__fitCountVectorizor(
            DIRECTORY_DATA,
            CLEAN_DATA_FILE_NAME,
            labels,
            MIN_FREQUENCY
        )
        # NOTE(review): CountVectorizer.get_feature_names was removed in
        # scikit-learn 1.2 (use get_feature_names_out) -- confirm pinned version.
        return (vect.get_feature_names(), np.array(keep) & np.array(addl_keep))

    def incremental_fit(
        self, DIRECTORY_DATA, CLEAN_DATA_FILE_NAME, labels,
        MIN_FREQUENCY, keep, engine, initial_max_iter=100,
        w=2000, s=1000, runInitialTrain=True
    ):
        """Fit a model with the sliding-window trainer (numpy engine only).

        An initial fit of *initial_max_iter* iterations is followed by one
        single-iteration refit per window of width *w*, stepped by *s*.
        """
        vect, X, addl_keep = self.__fitCountVectorizor(
            DIRECTORY_DATA,
            CLEAN_DATA_FILE_NAME,
            labels,
            MIN_FREQUENCY
        )
        if engine != "numpy":
            raise ValueError("Not implemented!")
        extent = np.array([self.cg_size, self.cg_size])
        window = np.array([self.wd_size, self.wd_size])
        model = CountingGridModel(extent, window)
        T = X.shape[0]
        # one refit iteration per sliding window position
        training_max_iter_vec = [1] * (int(np.ceil((T - w) / s)) + 1)
        train = model.fit
        kwargs = {
            "data": X.toarray(),
            "max_iter": initial_max_iter,
            "returnSumSquareDifferencesOfPi": False,
            "layers": self.no_layers,
            "noise": .00001,
            "output_directory": DIRECTORY_DATA
        }
        print("Iteration vectors:")
        print("Running for this number of iteration:" +
              str(len([initial_max_iter] + training_max_iter_vec)))
        print([initial_max_iter] + training_max_iter_vec)
        SlidingWindowTrainer.SlidingTrainer(
            w,
            s,
            training_max_iter_vec,
            train,
            kwargs,
            runInitialTrain=runInitialTrain
        )
        return (vect.get_feature_names(), np.array(keep) & np.array(addl_keep))

    def fit(self, DIRECTORY_DATA, CLEAN_DATA_FILE_NAME, labels, MIN_FREQUENCY, keep, engine):
        """Fit a model in one pass with the chosen engine ('matlab' or 'numpy').

        Returns (vocabulary, combined keep-mask); raises ValueError for an
        unknown or unimplemented engine.
        """
        vect, X, addl_keep = self.__fitCountVectorizor(
            DIRECTORY_DATA,
            CLEAN_DATA_FILE_NAME,
            labels,
            MIN_FREQUENCY
        )
        if engine == "matlab":
            self.ready_the_matlab_engine(X, labels, DIRECTORY_DATA)
            os.system(r"cg_exe_buja.exe {0} counts.mat ".format(
                DIRECTORY_DATA) + str(self.cg_size) + " " + str(self.wd_size) + " " + str(self.no_layers))
        elif engine == "numpy":
            extent = np.array([self.cg_size, self.cg_size])
            window = np.array([self.wd_size, self.wd_size])
            model = CountingGridModel(extent, window)
            model.fit(
                X.toarray(),
                max_iter=100,
                returnSumSquareDifferencesOfPi=False,
                layers=self.no_layers,
                noise=.00000001,
                output_directory=DIRECTORY_DATA,
                heartBeaters=self.heartBeaters
            )
        elif engine == "torch":
            raise ValueError("Not implemented yet.")
        else:
            raise ValueError("The {} engine does not exist.".format(engine))
        return (vect.get_feature_names(), np.array(keep) & np.array(addl_keep))
| [
"numpy.ceil",
"scipy.io.savemat",
"CountingGridsPy.EngineToBrowseCloudPipeline.SlidingWindowTrainer.SlidingTrainer",
"sklearn.feature_extraction.text.CountVectorizer",
"numpy.array",
"CountingGridsPy.models.CountingGridModel",
"pandas.read_table"
] | [((808, 896), 'scipy.io.savemat', 'io.savemat', (["(DIRECTORY_DATA + '/counts.mat')", "{'m': m, 'n': n, 's': s, 'i': i, 'j': j}"], {}), "(DIRECTORY_DATA + '/counts.mat', {'m': m, 'n': n, 's': s, 'i': i,\n 'j': j})\n", (818, 896), False, 'from scipy import io\n'), ((1170, 1222), 'pandas.read_table', 'pd.read_table', (['(DIRECTORY_DATA + CLEAN_DATA_FILE_NAME)'], {}), '(DIRECTORY_DATA + CLEAN_DATA_FILE_NAME)\n', (1183, 1222), True, 'import pandas as pd\n'), ((1685, 1745), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'decode_error': '"""ignore"""', 'min_df': 'MIN_FREQUENCY'}), "(decode_error='ignore', min_df=MIN_FREQUENCY)\n", (1700, 1745), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((2658, 2696), 'numpy.array', 'np.array', (['[self.cg_size, self.cg_size]'], {}), '([self.cg_size, self.cg_size])\n', (2666, 2696), True, 'import numpy as np\n'), ((2714, 2752), 'numpy.array', 'np.array', (['[self.wd_size, self.wd_size]'], {}), '([self.wd_size, self.wd_size])\n', (2722, 2752), True, 'import numpy as np\n'), ((2769, 2802), 'CountingGridsPy.models.CountingGridModel', 'CountingGridModel', (['extent', 'window'], {}), '(extent, window)\n', (2786, 2802), False, 'from CountingGridsPy.models import CountingGridModel\n'), ((3430, 3546), 'CountingGridsPy.EngineToBrowseCloudPipeline.SlidingWindowTrainer.SlidingTrainer', 'SlidingWindowTrainer.SlidingTrainer', (['w', 's', 'training_max_iter_vec', 'train', 'kwargs'], {'runInitialTrain': 'runInitialTrain'}), '(w, s, training_max_iter_vec, train,\n kwargs, runInitialTrain=runInitialTrain)\n', (3465, 3546), False, 'from CountingGridsPy.EngineToBrowseCloudPipeline import SlidingWindowTrainer\n'), ((964, 1035), 'scipy.io.savemat', 'io.savemat', (["(DIRECTORY_DATA + '/labels.mat')", "{'labels': labels_filtered}"], {}), "(DIRECTORY_DATA + '/labels.mat', {'labels': labels_filtered})\n", (974, 1035), False, 'from scipy import io\n'), ((1599, 1615), 'numpy.array', 'np.array', 
(['labels'], {}), '(labels)\n', (1607, 1615), True, 'import numpy as np\n'), ((2151, 2165), 'numpy.array', 'np.array', (['keep'], {}), '(keep)\n', (2159, 2165), True, 'import numpy as np\n'), ((2168, 2187), 'numpy.array', 'np.array', (['addl_keep'], {}), '(addl_keep)\n', (2176, 2187), True, 'import numpy as np\n'), ((3668, 3682), 'numpy.array', 'np.array', (['keep'], {}), '(keep)\n', (3676, 3682), True, 'import numpy as np\n'), ((3685, 3704), 'numpy.array', 'np.array', (['addl_keep'], {}), '(addl_keep)\n', (3693, 3704), True, 'import numpy as np\n'), ((4300, 4338), 'numpy.array', 'np.array', (['[self.cg_size, self.cg_size]'], {}), '([self.cg_size, self.cg_size])\n', (4308, 4338), True, 'import numpy as np\n'), ((4360, 4398), 'numpy.array', 'np.array', (['[self.wd_size, self.wd_size]'], {}), '([self.wd_size, self.wd_size])\n', (4368, 4398), True, 'import numpy as np\n'), ((4419, 4452), 'CountingGridsPy.models.CountingGridModel', 'CountingGridModel', (['extent', 'window'], {}), '(extent, window)\n', (4436, 4452), False, 'from CountingGridsPy.models import CountingGridModel\n'), ((4990, 5004), 'numpy.array', 'np.array', (['keep'], {}), '(keep)\n', (4998, 5004), True, 'import numpy as np\n'), ((5007, 5026), 'numpy.array', 'np.array', (['addl_keep'], {}), '(addl_keep)\n', (5015, 5026), True, 'import numpy as np\n'), ((2881, 2901), 'numpy.ceil', 'np.ceil', (['((T - w) / s)'], {}), '((T - w) / s)\n', (2888, 2901), True, 'import numpy as np\n')] |
# Advent of Code 2021, Day 25
# (c) blu3r4y
from itertools import count
import numpy as np
from aocd.models import Puzzle
from funcy import print_calls, remove
# identifies '.', '>', 'v'
FREE, EAST, SOUTH = 0, 1, 2
@print_calls
def part1(grid):
    """Step the cucumbers until nothing moves; return the step number."""
    step = 0
    while True:
        step += 1
        moved = move_char(grid, EAST) + move_char(grid, SOUTH)
        if moved == 0:
            return step
def move_char(grid, char):
    """Move every *char* cucumber one cell if its target is free; return the move count.

    All moves are collected against the pre-move grid first, so the whole
    herd steps simultaneously.
    """
    step = east if char == EAST else south
    # first pass: collect all legal moves without mutating the grid
    pending = [
        (*pos, *step(*pos, grid.shape))
        for pos, cell in np.ndenumerate(grid)
        if cell == char and grid[step(*pos, grid.shape)] == FREE
    ]
    # second pass: apply them
    for x, y, nx, ny in pending:
        grid[x, y] = FREE
        grid[nx, ny] = char
    return len(pending)
def east(x, y, shape):
    """One cell to the right, wrapping around the grid's width."""
    width = shape[1]
    return (x, (y + 1) % width)
def south(x, y, shape):
    """One cell downward, wrapping around the grid's height."""
    height = shape[0]
    return ((x + 1) % height, y)
def load(data):
    """Parse the puzzle text into a 2d int matrix: '.' -> 0, '>' -> 1, 'v' -> 2.

    Improvement: the third-party funcy.remove call is replaced by stdlib
    str.translate/str.replace, which does the same character mapping and
    newline stripping in one pass with no extra dependency.
    """
    # row width = position of the first newline (substitutions are 1:1 chars)
    width = data.index("\n")
    digits = data.translate(str.maketrans(".>v", "012")).replace("\n", "")
    return np.fromiter(digits, dtype=int).reshape(-1, width)
if __name__ == "__main__":
puzzle = Puzzle(year=2021, day=25)
ans1 = part1(load(puzzle.input_data))
# puzzle.answer_a = ans1
| [
"aocd.models.Puzzle",
"itertools.count",
"funcy.remove",
"numpy.ndenumerate"
] | [((263, 271), 'itertools.count', 'count', (['(1)'], {}), '(1)\n', (268, 271), False, 'from itertools import count\n'), ((536, 556), 'numpy.ndenumerate', 'np.ndenumerate', (['grid'], {}), '(grid)\n', (550, 556), True, 'import numpy as np\n'), ((1220, 1245), 'aocd.models.Puzzle', 'Puzzle', ([], {'year': '(2021)', 'day': '(25)'}), '(year=2021, day=25)\n', (1226, 1245), False, 'from aocd.models import Puzzle\n'), ((1117, 1135), 'funcy.remove', 'remove', (['"""\n"""', 'data'], {}), "('\\n', data)\n", (1123, 1135), False, 'from funcy import print_calls, remove\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 24 22:47:45 2019
@author: czfyy
"""
import numpy as np
import matplotlib.pyplot as plt
def flip_coins(n):
    """Flip n coins ten times each.

    Returns the head counts of the first coin, a uniformly random coin,
    and the coin with the fewest heads, as a 3-tuple.
    """
    # one row per coin, one column per toss; entries are 0/1
    tosses = np.random.randint(0, 2, [n, 10])
    heads_per_coin = np.sum(tosses, axis=1)
    first = heads_per_coin[0]
    random_pick = heads_per_coin[np.random.randint(0, n)]
    fewest = np.min(heads_per_coin)
    return first, random_pick, fewest
#计算正面的次数
def total(x):
    """Weighted sum of a histogram: sum of index * count over *x*."""
    return sum(index * count for index, count in enumerate(x))
def hist(n1, nrand, nmin, t):
    """Draw three stacked bar charts of head-count distributions.

    n1 / nrand / nmin are length-11 histograms (head counts 0..10) for the
    first, a random, and the minimum-heads coin; t is the total number of
    tosses per tracked coin, used for the average fraction in each title.
    """
    fig = plt.figure(figsize=(5, 8))
    fig.clf()
    plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK characters in titles
    panels = [
        (n1, 'C1平均正面比例: %.4f'),
        (nrand, 'Crand平均正面比例: %.4f'),
        (nmin, 'Cmin平均正面比例: %.4f'),
    ]
    for row, (counts, title_fmt) in enumerate(panels, start=1):
        plt.subplot(3, 1, row)
        plt.bar(range(11), counts)
        plt.title(title_fmt % (total(counts) / t))
    # keep the three subplots from overlapping
    fig.tight_layout()
    plt.show()
def main():
    """Run the coin-flip experiment repeatedly and plot the score histograms.

    n coins per trial, 10 tosses per coin, m trials; t is the total number
    of tosses accumulated for each tracked coin across all trials.
    """
    n = 1000
    m = 10000
    t = m * 10
    # histograms indexed by head count 0..10 for first / random / min coin
    n1 = np.zeros(11)
    nrand = np.zeros(11)
    nmin = np.zeros(11)
    for _ in range(m):
        v1, vrand, vmin = flip_coins(n)
        n1[v1] += 1
        nrand[vrand] += 1
        nmin[vmin] += 1
    print(n1)
    hist(n1, nrand, nmin, t)
if __name__ == '__main__':
main()
| [
"numpy.sum",
"numpy.random.randint",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.min",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((274, 306), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '[n, 10]'], {}), '(0, 2, [n, 10])\n', (291, 306), True, 'import numpy as np\n'), ((340, 362), 'numpy.sum', 'np.sum', (['result'], {'axis': '(1)'}), '(result, axis=1)\n', (346, 362), True, 'import numpy as np\n'), ((477, 490), 'numpy.min', 'np.min', (['heads'], {}), '(heads)\n', (483, 490), True, 'import numpy as np\n'), ((677, 703), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 8)'}), '(figsize=(5, 8))\n', (687, 703), True, 'import matplotlib.pyplot as plt\n'), ((795, 815), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (806, 815), True, 'import matplotlib.pyplot as plt\n'), ((905, 925), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (916, 925), True, 'import matplotlib.pyplot as plt\n'), ((1022, 1042), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (1033, 1042), True, 'import matplotlib.pyplot as plt\n'), ((1290, 1300), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1298, 1300), True, 'import matplotlib.pyplot as plt\n'), ((1505, 1517), 'numpy.zeros', 'np.zeros', (['(11)'], {}), '(11)\n', (1513, 1517), True, 'import numpy as np\n'), ((440, 463), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n'], {}), '(0, n)\n', (457, 463), True, 'import numpy as np\n')] |
import numpy as np
def compute_error_for_line_given_points(b, m, points):
    """Mean squared error of the line y = m*x + b over the (x, y) rows of *points*."""
    squared_residuals = [
        (points[i, 1] - (m * points[i, 0] + b)) ** 2
        for i in range(len(points))
    ]
    return sum(squared_residuals) / float(len(points))
def gradient_descent_runner(points, starting_b, starting_m, learning_rate, num_of_iterations):
    """Run *num_of_iterations* gradient-descent steps from the given start; return [b, m]."""
    b, m = starting_b, starting_m
    for _ in range(num_of_iterations):
        b, m = step_gradient(b, m, np.array(points), learning_rate)
    return [b, m]
def step_gradient(current_b, current_m, points, learning_rates):
    """One gradient-descent step for the MSE of y = m*x + b; returns [new_b, new_m]."""
    N = float(len(points))
    b_gradient = 0
    m_gradient = 0
    for row in range(len(points)):
        x = points[row, 0]
        y = points[row, 1]
        # partial derivatives of the mean squared error w.r.t. b and m
        b_gradient += -(2 / N) * (y - ((current_m * x) + current_b))
        m_gradient += -(2 / N) * x * (y - ((current_m * x) + current_b))
    return [current_b - (learning_rates * b_gradient),
            current_m - (learning_rates * m_gradient)]
def run():
    """Demo driver: fit a line to data.csv via gradient descent and report the error."""
    # load the (x, y) observations
    points = np.genfromtxt('data.csv', delimiter=',')
    # hyperparameters for the line y = m*x + b
    learning_rate = 0.0001
    initial_b = 0
    initial_m = 0
    num_of_iterations = 1000
    print(f"starting gradient descent at b = {initial_b}, m = {initial_m}, "
          f"error = {compute_error_for_line_given_points(initial_b, initial_m, points)}")
    [b, m] = gradient_descent_runner(points, initial_b, initial_m, learning_rate, num_of_iterations)
    print(f"After {num_of_iterations } b = {b}, m = {m}, error = {compute_error_for_line_given_points(b, m, points)}")
print(f"After {num_of_iterations } b = {b}, m = {m}, error = {compute_error_for_line_given_points(b, m, points)}")
if __name__ == "__main__":
run()
| [
"numpy.array",
"numpy.genfromtxt"
] | [((1214, 1254), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data.csv"""'], {'delimiter': '""","""'}), "('data.csv', delimiter=',')\n", (1227, 1254), True, 'import numpy as np\n'), ((622, 638), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (630, 638), True, 'import numpy as np\n')] |
import json
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
import numpy as np
import nltk
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
oov_tok = "<OOV>"
tokenizer = Tokenizer()
# Toy training corpus: one utterance per line, lowercased before tokenizing.
input_data= "hELLO! \n good morning\n good bye!\n what's your name? \n what's your age?"
corpus = input_data.lower().split('\n')
print(f"corpus: {corpus}")
# Build the vocabulary from the utterances.
tokenizer.fit_on_texts(corpus)
# for key in tokenizer.word_index:
# tokenizer.word_index[stemmer.stem(key)] = tokenizer.word_index.pop(key)
# total_words = len(tokenizer.word_index) + 1
# print(f"word_index:{tokenizer.word_index}")
# print(f"total words: {total_words}")
# One intent tag per corpus line, in the same order as the utterances above.
label_data = "greeting\n greeting \n bye \n general \n general \n"
label_corpus = label_data.lower().split('\n')
print(f"label corpus: {label_corpus}")
# tokenizer_label = Tokenizer()
# NOTE: labels are folded into the SAME tokenizer vocabulary as the
# utterances, so word indices are shared between inputs and tags.
tokenizer.fit_on_texts(label_corpus)
total_label_words = len(tokenizer.word_index) + 1
# print(f"label_word_index:{tokenizer.word_index}")
# print(f"total label words: {total_label_words}")
total_words = len(tokenizer.word_index) + 1
print(f"word_index:{tokenizer.word_index}")
print(f"total words: {total_words}")
# Convert each utterance into a sequence of word indices.
input_sequences = []
for line in corpus:
    token_list = tokenizer.texts_to_sequences([line])[0]
    input_sequences.append(token_list)
max_sequence_len = max(len(x) for x in input_sequences)
print(f"input_sequences:{input_sequences}\nmax_sequence_len: {max_sequence_len}")
# input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='post'))
print(f"padded input_sequences: \n{input_sequences}") # shape: 3 * 5
# Convert each label line into a sequence as well, skipping the trailing
# empty line produced by the final '\n' in label_data.
label_sequences = []
for line in label_corpus:
    if line == '':
        continue
    token_list = tokenizer.texts_to_sequences([line])[0]
    label_sequences.append(token_list)
label_max_sequence_len = max([len(x) for x in label_sequences])
print(f"label_sequences:{label_sequences}\nlabel_max_sequence_len: {label_max_sequence_len}")
# Right-pad inputs and labels to their respective maximum lengths.
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='post'))
label_sequences = np.array(pad_sequences(label_sequences, maxlen=label_max_sequence_len, padding='post'))
xs = input_sequences
print(f"xs: \n{input_sequences}\nlabels: \n{label_sequences}")
# One-hot encode the label indices for categorical cross-entropy.
ys = tf.keras.utils.to_categorical(label_sequences, num_classes=total_label_words)
print(f"ys: \n{ys }")
# Embedding -> LSTM -> softmax classifier over the shared vocabulary.
model = Sequential()
model.add(Embedding(total_words, 64, input_length=max_sequence_len))
# model.add(Bidirectional(LSTM(20))) # LSTM(150)
model.add(LSTM(20))
model.add(Dense(total_words, activation='softmax'))
# NOTE(review): the `lr` argument was renamed to `learning_rate` in newer
# Keras/TF releases -- confirm the pinned TensorFlow version supports `lr`.
adam = Adam(lr=0.01) # learning rate
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
history = model.fit(xs, ys, epochs=500, verbose=1)
import matplotlib.pyplot as plt
def plot_graphs(history, string):
    """Plot the per-epoch curve of the named metric from a Keras History object."""
    metric_values = history.history[string]
    plt.plot(metric_values)
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.show()
plot_graphs(history, 'accuracy')
# prediction
seed_text = "What can I call your name?"
token_list = tokenizer.texts_to_sequences([seed_text])[0]
token_list = pad_sequences([token_list], maxlen=max_sequence_len, padding='post')
predicted = model.predict_classes(token_list, verbose=0)
prob = model.predict_proba(token_list)
print(f"predicted: {predicted}")
print(f"prob: {prob * 100}")
print(f"max index of prob is {np.argmax(prob)}")
dict = dict([(value, key) for (key, value) in tokenizer.word_index.items()])
print(f"tag: {dict[predicted[0]]}")
# e = model.layers[0]
# weights = e.get_weights()[0]
# print(weights.shape) # shape: (vocab_size, embedding_dim)
# (vocab_size, embedding_dim) = weights.shape
#
# import io
#
# out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
# out_m = io.open('meta.tsv', 'w', encoding='utf-8')
# for word_num in range(1, vocab_size):
# word = dict[word_num]
# embeddings = weights[word_num]
# out_m.write(word + "\n")
# out_v.write('\t'.join([str(x) for x in embeddings]) + '\n')
# out_v.close()
# out_m.close()
#
# try:
# from google.colab import files
# except ImportError:
# pass
# else:
# files.download('vecs.tsv')
# files.download('meta.tsv')
pass | [
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"nltk.stem.lancaster.LancasterStemmer",
"numpy.argmax",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Emb... | [((417, 435), 'nltk.stem.lancaster.LancasterStemmer', 'LancasterStemmer', ([], {}), '()\n', (433, 435), False, 'from nltk.stem.lancaster import LancasterStemmer\n'), ((467, 478), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (476, 478), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((2503, 2580), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['label_sequences'], {'num_classes': 'total_label_words'}), '(label_sequences, num_classes=total_label_words)\n', (2532, 2580), True, 'import tensorflow as tf\n'), ((2612, 2624), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2622, 2624), False, 'from tensorflow.keras.models import Sequential\n'), ((2823, 2836), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.01)'}), '(lr=0.01)\n', (2827, 2836), False, 'from tensorflow.keras.optimizers import Adam\n'), ((3320, 3388), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['[token_list]'], {'maxlen': 'max_sequence_len', 'padding': '"""post"""'}), "([token_list], maxlen=max_sequence_len, padding='post')\n", (3333, 3388), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((2233, 2304), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['input_sequences'], {'maxlen': 'max_sequence_len', 'padding': '"""post"""'}), "(input_sequences, maxlen=max_sequence_len, padding='post')\n", (2246, 2304), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((2333, 2410), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['label_sequences'], {'maxlen': 'label_max_sequence_len', 'padding': '"""post"""'}), "(label_sequences, maxlen=label_max_sequence_len, padding='post')\n", (2346, 2410), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((2635, 2692), 'tensorflow.keras.layers.Embedding', 
'Embedding', (['total_words', '(64)'], {'input_length': 'max_sequence_len'}), '(total_words, 64, input_length=max_sequence_len)\n', (2644, 2692), False, 'from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional\n'), ((2754, 2762), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(20)'], {}), '(20)\n', (2758, 2762), False, 'from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional\n'), ((2774, 2814), 'tensorflow.keras.layers.Dense', 'Dense', (['total_words'], {'activation': '"""softmax"""'}), "(total_words, activation='softmax')\n", (2779, 2814), False, 'from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional\n'), ((3063, 3096), 'matplotlib.pyplot.plot', 'plt.plot', (['history.history[string]'], {}), '(history.history[string])\n', (3071, 3096), True, 'import matplotlib.pyplot as plt\n'), ((3101, 3121), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (3111, 3121), True, 'import matplotlib.pyplot as plt\n'), ((3126, 3144), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['string'], {}), '(string)\n', (3136, 3144), True, 'import matplotlib.pyplot as plt\n'), ((3149, 3159), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3157, 3159), True, 'import matplotlib.pyplot as plt\n'), ((3577, 3592), 'numpy.argmax', 'np.argmax', (['prob'], {}), '(prob)\n', (3586, 3592), True, 'import numpy as np\n')] |
import numpy as np
import torch
from .base import Sampler
from ...utils.types import unpack_tensor_tuple, pack_tensor_in_list
__all__ = ["DataLoaderSampler", "DataSetSampler"]
class _ToDeviceSampler(Sampler):
    """A sampler that can move data between devices and data types."""

    def __init__(self, device, dtype, return_hook=lambda x: x, **kwargs):
        super().__init__(return_hook=return_hook, **kwargs)
        # Empty "context" tensor whose only job is to carry the target device
        # and dtype; registering it as a buffer lets the user decide whether
        # samples live on the gpu or cpu.
        self.register_buffer("ctx", torch.tensor([], device=device, dtype=dtype))

    def sample(self, *args, **kwargs):
        raw = pack_tensor_in_list(super().sample(*args, **kwargs))
        moved = [tensor.to(self.ctx) for tensor in raw]
        return unpack_tensor_tuple(moved)
class DataLoaderSampler(_ToDeviceSampler):
    """A torch.utils.data.DataLoader instance wrapped as a sampler.

    Parameters
    ----------
    dataloader : torch.utils.data.DataLoader
        The data loader instance.
    device : torch.device.device
        The device on which the sampled tensors should live.
    dtype : torch.dtype
        Data type of the sampled tensors.

    Notes
    -----
    Only implemented for dataloader.batch_size == n_samples.
    """

    def __init__(self, dataloader, device, dtype, return_hook=lambda x: x):
        super().__init__(device=device, dtype=dtype, return_hook=return_hook)
        self._dataloader = dataloader
        self._iterator = iter(self._dataloader)

    def _sample(self, n_samples, *args, **kwargs):
        # the loader yields fixed-size batches, so the request must match
        if n_samples != self._dataloader.batch_size:
            raise ValueError("DataLoaderSampler only implemented for batch_size == n_samples")
        return unpack_tensor_tuple(next(self._iterator))
class DataSetSampler(_ToDeviceSampler, torch.utils.data.Dataset):
    """Sample from data.

    Iterates the stored tensors in epochs: indices are handed out in order
    of the (optionally shuffled) permutation, and when an epoch is exhausted
    the permutation is reshuffled and sampling continues recursively.

    Parameters
    ----------
    *data : torch.Tensor
        Potentially multiple torch tensors of the same length.
    shuffle : bool
        Whether the data should be accessed in random order.
    device : torch.device.device
        The device that the sampled tensors should live on.
    dtype : torch.dtype
        Data type of the sampled tensors.

    Attributes
    ----------
    data : list[torch.Tensor]
        The data set from which to draw samples.
    """

    def __init__(self, *data: torch.Tensor, shuffle=True, device=None, dtype=None, return_hook=lambda x: x):
        # default device/dtype come from the first data tensor
        device = data[0].device if device is None else device
        dtype = data[0].dtype if dtype is None else dtype
        super().__init__(device=device, dtype=dtype, return_hook=return_hook)
        if not all(len(d) == len(data[0]) for d in data):
            raise ValueError("All data items must have the same length.")
        self.data = pack_tensor_in_list(data)
        # position within the current epoch's index permutation
        self._current_index = 0
        self._shuffle = shuffle
        if shuffle:
            self._idxs = np.random.permutation(len(data[0]))
        else:
            self._idxs = np.arange(len(data[0]))

    def __len__(self) -> int:
        return len(self._idxs)

    def __getitem__(self, idx):
        # one aligned item from every stored tensor
        return tuple(d[idx] for d in self.data)

    def _sample(self, n_samples: int, *args, **kwargs):
        # start from empty accumulators matching the stored tensors' dtype/device
        samples = [torch.tensor([]).to(self.data[0]) for _ in self.data]
        if self._current_index + n_samples < len(self.data[0]):
            # enough items left in the current epoch: take a contiguous slice
            # of the permutation and advance the cursor
            idxs = self._idxs[self._current_index:self._current_index + n_samples]
            self._current_index += n_samples
            for i in range(len(self.data)):
                samples[i] = torch.cat([samples[i], self.data[i][idxs]], dim=0)
        else:
            # epoch boundary: consume whatever remains ...
            idxs = self._idxs[self._current_index:]
            for i in range(len(self.data)):
                samples[i] = torch.cat([samples[i], self.data[i][idxs]], dim=0)
            # reset
            if self._shuffle:
                np.random.shuffle(self._idxs)
            self._current_index = 0
            # recurse
            # ... then draw the shortfall from the fresh epoch and append it
            remaining = self._sample(n_samples - len(samples[0]))
            remaining = pack_tensor_in_list(remaining)
            for i, other in enumerate(remaining):
                samples[i] = torch.cat([samples[i], other], dim=0)
        return unpack_tensor_tuple(samples)

    def reshuffle_(self):
        """Shuffle the dataset randomly in-place."""
        self._idxs = np.random.permutation(len(self.data[0]))
        self._current_index = 0
        return self

    def resize_(self, new_size):
        """Resize the data set to `new_size` and reinitialize the randomization.

        - When resizing to a bigger size, samples from the set are randomly repeated.
        - When resizing to a smaller size, samples from the set are randomly deleted.

        Returns
        -------
        indices : np.ndarray
            The indices used for reshuffling.

        Notes
        -----
        This is an in-place operation.
        """
        if new_size != len(self):
            # draw new_size rows with replacement from the current data
            indices = np.random.randint(low=0, high=len(self), size=new_size)
            for i in range(len(self.data)):
                self.data[i] = self.data[i][indices]
            self._idxs = np.random.permutation(new_size)
            self._current_index = 0
            return indices
        else:
            # size unchanged: identity mapping, data left untouched
            return np.arange(len(self))
| [
"torch.tensor",
"numpy.random.shuffle",
"torch.cat",
"numpy.random.permutation"
] | [((638, 682), 'torch.tensor', 'torch.tensor', (['[]'], {'device': 'device', 'dtype': 'dtype'}), '([], device=device, dtype=dtype)\n', (650, 682), False, 'import torch\n'), ((5275, 5306), 'numpy.random.permutation', 'np.random.permutation', (['new_size'], {}), '(new_size)\n', (5296, 5306), True, 'import numpy as np\n'), ((3695, 3745), 'torch.cat', 'torch.cat', (['[samples[i], self.data[i][idxs]]'], {'dim': '(0)'}), '([samples[i], self.data[i][idxs]], dim=0)\n', (3704, 3745), False, 'import torch\n'), ((3885, 3935), 'torch.cat', 'torch.cat', (['[samples[i], self.data[i][idxs]]'], {'dim': '(0)'}), '([samples[i], self.data[i][idxs]], dim=0)\n', (3894, 3935), False, 'import torch\n'), ((4002, 4031), 'numpy.random.shuffle', 'np.random.shuffle', (['self._idxs'], {}), '(self._idxs)\n', (4019, 4031), True, 'import numpy as np\n'), ((4290, 4327), 'torch.cat', 'torch.cat', (['[samples[i], other]'], {'dim': '(0)'}), '([samples[i], other], dim=0)\n', (4299, 4327), False, 'import torch\n'), ((3376, 3392), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (3388, 3392), False, 'import torch\n')] |
import gdal
from pywps import FORMATS, UOM
from pywps.app import Process
from pywps.inout import LiteralOutput
from .process_defaults import process_defaults, LiteralInputD, ComplexInputD
from pywps.app.Common import Metadata
from pywps.response.execute import ExecuteResponse
from gdalos.calc import get_pixel_from_raster
from processes import process_helper
import numpy as np
# from pywps.inout.literaltypes import LITERAL_DATA_TYPES
class RasterValue(Process):
    """PyWPS process that samples values from a raster at given coordinates.

    Inputs: a GeoTIFF raster 'r', a band index 'bi', coordinate arrays
    'x'/'y', a coordinate-kind selector 'c' (ll/xy/pl) and an 'interpolate'
    flag.  Outputs the sampled value(s) both as a float ('v') and as a
    string ('output').
    """

    def __init__(self):
        """Declare the WPS inputs/outputs and register the handler."""
        process_id = 'ras_val'
        defaults = process_defaults(process_id)
        inputs = [
            ComplexInputD(defaults, 'r', 'input_raster', supported_formats=[FORMATS.GEOTIFF]),
            LiteralInputD(defaults, 'bi', 'band_index', data_type='positiveInteger', min_occurs=0, max_occurs=1, default=1),
            LiteralInputD(defaults, 'x', 'x or longitude or pixel', data_type='float', min_occurs=1, uoms=[UOM('metre')]),
            LiteralInputD(defaults, 'y', 'y or latitude or line', data_type='float', min_occurs=1, uoms=[UOM('metre')]),
            LiteralInputD(defaults, 'c', 'coordinate kind: ll/xy/pl', data_type='string', min_occurs=1, max_occurs=1, default='ll'),
            # NOTE(review): 'interpolate' is declared here but never read in
            # _handler below -- confirm whether it should be forwarded.
            LiteralInputD(defaults, 'interpolate', 'interpolate ', data_type='boolean', min_occurs=1, max_occurs=1, default=True),
        ]
        outputs = [LiteralOutput('v', 'raster value at the requested coordinate as float', data_type='float'),
                   LiteralOutput('output', 'raster value at the requested coordinate (as string)', data_type='string')]
        super().__init__(
            self._handler,
            identifier=process_id,
            version='1.0.0',
            title='raster values',
            abstract='get raster values at given coordinates',
            profile='',
            metadata=[Metadata('raster')],
            inputs=inputs,
            outputs=outputs,
            store_supported=True,
            status_supported=True
        )

    def _handler(self, request, response: ExecuteResponse):
        """Open the raster, resolve the coordinate kind and sample it.

        Raises a plain Exception for an out-of-range band, mismatched
        x/y lengths, or an unknown coordinate kind.
        """
        raster_filename, ds = process_helper.open_ds_from_wps_input(request.inputs['r'][0])
        # NOTE(review): the band is only validated here, never passed to the
        # sampling helpers -- presumably they default to band 1; confirm.
        band: gdal.Band = ds.GetRasterBand(request.inputs['bi'][0].data)
        if band is None:
            raise Exception('band number out of range')
        c = request.inputs['c'][0].data.lower() if 'c' in request.inputs else None
        # Map the coordinate kind onto the srs flag consumed by
        # get_pixel_from_raster: pl -> None, xy -> False, ll -> True
        # (exact semantics live in that helper -- confirm there).
        if c is None:
            srs = None
        else:
            if c == 'pl':
                srs = None
            elif c == 'xy':
                srs = False
            elif c == 'll':
                srs = True
            else:
                raise Exception('Unknown xy format {}'.format(c))
        x = process_helper.get_input_data_array(request.inputs['x'])
        y = process_helper.get_input_data_array(request.inputs['y'])
        if len(x) != len(y):
            raise Exception('length(x)={} is different from length(y)={}'.format(len(x), len(y)))
        if len(x) == 1:
            # Single point: sample directly.
            value = get_pixel_from_raster.get_pixel_from_raster(ds, x[0], y[0], srs)
            response.outputs['v'].output_format = FORMATS.TEXT
            response.outputs['v'].data = value
            response.outputs['output'].output_format = FORMATS.TEXT
            response.outputs['output'].data = str(value)
        elif len(x) > 1:
            # Multiple points: stack x/y into (n, 2) point pairs.
            points = np.dstack((x, y))[0]
            values = get_pixel_from_raster.get_pixel_from_raster_multi(ds, points, srs)
            # Outputs stay unset when the helper returns nothing falsy.
            if values:
                response.outputs['v'].output_format = FORMATS.TEXT
                response.outputs['v'].data = values
                response.outputs['output'].output_format = FORMATS.TEXT
                response.outputs['output'].data = str(values)
        del ds  # release the GDAL dataset handle
        return response
| [
"numpy.dstack",
"pywps.app.Common.Metadata",
"gdalos.calc.get_pixel_from_raster.get_pixel_from_raster_multi",
"pywps.inout.LiteralOutput",
"pywps.UOM",
"gdalos.calc.get_pixel_from_raster.get_pixel_from_raster",
"processes.process_helper.open_ds_from_wps_input",
"processes.process_helper.get_input_data... | [((2070, 2131), 'processes.process_helper.open_ds_from_wps_input', 'process_helper.open_ds_from_wps_input', (["request.inputs['r'][0]"], {}), "(request.inputs['r'][0])\n", (2107, 2131), False, 'from processes import process_helper\n'), ((2691, 2747), 'processes.process_helper.get_input_data_array', 'process_helper.get_input_data_array', (["request.inputs['x']"], {}), "(request.inputs['x'])\n", (2726, 2747), False, 'from processes import process_helper\n'), ((2760, 2816), 'processes.process_helper.get_input_data_array', 'process_helper.get_input_data_array', (["request.inputs['y']"], {}), "(request.inputs['y'])\n", (2795, 2816), False, 'from processes import process_helper\n'), ((1350, 1444), 'pywps.inout.LiteralOutput', 'LiteralOutput', (['"""v"""', '"""raster value at the requested coordinate as float"""'], {'data_type': '"""float"""'}), "('v', 'raster value at the requested coordinate as float',\n data_type='float')\n", (1363, 1444), False, 'from pywps.inout import LiteralOutput\n'), ((1461, 1564), 'pywps.inout.LiteralOutput', 'LiteralOutput', (['"""output"""', '"""raster value at the requested coordinate (as string)"""'], {'data_type': '"""string"""'}), "('output',\n 'raster value at the requested coordinate (as string)', data_type='string')\n", (1474, 1564), False, 'from pywps.inout import LiteralOutput\n'), ((2990, 3054), 'gdalos.calc.get_pixel_from_raster.get_pixel_from_raster', 'get_pixel_from_raster.get_pixel_from_raster', (['ds', 'x[0]', 'y[0]', 'srs'], {}), '(ds, x[0], y[0], srs)\n', (3033, 3054), False, 'from gdalos.calc import get_pixel_from_raster\n'), ((3378, 3444), 'gdalos.calc.get_pixel_from_raster.get_pixel_from_raster_multi', 'get_pixel_from_raster.get_pixel_from_raster_multi', (['ds', 'points', 'srs'], {}), '(ds, points, srs)\n', (3427, 3444), False, 'from gdalos.calc import get_pixel_from_raster\n'), ((1824, 1842), 'pywps.app.Common.Metadata', 'Metadata', (['"""raster"""'], {}), "('raster')\n", 
(1832, 1842), False, 'from pywps.app.Common import Metadata\n'), ((3336, 3353), 'numpy.dstack', 'np.dstack', (['(x, y)'], {}), '((x, y))\n', (3345, 3353), True, 'import numpy as np\n'), ((919, 931), 'pywps.UOM', 'UOM', (['"""metre"""'], {}), "('metre')\n", (922, 931), False, 'from pywps import FORMATS, UOM\n'), ((1040, 1052), 'pywps.UOM', 'UOM', (['"""metre"""'], {}), "('metre')\n", (1043, 1052), False, 'from pywps import FORMATS, UOM\n')] |
import pytest
import numpy as np
from fri.model.ordinal_regression import ordinal_scores as score
@pytest.fixture()
def data():
    """Balanced ordinal labels: one sample each of classes 0..3."""
    return np.arange(4)
@pytest.fixture()
def imb_data():
    """Imbalanced ordinal labels: classes 0 and 1 once, class 2 four times."""
    labels = [0, 1] + [2] * 4
    return np.array(labels)
def reverse_label(y):
    """Return a reversed copy of the label vector *y* (input untouched)."""
    reversed_copy = np.copy(y)[::-1]
    return reversed_copy
def swap_first_last(y):
    """Return a copy of *y* with the first and last entries exchanged."""
    swapped = np.copy(y)
    swapped[0], swapped[-1] = swapped[-1], swapped[0]
    return swapped
@pytest.mark.parametrize("error", ["mze", "mae", "mmae"])
def test_ordinal_score(error, data):
    # The score must rank label quality for every error type:
    # perfect labels > partially swapped labels > fully reversed labels.
    score_perfect = score(data, data, error_type=error)
    score_mixed = score(data, swap_first_last(data), error_type=error)
    score_worst = score(data, reverse_label(data), error_type=error)
    assert score_perfect > score_mixed
    assert score_mixed > score_worst
    # A perfect prediction is normalized to exactly 1.
    assert score_perfect == 1
def test_score_imbalanced(data, imb_data):
    # On balanced labels the 'mae' and 'mmae' variants must coincide
    # (presumably 'mmae' is a macro/class-averaged MAE -- see fri docs).
    score_mae = score(data, swap_first_last(data), error_type="mae")
    score_mmae = score(data, swap_first_last(data), error_type="mmae")
    assert score_mae == score_mmae
    # On imbalanced labels the two variants must diverge.
    score_mae = score(imb_data, swap_first_last(imb_data), error_type="mae")
    score_mmae = score(imb_data, swap_first_last(imb_data), error_type="mmae")
    assert score_mae != score_mmae
| [
"numpy.flip",
"numpy.copy",
"fri.model.ordinal_regression.ordinal_scores",
"pytest.mark.parametrize",
"numpy.array",
"pytest.fixture"
] | [((102, 118), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (116, 118), False, 'import pytest\n'), ((178, 194), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (192, 194), False, 'import pytest\n'), ((426, 482), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""error"""', "['mze', 'mae', 'mmae']"], {}), "('error', ['mze', 'mae', 'mmae'])\n", (449, 482), False, 'import pytest\n'), ((139, 161), 'numpy.array', 'np.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (147, 161), True, 'import numpy as np\n'), ((219, 247), 'numpy.array', 'np.array', (['[0, 1, 2, 2, 2, 2]'], {}), '([0, 1, 2, 2, 2, 2])\n', (227, 247), True, 'import numpy as np\n'), ((293, 303), 'numpy.copy', 'np.copy', (['y'], {}), '(y)\n', (300, 303), True, 'import numpy as np\n'), ((315, 336), 'numpy.flip', 'np.flip', (['y[:]'], {'axis': '(0)'}), '(y[:], axis=0)\n', (322, 336), True, 'import numpy as np\n'), ((371, 381), 'numpy.copy', 'np.copy', (['y'], {}), '(y)\n', (378, 381), True, 'import numpy as np\n'), ((540, 575), 'fri.model.ordinal_regression.ordinal_scores', 'score', (['data', 'data'], {'error_type': 'error'}), '(data, data, error_type=error)\n', (545, 575), True, 'from fri.model.ordinal_regression import ordinal_scores as score\n')] |
from typing import List
from PIL import Image, ImageSequence
from math import cos, radians
from traceback import print_exc
import sys
import numpy as np
def rainbow_angle(number: float) -> float:
    """Magnitude of the cosine of *number*, interpreted as degrees."""
    cosine = cos(radians(number))
    return cosine if cosine >= 0 else -cosine
def rainbow_list(image: Image.Image) -> List[Image.Image]:
    """Build the frame list of a rainbow-tinted animation of *image*.

    Animated inputs keep their own frames; a still image is duplicated
    into 30 frames.  Each frame's R/G/B channels are scaled by
    phase-shifted |cos| factors so the hue cycles across the animation,
    and the alpha channel is binarized.
    """
    frames = ImageSequence.all_frames(image)
    if len(frames) > 1:
        # Animated source: convert every existing frame to RGBA,
        # keeping its per-frame metadata (duration, disposal, ...).
        np_frames = []
        for frame in frames:
            np_frame = np.array(frame.convert("RGBA"))
            np_frames.append((np_frame, frame.info))
    else:
        # Still source: repeat independent copies to animate the tint.
        np_image = np.array(image.convert("RGBA"))
        np_frames = [(np.copy(np_image), image.info) for _ in range(30)]
    colored_frames = []
    for i, (np_frame, info) in enumerate(np_frames):
        # Phase in degrees, advancing 0..180 over the whole animation.
        change_color = i * 180 / len(np_frames)
        # R/G/B are attenuated by |cos| at three different phase offsets.
        np_frame[..., 0] = np_frame[..., 0] * rainbow_angle(change_color + 45)
        np_frame[..., 1] = np_frame[..., 1] * rainbow_angle(change_color + 90)
        np_frame[..., 2] = np_frame[..., 2] * rainbow_angle(change_color)
        # Alpha is thresholded: anything above 130 becomes fully opaque.
        np_frame[..., 3] = (np_frame[..., 3] > 130) * 255
        img = Image.fromarray(np_frame)
        img.info = info  # carry the original frame metadata over
        colored_frames.append(img)
    return colored_frames
def rainbow(path: str, dest: str) -> None:
    """Read the image at *path* and write an animated rainbow GIF to *dest*.

    Frame durations/disposals are taken per frame (defaulting to 40 ms
    and disposal 1); loop count and background come from the source image.
    """
    image = Image.open(path)
    frames = rainbow_list(image)
    frames[0].save(
        dest,
        background=frames[0].info.get("background", 255),
        # Bug fix: was misspelled 'fromat', which Pillow silently ignored
        # as an unknown extra parameter (the format was only inferred from
        # the '.gif' filename suffix).
        format='GIF',
        version="GIF89a",
        save_all=True,
        append_images=frames[1:],
        optimize=False,
        duration=[frame.info.get("duration", 40) for frame in frames],
        loop=image.info.get("loop", 0),
        transparency=255,
        palette=Image.WEB,
        disposal=[frame.info.get("disposal", 1) for frame in frames],
        comment="Made by Dashstrom"
    )
if __name__ == "__main__":
path_number = len(sys.argv) - 1
for progress, path in enumerate(sys.argv[1:], start=1):
print(f"\rConvert {path} ({progress}/{path_number})")
try:
rainbow(path, path + ".rainbow.gif")
except Exception:
print_exc()
| [
"numpy.copy",
"PIL.Image.fromarray",
"PIL.Image.open",
"math.radians",
"PIL.ImageSequence.all_frames",
"traceback.print_exc"
] | [((395, 426), 'PIL.ImageSequence.all_frames', 'ImageSequence.all_frames', (['image'], {}), '(image)\n', (419, 426), False, 'from PIL import Image, ImageSequence\n'), ((1417, 1433), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (1427, 1433), False, 'from PIL import Image, ImageSequence\n'), ((1192, 1217), 'PIL.Image.fromarray', 'Image.fromarray', (['np_frame'], {}), '(np_frame)\n', (1207, 1217), False, 'from PIL import Image, ImageSequence\n'), ((260, 275), 'math.radians', 'radians', (['number'], {}), '(number)\n', (267, 275), False, 'from math import cos, radians\n'), ((702, 719), 'numpy.copy', 'np.copy', (['np_image'], {}), '(np_image)\n', (709, 719), True, 'import numpy as np\n'), ((2277, 2288), 'traceback.print_exc', 'print_exc', ([], {}), '()\n', (2286, 2288), False, 'from traceback import print_exc\n')] |
"""Author: WKirgsn, 2018
Hands-On Training with Self-Normalizing Neural Networks"""
import warnings
warnings.filterwarnings("ignore")
import preprocessing.config as cfg
import numpy as np
import gc
from preprocessing import select_gpu # choose GPU through import
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow import set_random_seed
from preprocessing.data import LightDataManager
import preprocessing.file_utils as futils
from preprocessing.snn_model_utils import SNNKerasRegressor, build_snn_model
# Ensure reproducibility
SEED = cfg.data_cfg['random_seed']
np.random.seed(SEED)
set_random_seed(SEED)
def main():
    """Train the SNN Keras regressor over several trials and score on test data.

    Loads/featurizes the dataset, builds early-stopping callbacks, then for
    each trial fits a fresh model and records history, predictions and score
    on the trial report.
    """
    # config
    if cfg.debug_cfg['DEBUG']:
        print('## DEBUG MODE ON ##')
    # Load data and derive features; column names come from the manager.
    dm = LightDataManager(cfg.data_cfg['file_path'])
    dm.featurize()
    x_train = dm.tra_df[dm.x_cols]
    y_train = dm.tra_df[dm.y_cols]
    x_val = dm.val_df[dm.x_cols]
    y_val = dm.val_df[dm.y_cols]
    x_tst = dm.tst_df[dm.x_cols]
    y_tst = dm.tst_df[dm.y_cols]
    gc.collect()  # free the intermediate frames before training
    callbacks = [
        EarlyStopping(monitor='val_loss',
                      min_delta=1e-3,
                      patience=5,
                      verbose=1),
        ReduceLROnPlateau(monitor='loss',
                          patience=2), ]
    # start trials
    trial_reports = futils.TrialReports(SEED)
    KerasRegressor_config = {'x_shape': (1, len(dm.x_cols)),
                             'verbose': 1,
                             }
    # add configs from config file (these must match with args of build_fn)
    init_cfg = cfg.keras_cfg['snn_params'].copy()
    # batch_size belongs to fit(), not to the model builder.
    init_cfg.pop('batch_size', None)
    KerasRegressor_config.update(init_cfg)
    fit_cfg = {'X': x_train, 'y': y_train,
               'validation_data': (x_val, y_val),
               'callbacks': callbacks,
               }
    for result in trial_reports.conduct(cfg.keras_cfg['n_trials']):
        # create model
        model = SNNKerasRegressor(build_fn=build_snn_model,
                                  **KerasRegressor_config)
        result.history = model.fit(**fit_cfg)
        result.model = model
        result.yhat_te = model.predict(x_tst)
        # Undo target scaling before scoring against the raw test labels.
        result.yhat_te = dm.inverse_transform(result.yhat_te)
        result.actual = dm.inverse_transform(y_tst)
        result.score, _ = dm.score(result.actual, result.yhat_te)


if __name__ == '__main__':
    main()
| [
"preprocessing.file_utils.TrialReports",
"keras.callbacks.ReduceLROnPlateau",
"numpy.random.seed",
"gc.collect",
"keras.callbacks.EarlyStopping",
"preprocessing.data.LightDataManager",
"preprocessing.snn_model_utils.SNNKerasRegressor",
"tensorflow.set_random_seed",
"warnings.filterwarnings"
] | [((101, 134), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (124, 134), False, 'import warnings\n'), ((594, 614), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (608, 614), True, 'import numpy as np\n'), ((615, 636), 'tensorflow.set_random_seed', 'set_random_seed', (['SEED'], {}), '(SEED)\n', (630, 636), False, 'from tensorflow import set_random_seed\n'), ((742, 785), 'preprocessing.data.LightDataManager', 'LightDataManager', (["cfg.data_cfg['file_path']"], {}), "(cfg.data_cfg['file_path'])\n", (758, 785), False, 'from preprocessing.data import LightDataManager\n'), ((1013, 1025), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1023, 1025), False, 'import gc\n'), ((1317, 1342), 'preprocessing.file_utils.TrialReports', 'futils.TrialReports', (['SEED'], {}), '(SEED)\n', (1336, 1342), True, 'import preprocessing.file_utils as futils\n'), ((1053, 1126), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0.001)', 'patience': '(5)', 'verbose': '(1)'}), "(monitor='val_loss', min_delta=0.001, patience=5, verbose=1)\n", (1066, 1126), False, 'from keras.callbacks import EarlyStopping, ReduceLROnPlateau\n'), ((1201, 1246), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""loss"""', 'patience': '(2)'}), "(monitor='loss', patience=2)\n", (1218, 1246), False, 'from keras.callbacks import EarlyStopping, ReduceLROnPlateau\n'), ((1942, 2010), 'preprocessing.snn_model_utils.SNNKerasRegressor', 'SNNKerasRegressor', ([], {'build_fn': 'build_snn_model'}), '(build_fn=build_snn_model, **KerasRegressor_config)\n', (1959, 2010), False, 'from preprocessing.snn_model_utils import SNNKerasRegressor, build_snn_model\n')] |
# MIT License
# Copyright (c) 2018 the NJUNLP groups.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Author baoyu.nlp
# Time 2019-01-23 09:30
import numpy as np
import torch
_FLOAT32_INF = np.float32(np.finfo('float32').max / 10)
def sequence_mask(length_array, max_len=-1, cuda=False):
    """Build a padding mask from a list of sequence lengths.

    Args:
        length_array: per-example sequence lengths.
        max_len: mask width; -1 means "use length_array[0]" (assumes the
            lengths are sorted in descending order -- TODO confirm).
        cuda: move the resulting tensor to GPU when True.

    Returns:
        torch.ByteTensor of shape [batch, max_len] with 0 on valid
        positions (index < length) and 1 on padding positions.
    """
    # Bug fix: the original condition was inverted -- it kept the -1
    # sentinel (crashing np.ones with a negative dimension) and silently
    # replaced an explicitly given max_len with length_array[0].
    if max_len == -1:
        max_len = length_array[0]
    batch_size = len(length_array)
    mask = np.ones((batch_size, max_len), dtype=np.uint8)
    for i, seq_len in enumerate(length_array):
        mask[i][:seq_len] = 0  # valid tokens are unmasked (0)
    mask = torch.ByteTensor(mask)
    return mask.cuda() if cuda else mask
def distance_mask(distance, pad=0):
    """Build a visibility mask from syntax distances (NOT IMPLEMENTED).

    Per the original note, each position is meant to "see" only positions
    whose distance value is lower than its own.

    Args:
        distance: syntax distance with [batch,max_len], just could see which value is lower than me.
        pad: padding value -- unused in this stub.
    Return:
        value matrix: [batch,max_len,] -- currently always None.
    """
    # TODO: unimplemented stub; callers currently receive None.
    pass
def mask_scores(scores, beam_mask, EOS=2):
    """Mask the next-step scores of already-finished beams.

    Args:
        scores (torch.Tensor): [batch_size, beam_size, vocab_size] scores,
            smaller is better (usually negative log-probs).
        beam_mask (torch.Tensor): [batch_size, beam_size]; 1.0 marks an
            open beam, 0.0 a closed one.
        EOS: vocabulary index that remains selectable for closed beams.

    Returns:
        Scores where every closed beam can only continue with EOS.
    """
    vocab_size = scores.size(-1)
    # Row assigned to closed beams: a huge penalty everywhere except EOS.
    finished_row = beam_mask.new(vocab_size, ).zero_() + float(_FLOAT32_INF)
    finished_row[EOS] = 0.0
    open_part = scores * beam_mask.unsqueeze(2)
    closed_part = torch.matmul((1.0 - beam_mask).unsqueeze(2), finished_row.unsqueeze(0))
    return open_part + closed_part
def relative_relation_mask(pos_pred, pos_ref, pos_mask=None):
    """Pairwise relative positions for predicted and reference orderings.

    Args:
        pos_pred: [batch, seq_len] predicted positions.
        pos_ref: [batch, seq_len] reference positions.
        pos_mask: optional [batch, seq_len] 0/1 validity mask.  Defaults to
            all ones.  (Bug fix: the declared default of None previously
            crashed immediately on ``pos_mask.size()``.)

    Returns:
        (pred_relative, ref_relative, mask_tensor), each with shape
        [batch, seq_len, seq_len]; relative entry (b, i, j) is
        pos[b, j] - pos[b, i], and mask_tensor marks pairs where both
        endpoints are valid.
    """
    if pos_mask is None:
        pos_mask = torch.ones_like(pos_pred)
    batch_size, seq_len = pos_mask.size()
    pos_mask = pos_mask.long()
    # Outer product of the mask with itself: 1 iff both positions valid.
    mask_tensor = pos_mask.view(batch_size, seq_len, 1) * pos_mask.view(batch_size, 1, seq_len)
    # [batch,seq_len,seq_len]
    pred_relative = pos_pred.unsqueeze(-2) - pos_pred.unsqueeze(-1)
    ref_relative = pos_ref.unsqueeze(-2) - pos_ref.unsqueeze(-1)
    return pred_relative, ref_relative, mask_tensor
| [
"numpy.finfo",
"torch.ByteTensor",
"numpy.ones"
] | [((1412, 1458), 'numpy.ones', 'np.ones', (['(batch_size, max_len)'], {'dtype': 'np.uint8'}), '((batch_size, max_len), dtype=np.uint8)\n', (1419, 1458), True, 'import numpy as np\n'), ((1548, 1570), 'torch.ByteTensor', 'torch.ByteTensor', (['mask'], {}), '(mask)\n', (1564, 1570), False, 'import torch\n'), ((1216, 1235), 'numpy.finfo', 'np.finfo', (['"""float32"""'], {}), "('float32')\n", (1224, 1235), True, 'import numpy as np\n')] |
"""
The module is used for working with synchronous data flow graphs.
It implements a sequential execution algorithm.
"""
from typing import Tuple, Dict, List, Any, NamedTuple, Deque, Callable
from abc import ABC, abstractmethod
from functools import reduce
import logging
from sympy import Matrix, lcm, gcd, fraction # pylint: disable=import-error
import numpy as np # pylint: disable=import-error
InputPort = NamedTuple('InputPort', [
('type', type), ('consumption', int)
])
OutputPort = NamedTuple('OutputPort', [
('type', type), ('production', int)
])
class Agent(ABC):
    """Abstract base class for SDF actors; all agents should derive from it."""
    @property
    @abstractmethod
    def inputs(self) -> Dict[str, InputPort]:
        """Input ports of the agent, keyed by port label."""
    @property
    @abstractmethod
    def outputs(self) -> Dict[str, OutputPort]:
        """Output ports of the agent, keyed by port label."""
    @abstractmethod
    def calculate(self, input_tokens):
        """Fire once: consume input tokens, return tokens per output port."""
PortLabel = str
AgentLabel = str
Dst = NamedTuple('Dst', [
('agent', AgentLabel), ('port', PortLabel)
])
Src = NamedTuple('Src', [
('agent', AgentLabel), ('port', PortLabel)
])
Buffer = NamedTuple('Buffer', [
('src', Src), ('dst', Dst), ('tokens', Deque[Any])
])
Results = NamedTuple('Results', [
('tokens', Dict[Dst, List[Any]])
])
Agents = Dict[str, Agent]
Buffers = List[Buffer]
Graph = Tuple[Agents, Buffers]
class Gain(Agent):
    """A stateless SDF actor that scales every token by a constant factor."""

    def __init__(self, gain):
        self._gain = gain

    @property
    def inputs(self) -> Dict[str, InputPort]:
        """One float input port 'u' consuming a single token per firing."""
        return {'u': InputPort(float, 1)}

    @property
    def outputs(self) -> Dict[str, OutputPort]:
        """One float output port 'y' producing a single token per firing."""
        return {'y': OutputPort(float, 1)}

    def calculate(self, input_tokens):
        """Multiply the consumed token by the configured gain."""
        scaled = input_tokens['u'][0] * self._gain
        return {'y': [scaled]}
TopologyMatrix = Tuple[List[AgentLabel], List[List[int]]]
Schedule = List[AgentLabel]
Termination = Callable[[Graph, Results], bool]
def get_src_buffers(
        agent_name: AgentLabel,
        buffers: Buffers
) -> Buffers:
    """Buffers that feed tokens into the named actor."""
    incoming = []
    for candidate in buffers:
        if candidate.dst.agent == agent_name:
            incoming.append(candidate)
    return incoming
def get_dst_buffers(
        agent_name: AgentLabel,
        buffers: Buffers
) -> Buffers:
    """Buffers that carry tokens produced by the named actor."""
    outgoing = []
    for candidate in buffers:
        if candidate.src.agent == agent_name:
            outgoing.append(candidate)
    return outgoing
def can_fire(
        agent_name: AgentLabel,
        sdf_graph: Graph,
        modified_tokens
) -> bool:
    """An actor may fire iff every incoming buffer holds enough tokens."""
    agents, buffers = sdf_graph
    consumer = agents[agent_name]
    for src, dst, _ in buffers:
        if dst.agent != agent_name:
            continue
        if modified_tokens[src, dst] < consumer.inputs[dst.port].consumption:
            return False
    return True
def _validate_connections(sdf_graph):
    """Return False (with a log entry) if any buffer endpoint names an unknown agent."""
    agents, buffers = sdf_graph
    for source, sink, _ in buffers:
        if sink.agent not in agents:
            logging.error(f'''
            {source}-{sink} has no valid destination of a connection!
            ''')
            return False
        if source.agent not in agents:
            logging.error(f'''
            {source}-{sink} has no valid source of a connection!
            ''')
            return False
    return True
def validate_graph(sdf_graph: Graph) -> bool:
    """Run every structural check on the SDF graph; True iff all pass."""
    # A graph is valid when its connections resolve and a schedule exists.
    for check in (_validate_connections, calculate_schedule):
        if not check(sdf_graph):
            return False
    return True
def calculate_topology_matrix(sdf_graph: Graph) -> TopologyMatrix:
    """Build the SDF topology matrix: one row per buffer, one column per agent."""
    agents, buffers = sdf_graph
    kernel_labels = list(agents)
    column_of = {label: col for col, label in enumerate(kernel_labels)}
    values = [[0] * len(kernel_labels) for _ in buffers]
    for row, (src, dst, _) in enumerate(buffers):
        # Producer adds tokens (+production); consumer removes them
        # (-consumption) on each firing.
        values[row][column_of[src.agent]] = agents[src.agent].outputs[src.port].production
        values[row][column_of[dst.agent]] = -agents[dst.agent].inputs[dst.port].consumption
    return (kernel_labels, values)
def is_consistent(
        topology_matrix: TopologyMatrix
) -> bool:
    """True iff firing rates are consistent: rank of the matrix is #agents - 1."""
    _, matrix = topology_matrix
    num_agents = len(matrix[0])
    rank = np.linalg.matrix_rank(matrix)
    return rank == num_agents - 1
def repetition_vector(
        topology_matrix: TopologyMatrix
) -> List[int]:
    """Smallest positive integer firing counts q with matrix * q == 0."""
    _, top_matrix = topology_matrix
    null_vector = Matrix(top_matrix).nullspace()[0]
    fractional = [fraction(entry) for entry in null_vector]
    # Clear denominators by scaling with their least common multiple ...
    common_denominator = reduce(lcm, [den for _, den in fractional])
    integer = [num * common_denominator / den for num, den in fractional]
    # ... then normalize by the gcd to obtain the minimal vector.
    common_factor = reduce(gcd, integer)
    return [entry / common_factor for entry in integer]
def calculate_schedule(sdf_graph: Graph) -> Schedule:
    """
    Calculate a periodic admissible sequential schedule (PASS) by
    symbolically firing agents on simulated token counts.
    Returns an empty list if no PASS exists (inconsistent rates or
    deadlock).
    """
    matrix = calculate_topology_matrix(sdf_graph)
    if not is_consistent(matrix):
        logging.error('''
        The SDF graph has inconsistent production and consumption rates!
        ''')
        return []
    # Remaining firings per agent within one schedule period.
    repetitions: Dict[AgentLabel, int] = {
        matrix[0][idx]: val
        for idx, val in enumerate(repetition_vector(matrix))
    }
    agents, buffers = sdf_graph
    # Simulated token counts per (src, dst) connection, seeded with the
    # initial tokens present on each buffer.
    modified_tokens = {
        (buffer.src, buffer.dst): len(buffer.tokens)
        for buffer in buffers
    }
    schedule = []
    while True:
        agent_scheduled = False
        for agent_name in agents:
            # Fire the first agent that has enough input tokens and still
            # owes firings this period.
            if (can_fire(agent_name, sdf_graph, modified_tokens)
                    and repetitions[agent_name] > 0):
                schedule.append(agent_name)
                agent_scheduled = True
                repetitions[agent_name] -= 1
                agent = agents[agent_name]
                # Symbolically consume from inputs ...
                for buffer in get_src_buffers(agent_name, buffers):
                    consumption = agent.inputs[buffer.dst.port].consumption
                    modified_tokens[buffer.src, buffer.dst] -= consumption
                # ... and produce onto outputs.
                for buffer in get_dst_buffers(agent_name, buffers):
                    production = agent.outputs[buffer.src.port].production
                    modified_tokens[buffer.src, buffer.dst] += production
                break
        if not agent_scheduled:
            # No agent could fire but firings remain: deadlock.
            logging.error('''
            The given graph is in a deadlock! Please, modify the tokens to
            avoid the deadlock.
            ''')
            return []
        if all(repetitions[agent_name] == 0 for agent_name in agents):
            return schedule
def sequential_run(
        sdf_graph: Graph,
        terminate: Termination
) -> Results:
    """Execute the SDF graph sequentially until *terminate* returns True.

    Each pass runs one full PASS iteration; all tokens arriving at every
    destination are accumulated in the returned Results.
    """
    agents, buffers = sdf_graph
    schedule = calculate_schedule(sdf_graph)
    # Results start with the initial tokens already present on the buffers.
    results = Results(tokens={
        buffer.dst: list(buffer.tokens)
        for buffer in buffers
    })
    while not terminate(sdf_graph, results):
        for agent_name in schedule:
            agent = agents[agent_name]
            # Pop exactly 'consumption' tokens from every incoming buffer.
            input_tokens = {
                buffer.dst.port: [
                    buffer.tokens.popleft() for _ in
                    range(agent.inputs[buffer.dst.port].consumption)
                ]
                for buffer in get_src_buffers(agent_name, buffers)
            }
            output_tokens = agent.calculate(input_tokens)
            # Push produced tokens downstream and record them.
            for buffer in get_dst_buffers(agent_name, buffers):
                tokens = output_tokens[buffer.src.port]
                buffer.tokens.extend(tokens)
                results.tokens[buffer.dst].extend(tokens)
    return results
def iterations_expired(iterations: int) -> Termination:
    """Build a termination predicate that fires after *iterations* calls."""
    remaining = iterations

    # pylint: disable=unused-argument
    def terminate(sdf_graph: Graph, results: Results) -> bool:
        """Count down one run; signal termination once the budget is spent."""
        nonlocal remaining
        remaining -= 1
        return remaining < 0
    return terminate
| [
"numpy.linalg.matrix_rank",
"functools.reduce",
"sympy.Matrix",
"logging.error",
"typing.NamedTuple"
] | [((416, 479), 'typing.NamedTuple', 'NamedTuple', (['"""InputPort"""', "[('type', type), ('consumption', int)]"], {}), "('InputPort', [('type', type), ('consumption', int)])\n", (426, 479), False, 'from typing import Tuple, Dict, List, Any, NamedTuple, Deque, Callable\n'), ((499, 562), 'typing.NamedTuple', 'NamedTuple', (['"""OutputPort"""', "[('type', type), ('production', int)]"], {}), "('OutputPort', [('type', type), ('production', int)])\n", (509, 562), False, 'from typing import Tuple, Dict, List, Any, NamedTuple, Deque, Callable\n'), ((1048, 1111), 'typing.NamedTuple', 'NamedTuple', (['"""Dst"""', "[('agent', AgentLabel), ('port', PortLabel)]"], {}), "('Dst', [('agent', AgentLabel), ('port', PortLabel)])\n", (1058, 1111), False, 'from typing import Tuple, Dict, List, Any, NamedTuple, Deque, Callable\n'), ((1124, 1187), 'typing.NamedTuple', 'NamedTuple', (['"""Src"""', "[('agent', AgentLabel), ('port', PortLabel)]"], {}), "('Src', [('agent', AgentLabel), ('port', PortLabel)])\n", (1134, 1187), False, 'from typing import Tuple, Dict, List, Any, NamedTuple, Deque, Callable\n'), ((1203, 1277), 'typing.NamedTuple', 'NamedTuple', (['"""Buffer"""', "[('src', Src), ('dst', Dst), ('tokens', Deque[Any])]"], {}), "('Buffer', [('src', Src), ('dst', Dst), ('tokens', Deque[Any])])\n", (1213, 1277), False, 'from typing import Tuple, Dict, List, Any, NamedTuple, Deque, Callable\n'), ((1294, 1351), 'typing.NamedTuple', 'NamedTuple', (['"""Results"""', "[('tokens', Dict[Dst, List[Any]])]"], {}), "('Results', [('tokens', Dict[Dst, List[Any]])])\n", (1304, 1351), False, 'from typing import Tuple, Dict, List, Any, NamedTuple, Deque, Callable\n'), ((4802, 4820), 'sympy.Matrix', 'Matrix', (['top_matrix'], {}), '(top_matrix)\n', (4808, 4820), False, 'from sympy import Matrix, lcm, gcd, fraction\n'), ((4942, 4967), 'functools.reduce', 'reduce', (['lcm', 'denominators'], {}), '(lcm, denominators)\n', (4948, 4967), False, 'from functools import reduce\n'), ((5046, 5066), 
'functools.reduce', 'reduce', (['gcd', 'integer'], {}), '(gcd, integer)\n', (5052, 5066), False, 'from functools import reduce\n'), ((4570, 4599), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['matrix'], {}), '(matrix)\n', (4591, 4599), True, 'import numpy as np\n'), ((5358, 5471), 'logging.error', 'logging.error', (['"""\n The SDF graph has inconsistent production and consumption rates!\n """'], {}), '(\n """\n The SDF graph has inconsistent production and consumption rates!\n """\n )\n', (5371, 5471), False, 'import logging\n'), ((3015, 3126), 'logging.error', 'logging.error', (['f"""\n {src}-{dst} has no valid destination of a connection!\n """'], {}), '(\n f"""\n {src}-{dst} has no valid destination of a connection!\n """\n )\n', (3028, 3126), False, 'import logging\n'), ((3190, 3296), 'logging.error', 'logging.error', (['f"""\n {src}-{dst} has no valid source of a connection!\n """'], {}), '(\n f"""\n {src}-{dst} has no valid source of a connection!\n """\n )\n', (3203, 3296), False, 'import logging\n'), ((6657, 6808), 'logging.error', 'logging.error', (['"""\n The given graph is in a deadlock! Please, modify the tokens to\n avoid the deadlock.\n """'], {}), '(\n """\n The given graph is in a deadlock! Please, modify the tokens to\n avoid the deadlock.\n """\n )\n', (6670, 6808), False, 'import logging\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 8 14:10:28 2016
@author: se

Parses a Gamry .DTA export containing several CV-cycle paragraphs followed
by one long QCM data paragraph, and locates the start of each section.
NOTE(review): this is Python-2-era code (`filter(...)` result is indexed
later) -- porting to Python 3 would require `list(filter(...))`.
"""
#import of modules
import os  # bug fix: os.path.dirname is used below but os was never imported
import csv
import pandas as pd
from pandas import DataFrame
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
from scipy.interpolate import interp1d
from scipy.interpolate import griddata
#import of Gamry .DTA file with different paragraphs of CV data and a long QCM data paragraph in the end
u = '/home/se/Dropbox/Masterarbeit/Projekt/data/raw/eQCM/C_beschichtet/#2/2016-05-19/QCM_RCVv8_10mV_all/QCM_RCVv8_10mV_all.DTA' #location of .DTA file
f = open(u, 'r') #open .DTA file
origin = os.path.dirname(u)+'/' #folder of .DTA file
data=[] #initiate the list/tuple
# Split every line of the .DTA file into whitespace-separated columns.
for line in f:
    line = line.strip()
    columns = line.split()
    data.append(columns)
data = filter(None, data) #removes empty lines in .dta file
cvlist = [] #starting points of CV cycles
qcmlist = [] #starting point of QCM data
qcmstart = [] #time the QCM was measuring before the electrochemical measurements were started and postition
# Scan for section markers: 'CURVE' opens a CV cycle, 'QCMOFFSETTIME'
# carries the QCM time offset, 'QCMCurve' opens the QCM paragraph.
for i in data:
    if i[0].startswith('CURVE'):
        #print data.index(i)
        cvlist.append(data.index(i))
    if 'QCMOFFSETTIME'in i:
        qcmstart.append(data[data.index(i)][2])
        qcmstart.append(data.index(i)) #time the QCM was measuring before the electrochemical measurements were started in sec.
    if 'QCMCurve' in i:
        qcmlist = data.index(i)
        #print 'qcm position found:'
        #print qcmlist
        #print data.index(i)
# Gamry exports use ',' as decimal separator; normalize before float().
qcm_time_zero = float((qcmstart[0]).replace(',','.'))
#print 'finished'
print ('Es wurden '+repr(len(cvlist)) +' CV Zyklen gefunden.')
print ('Diese starten in den Zeilen: '+ repr(cvlist))
print ('Die QCM Daten beginnen ab Zeile: '+ repr(qcmlist))
header = data[0:(cvlist[0])]
#CV Zyklen splitten:
cvname = [] #initiate cv data names
a=0
for i in cvlist:
a = a+1
cvname.append('CV_{0:02d}'.format(a))
# Post-processing of a combined EC/QCM text export: split the raw dump into
# individual CV cycles and the QCM trace, replace comma decimal separators
# with dots, and write each piece to its own text file.
cvdata = [] #initiate echem data collection; holds all CV cycles (separated)
for i in cvlist:
    if cvlist.index(i) < len(cvlist)-1:
        cvdata.append(data[cvlist[cvlist.index(i)]+1:cvlist[cvlist.index(i)+1]])
        cvdata[cvlist.index(i)][1][3] = 'V_vs._Ref.'
        cvdata[cvlist.index(i)][1][4:6] = ''
    else:
        # last CV block runs up to the start of the QCM section
        cvdata.append(data[cvlist[cvlist.index(i)]+1:(qcmstart[1]-2)])
        cvdata[cvlist.index(i)][1][3] = 'V_vs._Ref.'
        cvdata[cvlist.index(i)][1][4:6] = ''
# replace comma decimal separators with dots in every CV cell
for i in range(len(cvdata)):
    for j in range(len(cvdata[i])):
        for k in range(len(cvdata[i][j])-1):
            cvdata[i][j][k] = cvdata[i][j][k].replace(',','.')
qcmdata = data[qcmlist+1:len(data)]
# same decimal-separator fix for the QCM rows
for i in range(len(qcmdata)):
    for j in range(len(qcmdata[i])):
        qcmdata[i][j] = qcmdata[i][j].replace(',','.')
#saving QCM and CV data to files
nr = 0
#save header
nameheader = origin + 'header.txt'
file = open(nameheader, 'w')
wr = csv.writer(file, quoting=csv.QUOTE_ALL)
wr.writerows((header))
file.close()
# one text file per CV cycle, numeric columns only (rows 0-1 are metadata)
for i in range(len(cvname)):
    namecv = origin + cvname[i] + '.txt'
    cv_loop =np.asarray(cvdata[i][2:])
    cv_loop = np.asarray(cv_loop[:][:,1:5], dtype = np.float32)
    np.savetxt(namecv, cv_loop, delimiter=',', header='T/s, Q/C, U_ref/V, I/A')
    #file = open(namecv, 'wb')
    #wr = csv.writer(file, quoting=csv.QUOTE_ALL)
    #wr.writerows((cvdata[nr]))
    #file.close()
    #nr = nr +1
cvtime = [] #starting time of each cv cycle
for i in range(len(cvname)):
    cvtime.append(float(cvdata[i][2][1]))
#modify QCM data to start at 0 seconds
start = 0
while start < len(qcmdata):
    if start <2:
        start= start + 1
    else:
        qcmdata[start][1] = float(qcmdata[start][1]) - qcm_time_zero
        start = start + 1
nameqcm = origin + 'qcm_data.txt'
file = open(nameqcm, 'w')
for i in range(len(qcmdata)):
    file.write("%s \n" % qcmdata[i])
file.close()
# End of writing original data to separate files
q = 0
qcmname = []
for i in cvname:
    q = q+1
    qcmname.append('QCM_{0:02d}'.format(q))
eqcmdata = np.asarray(qcmdata)[2:][:,1:4]
eqcmdata = np.asarray(eqcmdata, dtype=np.float32)
eqcmname = origin + 'eqcm_data.txt'
np.savetxt(eqcmname, eqcmdata, delimiter=',', header='T/s , fs/MHz , fp/MHz')
#split qcmdata to CV cycles
t = 0
qcmtime = []  # row index into eqcmdata where each CV cycle begins
for i in cvtime:
    if eqcmdata[:][t][0] == 0.0:
        qcmtime.append(t)
        t = t+1
    else:
        # advance until the QCM timestamp reaches the CV cycle start time
        while eqcmdata[:][t][0] < i:
            t = t+1
        qcmtime.append(t)
        t = t+1
l = 0
for i in range(len(qcmtime)):
    if qcmtime[i]<qcmtime[-1]:
        np.savetxt(origin + qcmname[i] + '.txt', eqcmdata[l:qcmtime[i+1]], delimiter=',', header='T/s , fs/MHz , fp/MHz')
        l = qcmtime[i+1]
    else:
        # last cycle: everything from the final split point onwards
        np.savetxt(origin +qcmname[i] + '.txt', eqcmdata[qcmtime[-1]:], delimiter=',', header='T/s , fs/MHz , fp/MHz')
# re-load the per-cycle files, plot U(t) and build interpolators
for i in range(len(cvname)):
    CV = np.loadtxt(origin + cvname[i] + '.txt', delimiter=',', skiprows=1)
    QCM = np.loadtxt(origin + qcmname[i] + '.txt', delimiter=',', skiprows=2, usecols =(1,))
    #QCMtime = QCMtime.flatten
    time = CV[:,[0]]
    time = time.flatten()
    Q = CV[:,[1]]
    Q = Q.flatten()
    U = CV[:,[2]]
    U = U.flatten()
    I = CV[:,[3]]
    I = I.flatten()
    plt.plot(time, U)
    plt.ylabel('U / V')
    plt.xlabel('time / s')
    savefig(origin + 'U_vs_t_{0:02d}.pdf'.format(i+1), bbox_inches='tight')
    plt.clf()
    plt.close()
    # interpolators for resampling the EC signals onto another time base
    fitQ = interp1d(time, Q)
    fitU = interp1d(time, U)
    fitI = interp1d(time, I)
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"csv.writer",
"numpy.asarray",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"scipy.interpolate.interp1d",
"matplotlib.pyplot.close",
"numpy.savetxt",
"numpy.loadtxt"
] | [((3003, 3042), 'csv.writer', 'csv.writer', (['file'], {'quoting': 'csv.QUOTE_ALL'}), '(file, quoting=csv.QUOTE_ALL)\n', (3013, 3042), False, 'import csv\n'), ((4185, 4223), 'numpy.asarray', 'np.asarray', (['eqcmdata'], {'dtype': 'np.float32'}), '(eqcmdata, dtype=np.float32)\n', (4195, 4223), True, 'import numpy as np\n'), ((4260, 4337), 'numpy.savetxt', 'np.savetxt', (['eqcmname', 'eqcmdata'], {'delimiter': '""","""', 'header': '"""T/s , fs/MHz , fp/MHz"""'}), "(eqcmname, eqcmdata, delimiter=',', header='T/s , fs/MHz , fp/MHz')\n", (4270, 4337), True, 'import numpy as np\n'), ((3164, 3189), 'numpy.asarray', 'np.asarray', (['cvdata[i][2:]'], {}), '(cvdata[i][2:])\n', (3174, 3189), True, 'import numpy as np\n'), ((3204, 3252), 'numpy.asarray', 'np.asarray', (['cv_loop[:][:, 1:5]'], {'dtype': 'np.float32'}), '(cv_loop[:][:, 1:5], dtype=np.float32)\n', (3214, 3252), True, 'import numpy as np\n'), ((3258, 3333), 'numpy.savetxt', 'np.savetxt', (['namecv', 'cv_loop'], {'delimiter': '""","""', 'header': '"""T/s, Q/C, U_ref/V, I/A"""'}), "(namecv, cv_loop, delimiter=',', header='T/s, Q/C, U_ref/V, I/A')\n", (3268, 3333), True, 'import numpy as np\n'), ((4959, 5025), 'numpy.loadtxt', 'np.loadtxt', (["(origin + cvname[i] + '.txt')"], {'delimiter': '""","""', 'skiprows': '(1)'}), "(origin + cvname[i] + '.txt', delimiter=',', skiprows=1)\n", (4969, 5025), True, 'import numpy as np\n'), ((5036, 5122), 'numpy.loadtxt', 'np.loadtxt', (["(origin + qcmname[i] + '.txt')"], {'delimiter': '""","""', 'skiprows': '(2)', 'usecols': '(1,)'}), "(origin + qcmname[i] + '.txt', delimiter=',', skiprows=2, usecols\n =(1,))\n", (5046, 5122), True, 'import numpy as np\n'), ((5315, 5332), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'U'], {}), '(time, U)\n', (5323, 5332), True, 'import matplotlib.pyplot as plt\n'), ((5337, 5356), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""U / V"""'], {}), "('U / V')\n", (5347, 5356), True, 'import matplotlib.pyplot as plt\n'), ((5361, 5383), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time / s"""'], {}), "('time / s')\n", (5371, 5383), True, 'import matplotlib.pyplot as plt\n'), ((5464, 5473), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5471, 5473), True, 'import matplotlib.pyplot as plt\n'), ((5478, 5489), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5487, 5489), True, 'import matplotlib.pyplot as plt\n'), ((5504, 5521), 'scipy.interpolate.interp1d', 'interp1d', (['time', 'Q'], {}), '(time, Q)\n', (5512, 5521), False, 'from scipy.interpolate import interp1d\n'), ((5533, 5550), 'scipy.interpolate.interp1d', 'interp1d', (['time', 'U'], {}), '(time, U)\n', (5541, 5550), False, 'from scipy.interpolate import interp1d\n'), ((5562, 5579), 'scipy.interpolate.interp1d', 'interp1d', (['time', 'I'], {}), '(time, I)\n', (5570, 5579), False, 'from scipy.interpolate import interp1d\n'), ((4143, 4162), 'numpy.asarray', 'np.asarray', (['qcmdata'], {}), '(qcmdata)\n', (4153, 4162), True, 'import numpy as np\n'), ((4641, 4760), 'numpy.savetxt', 'np.savetxt', (["(origin + qcmname[i] + '.txt')", 'eqcmdata[l:qcmtime[i + 1]]'], {'delimiter': '""","""', 'header': '"""T/s , fs/MHz , fp/MHz"""'}), "(origin + qcmname[i] + '.txt', eqcmdata[l:qcmtime[i + 1]],\n delimiter=',', header='T/s , fs/MHz , fp/MHz')\n", (4651, 4760), True, 'import numpy as np\n'), ((4798, 4914), 'numpy.savetxt', 'np.savetxt', (["(origin + qcmname[i] + '.txt')", 'eqcmdata[qcmtime[-1]:]'], {'delimiter': '""","""', 'header': '"""T/s , fs/MHz , fp/MHz"""'}), "(origin + qcmname[i] + '.txt', eqcmdata[qcmtime[-1]:], delimiter=\n ',', header='T/s , fs/MHz , fp/MHz')\n", (4808, 4914), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# @Time : 2019-11-05 14:04
# @Author : yingyuankai
# @Email : <EMAIL>
# @File : activations.py
import math
import numpy as np
import tensorflow as tf
# Public API of this module.
__all__ = [
    "gelu",
    "gelu_new",
    "swish",
    "ACT2FN"
]
def gelu(x):
    """Gaussian Error Linear Unit (tanh approximation).

    A smoother alternative to ReLU.
    Original paper: https://arxiv.org/abs/1606.08415

    :param x: float Tensor to activate.
    :return: `x` with GELU applied.
    """
    # exact form: cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))
    scale = math.sqrt(2 / math.pi)
    inner = scale * (x + 0.044715 * tf.pow(x, 3))
    cdf = 0.5 * (1.0 + tf.tanh(inner))
    return x * cdf
def gelu_new(x):
    """Gaussian Error Linear Unit (tanh approximation, NumPy constant).

    Numerically equivalent to :func:`gelu`; kept for naming parity with
    upstream transformer implementations.
    Original paper: https://arxiv.org/abs/1606.08415

    Args:
        x: float Tensor to perform activation.
    Returns:
        `x` with the GELU activation applied.
    """
    inner = np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))
    cdf = 0.5 * (1.0 + tf.tanh(inner))
    return x * cdf
def swish(x):
    """Self-gated activation: x * sigmoid(x)."""
    gate = tf.sigmoid(x)
    return x * gate
# Lookup table mapping activation names to Keras-compatible callables.
# The custom activations defined above are wrapped in tf.keras.layers.Activation;
# the rest come straight from tf.keras.activations.
ACT2FN = {
    "gelu": tf.keras.layers.Activation(gelu),
    "swish": tf.keras.layers.Activation(swish),
    "gelu_new": tf.keras.layers.Activation(gelu_new),
    "elu": tf.keras.activations.elu,
    "hard_sigmoid": tf.keras.activations.hard_sigmoid,
    "linear": tf.keras.activations.linear,
    "relu": tf.keras.activations.relu,
    "selu": tf.keras.activations.selu,
    "sigmoid": tf.keras.activations.sigmoid,
    "softmax": tf.keras.activations.softmax,
    "softplus": tf.keras.activations.softplus,
    "softsign": tf.keras.activations.softsign,
    "tanh": tf.keras.activations.tanh
}
"numpy.sqrt",
"tensorflow.pow",
"math.sqrt",
"tensorflow.sigmoid",
"tensorflow.keras.layers.Activation"
] | [((1079, 1111), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['gelu'], {}), '(gelu)\n', (1105, 1111), True, 'import tensorflow as tf\n'), ((1126, 1159), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['swish'], {}), '(swish)\n', (1152, 1159), True, 'import tensorflow as tf\n'), ((1177, 1213), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['gelu_new'], {}), '(gelu_new)\n', (1203, 1213), True, 'import tensorflow as tf\n'), ((1040, 1053), 'tensorflow.sigmoid', 'tf.sigmoid', (['x'], {}), '(x)\n', (1050, 1053), True, 'import tensorflow as tf\n'), ((543, 565), 'math.sqrt', 'math.sqrt', (['(2 / math.pi)'], {}), '(2 / math.pi)\n', (552, 565), False, 'import math\n'), ((936, 954), 'numpy.sqrt', 'np.sqrt', (['(2 / np.pi)'], {}), '(2 / np.pi)\n', (943, 954), True, 'import numpy as np\n'), ((584, 596), 'tensorflow.pow', 'tf.pow', (['x', '(3)'], {}), '(x, 3)\n', (590, 596), True, 'import tensorflow as tf\n'), ((973, 985), 'tensorflow.pow', 'tf.pow', (['x', '(3)'], {}), '(x, 3)\n', (979, 985), True, 'import tensorflow as tf\n')] |
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import cv2
import os
import random
import numpy as np
# Full alphanumeric set (unused alternative):
# CHAR_POSSIBILITIES = "0123456789abcdefghijklmnopqrstuvwxyz"
# Characters that can appear in a generated captcha code.
CHAR_POSSIBILITIES = "2345678abcdefgmnpwxy"
# Number of characters per captcha code.
CHAR_COUNT = 5
# Polyline templates ((x, y) vertices) for the strike-through line drawn
# over the code; one template per observed line style.
ALL_LINES = [
    [(30, 48), (34, 25), (76, 15), (259, 23)],
    [(30, 1), (32, 9), (40, 15), (55, 23), (147, 35), (271, 33)],
    [(39, 36), (42, 17), (95, 22), (271, 31)],
    [(30, 16), (36, 26), (65, 25), (143, 16), (205, 15), (271, 18)],
    [(29, 30), (37, 21), (51, 19), (87, 22), (142, 29), (271, 45)],
    [(29, 7), (32, 30), (34, 27), (56, 33), (77, 35), (148, 29), (271, 9)],
    [(30, 37), (36, 24), (80, 18), (271, 9)],
    [(30, 15), (41, 20), (108, 30), (149, 33), (261, 37)],
    [(30, 35), (37, 30), (63, 26), (87, 25), (152, 27), (202, 31), (271, 43)],
    [(36, 35), (43, 19), (74, 20), (271, 39)],
]
def get_random_captcha_name(length, possibilities):
    """Build a random captcha code.

    :param length int: number of characters in the captcha
    :param possibilities string: alphabet to draw characters from
    :return string: random captcha code
    """
    rng = random.SystemRandom()
    chars = [rng.choice(possibilities) for _ in range(length)]
    return ''.join(chars)
def get_random_captcha_names(how_many=1, length=1, possibilities="ab"):
    """Yield `how_many` random captcha codes.

    :param how_many int: number of codes generated
    :param length int: number of characters per code
    :param possibilities string: alphabet to draw from
    :return generator of str: random captcha codes
    """
    for _ in range(how_many):
        yield get_random_captcha_name(length, possibilities)
def get_random_captcha_names_and_lines(how_many=1, length=CHAR_COUNT,
                                       possibilities=CHAR_POSSIBILITIES):
    """Yield "<code>-<line_idx>" labels, pairing each random code with every
    known strike-through line template.

    :param how_many int: requested number of labels; rounded down to a
        multiple of len(ALL_LINES)
    :param length int: number of characters per code
    :param possibilities string: alphabet to draw from
    :return generator of str: "<random_code>-<line_idx>" labels
    """
    n_lines = len(ALL_LINES)
    how_many_codes = int(how_many / n_lines)
    print("New number of codes: " + str(how_many_codes))
    print("New number of images: " + str(how_many_codes * n_lines))
    for code in get_random_captcha_names(how_many_codes, length, possibilities):
        for line_idx in range(n_lines):
            yield code + "-" + str(line_idx)
def generate_captcha(
        captcha_code,
        line_idx,
        base_image_path="./generate-captchas/base.jpg",
    ):
    """
    Render a synthetic captcha image for a given code.

    :param captcha_code string: captcha code that will be generated
    :param line_idx int: index into ALL_LINES of the line to draw on the captcha
    :param base_image_path string: path of the background image for captcha
    :return base numpy array2d: captcha image generated
    """
    # empirically tuned layout constants for the target captcha font
    CHAR_WIDTH_DELTA = 3
    START_Y = -3
    font = ImageFont.truetype("./fonts/FrutigerBQ-Bold.otf", 42)
    back_ground = Image.open(base_image_path)
    color = (0, 0, 0)
    start_x = 17
    base = back_ground.copy()
    draw = ImageDraw.Draw(base)
    for letter in captcha_code:
        # kerning tweaks for wide glyphs — presumably matched by eye against
        # the real captchas; confirm before changing
        if letter in ["n"]:
            start_x -= 2
        draw.text((start_x, START_Y), letter, color, font=font)
        if letter in ["n", "m"]:
            start_x -= 2
        # advance by the glyph width minus a fixed overlap
        size = font.getsize(letter)[0]
        start_x += size - CHAR_WIDTH_DELTA
    # draw line for the code
    draw.line(ALL_LINES[int(line_idx)], fill=color, width=4)
    return np.array(base)
def compare_generate_with_real_captcha(real_captcha_path):
    """
    Visually compare one random real captcha with its regenerated counterpart.

    :param real_captcha_path string: folder containing real captchas; file
        names are expected to start with "<code>-"
    """
    candidates = os.listdir(real_captcha_path)
    random.shuffle(candidates)
    for filename in candidates[:1]:
        code = filename.split("-")[0]
        # NOTE(review): line index is hard-coded to 0 — the real file may use
        # a different line template; confirm against the naming scheme.
        line_idx = 0
        generated_image = generate_captcha(code, line_idx)
        real_image = cv2.imread(real_captcha_path + filename, 0)
        cv2.imshow('generated', generated_image)
        cv2.imshow('real', real_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    # Manual visual check: regenerate one real captcha and show both images.
    compare_generate_with_real_captcha("./real-captchas/")
    # print(generate_captcha())
| [
"PIL.Image.open",
"os.listdir",
"random.shuffle",
"PIL.ImageFont.truetype",
"cv2.imshow",
"numpy.array",
"PIL.ImageDraw.Draw",
"cv2.waitKey",
"cv2.destroyAllWindows",
"random.SystemRandom",
"cv2.imread"
] | [((3018, 3071), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""./fonts/FrutigerBQ-Bold.otf"""', '(42)'], {}), "('./fonts/FrutigerBQ-Bold.otf', 42)\n", (3036, 3071), False, 'from PIL import ImageFont\n'), ((3091, 3118), 'PIL.Image.open', 'Image.open', (['base_image_path'], {}), '(base_image_path)\n', (3101, 3118), False, 'from PIL import Image\n'), ((3200, 3220), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['base'], {}), '(base)\n', (3214, 3220), False, 'from PIL import ImageDraw\n'), ((3612, 3626), 'numpy.array', 'np.array', (['base'], {}), '(base)\n', (3620, 3626), True, 'import numpy as np\n'), ((3862, 3891), 'os.listdir', 'os.listdir', (['real_captcha_path'], {}), '(real_captcha_path)\n', (3872, 3891), False, 'import os\n'), ((3896, 3930), 'random.shuffle', 'random.shuffle', (['real_captcha_files'], {}), '(real_captcha_files)\n', (3910, 3930), False, 'import random\n'), ((4132, 4184), 'cv2.imread', 'cv2.imread', (['(real_captcha_path + real_captcha_file)', '(0)'], {}), '(real_captcha_path + real_captcha_file, 0)\n', (4142, 4184), False, 'import cv2\n'), ((4194, 4234), 'cv2.imshow', 'cv2.imshow', (['"""generated"""', 'generated_image'], {}), "('generated', generated_image)\n", (4204, 4234), False, 'import cv2\n'), ((4243, 4273), 'cv2.imshow', 'cv2.imshow', (['"""real"""', 'real_image'], {}), "('real', real_image)\n", (4253, 4273), False, 'import cv2\n'), ((4282, 4296), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4293, 4296), False, 'import cv2\n'), ((4305, 4328), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4326, 4328), False, 'import cv2\n'), ((1163, 1184), 'random.SystemRandom', 'random.SystemRandom', ([], {}), '()\n', (1182, 1184), False, 'import random\n')] |
import random
import numpy as np
from numpy.core.fromnumeric import std
from numpy.lib.function_base import median
import scipy
from random import choice
from scipy.optimize import least_squares
import librosa
import os
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import (LinearRegression, TheilSenRegressor, RANSACRegressor, HuberRegressor)
import matplotlib.patches as mpatches
from constants_parser import Constants
import matplotlib.pyplot as plt
class Partial():
    """A single detected partial (overtone) of a note.

    Attributes:
        frequency: detected frequency of the partial in Hz.
        order: harmonic order k (the partial lives near k*f0).
        peak_idx: index of the corresponding peak bin in the DFT.
    """
    def __init__(self, frequency, order, peak_idx):
        self.peak_idx = peak_idx
        self.order = order
        self.frequency = frequency
class ToolBox():
    """Bundle of the partial-tracking and inharmonicity-computation routines.

    Designed so that other methods for partial detection or for computing the
    beta coefficient can be plugged in without touching NoteInstance. See the
    example functions at the bottom of this file for usage/alterations.
    """
    def __init__(self, partial_tracking_func, inharmonicity_compute_func, partial_func_args, inharmonic_func_args):
        self.partial_func = partial_tracking_func
        self.partial_func_args = partial_func_args
        self.inharmonic_func = inharmonicity_compute_func
        self.inharmonic_func_args = inharmonic_func_args
class NoteInstance():
    """A single note event: its audio excerpt, DFT, tracked partials and the
    resulting inharmonicity (beta) estimate.

    TODO(original): move to other level of package.
    """
    def __init__(self, fundamental, onset, audio, ToolBoxObj:ToolBox ,sampling_rate, constants : Constants, longaudio=np.array([]), midi_flag = False):
        """Compute the DFT of the note and immediately run partial tracking
        via ToolBoxObj.partial_func.

        NOTE(review): the `sampling_rate` parameter is ignored; the value from
        `constants` is used instead — confirm which one is intended.
        """
        self.fundamental = fundamental
        self.onset = onset
        self.audio = audio
        self.longaudio = longaudio # may be used for ("internal") f0 computation
        self.sampling_rate = constants.sampling_rate
        self.polyfit = constants.polyfit
        self.fft=np.fft.fft(self.audio,n = constants.size_of_fft)
        self.frequencies=np.fft.fftfreq(constants.size_of_fft,1/self.sampling_rate)
        self.partials = []
        self.differences = []
        self.abc = []  # cubic deviation-model coefficients [a, b, c]
        self.large_window = None
        # self.train = False
        self.string = None
        if midi_flag:
            self.recompute_fundamental(constants, fundamental/2)
        ToolBoxObj.partial_func(self, ToolBoxObj.partial_func_args) # if a different partial tracking is incorporated keep second function arguement, else return beta from second function and change entirely

    def get_string(self, string):
        """Record the string label assigned to this note.

        BUG FIX: the original definition `def get_string(string):` was missing
        `self`, so any instance call raised NameError inside the body.
        """
        self.string = string

    def plot_DFT(self, peaks=None, peaks_idx=None, lim=None, ax=None, save_path=None):
        """Plot the real DFT with harmonic markers, search windows and peaks."""
        [a,b,c] = self.abc
        w = self.large_window
        # BUG FIX: f0 was only assigned inside `if w:` although it is also
        # needed by set_xlim below; hoisted so `w is None` no longer raises.
        f0 = self.fundamental
        # main function
        ax.plot(self.frequencies, self.fft.real)
        for k in range(50): # draw vertical red dotted lines indicating harmonics
            ax.axvline(x=self.fundamental*k, color='r', ls='--', alpha=0.85, lw=0.5)
        if w: # draw windows as little boxes
            for k in range(1,lim+1):
                # f = k*f0 * np.sqrt(1+b_est*k**2)
                f = window_centering_func(k,f0, a=a,b=b,c=c)
                rect=mpatches.Rectangle((f-w//2,-80),w,160, fill=False, color="purple", linewidth=2)
                # plt.gca().add_patch(rect)
                ax.add_patch(rect)
        if peaks and peaks_idx: # draw peaks
            ax.plot(peaks, self.fft.real[peaks_idx], "x", alpha=0.7)
        ax.set_xlim(0, window_centering_func(lim+1,f0, a=a,b=b,c=c))
        ax.set_ylim(-100, 100)
        return ax

    def plot_partial_deviations(self, lim=None, res=None, peaks_idx=None, ax=None, note_instance=None, annos_string=None, tab_instance=None):
        """Scatter the partial deviations against the fitted cubic model and
        color the title green/red depending on string-prediction correctness."""
        differences = self.differences
        w = self.large_window
        [a,b,c] = res
        kapa = np.linspace(0, lim, num=lim*10)
        y = a*kapa**3 + b*kapa + c
        if peaks_idx:
            # peak alphas scaled by (normalized) values read at the peak bins
            PeakAmps = [ self.frequencies[peak_idx] for peak_idx in peaks_idx ]
            PeakAmps = PeakAmps / max(PeakAmps) # Normalize
        else:
            PeakAmps = 1
        ax.scatter(np.arange(2,len(differences)+2), differences, alpha=PeakAmps)
        f0 = self.fundamental
        # plot little boxes (the search window around each partial order)
        for k in range(2, len(differences)+2):
            pos = window_centering_func(k, f0, a, b, c) - k*f0
            rect=mpatches.Rectangle((k-0.25, pos-w//2), 0.5, w, fill=False, color="purple", linewidth=2)
            # plt.gca().add_patch(rect)
            ax.add_patch(rect)
        ax.plot(kapa,y, label = 'new_estimate')
        ax.grid()
        ax.legend()
        # NOTE: `c` is reused here as the title color; the polynomial
        # coefficient c is no longer needed at this point
        if annos_string:
            if note_instance.string == annos_string:
                c = 'green'
            else:
                c = 'red'
        else:
            c = 'black'
        if tab_instance:
            plt.title("pred: "+ str(note_instance.string) + ", annotation: " + str(annos_string) + ', fret: ' + str(tab_instance.fret) + ' || f0: ' + str(round(self.fundamental,2)) + ', beta_estimate: '+ str(round(self.beta,6)), color=c) # + '\n a = ' + str(round(a,5)), color=c)
        else:
            plt.title("pred: "+ str(note_instance.string) + ", annotation: " + str(annos_string) + ' || f0: ' + str(round(self.fundamental,2)) + ', beta_estimate: '+ str(round(self.beta,6)), color=c)# + '\n a = ' + str(round(a,5)), color=c)
        return ax

    def weighted_argmean(self, peak_idx, w=6):
        """Amplitude-weighted mean frequency over a small bin window around peak_idx."""
        min_idx = max(peak_idx - w//2, 0)
        max_idx = min(peak_idx + w//2, len(self.frequencies)-1)
        window = range(min_idx, max_idx+1)
        amp_sum = sum([self.fft.real[i] for i in window])
        res = sum( [self.fft.real[i]/amp_sum * self.frequencies[i] for i in window] )
        return res

    def recompute_fundamental(self, constants : Constants, window = 10):
        """Refine self.fundamental from the strongest peak near the current
        estimate, using self.longaudio's spectrum when it is available."""
        if not self.longaudio.any(): # if we want to work on given self.audio (and not on self.longaudio)
            filtered = zero_out(self.fft, self.fundamental, window, constants)
        else:
            longaudio_fft = np.fft.fft(self.longaudio,n = constants.size_of_fft)
            filtered = zero_out(longaudio_fft, self.fundamental, window, constants)
        peaks, _ =scipy.signal.find_peaks(np.abs(filtered),distance=100000) # better way to write this?
        max_peak_freq = self.weighted_argmean(peak_idx=peaks[0], w=0) # w=0 means that no weighted argmean is employed, Note: equivalent to # max_peak_freq = self.frequencies[peaks[0]]
        self.fundamental = max_peak_freq
        return max_peak_freq
def window_centering_func(k, f0=None, a=None, b=None, c=None, b_est=None):
    """Return the centre frequency of the search window for partial order k.

    Two models are supported:
      * with a truthy ``b_est``: the standard stiff-string inharmonicity
        relation  f_k = k * f0 * sqrt(1 + B * k**2);
      * otherwise: the ideal harmonic k*f0 plus a cubic polynomial
        correction a*k**3 + b*k + c.

    NOTE(review): the truthiness test means b_est=0.0 falls through to the
    polynomial branch — confirm this is intended.
    """
    if b_est:
        return k * f0 * np.sqrt(1 + b_est * k ** 2)
    return a * k ** 3 + b * k + c + k * f0
def compute_partials(note_instance, partial_func_args):
    """Track partials of a note by iteratively refining a cubic deviation model.

    partial_func_args is [no_of_partials, large_window, constants]:
    large_window is the width (Hz) of the window around each expected partial
    position inside which the highest DFT peak is taken as the partial.

    The outer loop alternates between (1) locating peaks with the current
    (a, b, c) deviation polynomial and (2) re-fitting that polynomial from the
    located peaks, widening the tracked partial range (lim) every round.
    Results are written into note_instance.partials / .abc / .differences
    (and .beta via compute_inharmonicity)."""
    # no_of_partials = partial_func_args[0] NOTE: deal with it somehow
    note_instance.large_window = partial_func_args[1]
    constants = partial_func_args[2]
    # window width expressed in DFT bins
    diviate = round(note_instance.large_window*note_instance.fft.size/note_instance.sampling_rate)
    f0 = note_instance.fundamental
    a, b, c = 0, 0, 0
    step=2
    # NOTE(review): `bound` is computed but never used below
    bound = constants.no_of_partials + constants.no_of_partials % step
    for lim in range(6,31,step):
        for k in range(2,lim): # 2 stands for the 2nd partial!
            # center_freq = k*f0 * np.sqrt(1+b_est*k**2)
            center_freq = window_centering_func(k,f0, a=a,b=b,c=c) # centering window in which to look for peak/partial
            try:
                filtered = zero_out(note_instance.fft, center_freq=center_freq , window_length=diviate, constants=constants)
                peaks, _ = scipy.signal.find_peaks(np.abs(filtered),distance=100000) # better way to write this?
                max_peak = note_instance.frequencies[peaks[0]]
                # max_peak = note_instance.weighted_argmean(peak_idx=peaks[0], w=6)
                note_instance.partials.append(Partial(frequency=max_peak, order=k, peak_idx=peaks[0]))
            except Exception as e:
                print(e)
                print('MyExplanation: Certain windows where peaks are to be located surpassed the length of the DFT.')
                break
        # iterative beta estimates
        _, [a,b,c] = compute_inharmonicity(note_instance, [])
        note_instance.abc = [a,b,c]
        # compute differences/deviations
        note_instance.differences, orders = zip(*compute_differences(note_instance))
        # if i != N-1: # i.e. if not the final iteration
        if lim<30:
            # discard this round's partials; only the final round is kept
            note_instance.partials=[]
        # if constants.plot:
        #     peak_freqs = [partial.frequency for partial in note_instance.partials]
        #     peaks_idx = [partial.peak_idx for partial in note_instance.partials]
        #     fig = plt.figure(figsize=(15, 10))
        #     ax1 = fig.add_subplot(2, 1, 1)
        #     ax2 = fig.add_subplot(2, 1, 2)
        #     note_instance.plot_partial_deviations(lim=30, res=note_instance.abc, ax=ax1, note_instance=note_instance) #, peaks_idx=Peaks_Idx)
        #     note_instance.plot_DFT(peak_freqs, peaks_idx, lim=30, ax=ax2)
        #     plt.show()
def compute_differences(note_instance):
    """Return a list of (deviation, index) pairs, one per tracked partial.

    The deviation is the absolute distance between the tracked partial
    frequency and the ideal harmonic position (i+2)*f0 — indexing starts at
    the 2nd partial (order k = 2).
    """
    f0 = note_instance.fundamental
    return [(abs(p.frequency - (i + 2) * f0), i)
            for i, p in enumerate(note_instance.partials)]
def compute_inharmonicity(note_instance, inharmonic_func_args):
    """Fit the partial deviations with a cubic model and derive beta.

    Uses least squares or Theil-Sen depending on note_instance.polyfit, then
    converts the fitted coefficients to the inharmonicity coefficient via
    beta = 2a / (f0 + b)  (Barbancho et al., eq. 17). Sets note_instance.beta
    and returns (beta, [a, b, c]).
    """
    deviations, orders = zip(*compute_differences(note_instance))
    u = np.array(orders) + 2  # partial orders start at k = 2
    if note_instance.polyfit == 'lsq':
        res = compute_least(u, deviations)
    if note_instance.polyfit == 'Thei':
        res = compute_least_TheilSen(u, deviations)
    [a, b, c] = res
    beta = 2 * a / (note_instance.fundamental + b)  # Barbancho et al. (17)
    note_instance.beta = beta
    return beta, res
def compute_least(u, y):
    """Robust non-negative fit of y ~ a*u**3 + b*u + c.

    Uses scipy's least_squares with a soft-L1 loss, an analytic Jacobian and
    coefficients constrained to [0, inf). Returns the array [a, b, c].
    """
    def residuals(x, u, y):
        # model minus observations
        return x[0] * u**3 + x[1] * u + x[2] - y

    def jacobian(x, u, y):
        J = np.empty((u.size, x.size))
        J[:, 0] = u**3
        J[:, 1] = u
        J[:, 2] = 1
        return J

    x0 = [0.00001, 0.00001, 0.000001]
    fit = least_squares(residuals, x0, jac=jacobian, bounds=(0, np.inf),
                        args=(u, y), loss='soft_l1', verbose=0)
    return fit.x
# https://scikit-learn.org/stable/auto_examples/linear_model/plot_robust_fit.html#sphx-glr-auto-examples-linear-model-plot-robust-fit-py
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.TheilSenRegressor.html#sklearn.linear_model.TheilSenRegressor
def compute_least_TheilSen(u, y):
    """Robust cubic fit y ~ a*u**3 + b*u + c using the Theil-Sen estimator.

    The u**2 column is dropped from the polynomial features so the model
    matches compute_least. Returns the coefficients highest-order first.

    See:
    https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.TheilSenRegressor.html
    """
    column = u[:, np.newaxis]
    features = PolynomialFeatures(3).fit_transform(column)
    # delete the u**2 column (index 2) so the model is a*u**3 + b*u + c
    features = np.delete(features, 2, axis=1)
    # estimator = LinearRegression(fit_intercept=False)
    estimator = TheilSenRegressor(random_state=42)
    # estimator = HuberRegressor()
    estimator.fit(features, y)
    # print("coefficients:", estimator.coef_)
    return estimator.coef_[::-1]
def zero_out(fft, center_freq, window_length, constants : Constants):
    """Return squared FFT values inside a window around center_freq; every
    bin outside the window is zeroed.

    NOTE(review): values inside the window are squared (temp[i]**2), which
    changes both magnitude and phase of the complex bins — presumably a
    peak-sharpening trick; confirm it is intentional.
    """
    n = fft.size
    out = np.zeros(n, dtype=np.complex64)
    center_bin = int(round(center_freq * n / constants.sampling_rate))
    half = int(window_length) // 2
    # for i in range(dom_freq_bin-window_length,dom_freq_bin+window_length): #NOTE: possible error
    for idx in range(center_bin - half, center_bin + half):  # __gb_
        out[idx] = fft[idx] ** 2
    return out
"""here on tests"""
#-----------ToolBox Example--------------------
#when changing only partial computing function
def example_compute_partial(note_instance, partial_func_args):
    """Toy partial-tracking function demonstrating the ToolBox plug-in API.

    partial_func_args is expected to be [count, freq_option_a, freq_option_b];
    `count` partials with randomly chosen frequencies are appended to
    note_instance.partials.
    """
    count = partial_func_args[0]
    option_a = partial_func_args[1]
    option_b = partial_func_args[2]
    for i in range(0, count):
        x = random.choice([option_a, option_b])
        # BUG FIX: Partial.__init__ requires (frequency, order, peak_idx);
        # the original call Partial(x, i) raised TypeError.
        note_instance.partials.append(Partial(frequency=x, order=i, peak_idx=None))
# example changing beta computation and probably bypassing previous partial computation
def example_inharmonicity_computation(note_instance, inharmonic_func_args):
    """Toy beta-computation function demonstrating the ToolBox plug-in API.

    Sets note_instance.beta to the first element of inharmonic_func_args
    (the second element is read but unused, mirroring the expected argument
    shape).
    """
    beta_value = inharmonic_func_args[0]
    _unused = inharmonic_func_args[1]
    # do more stuff...
    note_instance.beta = beta_value
def wrapper_func(fundamental, audio, sampling_rate, constants : Constants):
    """Convenience driver: build a NoteInstance with the default partial
    tracking and inharmonicity tools, then print the beta estimate and the
    tracked partial frequencies."""
    toolbox = ToolBox(compute_partials, compute_inharmonicity,
                      [14, fundamental / 2, constants], [])
    note = NoteInstance(fundamental, 0, audio, toolbox, sampling_rate, constants)
    print(note.beta, [partial.frequency for partial in note.partials])
| [
"numpy.abs",
"scipy.optimize.least_squares",
"sklearn.preprocessing.PolynomialFeatures",
"random.choice",
"matplotlib.patches.Rectangle",
"numpy.sqrt",
"numpy.delete",
"numpy.fft.fftfreq",
"numpy.fft.fft",
"sklearn.linear_model.TheilSenRegressor",
"numpy.array",
"numpy.zeros",
"numpy.linspac... | [((10417, 10513), 'scipy.optimize.least_squares', 'least_squares', (['fun', 'x0'], {'jac': 'jac', 'bounds': '(0, np.inf)', 'args': '(u, y)', 'loss': '"""soft_l1"""', 'verbose': '(0)'}), "(fun, x0, jac=jac, bounds=(0, np.inf), args=(u, y), loss=\n 'soft_l1', verbose=0)\n", (10430, 10513), False, 'from scipy.optimize import least_squares\n'), ((10874, 10895), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['(3)'], {}), '(3)\n', (10892, 10895), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((10956, 10984), 'numpy.delete', 'np.delete', (['u_poly', '(2)'], {'axis': '(1)'}), '(u_poly, 2, axis=1)\n', (10965, 10984), True, 'import numpy as np\n'), ((11145, 11179), 'sklearn.linear_model.TheilSenRegressor', 'TheilSenRegressor', ([], {'random_state': '(42)'}), '(random_state=42)\n', (11162, 11179), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor, RANSACRegressor, HuberRegressor\n'), ((11536, 11568), 'numpy.zeros', 'np.zeros', (['sz'], {'dtype': 'np.complex64'}), '(sz, dtype=np.complex64)\n', (11544, 11568), True, 'import numpy as np\n'), ((1413, 1425), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1421, 1425), True, 'import numpy as np\n'), ((1732, 1779), 'numpy.fft.fft', 'np.fft.fft', (['self.audio'], {'n': 'constants.size_of_fft'}), '(self.audio, n=constants.size_of_fft)\n', (1742, 1779), True, 'import numpy as np\n'), ((1806, 1867), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['constants.size_of_fft', '(1 / self.sampling_rate)'], {}), '(constants.size_of_fft, 1 / self.sampling_rate)\n', (1820, 1867), True, 'import numpy as np\n'), ((3679, 3712), 'numpy.linspace', 'np.linspace', (['(0)', 'lim'], {'num': '(lim * 10)'}), '(0, lim, num=lim * 10)\n', (3690, 3712), True, 'import numpy as np\n'), ((9728, 9744), 'numpy.array', 'np.array', (['orders'], {}), '(orders)\n', (9736, 9744), True, 'import numpy as np\n'), ((10266, 10292), 'numpy.empty', 'np.empty', (['(u.size, x.size)'], {}), 
'((u.size, x.size))\n', (10274, 10292), True, 'import numpy as np\n'), ((12241, 12268), 'random.choice', 'random.choice', (['[arg1, arg2]'], {}), '([arg1, arg2])\n', (12254, 12268), False, 'import random\n'), ((4214, 4312), 'matplotlib.patches.Rectangle', 'mpatches.Rectangle', (['(k - 0.25, pos - w // 2)', '(0.5)', 'w'], {'fill': '(False)', 'color': '"""purple"""', 'linewidth': '(2)'}), "((k - 0.25, pos - w // 2), 0.5, w, fill=False, color=\n 'purple', linewidth=2)\n", (4232, 4312), True, 'import matplotlib.patches as mpatches\n'), ((5908, 5959), 'numpy.fft.fft', 'np.fft.fft', (['self.longaudio'], {'n': 'constants.size_of_fft'}), '(self.longaudio, n=constants.size_of_fft)\n', (5918, 5959), True, 'import numpy as np\n'), ((6088, 6104), 'numpy.abs', 'np.abs', (['filtered'], {}), '(filtered)\n', (6094, 6104), True, 'import numpy as np\n'), ((6594, 6621), 'numpy.sqrt', 'np.sqrt', (['(1 + b_est * k ** 2)'], {}), '(1 + b_est * k ** 2)\n', (6601, 6621), True, 'import numpy as np\n'), ((3034, 3124), 'matplotlib.patches.Rectangle', 'mpatches.Rectangle', (['(f - w // 2, -80)', 'w', '(160)'], {'fill': '(False)', 'color': '"""purple"""', 'linewidth': '(2)'}), "((f - w // 2, -80), w, 160, fill=False, color='purple',\n linewidth=2)\n", (3052, 3124), True, 'import matplotlib.patches as mpatches\n'), ((7853, 7869), 'numpy.abs', 'np.abs', (['filtered'], {}), '(filtered)\n', (7859, 7869), True, 'import numpy as np\n')] |
import cv2
import threading
import numpy as np
class VideoLoader(object):
    """ Load the video frame and return the paired consecutive frames for Face Detection DSFD Model"""
    def __init__(self, path, batch_size, frame_cache_size):
        # path: video file; frames are preloaded on a background thread into
        # an in-memory cache of at most frame_cache_size frames.
        self.video = cv2.VideoCapture(str(path))
        self.cache_size = frame_cache_size
        self.batch_size = batch_size
        #self.transform = TestBaseTransform((103, 117, 123))
        self.factor = 2
        self.cache = None                 # frame_number -> frame; filled by reset()
        self.num_frame_loaded = None      # count of frames read from the video
        self.num_frame_processed = None   # count of frames handed out by __next__
        self.loaded = True                # True = preloading finished/stopped
        # thread is created here but only started in reset()
        self.process = threading.Thread(target=self._preload_frames)
        self.cache_write_lock = threading.Lock()

    def _preload_frames(self):
        # Background thread body: keep reading frames while the cache has
        # room. NOTE(review): this busy-spins when the cache is full.
        while not self.loaded:
            if self.num_frame_loaded - self.num_frame_processed < self.cache_size:
                self._load_next_frame()

    def _load_next_frame(self):
        # Read exactly one frame (seek + read) under the lock; mark the
        # stream as exhausted when read() fails.
        with self.cache_write_lock:
            self.video.set(cv2.CAP_PROP_POS_FRAMES, self.num_frame_loaded)
            ret, frame = self.video.read()
            if ret:
                # cache keys are 1-based frame numbers
                self.cache[self.num_frame_loaded+1] =frame
                self.num_frame_loaded += 1
            else:
                self.loaded = True

    def reset(self):
        # Restart iteration from frame 0 with a fresh cache and a fresh
        # preloading thread.
        with self.cache_write_lock:
            self.loaded = True # Attempt to complete the preloading
            if self.process.is_alive():
                # NOTE(review): deleting the reference does not actually stop
                # the thread — the old thread keeps running until its loop
                # observes self.loaded; confirm this is safe.
                del self.process # Force to finish the thread
            self.cache = dict()
            self.num_frame_loaded = 0
            self.num_frame_processed = 0
            self.loaded = False
            self.max_im_shrink = None
            self.shrink1 = None
            self.shrink2 = None
            self.shrink3 = None
            self.process = threading.Thread(target=self._preload_frames)
            self.process.start()

    def __iter__(self):
        self.reset()
        return self

    def __next__(self):
        # Stop once the preloader has finished and every loaded frame was
        # consumed; otherwise busy-wait for the next frame to appear.
        if self.loaded and self.num_frame_processed == self.num_frame_loaded:
            raise StopIteration
        else:
            while self.num_frame_processed >= self.num_frame_loaded:
                pass # wait for new frame to be loaded
            rst = self.cache[self.num_frame_processed+1]
            self.cache.pop(self.num_frame_processed+1)
            self.num_frame_processed += 1
            return np.array(rst)
if __name__ == '__main__':
    import time
    # Smoke test: stream a local video file and report per-iteration latency
    # together with the current cache occupancy.
    loader = VideoLoader('/home/charley/Research/Yudong/Videos/ADOS1047_E2_ADOS_091319.mp4', 16, 1000)
    st = time.time()
    for all_frames in loader:
        end = time.time()
        # NOTE(review): __next__ yields a single frame array, so iterating it
        # here walks its rows — presumably written for a batched loader;
        # confirm the intended output.
        print([frames.shape for frames in all_frames], len(loader.cache), end-st)
        st = time.time()
| [
"threading.Thread",
"numpy.array",
"threading.Lock",
"time.time"
] | [((2570, 2581), 'time.time', 'time.time', ([], {}), '()\n', (2579, 2581), False, 'import time\n'), ((609, 654), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._preload_frames'}), '(target=self._preload_frames)\n', (625, 654), False, 'import threading\n'), ((687, 703), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (701, 703), False, 'import threading\n'), ((2627, 2638), 'time.time', 'time.time', ([], {}), '()\n', (2636, 2638), False, 'import time\n'), ((2734, 2745), 'time.time', 'time.time', ([], {}), '()\n', (2743, 2745), False, 'import time\n'), ((1803, 1848), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._preload_frames'}), '(target=self._preload_frames)\n', (1819, 1848), False, 'import threading\n'), ((2399, 2412), 'numpy.array', 'np.array', (['rst'], {}), '(rst)\n', (2407, 2412), True, 'import numpy as np\n')] |
import numpy as np
import torch
from torch.optim import Adam
import gym
import time
import yaml
import safe_rl.algos.cppo.core as core
from safe_rl.utils.logx import EpochLogger
from safe_rl.utils.mpi_pytorch import setup_pytorch_for_mpi, sync_params, mpi_avg_grads
from safe_rl.utils.mpi_tools import (mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar,
num_procs, mpi_sum)
from extra_envs.wrappers import Intervention
from extra_envs.intervener.base import Intervener
class CPPOBuffer:
    """
    A buffer for storing trajectories experienced by a PPO agent interacting
    with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
    for calculating the advantages of state-action pairs.

    In addition to task rewards/values, the buffer records safety costs, cost
    values, and an intervention flag per step, so that :meth:`get` can form a
    single penalty-weighted (Lagrangian) advantage signal.
    """
    def __init__(self, obs_dim, act_dim, size, gamma=0.99, lam=0.95, scaling=1.,
                 normalize_adv=False):
        """
        Args:
            obs_dim: observation shape (forwarded to ``core.combined_shape``).
            act_dim: action shape.
            size: maximum number of timesteps the buffer can hold.
            gamma (float): discount factor.
            lam (float): GAE-lambda parameter.
            scaling (float): multiplier applied to rewards and costs on store.
            normalize_adv (bool): if True, divide the combined advantage by its
                std in :meth:`get`; otherwise divide by ``scaling``.
        """
        self.obs_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
        self.act_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)
        # Associated with task reward
        self.adv_buf = np.zeros(size, dtype=np.float32)
        self.rew_buf = np.zeros(size, dtype=np.float32)
        self.ret_buf = np.zeros(size, dtype=np.float32)
        self.val_buf = np.zeros(size, dtype=np.float32)
        # Associated with task cost
        self.cadv_buf = np.zeros(size, dtype=np.float32)
        self.cost_buf = np.zeros(size, dtype=np.float32)
        self.cret_buf = np.zeros(size, dtype=np.float32)
        self.cval_buf = np.zeros(size, dtype=np.float32)
        # Use the builtin `bool`: the `np.bool` alias was deprecated in
        # NumPy 1.20 and removed in NumPy 1.24.
        self.intv_buf = np.zeros(size, dtype=bool)
        self.logp_buf = np.zeros(size, dtype=np.float32)
        self.normalize_adv = normalize_adv
        self.gamma, self.lam, self.scaling = gamma, lam, scaling
        self.ptr, self.path_start_idx, self.max_size = 0, 0, size
    def store(self, obs, act, rew, val, cost, cval, intv, logp):
        """
        Append one timestep of agent-environment interaction to the buffer.

        Rewards and costs are multiplied by ``self.scaling`` before storage.
        """
        assert self.ptr < self.max_size     # buffer has to have room so you can store
        self.obs_buf[self.ptr] = obs
        self.act_buf[self.ptr] = act
        # Reward
        self.rew_buf[self.ptr] = self.scaling*rew
        self.val_buf[self.ptr] = val
        # Cost
        self.cost_buf[self.ptr] = self.scaling*cost
        self.cval_buf[self.ptr] = cval
        self.intv_buf[self.ptr] = intv
        self.logp_buf[self.ptr] = logp
        self.ptr += 1
    def finish_path(self, last_val=0, last_cval=0):
        """
        Call this at the end of a trajectory, or when one gets cut off
        by an epoch ending. This looks back in the buffer to where the
        trajectory started, and uses rewards and value estimates from
        the whole trajectory to compute advantage estimates with GAE-Lambda,
        as well as compute the rewards-to-go for each state, to use as
        the targets for the value function.

        The "last_val" argument should be 0 if the trajectory ended
        because the agent reached a terminal state (died), and otherwise
        should be V(s_T), the value function estimated for the last state.
        This allows us to bootstrap the reward-to-go calculation to account
        for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
        ``last_cval`` plays the same role for the cost value function.
        """
        path_slice = slice(self.path_start_idx, self.ptr)
        ###########
        # Rewards #
        ###########
        # (1-gamma) rescaling keeps returns bounded by the reward range.
        rews = np.append((1-self.gamma)*self.rew_buf[path_slice], last_val)
        vals = np.append(self.val_buf[path_slice], last_val)
        # the next two lines implement GAE-Lambda advantage calculation
        deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
        self.adv_buf[path_slice] = core.discount_cumsum(deltas, self.gamma * self.lam)
        # the next line computes rewards-to-go, to be targets for the value function
        self.ret_buf[path_slice] = core.discount_cumsum(rews, self.gamma)[:-1]
        #########
        # Costs #
        #########
        costs = np.append((1-self.gamma)*self.cost_buf[path_slice], last_cval)
        cvals = np.append(self.cval_buf[path_slice], last_cval)
        cdeltas = costs[:-1] + self.gamma*cvals[1:] - cvals[:-1]
        self.cadv_buf[path_slice] = core.discount_cumsum(cdeltas, self.gamma*self.lam)
        self.cret_buf[path_slice] = core.discount_cumsum(costs, self.gamma)[:-1]
        self.path_start_idx = self.ptr
    def get(self, log_penalty=-np.inf):
        """
        Call this at the end of an epoch to get all of the data from
        the buffer, with advantages appropriately normalized (shifted to have
        mean zero and std one). Also, resets some pointers in the buffer.

        Args:
            log_penalty (float): log of the Lagrangian penalty; the combined
                advantage is ``(1-w)*adv - w*cadv`` with
                ``w = sigmoid(log_penalty)``. The default ``-np.inf`` yields
                ``w = 0``, i.e. a pure reward advantage. (``np.infty`` was an
                alias removed in NumPy 2.0; ``np.inf`` is the canonical name.)
        """
        assert self.ptr == self.max_size    # buffer has to be full before you can get
        self.ptr, self.path_start_idx = 0, 0
        # the next two lines implement the advantage normalization trick
        weight = 1/(1 + np.exp(-log_penalty))
        lagrange_adv_buf = (1-weight)*self.adv_buf - weight*self.cadv_buf
        adv_mean, adv_std = mpi_statistics_scalar(lagrange_adv_buf)
        lagrange_adv_buf = lagrange_adv_buf - adv_mean
        lagrange_adv_buf /= (adv_std if self.normalize_adv else self.scaling)
        data = dict(obs=self.obs_buf, act=self.act_buf, ret=self.ret_buf,
                    cret=self.cret_buf, adv=lagrange_adv_buf, logp=self.logp_buf)
        out = {k: torch.as_tensor(v, dtype=torch.float32) for k,v in data.items()}
        out.update(intv=self.intv_buf)
        return out
def cppo(env_fn, actor_critic=core.MLPActorCritic, ac_kwargs=dict(), seed=0,
         steps_per_epoch=4000, epochs=50, gamma=0.99, clip_ratio=0.2, pi_lr=3e-4,
         vf_lr=1e-3, train_pi_iters=80, train_v_iters=80, lam=0.97, max_ep_len=1000,
         target_kl=0.01, logger_kwargs=dict(), save_freq=10,
         num_test_episodes=10, ent_bonus=0.001, scaling=100., dont_normalize_adv=False,
         # Cost constraint/penalties
         cost_lim=0.01, penalty=1., penalty_lr=5e-2, update_penalty_every=1,
         optimize_penalty=False, ignore_unsafe_cost=False
         ):
    """
    Constrained Proximal Policy Optimization (by clipping),
    with early stopping based on approximate KL.

    Args:
        env_fn : A function which creates a copy of the environment.
            The environment must satisfy the OpenAI Gym API. The returned
            environment is expected to be wrapped (e.g. by an Intervention
            wrapper) and expose the raw env as ``env.env``.
        actor_critic: The constructor method for a PyTorch Module with a
            ``step`` method, an ``act`` method, a ``pi`` module, and a ``v``
            module. The ``step`` method should accept a batch of observations
            and return:
            ===========  ================  ======================================
            Symbol       Shape             Description
            ===========  ================  ======================================
            ``a``        (batch, act_dim)  | Numpy array of actions for each
                                           | observation.
            ``v``        (batch,)          | Numpy array of value estimates
                                           | for the provided observations.
            ``logp_a``   (batch,)          | Numpy array of log probs for the
                                           | actions in ``a``.
            ===========  ================  ======================================
            The ``act`` method behaves the same as ``step`` but only returns ``a``.
            The ``pi`` module's forward call should accept a batch of
            observations and optionally a batch of actions, and return:
            ===========  ================  ======================================
            Symbol       Shape             Description
            ===========  ================  ======================================
            ``pi``       N/A               | Torch Distribution object, containing
                                           | a batch of distributions describing
                                           | the policy for the provided observations.
            ``logp_a``   (batch,)          | Optional (only returned if batch of
                                           | actions is given). Tensor containing
                                           | the log probability, according to
                                           | the policy, of the provided actions.
                                           | If actions not given, will contain
                                           | ``None``.
            ===========  ================  ======================================
            The ``v`` module's forward call should accept a batch of observations
            and return:
            ===========  ================  ======================================
            Symbol       Shape             Description
            ===========  ================  ======================================
            ``v``        (batch,)          | Tensor containing the value estimates
                                           | for the provided observations. (Critical:
                                           | make sure to flatten this!)
            ===========  ================  ======================================
        ac_kwargs (dict): Any kwargs appropriate for the ActorCritic object
            you provided to PPO.
        seed (int): Seed for random number generators.
        steps_per_epoch (int): Number of steps of interaction (state-action pairs)
            for the agent and the environment in each epoch.
        epochs (int): Number of epochs of interaction (equivalent to
            number of policy updates) to perform.
        gamma (float): Discount factor. (Always between 0 and 1.)
        clip_ratio (float): Hyperparameter for clipping in the policy objective.
            Roughly: how far can the new policy go from the old policy while
            still profiting (improving the objective function)? The new policy
            can still go farther than the clip_ratio says, but it doesn't help
            on the objective anymore. (Usually small, 0.1 to 0.3.) Typically
            denoted by :math:`\epsilon`.
        pi_lr (float): Learning rate for policy optimizer.
        vf_lr (float): Learning rate for value function optimizer.
        train_pi_iters (int): Maximum number of gradient descent steps to take
            on policy loss per epoch. (Early stopping may cause optimizer
            to take fewer than this.)
        train_v_iters (int): Number of gradient descent steps to take on
            value function per epoch.
        lam (float): Lambda for GAE-Lambda. (Always between 0 and 1,
            close to 1.)
        max_ep_len (int): Maximum length of trajectory / episode / rollout.
        target_kl (float): Roughly what KL divergence we think is appropriate
            between new and old policies after an update. This will get used
            for early stopping. (Usually small, 0.01 or 0.05.)
        logger_kwargs (dict): Keyword args for EpochLogger.
        save_freq (int): How often (in terms of gap between epochs) to save
            the current policy and value function.
        num_test_episodes (int): Number of deterministic evaluation episodes
            per epoch (split across MPI processes).
        ent_bonus (float): Coefficient on the entropy bonus in the policy loss.
        scaling (float): How much to scale the empirical returns to aid in learning the
            value function
        dont_normalize_adv (bool): If True, divide advantages by ``scaling``
            instead of their standard deviation.
        cost_lim (float): The tolerated cost limit
        penalty (float): The initial penalty value
        penalty_lr (float): The update size for the penalty
        update_penalty_every (int): After how many policy updates we update the penalty
        optimize_penalty (bool): Whether to optimize the penalty or keep it fixed
        ignore_unsafe_cost (bool): Whether to consider the unsafe cost when computing the
            cost.
    """
    # Special function to avoid certain slowdowns from PyTorch + MPI combo.
    setup_pytorch_for_mpi()
    # Set up logger and save configuration
    logger = EpochLogger(**logger_kwargs)
    logger.save_config(locals())
    # Random seed (offset per MPI rank so processes explore differently)
    seed += 10000 * proc_id()
    torch.manual_seed(seed)
    np.random.seed(seed)
    # Instantiate environment
    env = env_fn()
    # test_env is the inner (presumably unwrapped, intervention-free) env — confirm
    # against the Intervention wrapper's attribute layout.
    test_env = env.env
    #test_env = gym.make('extra_envs:HalfCheetah-v0')
    obs_dim = env.observation_space.shape
    act_dim = env.action_space.shape
    rew_range = env.reward_range
    # Value heads predict scaled (1-gamma)-normalized returns, hence these ranges.
    v_range = (scaling*rew_range[0], scaling*rew_range[1])
    vc_range = (0, scaling*1)
    max_ep_len = min(max_ep_len, env.env._max_episode_steps)
    # Create actor-critic module
    ac = actor_critic(env.observation_space, env.action_space, v_range=v_range,
                      vc_range=vc_range, pred_std=True, **ac_kwargs)
    # Sync params across processes
    sync_params(ac)
    # Count variables
    var_counts = tuple(core.count_vars(module) for module in [ac.pi, ac.v])
    logger.log('\nNumber of parameters: \t pi: %d, \t v: %d\n' % var_counts)
    # Set up experience buffer (steps are split evenly across MPI processes)
    local_steps_per_epoch = int(steps_per_epoch / num_procs())
    buf = CPPOBuffer(obs_dim, act_dim, local_steps_per_epoch, gamma, lam, scaling,
                     normalize_adv=not dont_normalize_adv)
    # Penalty learning: parameterize the penalty as exp(param) so it stays positive.
    if optimize_penalty:
        penalty_param_init = max(np.log(penalty), -100.)
        penalty_param = torch.tensor([penalty_param_init], requires_grad=True,
                                     dtype=torch.float32)
        penalty_torch = torch.exp(penalty_param)
    else:
        penalty_torch = torch.tensor([penalty], dtype=torch.float32)
    # Set up function for computing PPO policy loss
    def compute_loss_pi(data):
        """Clipped PPO surrogate loss (with entropy bonus); steps flagged as
        intervened are masked out of the policy update."""
        obs, act, adv, logp_old = data['obs'], data['act'], data['adv'], data['logp']
        intv = data['intv']
        obs, act, adv, logp_old = [x[~intv] for x in [obs, act, adv, logp_old]]
        # Policy loss
        pi, logp = ac.pi(obs, act)
        ratio = torch.exp(logp - logp_old)
        clip_adv = torch.clamp(ratio, 1-clip_ratio, 1+clip_ratio) * adv
        ent = pi.entropy().mean().item()
        loss_pi = -(torch.min(ratio * adv, clip_adv) + ent_bonus*ent).mean()
        # Useful extra info
        approx_kl = (logp_old - logp).mean().item()
        ent = pi.entropy().mean().item()
        clipped = ratio.gt(1+clip_ratio) | ratio.lt(1-clip_ratio)
        clipfrac = torch.as_tensor(clipped, dtype=torch.float32).mean().item()
        pi_info = dict(kl=approx_kl, ent=ent, cf=clipfrac)
        return loss_pi, pi_info
    # Set up function for computing value loss
    loss_fn = torch.nn.SmoothL1Loss()
    def compute_loss_v(data):
        obs, ret = data['obs'], data['ret']
        return loss_fn(ac.v(obs), ret)
    def compute_loss_vc(data):
        obs, cret = data['obs'], data['cret']
        return loss_fn(ac.vc(obs), cret)
    def compute_loss_penalty(cost):
        # Negated so that minimizing this loss increases the penalty whenever
        # the measured cost exceeds the limit (gradient ascent on the penalty).
        return -penalty_torch.squeeze()*(cost - cost_lim)
    # Set up optimizers for policy and value function
    pi_optimizer = Adam(ac.pi.parameters(), lr=pi_lr)
    vf_optimizer = Adam(ac.v.parameters(), lr=vf_lr)
    vcf_optimizer = Adam(ac.vc.parameters(), lr=vf_lr)
    if optimize_penalty:
        penalty_optimizer = Adam([penalty_param], lr=penalty_lr)
    # Set up model saving
    logger.setup_pytorch_saver(ac)
    def update(update_penalty):
        """Run one full CPPO update (policy, both value functions, and —
        optionally — the Lagrangian penalty) on the data in ``buf``."""
        curr_cost = logger.get_stats('EpCost')[0]
        if curr_cost > cost_lim:
            logger.log("Warning! Safety constraint violated.", 'red')
        data = buf.get(np.log(penalty_torch.item()))
        pi_l_old, pi_info_old = compute_loss_pi(data)
        pi_l_old = pi_l_old.item()
        v_l_old = compute_loss_v(data).item()
        vc_l_old = compute_loss_vc(data).item()
        # Train policy with multiple steps of gradient descent
        for i in range(train_pi_iters):
            pi_optimizer.zero_grad()
            loss_pi, pi_info = compute_loss_pi(data)
            kl = mpi_avg(pi_info['kl'])
            if kl > 1.5 * target_kl:
                logger.log('Early stopping at step %d due to reaching max kl.' % i)
                break
            loss_pi.backward()
            mpi_avg_grads(ac.pi)    # average grads across MPI processes
            pi_optimizer.step()
        logger.store(StopIter=i)
        # Value function learning (reward and cost critics in lockstep)
        for i in range(train_v_iters):
            vf_optimizer.zero_grad()
            loss_v = compute_loss_v(data)
            loss_v.backward()
            mpi_avg_grads(ac.v)    # average grads across MPI processes
            vf_optimizer.step()
            vcf_optimizer.zero_grad()
            loss_vc = compute_loss_vc(data)
            loss_vc.backward()
            mpi_avg_grads(ac.vc)    # average grads across MPI processes
            vcf_optimizer.step()
        # Penalty update
        if optimize_penalty and update_penalty:
            penalty_optimizer.zero_grad()
            loss_pen = compute_loss_penalty(curr_cost)
            loss_pen.backward()
            # Manually average the penalty gradient across MPI processes
            penalty_param_np = penalty_param.grad.numpy()
            avg_grad = mpi_avg(penalty_param_np)
            penalty_param.grad = torch.as_tensor(avg_grad)
            penalty_optimizer.step()
        # Log changes from update
        kl, ent, cf = pi_info['kl'], pi_info_old['ent'], pi_info['cf']
        logger.store(LossPi=pi_l_old, LossV=v_l_old, LossVC=vc_l_old,
                     KL=kl, Entropy=ent, ClipFrac=cf,
                     DeltaLossPi=(loss_pi.item() - pi_l_old),
                     DeltaLossV=(loss_v.item() - v_l_old),
                     DeltaLossVC=(loss_vc.item() - vc_l_old))
    local_num_test_episodes = int(num_test_episodes / num_procs())
    def test_agent():
        """Evaluate the deterministic policy on ``test_env`` and log stats."""
        for _ in range(local_num_test_episodes):
            o, d, ep_ret, ep_cost, ep_len = test_env.reset(), False, 0, 0, 0
            while not (d or ep_len == max_ep_len):
                a = ac.act(torch.as_tensor(o, dtype=torch.float32), deterministic=True)
                o, r, d, info = test_env.step(a)
                ep_ret += r
                ep_cost += info.get('cost', 0.)
                ep_len += 1
            logger.store(TestEpRet=ep_ret, TestEpCost=ep_cost, TestEpLen=ep_len)
    # Prepare for interaction with environment
    start_time = time.time()
    o, ep_ret, ep_cost, ep_len, cum_cost, cum_viol = env.reset(), 0, 0, 0, 0, 0
    ep_surr_cost, cum_surr_cost = 0, 0
    already_intv, local_episodes = False, 1
    # Main loop: collect experience in env and update/log each epoch
    for epoch in range(epochs):
        if optimize_penalty:
            penalty_torch = torch.exp(penalty_param)
        for t in range(local_steps_per_epoch):
            a, v, vc, logp = ac.step(torch.as_tensor(o, dtype=torch.float32))
            next_o, r, d, info = env.step(a)
            intervened = info.get('intervened', False)
            if not intervened:
                # Ordinary step: real reward/cost, zero surrogate cost signal
                # (unless ignore_unsafe_cost is off and a real cost occurred).
                c = info.get('cost', 0.)
                violation = (c == 1.)
                ep_ret += r
                ep_cost += c
                ep_len += 1
                cum_cost += c
                cum_viol += violation
                surr_c = (1-ignore_unsafe_cost)*c
                ep_surr_cost += surr_c
                cum_surr_cost += surr_c
                buf.store(o, a, r, v, 0., vc, already_intv, logp)
            elif env.intervener.mode == env.intervener.MODE.SAFE_ACTION:
                # Intervener substituted a safe action: take stats from the
                # safe step and store the transition with unit surrogate cost.
                next_o, r_safe, d, info_safe = info['safe_step_output']
                c_safe = info_safe.get('cost', 0.)
                violation = (c_safe == 1.)
                ep_ret += r_safe
                ep_cost += c_safe
                ep_len += 1
                cum_cost += c_safe
                cum_viol += violation
                ep_surr_cost += 1.
                cum_surr_cost += 1.
                buf.store(o, a, 0.*r, v, 1., vc, already_intv, logp)
            elif env.intervener.mode == env.intervener.MODE.TERMINATE:
                violation = False
                ep_surr_cost += 1.
                cum_surr_cost += 1.
                buf.store(o, a, 0.*r, v, 1., vc, already_intv, logp)
            else:
                raise NotImplementedError
            # store whether agent has been intervened in current episode
            already_intv |= intervened
            # save and log
            logger.store(VVals=v, VcVals=vc)
            # Update obs (critical!)
            o = next_o
            if intervened:
                if env.intervener.mode == env.intervener.MODE.SAFE_ACTION:
                    # Let the intervener finish the episode; keep the episode
                    # statistics but store nothing more in the buffer.
                    while not (d or ep_len == max_ep_len):
                        _, _, _, info = env.step()
                        _, r_safe, d, info_safe = info['safe_step_output']
                        c_safe = info_safe.get('cost', 0.)
                        ep_ret += r_safe
                        ep_cost += c_safe
                        ep_len += 1
                        cum_cost += c_safe
                elif env.intervener.mode == env.intervener.MODE.TERMINATE:
                    pass
                else:
                    raise NotImplementedError
            timeout = ep_len == max_ep_len
            terminal = d or timeout
            epoch_ended = (t == local_steps_per_epoch-1)
            if terminal or epoch_ended:
                if epoch_ended and not terminal:
                    print('Warning: trajectory cut off by epoch at %d steps.' % ep_len,
                          flush=True)
                # if trajectory didn't reach terminal state, bootstrap value target
                if intervened and env.intervener.mode in [Intervener.MODE.SAFE_ACTION,
                                                          Intervener.MODE.TERMINATE]:
                    # After an intervention, pessimistically bootstrap the
                    # cost return at its maximum possible value.
                    v, vc = 0., vc_range[1]
                elif violation:
                    v = 0.
                    vc = (1-ignore_unsafe_cost)*vc_range[1]
                elif timeout or epoch_ended:
                    _, v, vc, _ = ac.step(torch.as_tensor(o, dtype=torch.float32))
                else:
                    v = vc = 0
                buf.finish_path(v, vc)
                if terminal:
                    # only save EpRet / EpLen if trajectory finished
                    logger.store(EpRet=ep_ret, EpCost=ep_cost, EpLen=ep_len,
                                 EpSurrCost=ep_surr_cost)
                    o, ep_ret, ep_cost, ep_len = env.reset(), 0, 0, 0
                    ep_surr_cost, already_intv = 0, False
                    local_episodes += 1
                elif intervened and env.intervener.mode == Intervener.MODE.SAFE_ACTION:
                    buf.finish_path(0., vc_range[1])
        # Save model
        if (epoch % save_freq == 0) or (epoch == epochs-1):
            logger.save_state({'env': env.env}, None)
        # Perform PPO update!
        update(update_penalty_every > 0 and (epoch+1) % update_penalty_every == 0)
        # Cumulative cost calculations (aggregated across MPI processes)
        cumulative_cost = mpi_sum(cum_cost)
        cumulative_surr_cost = mpi_sum(cum_surr_cost)
        cumulative_violations = mpi_sum(cum_viol)
        episodes = mpi_sum(local_episodes)
        cost_rate = cumulative_cost / episodes
        surr_cost_rate = cumulative_surr_cost / episodes
        viol_rate = cumulative_violations / episodes
        # Test the performance of the deterministic version of the agent.
        test_agent()
        o = env.reset()
        # Log info about epoch
        logger.log_tabular('Epoch', epoch)
        # Performance
        logger.log_tabular('EpRet', with_min_and_max=True)
        logger.log_tabular('EpCost', with_min_and_max=True)
        logger.log_tabular('EpLen', average_only=True)
        # Cost
        logger.log_tabular('CumulativeCost', cumulative_cost)
        logger.log_tabular('LogCumulativeCost', np.log10(cumulative_cost))
        logger.log_tabular('CostRate', cost_rate)
        logger.log_tabular('EpSurrCost', with_min_and_max=True)
        logger.log_tabular('CumulativeSurrCost', cumulative_surr_cost)
        logger.log_tabular('LogCumulativeSurrCost', np.log10(cumulative_surr_cost))
        logger.log_tabular('SurrCostRate', surr_cost_rate)
        logger.log_tabular('CumulativeViolations', cumulative_violations)
        logger.log_tabular('LogCumulativeViolations', np.log10(cumulative_violations))
        logger.log_tabular('ViolationRate', viol_rate)
        # Test performance
        logger.log_tabular('TestEpRet', with_min_and_max=True)
        logger.log_tabular('TestEpCost', with_min_and_max=True)
        logger.log_tabular('TestEpLen', average_only=True)
        # Penalty
        logger.log_tabular('Penalty', float(penalty_torch.item()))
        logger.log_tabular('LogPenalty', np.log10(float(penalty_torch.item())))
        # Value function stats
        logger.log_tabular('VVals', with_min_and_max=True)
        logger.log_tabular('VcVals', with_min_and_max=True)
        # Policy loss and change
        logger.log_tabular('LossPi', average_only=True)
        logger.log_tabular('DeltaLossPi', average_only=True)
        # Value loss and change
        logger.log_tabular('LossV', average_only=True)
        logger.log_tabular('LossVC', average_only=True)
        logger.log_tabular('DeltaLossV', average_only=True)
        logger.log_tabular('DeltaLossVC', average_only=True)
        # Policy stats
        logger.log_tabular('Entropy', average_only=True)
        logger.log_tabular('KL', average_only=True)
        # PPO stats
        logger.log_tabular('ClipFrac', average_only=True)
        logger.log_tabular('StopIter', average_only=True)
        # Time and steps elapsed
        logger.log_tabular('Time', time.time()-start_time)
        logger.log_tabular('TotalEnvInteracts', (epoch+1)*steps_per_epoch)
        logger.dump_tabular()
| [
"torch.as_tensor",
"numpy.log10",
"safe_rl.utils.mpi_tools.mpi_sum",
"numpy.log",
"torch.exp",
"torch.min",
"safe_rl.utils.mpi_tools.mpi_avg",
"safe_rl.utils.mpi_pytorch.setup_pytorch_for_mpi",
"safe_rl.utils.logx.EpochLogger",
"numpy.exp",
"numpy.random.seed",
"safe_rl.utils.mpi_tools.proc_id... | [((11885, 11908), 'safe_rl.utils.mpi_pytorch.setup_pytorch_for_mpi', 'setup_pytorch_for_mpi', ([], {}), '()\n', (11906, 11908), False, 'from safe_rl.utils.mpi_pytorch import setup_pytorch_for_mpi, sync_params, mpi_avg_grads\n'), ((11966, 11994), 'safe_rl.utils.logx.EpochLogger', 'EpochLogger', ([], {}), '(**logger_kwargs)\n', (11977, 11994), False, 'from safe_rl.utils.logx import EpochLogger\n'), ((12081, 12104), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (12098, 12104), False, 'import torch\n'), ((12109, 12129), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (12123, 12129), True, 'import numpy as np\n'), ((12742, 12757), 'safe_rl.utils.mpi_pytorch.sync_params', 'sync_params', (['ac'], {}), '(ac)\n', (12753, 12757), False, 'from safe_rl.utils.mpi_pytorch import setup_pytorch_for_mpi, sync_params, mpi_avg_grads\n'), ((14532, 14555), 'torch.nn.SmoothL1Loss', 'torch.nn.SmoothL1Loss', ([], {}), '()\n', (14553, 14555), False, 'import torch\n'), ((18200, 18211), 'time.time', 'time.time', ([], {}), '()\n', (18209, 18211), False, 'import time\n'), ((1113, 1145), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.float32'}), '(size, dtype=np.float32)\n', (1121, 1145), True, 'import numpy as np\n'), ((1169, 1201), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.float32'}), '(size, dtype=np.float32)\n', (1177, 1201), True, 'import numpy as np\n'), ((1225, 1257), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.float32'}), '(size, dtype=np.float32)\n', (1233, 1257), True, 'import numpy as np\n'), ((1281, 1313), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.float32'}), '(size, dtype=np.float32)\n', (1289, 1313), True, 'import numpy as np\n'), ((1375, 1407), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.float32'}), '(size, dtype=np.float32)\n', (1383, 1407), True, 'import numpy as np\n'), ((1432, 1464), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.float32'}), '(size, 
dtype=np.float32)\n', (1440, 1464), True, 'import numpy as np\n'), ((1489, 1521), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.float32'}), '(size, dtype=np.float32)\n', (1497, 1521), True, 'import numpy as np\n'), ((1546, 1578), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.float32'}), '(size, dtype=np.float32)\n', (1554, 1578), True, 'import numpy as np\n'), ((1604, 1633), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.bool'}), '(size, dtype=np.bool)\n', (1612, 1633), True, 'import numpy as np\n'), ((1658, 1690), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.float32'}), '(size, dtype=np.float32)\n', (1666, 1690), True, 'import numpy as np\n'), ((3492, 3556), 'numpy.append', 'np.append', (['((1 - self.gamma) * self.rew_buf[path_slice])', 'last_val'], {}), '((1 - self.gamma) * self.rew_buf[path_slice], last_val)\n', (3501, 3556), True, 'import numpy as np\n'), ((3568, 3613), 'numpy.append', 'np.append', (['self.val_buf[path_slice]', 'last_val'], {}), '(self.val_buf[path_slice], last_val)\n', (3577, 3613), True, 'import numpy as np\n'), ((3785, 3836), 'safe_rl.algos.cppo.core.discount_cumsum', 'core.discount_cumsum', (['deltas', '(self.gamma * self.lam)'], {}), '(deltas, self.gamma * self.lam)\n', (3805, 3836), True, 'import safe_rl.algos.cppo.core as core\n'), ((4073, 4139), 'numpy.append', 'np.append', (['((1 - self.gamma) * self.cost_buf[path_slice])', 'last_cval'], {}), '((1 - self.gamma) * self.cost_buf[path_slice], last_cval)\n', (4082, 4139), True, 'import numpy as np\n'), ((4152, 4199), 'numpy.append', 'np.append', (['self.cval_buf[path_slice]', 'last_cval'], {}), '(self.cval_buf[path_slice], last_cval)\n', (4161, 4199), True, 'import numpy as np\n'), ((4301, 4353), 'safe_rl.algos.cppo.core.discount_cumsum', 'core.discount_cumsum', (['cdeltas', '(self.gamma * self.lam)'], {}), '(cdeltas, self.gamma * self.lam)\n', (4321, 4353), True, 'import safe_rl.algos.cppo.core as core\n'), ((5114, 5153), 
'safe_rl.utils.mpi_tools.mpi_statistics_scalar', 'mpi_statistics_scalar', (['lagrange_adv_buf'], {}), '(lagrange_adv_buf)\n', (5135, 5153), False, 'from safe_rl.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs, mpi_sum\n'), ((12067, 12076), 'safe_rl.utils.mpi_tools.proc_id', 'proc_id', ([], {}), '()\n', (12074, 12076), False, 'from safe_rl.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs, mpi_sum\n'), ((13301, 13376), 'torch.tensor', 'torch.tensor', (['[penalty_param_init]'], {'requires_grad': '(True)', 'dtype': 'torch.float32'}), '([penalty_param_init], requires_grad=True, dtype=torch.float32)\n', (13313, 13376), False, 'import torch\n'), ((13438, 13462), 'torch.exp', 'torch.exp', (['penalty_param'], {}), '(penalty_param)\n', (13447, 13462), False, 'import torch\n'), ((13497, 13541), 'torch.tensor', 'torch.tensor', (['[penalty]'], {'dtype': 'torch.float32'}), '([penalty], dtype=torch.float32)\n', (13509, 13541), False, 'import torch\n'), ((13894, 13920), 'torch.exp', 'torch.exp', (['(logp - logp_old)'], {}), '(logp - logp_old)\n', (13903, 13920), False, 'import torch\n'), ((15151, 15187), 'torch.optim.Adam', 'Adam', (['[penalty_param]'], {'lr': 'penalty_lr'}), '([penalty_param], lr=penalty_lr)\n', (15155, 15187), False, 'from torch.optim import Adam\n'), ((22859, 22876), 'safe_rl.utils.mpi_tools.mpi_sum', 'mpi_sum', (['cum_cost'], {}), '(cum_cost)\n', (22866, 22876), False, 'from safe_rl.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs, mpi_sum\n'), ((22908, 22930), 'safe_rl.utils.mpi_tools.mpi_sum', 'mpi_sum', (['cum_surr_cost'], {}), '(cum_surr_cost)\n', (22915, 22930), False, 'from safe_rl.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs, mpi_sum\n'), ((22963, 22980), 'safe_rl.utils.mpi_tools.mpi_sum', 'mpi_sum', (['cum_viol'], {}), '(cum_viol)\n', (22970, 22980), False, 'from safe_rl.utils.mpi_tools import mpi_fork, 
mpi_avg, proc_id, mpi_statistics_scalar, num_procs, mpi_sum\n'), ((23000, 23023), 'safe_rl.utils.mpi_tools.mpi_sum', 'mpi_sum', (['local_episodes'], {}), '(local_episodes)\n', (23007, 23023), False, 'from safe_rl.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs, mpi_sum\n'), ((911, 945), 'safe_rl.algos.cppo.core.combined_shape', 'core.combined_shape', (['size', 'obs_dim'], {}), '(size, obs_dim)\n', (930, 945), True, 'import safe_rl.algos.cppo.core as core\n'), ((997, 1031), 'safe_rl.algos.cppo.core.combined_shape', 'core.combined_shape', (['size', 'act_dim'], {}), '(size, act_dim)\n', (1016, 1031), True, 'import safe_rl.algos.cppo.core as core\n'), ((3958, 3996), 'safe_rl.algos.cppo.core.discount_cumsum', 'core.discount_cumsum', (['rews', 'self.gamma'], {}), '(rews, self.gamma)\n', (3978, 3996), True, 'import safe_rl.algos.cppo.core as core\n'), ((4388, 4427), 'safe_rl.algos.cppo.core.discount_cumsum', 'core.discount_cumsum', (['costs', 'self.gamma'], {}), '(costs, self.gamma)\n', (4408, 4427), True, 'import safe_rl.algos.cppo.core as core\n'), ((5462, 5501), 'torch.as_tensor', 'torch.as_tensor', (['v'], {'dtype': 'torch.float32'}), '(v, dtype=torch.float32)\n', (5477, 5501), False, 'import torch\n'), ((12804, 12827), 'safe_rl.algos.cppo.core.count_vars', 'core.count_vars', (['module'], {}), '(module)\n', (12819, 12827), True, 'import safe_rl.algos.cppo.core as core\n'), ((13016, 13027), 'safe_rl.utils.mpi_tools.num_procs', 'num_procs', ([], {}), '()\n', (13025, 13027), False, 'from safe_rl.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs, mpi_sum\n'), ((13253, 13268), 'numpy.log', 'np.log', (['penalty'], {}), '(penalty)\n', (13259, 13268), True, 'import numpy as np\n'), ((13940, 13990), 'torch.clamp', 'torch.clamp', (['ratio', '(1 - clip_ratio)', '(1 + clip_ratio)'], {}), '(ratio, 1 - clip_ratio, 1 + clip_ratio)\n', (13951, 13990), False, 'import torch\n'), ((15884, 15906), 
'safe_rl.utils.mpi_tools.mpi_avg', 'mpi_avg', (["pi_info['kl']"], {}), "(pi_info['kl'])\n", (15891, 15906), False, 'from safe_rl.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs, mpi_sum\n'), ((16093, 16113), 'safe_rl.utils.mpi_pytorch.mpi_avg_grads', 'mpi_avg_grads', (['ac.pi'], {}), '(ac.pi)\n', (16106, 16113), False, 'from safe_rl.utils.mpi_pytorch import setup_pytorch_for_mpi, sync_params, mpi_avg_grads\n'), ((16415, 16434), 'safe_rl.utils.mpi_pytorch.mpi_avg_grads', 'mpi_avg_grads', (['ac.v'], {}), '(ac.v)\n', (16428, 16434), False, 'from safe_rl.utils.mpi_pytorch import setup_pytorch_for_mpi, sync_params, mpi_avg_grads\n'), ((16633, 16653), 'safe_rl.utils.mpi_pytorch.mpi_avg_grads', 'mpi_avg_grads', (['ac.vc'], {}), '(ac.vc)\n', (16646, 16653), False, 'from safe_rl.utils.mpi_pytorch import setup_pytorch_for_mpi, sync_params, mpi_avg_grads\n'), ((17011, 17036), 'safe_rl.utils.mpi_tools.mpi_avg', 'mpi_avg', (['penalty_param_np'], {}), '(penalty_param_np)\n', (17018, 17036), False, 'from safe_rl.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs, mpi_sum\n'), ((17070, 17095), 'torch.as_tensor', 'torch.as_tensor', (['avg_grad'], {}), '(avg_grad)\n', (17085, 17095), False, 'import torch\n'), ((17601, 17612), 'safe_rl.utils.mpi_tools.num_procs', 'num_procs', ([], {}), '()\n', (17610, 17612), False, 'from safe_rl.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs, mpi_sum\n'), ((18534, 18558), 'torch.exp', 'torch.exp', (['penalty_param'], {}), '(penalty_param)\n', (18543, 18558), False, 'import torch\n'), ((23700, 23725), 'numpy.log10', 'np.log10', (['cumulative_cost'], {}), '(cumulative_cost)\n', (23708, 23725), True, 'import numpy as np\n'), ((23964, 23994), 'numpy.log10', 'np.log10', (['cumulative_surr_cost'], {}), '(cumulative_surr_cost)\n', (23972, 23994), True, 'import numpy as np\n'), ((24183, 24214), 'numpy.log10', 'np.log10', (['cumulative_violations'], {}), 
'(cumulative_violations)\n', (24191, 24214), True, 'import numpy as np\n'), ((4990, 5010), 'numpy.exp', 'np.exp', (['(-log_penalty)'], {}), '(-log_penalty)\n', (4996, 5010), True, 'import numpy as np\n'), ((18644, 18683), 'torch.as_tensor', 'torch.as_tensor', (['o'], {'dtype': 'torch.float32'}), '(o, dtype=torch.float32)\n', (18659, 18683), False, 'import torch\n'), ((25557, 25568), 'time.time', 'time.time', ([], {}), '()\n', (25566, 25568), False, 'import time\n'), ((17840, 17879), 'torch.as_tensor', 'torch.as_tensor', (['o'], {'dtype': 'torch.float32'}), '(o, dtype=torch.float32)\n', (17855, 17879), False, 'import torch\n'), ((14054, 14086), 'torch.min', 'torch.min', (['(ratio * adv)', 'clip_adv'], {}), '(ratio * adv, clip_adv)\n', (14063, 14086), False, 'import torch\n'), ((14318, 14363), 'torch.as_tensor', 'torch.as_tensor', (['clipped'], {'dtype': 'torch.float32'}), '(clipped, dtype=torch.float32)\n', (14333, 14363), False, 'import torch\n'), ((21887, 21926), 'torch.as_tensor', 'torch.as_tensor', (['o'], {'dtype': 'torch.float32'}), '(o, dtype=torch.float32)\n', (21902, 21926), False, 'import torch\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
DataLoader generator for the sequence-based pretrain models for protein.
"""
import numpy as np
import paddle.fluid as fluid
from pahelix.utils.data_utils import get_part_files
from pahelix.utils.language_model_tools import apply_bert_mask
from pahelix.utils.protein_tools import ProteinTokenizer
def pretrain_sample_reader(filenames, batch_size):
    """DataLoader for pretraining tasks.

    Args:
        filenames(list): filenames of the input data (``.npz`` files with
            ``token_ids`` and ``lengths`` arrays).
        batch_size(int): size of the each batch.

    Returns:
        reader(func): generator function yielding lists of
            (masked_token_ids, position_ids, labels) tuples, each column
            reshaped to (length, 1) for one sequence.
    """
    def __reader__():
        examples = []
        for filename in filenames:
            data = np.load(filename)
            # BERT-style masking: returns masked inputs plus the original labels.
            masked_token_ids, labels = apply_bert_mask(data['token_ids'], ProteinTokenizer)
            lengths = data['lengths']
            offset = 0
            for i in range(lengths.size):
                tokens = masked_token_ids[offset:offset + lengths[i]].reshape(lengths[i], 1)
                pos = np.arange(lengths[i]).reshape(lengths[i], 1)
                label = labels[offset:offset + lengths[i]].reshape(lengths[i], 1)
                examples.append((tokens, pos, label))
                if len(examples) == batch_size:
                    yield examples
                    # Rebind to a fresh list instead of examples.clear():
                    # clear() would empty the very list object just handed
                    # to the consumer, corrupting already-yielded batches.
                    examples = []
                offset += lengths[i]
        # Flush the final, possibly short, batch.
        if len(examples) > 0:
            yield examples
    return __reader__
def sequence_sample_reader(filenames, batch_size, label_name):
    """DataLoader for sequence classification/regression tasks.

    Args:
        filenames(list): filenames of the input data (``.npz`` files with
            ``token_ids``, ``lengths`` and per-token label arrays).
        batch_size(int): size of the each batch.
        label_name(str): label name.

    Returns:
        reader(func): generator function yielding lists of
            (token_ids, position_ids, labels) tuples, each column reshaped
            to (length, 1) for one sequence.
    """
    def __reader__():
        examples = []
        for filename in filenames:
            data = np.load(filename)
            token_ids = data['token_ids']
            labels = data[label_name]
            lengths = data['lengths']
            offset = 0
            for i in range(lengths.size):
                tokens = token_ids[offset:offset + lengths[i]].reshape(lengths[i], 1)
                pos = np.arange(lengths[i]).reshape(lengths[i], 1)
                # Sequence-level task: one label per token of the sequence.
                label = labels[offset:offset + lengths[i]].reshape(lengths[i], 1)
                examples.append((tokens, pos, label))
                if len(examples) == batch_size:
                    yield examples
                    # Rebind to a fresh list instead of examples.clear():
                    # clear() would empty the very list object just handed
                    # to the consumer, corrupting already-yielded batches.
                    examples = []
                offset += lengths[i]
        # Flush the final, possibly short, batch.
        if len(examples) > 0:
            yield examples
    return __reader__
def normal_sample_reader(filenames, batch_size, label_name):
    """DataLoader for classification/regression tasks.

    Args:
        filenames(list): filenames of the input data (``.npz`` files with
            ``token_ids``, ``lengths`` and per-sequence label arrays).
        batch_size(int): size of the each batch.
        label_name(str): label name.

    Returns:
        reader(func): generator function yielding lists of
            (token_ids, position_ids, label) tuples; the label is a single
            (1, 1) value per sequence.
    """
    def __reader__():
        examples = []
        for filename in filenames:
            data = np.load(filename)
            token_ids = data['token_ids']
            labels = data[label_name]
            lengths = data['lengths']
            offset = 0
            for i in range(lengths.size):
                tokens = token_ids[offset:offset + lengths[i]].reshape(lengths[i], 1)
                pos = np.arange(lengths[i]).reshape(lengths[i], 1)
                # Whole-sequence task: exactly one label per sequence.
                label = labels[i:i + 1].reshape(1, 1)
                examples.append((tokens, pos, label))
                if len(examples) == batch_size:
                    yield examples
                    # Rebind to a fresh list instead of examples.clear():
                    # clear() would empty the very list object just handed
                    # to the consumer, corrupting already-yielded batches.
                    examples = []
                offset += lengths[i]
        # Flush the final, possibly short, batch.
        if len(examples) > 0:
            yield examples
    return __reader__
def get_sample_generator(filenames, batch_size, model_config):
    """Select the data-loader generator matching the configured task.

    Args:
        filenames(list): filenames of the input data.
        batch_size(int): size of the each batch.
        model_config(dict): the dictionary containing model configuration.

    Raises:
        NameError: if key ``task`` in ``model_config`` is invalid.

    Returns:
        reader(func): data reader.
    """
    task = model_config['task']
    # Pretraining needs no label name; dispatch it first.
    if task == 'pretrain':
        return pretrain_sample_reader(filenames, batch_size)
    # All supervised tasks share the same (optional) label-name setting.
    label_name = model_config.get('label_name', 'labels')
    if task == 'seq_classification':
        return sequence_sample_reader(filenames, batch_size, label_name)
    if task in ('classification', 'regression'):
        return normal_sample_reader(filenames, batch_size, label_name)
    raise NameError('Task %s is unsupport.' % task)
def setup_data_loader(input_list, model_config, data_path, trainer_id, trainer_num, places, batch_size):
    """Set up a ``fluid`` DataLoader feeding this trainer's shard of the data.

    Args:
        input_list(list): the feed_list of the model.
        model_config(dict): the dictionary containing model configuration.
        data_path(str): the directory containing data files.
        trainer_id(int): the id of current trainer.
        trainer_num(int): the number of trainers.
        places: the place to store the loaded data.
        batch_size(int): batch size.

    Raises:
        NameError: if key ``task`` in ``model_config`` is invalid.

    Returns:
        data_loader(fluid.io.DataLoader): data reader.
    """
    # Each trainer only reads its own partition of the data files.
    part_filenames = get_part_files(data_path, trainer_id, trainer_num)
    loader = fluid.io.DataLoader.from_generator(
        feed_list=input_list,
        capacity=256,
        use_double_buffer=True,
        iterable=True)
    loader.set_sample_list_generator(
        get_sample_generator(part_filenames, batch_size, model_config),
        places=places)
    return loader
def gen_batch_data(examples, tokenizer, place):
    """Generate a feed dict for prediction from raw amino acid sequences.

    Args:
        examples(list): the list of examples containing amino acid sequences.
        tokenizer(pahelix.utils.ProteinTools.ProteinTokenizer): tokenizer to generate the token ids.
        place: the place to store the loaded data.

    Returns:
        batch_data: dict mapping feed names ('protein_token', 'protein_pos')
            to LoD tensors.
    """
    flat_token_ids = []
    flat_pos = []
    # LoD boundaries: lod_offsets[i] is where sequence i starts in the flat arrays.
    lod_offsets = [0]
    for sequence in examples:
        ids = tokenizer.gen_token_ids(sequence)
        flat_token_ids.extend(ids)
        flat_pos.extend(range(len(ids)))
        lod_offsets.append(len(flat_token_ids))

    def _lod_tensor(values):
        # Flat int64 column vector carrying the per-sequence LoD information.
        tensor = fluid.core.LoDTensor()
        tensor.set(np.array(values, dtype='int64').reshape([-1, 1]), place)
        tensor.set_lod([lod_offsets])
        return tensor

    return {'protein_token': _lod_tensor(flat_token_ids),
            'protein_pos': _lod_tensor(flat_pos)}
| [
"pahelix.utils.data_utils.get_part_files",
"paddle.fluid.io.DataLoader.from_generator",
"pahelix.utils.language_model_tools.apply_bert_mask",
"numpy.array",
"numpy.load",
"paddle.fluid.core.LoDTensor",
"numpy.arange"
] | [((6018, 6131), 'paddle.fluid.io.DataLoader.from_generator', 'fluid.io.DataLoader.from_generator', ([], {'feed_list': 'input_list', 'capacity': '(256)', 'use_double_buffer': '(True)', 'iterable': '(True)'}), '(feed_list=input_list, capacity=256,\n use_double_buffer=True, iterable=True)\n', (6052, 6131), True, 'import paddle.fluid as fluid\n'), ((6194, 6244), 'pahelix.utils.data_utils.get_part_files', 'get_part_files', (['data_path', 'trainer_id', 'trainer_num'], {}), '(data_path, trainer_id, trainer_num)\n', (6208, 6244), False, 'from pahelix.utils.data_utils import get_part_files\n'), ((7088, 7110), 'paddle.fluid.core.LoDTensor', 'fluid.core.LoDTensor', ([], {}), '()\n', (7108, 7110), True, 'import paddle.fluid as fluid\n'), ((7242, 7264), 'paddle.fluid.core.LoDTensor', 'fluid.core.LoDTensor', ([], {}), '()\n', (7262, 7264), True, 'import paddle.fluid as fluid\n'), ((1277, 1294), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (1284, 1294), True, 'import numpy as np\n'), ((1334, 1386), 'pahelix.utils.language_model_tools.apply_bert_mask', 'apply_bert_mask', (["data['token_ids']", 'ProteinTokenizer'], {}), "(data['token_ids'], ProteinTokenizer)\n", (1349, 1386), False, 'from pahelix.utils.language_model_tools import apply_bert_mask\n'), ((2458, 2475), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (2465, 2475), True, 'import numpy as np\n'), ((3609, 3626), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (3616, 3626), True, 'import numpy as np\n'), ((7132, 7166), 'numpy.array', 'np.array', (['token_ids'], {'dtype': '"""int64"""'}), "(token_ids, dtype='int64')\n", (7140, 7166), True, 'import numpy as np\n'), ((7284, 7312), 'numpy.array', 'np.array', (['pos'], {'dtype': '"""int64"""'}), "(pos, dtype='int64')\n", (7292, 7312), True, 'import numpy as np\n'), ((1606, 1627), 'numpy.arange', 'np.arange', (['lengths[i]'], {}), '(lengths[i])\n', (1615, 1627), True, 'import numpy as np\n'), ((2768, 2789), 'numpy.arange', 
'np.arange', (['lengths[i]'], {}), '(lengths[i])\n', (2777, 2789), True, 'import numpy as np\n'), ((3919, 3940), 'numpy.arange', 'np.arange', (['lengths[i]'], {}), '(lengths[i])\n', (3928, 3940), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Test script for pyvi/volterra/tools.py
Notes
-----
Developed for Python 3.6
@author: <NAME> (<EMAIL>)
"""
#==============================================================================
# Importations
#==============================================================================
import unittest
import itertools
import numpy as np
from pyvi.volterra.tools import (kernel_nb_coeff, series_nb_coeff, vec2kernel,
vec2series, kernel2vec)
from pyvi.utilities.mathbox import binomial
#==============================================================================
# Test Class
#==============================================================================
class KernelNbCoeffTest(unittest.TestCase):
    """Checks kernel_nb_coeff() against the closed-form coefficient counts."""

    def setUp(self):
        self.Nmax = 5
        self.Mmax = 20
        self.iter_obj = itertools.product(range(1, self.Nmax + 1),
                                          range(self.Mmax))

    def test_nb_coeff_symmetric_form(self):
        for N, M in self.iter_obj:
            with self.subTest(i=(N, M)):
                self.assertEqual(kernel_nb_coeff(N, M, form='sym'),
                                 binomial(M + N - 1, N))

    def test_nb_coeff_triangular_form(self):
        for N, M in self.iter_obj:
            with self.subTest(i=(N, M)):
                self.assertEqual(kernel_nb_coeff(N, M, form='tri'),
                                 binomial(M + N - 1, N))

    def test_nb_coeff_raw_form(self):
        # With no imposed symmetry every index combination is a coefficient.
        for N, M in self.iter_obj:
            with self.subTest(i=(N, M)):
                self.assertEqual(kernel_nb_coeff(N, M, form=None), M ** N)
class SeriesNbCoeffTest(unittest.TestCase):
    """Checks series_nb_coeff() for scalar, per-order and list arguments."""

    def setUp(self):
        self.Nmax = 5
        self.Mmax = 5
        self.M_list = [10, 0, 3, 0, 2]
        self.M_list_results = [('sym', 26), ('tri', 26), (None, 69)]
        self.form_list = [None, None, 'sym', 'tri', 'sym']
        self.iter_obj = itertools.product(range(1, self.Nmax + 1),
                                          range(self.Mmax))

    def test_nb_coeff_symmetric_form(self):
        for N, M in self.iter_obj:
            with self.subTest(i=(N, M)):
                self.assertEqual(series_nb_coeff(N, M, form='sym'),
                                 binomial(M + N, N) - 1)

    def test_nb_coeff_triangular_form(self):
        for N, M in self.iter_obj:
            with self.subTest(i=(N, M)):
                self.assertEqual(series_nb_coeff(N, M, form='tri'),
                                 binomial(M + N, N) - 1)

    def test_nb_coeff_raw_form(self):
        for N, M in self.iter_obj:
            with self.subTest(i=(N, M)):
                expected = sum(M ** n for n in range(1, N + 1))
                self.assertEqual(series_nb_coeff(N, M, form=None), expected)

    def test_form_as_list(self):
        M, N = self.Mmax, self.Nmax
        expected = binomial(M + N, N) - 1 + binomial(M, 2)
        self.assertEqual(series_nb_coeff(N, M, form=self.form_list), expected)

    def test_M_as_list(self):
        for form, expected in self.M_list_results:
            with self.subTest(i=form):
                nb_coeff = series_nb_coeff(len(self.M_list), self.M_list,
                                           form=form)
                self.assertEqual(nb_coeff, expected)

    def test_out_by_order_type(self):
        for N, M in self.iter_obj:
            with self.subTest(i=(N, M)):
                self.assertIsInstance(series_nb_coeff(N, M, out_by_order=True),
                                      list)

    def test_out_by_order_length(self):
        # One count per homogeneous order, hence a list of length N.
        for N, M in self.iter_obj:
            with self.subTest(i=(N, M)):
                nb_coeff = series_nb_coeff(N, M, out_by_order=True)
                self.assertEqual(len(nb_coeff), N)
class Vec2KernelTest(unittest.TestCase):
    """Checks vec2kernel() on hand-computed order-2 and order-3 kernels.

    Fixtures: ``h_vec`` holds the packed coefficient vectors (values 1..10
    for order 2, 1..20 for order 3); ``h_tri`` and ``h_sym`` are the
    expected triangular and symmetrized kernel arrays for those vectors.
    """

    def setUp(self):
        self.M = 4
        # Coefficient vectors sized by the number of distinct coefficients.
        self.h_vec = {2: np.arange(1, binomial(self.M + 1, 2)+1),
                      3: np.arange(1, binomial(self.M + 2, 3)+1)}
        self.h_tri = {2: np.array([[1, 2, 3, 4],
                                  [0, 5, 6, 7],
                                  [0, 0, 8, 9],
                                  [0, 0, 0, 10]]),
                      3: np.array([[[1, 2, 3, 4],
                                   [0, 5, 6, 7],
                                   [0, 0, 8, 9],
                                   [0, 0, 0, 10]],
                                  [[0, 0, 0, 0],
                                   [0, 11, 12, 13],
                                   [0, 0, 14, 15],
                                   [0, 0, 0, 16]],
                                  [[0, 0, 0, 0],
                                   [0, 0, 0, 0],
                                   [0, 0, 17, 18],
                                   [0, 0, 0, 19]],
                                  [[0, 0, 0, 0],
                                   [0, 0, 0, 0],
                                   [0, 0, 0, 0],
                                   [0, 0, 0, 20]]])}
        # Symmetric form: off-diagonal entries shared across permutations.
        self.h_sym = {2: np.array([[1, 1, 1.5, 2],
                                  [1, 5, 3, 3.5],
                                  [1.5, 3, 8, 4.5],
                                  [2, 3.5, 4.5, 10]]),
                      3: np.array([[[1., 2/3, 1, 4/3],
                                   [2/3, 5/3, 1, 7/6],
                                   [1, 1, 8/3, 1.5],
                                   [4/3, 7/6, 1.5, 10/3]],
                                  [[2/3, 5/3, 1, 7/6],
                                   [5/3, 11, 4, 13/3],
                                   [1, 4, 14/3, 2.5],
                                   [7/6, 13/3, 2.5, 16/3]],
                                  [[1, 1, 8/3, 1.5],
                                   [1, 4, 14/3, 2.5],
                                   [8/3, 14/3, 17, 6],
                                   [1.5, 2.5, 6, 19/3]],
                                  [[4/3, 7/6, 1.5, 10/3],
                                   [7/6, 13/3, 2.5, 16/3],
                                   [1.5, 2.5, 6, 19/3],
                                   [10/3, 16/3, 19/3, 20]]])}

    def test_triangular_form(self):
        """vec2kernel(form='tri') rebuilds the triangular kernel."""
        for n in [2, 3]:
            with self.subTest(i=n):
                result = vec2kernel(self.h_vec[n], n, self.M, form='tri')
                self.assertTrue(np.all(result == self.h_tri[n]))

    def test_symmetric_form(self):
        """vec2kernel(form='sym') rebuilds the symmetrized kernel."""
        for n in [2, 3]:
            with self.subTest(i=n):
                result = vec2kernel(self.h_vec[n], n, self.M, form='sym')
                self.assertTrue(np.all(result == self.h_sym[n]))

    def test_None_form(self):
        """form=None behaves like the triangular form."""
        for n in [2, 3]:
            with self.subTest(i=n):
                result = vec2kernel(self.h_vec[n], n, self.M, form=None)
                self.assertTrue(np.all(result == self.h_tri[n]))

    def test_error_raised(self):
        """A vector sized for order n must be rejected for order n+1."""
        n = 2
        self.assertRaises(ValueError, vec2kernel, self.h_vec[n], n+1, self.M)
class Vec2SeriesErrorTest(unittest.TestCase):
    """Checks that vec2series() rejects coefficient containers of wrong type."""

    def test_error_raised_if_wrong_type(self):
        bad_input = list()
        self.assertRaises(TypeError, vec2series, bad_input, 3, 3)
class Vec2SeriesTest(Vec2KernelTest):
    """Checks vec2series() for a full order-1..3 series, reusing the kernel
    fixtures from Vec2KernelTest plus a trivial order-1 vector."""

    # Shadow the inherited vec2kernel error test with a non-callable
    # attribute so unittest does not collect it for this class.
    test_error_raised = property()

    def setUp(self):
        super().setUp()
        self.N = 3
        self.h_vec[1] = np.arange(1, self.M + 1)
        self.f = {1: self.h_vec[1], 2: self.h_vec[2], 3: self.h_vec[3]}
        # Order 1 is its own kernel in every form.
        self.h_tri[1] = self.h_vec[1]
        self.h_sym[1] = self.h_vec[1]

    def _check(self, form, expected):
        # Compare every returned kernel against the expected fixture.
        kernels = vec2series(self.h_vec, self.N, self.M, form=form)
        self.assertTrue(all(np.all(h == expected[n])
                            for n, h in kernels.items()))

    def test_triangular_form(self):
        self._check('tri', self.h_tri)

    def test_symmetric_form(self):
        self._check('sym', self.h_sym)

    def test_None_form(self):
        self._check(None, self.h_tri)
class Vec2Series_F_AsVector_Test(Vec2SeriesTest):
    """Repeats the Vec2SeriesTest checks with the coefficients supplied as
    one flat vector (orders concatenated in increasing order)."""

    def setUp(self):
        super().setUp()
        ordered_vectors = [vec for _, vec in sorted(self.h_vec.items())]
        self.h_vec = np.concatenate(ordered_vectors, axis=0)
class Vec2Series_M_AsList_Test(Vec2SeriesTest):
    """Repeats the Vec2SeriesTest checks with ``M`` given as a list — one
    memory length per order — and matching hand-computed kernels."""

    def setUp(self):
        super().setUp()
        self.M = [4, 3, 2]
        # Vector sizes follow the per-order memory lengths.
        self.h_vec = {1: np.arange(1, binomial(self.M[0], 1)+1),
                      2: np.arange(1, binomial(self.M[1]+1, 2)+1),
                      3: np.arange(1, binomial(self.M[2]+2, 3)+1)}
        self.h_tri = {1: np.array([1, 2, 3, 4]),
                      2: np.array([[1, 2, 3],
                                    [0, 4, 5],
                                    [0, 0, 6]]),
                      3: np.array([[[1, 2],
                                     [0, 3]],
                                    [[0, 0],
                                     [0, 4]]])}
        self.h_sym = {1: np.array([1, 2, 3, 4]),
                      2: np.array([[1., 1, 3/2],
                                    [1, 4, 5/2],
                                    [3/2, 5/2, 6]]),
                      3: np.array([[[1., 2/3],
                                     [2/3, 1]],
                                    [[2/3, 1],
                                     [1, 4]]])}
class Vec2Series_Form_AsList_Test(Vec2KernelTest):
    """Checks vec2series() when a different form is requested per order."""

    # Shadow the inherited per-kernel tests with non-callable attributes
    # so unittest does not collect them; only the dict-input test applies.
    test_triangular_form = property()
    test_symmetric_form = property()
    test_None_form = property()

    def setUp(self):
        super().setUp()
        self.N = 3
        self.form = ['sym', 'tri', None]
        self.h_vec[1] = np.arange(1, self.M + 1)
        # Expected kernels for the requested form of each order.
        self.h = {1: self.h_vec[1], 2: self.h_tri[2], 3: self.h_tri[3]}

    def test_f_as_dict(self):
        kernels = vec2series(self.h_vec, self.N, self.M, form=self.form)
        self.assertTrue(all(np.all(h == self.h[n])
                            for n, h in kernels.items()))
class Kernel2VecTest(Vec2KernelTest):
    """Checks kernel2vec(), the inverse of vec2kernel, on the fixtures of
    Vec2KernelTest plus a non-symmetric 'raw' kernel variant."""

    def setUp(self):
        super().setUp()
        # Raw (neither triangular nor symmetric) kernels whose folded
        # coefficients still reduce to h_vec.
        self.h_raw = {2: np.array([[1, 1, 3, 4],
                                  [1, 5, 3, 7],
                                  [0, 3, 8, 9],
                                  [0, 0, 0, 10]]),
                      3: np.array([[[1, 2, 1, 4],
                                   [0, 5, 6, 7],
                                   [1, 0, 8, 9],
                                   [0, 0, 0, 10]],
                                  [[0, 0, 0, 0],
                                   [0, 11, 12, 13],
                                   [0, 0, 14, 15],
                                   [0, 0, 0, 16]],
                                  [[1, 0, 0, 0],
                                   [0, 0, 0, 0],
                                   [0, 0, 17, 18],
                                   [0, 0, 0, 19]],
                                  [[0, 0, 0, 0],
                                   [0, 0, 0, 0],
                                   [0, 0, 0, 0],
                                   [0, 0, 0, 20]]])}

    def test_triangular_form(self):
        """Triangular kernels pack back into the original vectors."""
        for n in [2, 3]:
            with self.subTest(i=n):
                result = kernel2vec(self.h_tri[n], form='tri')
                self.assertTrue(np.all(result == self.h_vec[n]))

    def test_symmetric_form(self):
        """Symmetric kernels pack back into the original vectors."""
        for n in [2, 3]:
            with self.subTest(i=n):
                result = kernel2vec(self.h_sym[n], form='sym')
                self.assertTrue(np.all(result == self.h_vec[n]))

    def test_None_form(self):
        """Raw kernels (form=None) pack back into the original vectors."""
        for n in [2, 3]:
            with self.subTest(i=n):
                result = kernel2vec(self.h_raw[n], form=None)
                self.assertTrue(np.all(result == self.h_vec[n]))

    def test_error_raised(self):
        """Non-hypercubic arrays must be rejected."""
        h_not_squared = np.zeros((3, 3, 4))
        self.assertRaises(ValueError, kernel2vec, h_not_squared)
#==============================================================================
# Main script
#==============================================================================
if __name__ == '__main__':
    # Run the whole test suite when executed as a script.
    unittest.main()
| [
"pyvi.volterra.tools.series_nb_coeff",
"pyvi.volterra.tools.kernel_nb_coeff",
"pyvi.volterra.tools.kernel2vec",
"numpy.array",
"numpy.zeros",
"pyvi.volterra.tools.vec2kernel",
"unittest.main",
"numpy.all",
"pyvi.volterra.tools.vec2series",
"numpy.arange",
"pyvi.utilities.mathbox.binomial"
] | [((12294, 12309), 'unittest.main', 'unittest.main', ([], {}), '()\n', (12307, 12309), False, 'import unittest\n'), ((2989, 3031), 'pyvi.volterra.tools.series_nb_coeff', 'series_nb_coeff', (['N', 'M'], {'form': 'self.form_list'}), '(N, M, form=self.form_list)\n', (3004, 3031), False, 'from pyvi.volterra.tools import kernel_nb_coeff, series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((7362, 7386), 'numpy.arange', 'np.arange', (['(1)', '(self.M + 1)'], {}), '(1, self.M + 1)\n', (7371, 7386), True, 'import numpy as np\n'), ((7588, 7638), 'pyvi.volterra.tools.vec2series', 'vec2series', (['self.h_vec', 'self.N', 'self.M'], {'form': '"""tri"""'}), "(self.h_vec, self.N, self.M, form='tri')\n", (7598, 7638), False, 'from pyvi.volterra.tools import kernel_nb_coeff, series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((7804, 7854), 'pyvi.volterra.tools.vec2series', 'vec2series', (['self.h_vec', 'self.N', 'self.M'], {'form': '"""sym"""'}), "(self.h_vec, self.N, self.M, form='sym')\n", (7814, 7854), False, 'from pyvi.volterra.tools import kernel_nb_coeff, series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((8015, 8064), 'pyvi.volterra.tools.vec2series', 'vec2series', (['self.h_vec', 'self.N', 'self.M'], {'form': 'None'}), '(self.h_vec, self.N, self.M, form=None)\n', (8025, 8064), False, 'from pyvi.volterra.tools import kernel_nb_coeff, series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((9765, 9789), 'numpy.arange', 'np.arange', (['(1)', '(self.M + 1)'], {}), '(1, self.M + 1)\n', (9774, 9789), True, 'import numpy as np\n'), ((9963, 10017), 'pyvi.volterra.tools.vec2series', 'vec2series', (['self.h_vec', 'self.N', 'self.M'], {'form': 'self.form'}), '(self.h_vec, self.N, self.M, form=self.form)\n', (9973, 10017), False, 'from pyvi.volterra.tools import kernel_nb_coeff, series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((11955, 11974), 'numpy.zeros', 'np.zeros', (['(3, 3, 4)'], {}), '((3, 3, 4))\n', (11963, 11974), True, 'import numpy as np\n'), 
((2955, 2969), 'pyvi.utilities.mathbox.binomial', 'binomial', (['M', '(2)'], {}), '(M, 2)\n', (2963, 2969), False, 'from pyvi.utilities.mathbox import binomial\n'), ((4078, 4145), 'numpy.array', 'np.array', (['[[1, 2, 3, 4], [0, 5, 6, 7], [0, 0, 8, 9], [0, 0, 0, 10]]'], {}), '([[1, 2, 3, 4], [0, 5, 6, 7], [0, 0, 8, 9], [0, 0, 0, 10]])\n', (4086, 4145), True, 'import numpy as np\n'), ((4277, 4543), 'numpy.array', 'np.array', (['[[[1, 2, 3, 4], [0, 5, 6, 7], [0, 0, 8, 9], [0, 0, 0, 10]], [[0, 0, 0, 0],\n [0, 11, 12, 13], [0, 0, 14, 15], [0, 0, 0, 16]], [[0, 0, 0, 0], [0, 0, \n 0, 0], [0, 0, 17, 18], [0, 0, 0, 19]], [[0, 0, 0, 0], [0, 0, 0, 0], [0,\n 0, 0, 0], [0, 0, 0, 20]]]'], {}), '([[[1, 2, 3, 4], [0, 5, 6, 7], [0, 0, 8, 9], [0, 0, 0, 10]], [[0, 0,\n 0, 0], [0, 11, 12, 13], [0, 0, 14, 15], [0, 0, 0, 16]], [[0, 0, 0, 0],\n [0, 0, 0, 0], [0, 0, 17, 18], [0, 0, 0, 19]], [[0, 0, 0, 0], [0, 0, 0, \n 0], [0, 0, 0, 0], [0, 0, 0, 20]]])\n', (4285, 4543), True, 'import numpy as np\n'), ((5094, 5173), 'numpy.array', 'np.array', (['[[1, 1, 1.5, 2], [1, 5, 3, 3.5], [1.5, 3, 8, 4.5], [2, 3.5, 4.5, 10]]'], {}), '([[1, 1, 1.5, 2], [1, 5, 3, 3.5], [1.5, 3, 8, 4.5], [2, 3.5, 4.5, 10]])\n', (5102, 5173), True, 'import numpy as np\n'), ((5305, 5745), 'numpy.array', 'np.array', (['[[[1.0, 2 / 3, 1, 4 / 3], [2 / 3, 5 / 3, 1, 7 / 6], [1, 1, 8 / 3, 1.5], [4 /\n 3, 7 / 6, 1.5, 10 / 3]], [[2 / 3, 5 / 3, 1, 7 / 6], [5 / 3, 11, 4, 13 /\n 3], [1, 4, 14 / 3, 2.5], [7 / 6, 13 / 3, 2.5, 16 / 3]], [[1, 1, 8 / 3, \n 1.5], [1, 4, 14 / 3, 2.5], [8 / 3, 14 / 3, 17, 6], [1.5, 2.5, 6, 19 / 3\n ]], [[4 / 3, 7 / 6, 1.5, 10 / 3], [7 / 6, 13 / 3, 2.5, 16 / 3], [1.5, \n 2.5, 6, 19 / 3], [10 / 3, 16 / 3, 19 / 3, 20]]]'], {}), '([[[1.0, 2 / 3, 1, 4 / 3], [2 / 3, 5 / 3, 1, 7 / 6], [1, 1, 8 / 3, \n 1.5], [4 / 3, 7 / 6, 1.5, 10 / 3]], [[2 / 3, 5 / 3, 1, 7 / 6], [5 / 3, \n 11, 4, 13 / 3], [1, 4, 14 / 3, 2.5], [7 / 6, 13 / 3, 2.5, 16 / 3]], [[1,\n 1, 8 / 3, 1.5], [1, 4, 14 / 3, 2.5], [8 / 3, 14 / 3, 17, 6], [1.5, 
2.5,\n 6, 19 / 3]], [[4 / 3, 7 / 6, 1.5, 10 / 3], [7 / 6, 13 / 3, 2.5, 16 / 3],\n [1.5, 2.5, 6, 19 / 3], [10 / 3, 16 / 3, 19 / 3, 20]]])\n', (5313, 5745), True, 'import numpy as np\n'), ((7657, 7683), 'numpy.all', 'np.all', (['(h == self.h_tri[n])'], {}), '(h == self.h_tri[n])\n', (7663, 7683), True, 'import numpy as np\n'), ((7873, 7899), 'numpy.all', 'np.all', (['(h == self.h_sym[n])'], {}), '(h == self.h_sym[n])\n', (7879, 7899), True, 'import numpy as np\n'), ((8083, 8109), 'numpy.all', 'np.all', (['(h == self.h_tri[n])'], {}), '(h == self.h_tri[n])\n', (8089, 8109), True, 'import numpy as np\n'), ((8745, 8767), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (8753, 8767), True, 'import numpy as np\n'), ((8794, 8837), 'numpy.array', 'np.array', (['[[1, 2, 3], [0, 4, 5], [0, 0, 6]]'], {}), '([[1, 2, 3], [0, 4, 5], [0, 0, 6]])\n', (8802, 8837), True, 'import numpy as np\n'), ((8934, 8980), 'numpy.array', 'np.array', (['[[[1, 2], [0, 3]], [[0, 0], [0, 4]]]'], {}), '([[[1, 2], [0, 3]], [[0, 0], [0, 4]]])\n', (8942, 8980), True, 'import numpy as np\n'), ((9114, 9136), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (9122, 9136), True, 'import numpy as np\n'), ((9163, 9224), 'numpy.array', 'np.array', (['[[1.0, 1, 3 / 2], [1, 4, 5 / 2], [3 / 2, 5 / 2, 6]]'], {}), '([[1.0, 1, 3 / 2], [1, 4, 5 / 2], [3 / 2, 5 / 2, 6]])\n', (9171, 9224), True, 'import numpy as np\n'), ((9312, 9372), 'numpy.array', 'np.array', (['[[[1.0, 2 / 3], [2 / 3, 1]], [[2 / 3, 1], [1, 4]]]'], {}), '([[[1.0, 2 / 3], [2 / 3, 1]], [[2 / 3, 1], [1, 4]]])\n', (9320, 9372), True, 'import numpy as np\n'), ((10036, 10058), 'numpy.all', 'np.all', (['(h == self.h[n])'], {}), '(h == self.h[n])\n', (10042, 10058), True, 'import numpy as np\n'), ((10236, 10303), 'numpy.array', 'np.array', (['[[1, 1, 3, 4], [1, 5, 3, 7], [0, 3, 8, 9], [0, 0, 0, 10]]'], {}), '([[1, 1, 3, 4], [1, 5, 3, 7], [0, 3, 8, 9], [0, 0, 0, 10]])\n', (10244, 10303), True, 'import numpy as 
np\n'), ((10435, 10701), 'numpy.array', 'np.array', (['[[[1, 2, 1, 4], [0, 5, 6, 7], [1, 0, 8, 9], [0, 0, 0, 10]], [[0, 0, 0, 0],\n [0, 11, 12, 13], [0, 0, 14, 15], [0, 0, 0, 16]], [[1, 0, 0, 0], [0, 0, \n 0, 0], [0, 0, 17, 18], [0, 0, 0, 19]], [[0, 0, 0, 0], [0, 0, 0, 0], [0,\n 0, 0, 0], [0, 0, 0, 20]]]'], {}), '([[[1, 2, 1, 4], [0, 5, 6, 7], [1, 0, 8, 9], [0, 0, 0, 10]], [[0, 0,\n 0, 0], [0, 11, 12, 13], [0, 0, 14, 15], [0, 0, 0, 16]], [[1, 0, 0, 0],\n [0, 0, 0, 0], [0, 0, 17, 18], [0, 0, 0, 19]], [[0, 0, 0, 0], [0, 0, 0, \n 0], [0, 0, 0, 0], [0, 0, 0, 20]]])\n', (10443, 10701), True, 'import numpy as np\n'), ((1104, 1137), 'pyvi.volterra.tools.kernel_nb_coeff', 'kernel_nb_coeff', (['N', 'M'], {'form': '"""sym"""'}), "(N, M, form='sym')\n", (1119, 1137), False, 'from pyvi.volterra.tools import kernel_nb_coeff, series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((1355, 1388), 'pyvi.volterra.tools.kernel_nb_coeff', 'kernel_nb_coeff', (['N', 'M'], {'form': '"""tri"""'}), "(N, M, form='tri')\n", (1370, 1388), False, 'from pyvi.volterra.tools import kernel_nb_coeff, series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((1598, 1630), 'pyvi.volterra.tools.kernel_nb_coeff', 'kernel_nb_coeff', (['N', 'M'], {'form': 'None'}), '(N, M, form=None)\n', (1613, 1630), False, 'from pyvi.volterra.tools import kernel_nb_coeff, series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((2232, 2265), 'pyvi.volterra.tools.series_nb_coeff', 'series_nb_coeff', (['N', 'M'], {'form': '"""sym"""'}), "(N, M, form='sym')\n", (2247, 2265), False, 'from pyvi.volterra.tools import kernel_nb_coeff, series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((2483, 2516), 'pyvi.volterra.tools.series_nb_coeff', 'series_nb_coeff', (['N', 'M'], {'form': '"""tri"""'}), "(N, M, form='tri')\n", (2498, 2516), False, 'from pyvi.volterra.tools import kernel_nb_coeff, series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((2726, 2758), 'pyvi.volterra.tools.series_nb_coeff', 'series_nb_coeff', 
(['N', 'M'], {'form': 'None'}), '(N, M, form=None)\n', (2741, 2758), False, 'from pyvi.volterra.tools import kernel_nb_coeff, series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((2930, 2948), 'pyvi.utilities.mathbox.binomial', 'binomial', (['(M + N)', 'N'], {}), '(M + N, N)\n', (2938, 2948), False, 'from pyvi.utilities.mathbox import binomial\n'), ((3506, 3546), 'pyvi.volterra.tools.series_nb_coeff', 'series_nb_coeff', (['N', 'M'], {'out_by_order': '(True)'}), '(N, M, out_by_order=True)\n', (3521, 3546), False, 'from pyvi.volterra.tools import kernel_nb_coeff, series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((3745, 3785), 'pyvi.volterra.tools.series_nb_coeff', 'series_nb_coeff', (['N', 'M'], {'out_by_order': '(True)'}), '(N, M, out_by_order=True)\n', (3760, 3785), False, 'from pyvi.volterra.tools import kernel_nb_coeff, series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((6318, 6366), 'pyvi.volterra.tools.vec2kernel', 'vec2kernel', (['self.h_vec[n]', 'n', 'self.M'], {'form': '"""tri"""'}), "(self.h_vec[n], n, self.M, form='tri')\n", (6328, 6366), False, 'from pyvi.volterra.tools import kernel_nb_coeff, series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((6554, 6602), 'pyvi.volterra.tools.vec2kernel', 'vec2kernel', (['self.h_vec[n]', 'n', 'self.M'], {'form': '"""sym"""'}), "(self.h_vec[n], n, self.M, form='sym')\n", (6564, 6602), False, 'from pyvi.volterra.tools import kernel_nb_coeff, series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((6785, 6832), 'pyvi.volterra.tools.vec2kernel', 'vec2kernel', (['self.h_vec[n]', 'n', 'self.M'], {'form': 'None'}), '(self.h_vec[n], n, self.M, form=None)\n', (6795, 6832), False, 'from pyvi.volterra.tools import kernel_nb_coeff, series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((11350, 11387), 'pyvi.volterra.tools.kernel2vec', 'kernel2vec', (['self.h_tri[n]'], {'form': '"""tri"""'}), "(self.h_tri[n], form='tri')\n", (11360, 11387), False, 'from pyvi.volterra.tools import kernel_nb_coeff, 
series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((11575, 11612), 'pyvi.volterra.tools.kernel2vec', 'kernel2vec', (['self.h_sym[n]'], {'form': '"""sym"""'}), "(self.h_sym[n], form='sym')\n", (11585, 11612), False, 'from pyvi.volterra.tools import kernel_nb_coeff, series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((11795, 11831), 'pyvi.volterra.tools.kernel2vec', 'kernel2vec', (['self.h_raw[n]'], {'form': 'None'}), '(self.h_raw[n], form=None)\n', (11805, 11831), False, 'from pyvi.volterra.tools import kernel_nb_coeff, series_nb_coeff, vec2kernel, vec2series, kernel2vec\n'), ((1181, 1203), 'pyvi.utilities.mathbox.binomial', 'binomial', (['(M + N - 1)', 'N'], {}), '(M + N - 1, N)\n', (1189, 1203), False, 'from pyvi.utilities.mathbox import binomial\n'), ((1432, 1454), 'pyvi.utilities.mathbox.binomial', 'binomial', (['(M + N - 1)', 'N'], {}), '(M + N - 1, N)\n', (1440, 1454), False, 'from pyvi.utilities.mathbox import binomial\n'), ((3959, 3982), 'pyvi.utilities.mathbox.binomial', 'binomial', (['(self.M + 1)', '(2)'], {}), '(self.M + 1, 2)\n', (3967, 3982), False, 'from pyvi.utilities.mathbox import binomial\n'), ((4025, 4048), 'pyvi.utilities.mathbox.binomial', 'binomial', (['(self.M + 2)', '(3)'], {}), '(self.M + 2, 3)\n', (4033, 4048), False, 'from pyvi.utilities.mathbox import binomial\n'), ((6399, 6430), 'numpy.all', 'np.all', (['(result == self.h_tri[n])'], {}), '(result == self.h_tri[n])\n', (6405, 6430), True, 'import numpy as np\n'), ((6635, 6666), 'numpy.all', 'np.all', (['(result == self.h_sym[n])'], {}), '(result == self.h_sym[n])\n', (6641, 6666), True, 'import numpy as np\n'), ((6865, 6896), 'numpy.all', 'np.all', (['(result == self.h_tri[n])'], {}), '(result == self.h_tri[n])\n', (6871, 6896), True, 'import numpy as np\n'), ((8559, 8581), 'pyvi.utilities.mathbox.binomial', 'binomial', (['self.M[0]', '(1)'], {}), '(self.M[0], 1)\n', (8567, 8581), False, 'from pyvi.utilities.mathbox import binomial\n'), ((8624, 8650), 
'pyvi.utilities.mathbox.binomial', 'binomial', (['(self.M[1] + 1)', '(2)'], {}), '(self.M[1] + 1, 2)\n', (8632, 8650), False, 'from pyvi.utilities.mathbox import binomial\n'), ((8691, 8717), 'pyvi.utilities.mathbox.binomial', 'binomial', (['(self.M[2] + 2)', '(3)'], {}), '(self.M[2] + 2, 3)\n', (8699, 8717), False, 'from pyvi.utilities.mathbox import binomial\n'), ((11420, 11451), 'numpy.all', 'np.all', (['(result == self.h_vec[n])'], {}), '(result == self.h_vec[n])\n', (11426, 11451), True, 'import numpy as np\n'), ((11645, 11676), 'numpy.all', 'np.all', (['(result == self.h_vec[n])'], {}), '(result == self.h_vec[n])\n', (11651, 11676), True, 'import numpy as np\n'), ((11864, 11895), 'numpy.all', 'np.all', (['(result == self.h_vec[n])'], {}), '(result == self.h_vec[n])\n', (11870, 11895), True, 'import numpy as np\n'), ((2309, 2327), 'pyvi.utilities.mathbox.binomial', 'binomial', (['(M + N)', 'N'], {}), '(M + N, N)\n', (2317, 2327), False, 'from pyvi.utilities.mathbox import binomial\n'), ((2560, 2578), 'pyvi.utilities.mathbox.binomial', 'binomial', (['(M + N)', 'N'], {}), '(M + N, N)\n', (2568, 2578), False, 'from pyvi.utilities.mathbox import binomial\n')] |
from firedrake import *
import fireshape as fs
import fireshape.zoo as fsz
import ROL
from L2tracking_PDEconstraint import Moore_Spence
from L2tracking_objective import L2trackingObjective
import numpy as np
import datetime
RB = __import__("rayleigh-benard")
def shape_optimization(target):
    """Run a fireshape/ROL shape optimization for the Rayleigh-Benard problem.

    Builds the Moore-Spence PDE constraint on the problem mesh, wraps the
    L2-tracking objective (parametrized by ``target``) as a reduced
    objective, and solves it with ROL's trust-region method. Writes results
    under ``solution/<timestamp>/`` and the final mesh coordinates to
    ``optimization_solution/mesh_final.txt``.

    Args:
        target: target value passed to L2trackingObjective (per the caller,
            the target Hopf bifurcation parameter).
    """
    # NOTE(review): print_lambda is defined but never called inside this
    # function — possibly a leftover debugging callback; confirm or remove.
    def print_lambda(sol):
        # Report the bifurcation parameters stored in solution components 3 and 4.
        with sol.sub(3).dat.vec_ro as x:
            Ra = x.norm()
        with sol.sub(4).dat.vec_ro as x:
            mu = x.norm()
        print("### Ra = %f, mu = %f ###\n" % (Ra, mu))
    # Setup problem
    problem = RB.RayleighBenardProblem()
    mesh = problem.mesh(comm=COMM_WORLD)
    Q = fs.FeControlSpace(mesh)
    inner = fs.ElasticityInnerProduct(Q)
    q = fs.ControlVector(Q, inner)
    # Setup PDE constraint
    mesh_m = Q.mesh_m
    e = Moore_Spence(mesh_m)
    # Compute the initial guess for the generalized Moore-Spence system
    e.compute_guess()
    e.sol_opt.assign(e.solution)
    # create PDEconstrained objective functional
    now = datetime.datetime.now()
    pvd_path = "solution/"+now.strftime("%d-%m-%Y-%H_%M_%S")+"/u.pvd"
    results_path = "solution/"+now.strftime("%d-%m-%Y-%H_%M_%S")+"/results.csv"
    J_ = L2trackingObjective(e, Q, pvd_path, results_path, target)
    J = fs.ReducedObjective(J_, e)
    # ROL parameters
    # NOTE(review): 'Radius Growing Rate' appears twice in the Trust Region
    # dict below; Python keeps the last occurrence (2.5 — same value here).
    params_dict = {
     'Status Test':{'Gradient Tolerance':1e-6,
                    'Step Tolerance':1e-12,
                    'Iteration Limit':100},
     'Step':{'Type':'Trust Region',
             'Trust Region':{'Initial Radius': -1,
                             'Maximum Radius':1e8,
                             'Subproblem Solver':'Dogleg',
                             'Radius Growing Rate':2.5,
                             'Step Acceptance Threshold':0.05,
                             'Radius Shrinking Threshold':0.05,
                             'Radius Growing Threshold':0.9,
                             'Radius Shrinking Rate (Negative rho)':0.0625,
                             'Radius Shrinking Rate (Positive rho)':0.25,
                             'Radius Growing Rate':2.5,
                             'Sufficient Decrease Parameter':1e-4,
                             'Safeguard Size':100.0,
                            }
            },
     'General':{'Print Verbosity':0,
                'Secant':{'Type':'Limited-Memory BFGS', #BFGS-based Hessian-update in trust-region model
                          'Maximum Storage':10
                         }
                }
    }
    params = ROL.ParameterList(params_dict, "Parameters")
    opt_problem = ROL.OptimizationProblem(J, q)
    solver = ROL.OptimizationSolver(opt_problem, params)
    solver.solve()
    # Write final bifurcation parameter
    with e.solution_n.sub(3).dat.vec_ro as x:
        param = x.norm()
    print("\nRa = %e"%param)
    # Save final mesh
    np.savetxt("optimization_solution/mesh_final.txt", Q.mesh_m.coordinates.dat.data[:,:], delimiter=',')
if __name__ == "__main__":
"""
Set the target Hopf bifurcation parameter.
The default settings correspond to reproducing Fig 3 of the paper.
"""
target = 1.25e5
# Run the shape optimization
shape_optimization(target) | [
"fireshape.ReducedObjective",
"fireshape.ElasticityInnerProduct",
"L2tracking_objective.L2trackingObjective",
"datetime.datetime.now",
"ROL.OptimizationProblem",
"fireshape.ControlVector",
"numpy.savetxt",
"fireshape.FeControlSpace",
"ROL.ParameterList",
"ROL.OptimizationSolver",
"L2tracking_PDE... | [((645, 668), 'fireshape.FeControlSpace', 'fs.FeControlSpace', (['mesh'], {}), '(mesh)\n', (662, 668), True, 'import fireshape as fs\n'), ((681, 709), 'fireshape.ElasticityInnerProduct', 'fs.ElasticityInnerProduct', (['Q'], {}), '(Q)\n', (706, 709), True, 'import fireshape as fs\n'), ((718, 744), 'fireshape.ControlVector', 'fs.ControlVector', (['Q', 'inner'], {}), '(Q, inner)\n', (734, 744), True, 'import fireshape as fs\n'), ((807, 827), 'L2tracking_PDEconstraint.Moore_Spence', 'Moore_Spence', (['mesh_m'], {}), '(mesh_m)\n', (819, 827), False, 'from L2tracking_PDEconstraint import Moore_Spence\n'), ((1024, 1047), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1045, 1047), False, 'import datetime\n'), ((1207, 1264), 'L2tracking_objective.L2trackingObjective', 'L2trackingObjective', (['e', 'Q', 'pvd_path', 'results_path', 'target'], {}), '(e, Q, pvd_path, results_path, target)\n', (1226, 1264), False, 'from L2tracking_objective import L2trackingObjective\n'), ((1273, 1299), 'fireshape.ReducedObjective', 'fs.ReducedObjective', (['J_', 'e'], {}), '(J_, e)\n', (1292, 1299), True, 'import fireshape as fs\n'), ((2911, 2955), 'ROL.ParameterList', 'ROL.ParameterList', (['params_dict', '"""Parameters"""'], {}), "(params_dict, 'Parameters')\n", (2928, 2955), False, 'import ROL\n'), ((2974, 3003), 'ROL.OptimizationProblem', 'ROL.OptimizationProblem', (['J', 'q'], {}), '(J, q)\n', (2997, 3003), False, 'import ROL\n'), ((3017, 3060), 'ROL.OptimizationSolver', 'ROL.OptimizationSolver', (['opt_problem', 'params'], {}), '(opt_problem, params)\n', (3039, 3060), False, 'import ROL\n'), ((3256, 3363), 'numpy.savetxt', 'np.savetxt', (['"""optimization_solution/mesh_final.txt"""', 'Q.mesh_m.coordinates.dat.data[:, :]'], {'delimiter': '""","""'}), "('optimization_solution/mesh_final.txt', Q.mesh_m.coordinates.dat\n .data[:, :], delimiter=',')\n", (3266, 3363), True, 'import numpy as np\n')] |
from PyQt5.QtCore import QThread, pyqtSignal
from skimage import color
import numpy as np
from natsort import natsorted
import os
from PIL import Image
class Overlay(QThread):
    """Worker thread that builds brown-pixel overlays for every PNG in a folder.

    For each ``*.png`` under ``path`` (skipping previously generated
    ``*Overlay.png`` outputs) it masks brown pixels in HSV space, blends the
    mask over a greyscale copy of the image and saves the result next to the
    source as ``<name>_Overlay.png``.
    """
    info = pyqtSignal(str)          # status text for the UI
    countChanged = pyqtSignal(int)  # progress: number of images finished
    figures = pyqtSignal()          # preview image updated (self.current_image)
    maxcuts = pyqtSignal(int)       # total number of images to process
    def __init__(self, path, save=False):
        super().__init__()
        self.inputpath = path
        self.save = save
        self.threshold = None  # optional saturation threshold override
    def run(self, debug=False):
        """Process every PNG in the input folder, emitting progress as we go."""
        files = [f for f in natsorted(os.listdir(self.inputpath)) if not f.endswith("Overlay.png")]
        total = len([f for f in files if f.endswith('.png')])
        self.maxcuts.emit(total)
        done = 0
        for file in files:
            if not file.endswith('.png'):
                continue
            self.info.emit("Preparing "+file)
            print(file)
            # Drop a potential alpha channel; keep RGB only.
            image = np.array(Image.open(self.inputpath+os.sep+file))[:, :, :3]
            self.info.emit("Reading " + file)
            # Downsampled thumbnail for the UI preview.
            self.current_image = image[::10, ::10]
            self.figures.emit()
            self.overlay(image, filename=file, debug=debug, save=self.save)
            # BUG FIX: increment before emitting so the progress bar reaches
            # ``total`` on the last image (it previously stopped at total-1).
            done += 1
            self.countChanged.emit(done)
            self.info.emit(file + " Overlay Done")
        self.info.emit("All Overlays Saved - ready")
    def overlay(self, image, filename, debug=False, save=False):
        """Compute the brown-pixel mask for ``image`` and save the blended overlay.

        NOTE(review): the ``save`` and ``debug`` flags are currently ignored;
        the overlay is always written to disk, exactly as before.
        """
        img_hsv = color.rgb2hsv(image)
        img_hue = img_hsv[:, :, 0]
        image_sat = img_hsv[:, :, 1]
        hue = np.logical_and(img_hue > 0.02, img_hue < 0.10)  # BROWN PIXELS BETWEEN 0.02 and 0.10
        self.info.emit("Preparing thresholds for " + filename)
        if self.threshold:
            mask = np.logical_and(hue, image_sat > self.threshold)
        else:
            print("normal threshold")
            mask = np.logical_and(hue, image_sat > 0.79)
        mask = mask.astype(int)
        self.current_image = mask[::10, ::10]
        self.figures.emit()
        self.info.emit("Creating overlay for " + filename)
        alpha = 0.9
        imbw = color.rgb2gray(image)
        rows, cols = imbw.shape
        # Construct a colour image to superimpose
        color_mask = np.zeros((rows, cols, 3))
        color_mask[:, :, 1] = mask  # green channel carries the mask
        # Construct RGB version of grey-level image
        img_color = np.dstack((imbw, imbw, imbw))
        img_hsv = color.rgb2hsv(img_color)
        color_mask_hsv = color.rgb2hsv(color_mask)
        # Take hue/saturation from the mask, keep value from the grey image.
        img_hsv[..., 0] = color_mask_hsv[..., 0]
        img_hsv[..., 1] = color_mask_hsv[..., 1] * alpha
        self.info.emit("Converting to RGB " + filename)
        img_masked = color.hsv2rgb(img_hsv)
        print("displaying output")
        self.info.emit("Saving " + filename)
        imagesave = Image.fromarray((img_masked * 255).astype(np.uint8))
        imagesave.save(self.inputpath+os.sep+filename+'_Overlay.png')
# mpimg.imsave(self.inputpath+os.sep+filename+'_Overlay.png', img_masked) | [
"numpy.dstack",
"skimage.color.rgb2gray",
"PyQt5.QtCore.pyqtSignal",
"skimage.color.hsv2rgb",
"os.listdir",
"numpy.logical_and",
"PIL.Image.open",
"numpy.zeros",
"skimage.color.rgb2hsv"
] | [((190, 205), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['str'], {}), '(str)\n', (200, 205), False, 'from PyQt5.QtCore import QThread, pyqtSignal\n'), ((225, 240), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['int'], {}), '(int)\n', (235, 240), False, 'from PyQt5.QtCore import QThread, pyqtSignal\n'), ((255, 267), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (265, 267), False, 'from PyQt5.QtCore import QThread, pyqtSignal\n'), ((282, 297), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['int'], {}), '(int)\n', (292, 297), False, 'from PyQt5.QtCore import QThread, pyqtSignal\n'), ((1475, 1495), 'skimage.color.rgb2hsv', 'color.rgb2hsv', (['image'], {}), '(image)\n', (1488, 1495), False, 'from skimage import color\n'), ((1582, 1627), 'numpy.logical_and', 'np.logical_and', (['(img_hue > 0.02)', '(img_hue < 0.1)'], {}), '(img_hue > 0.02, img_hue < 0.1)\n', (1596, 1627), True, 'import numpy as np\n'), ((2133, 2154), 'skimage.color.rgb2gray', 'color.rgb2gray', (['image'], {}), '(image)\n', (2147, 2154), False, 'from skimage import color\n'), ((2258, 2283), 'numpy.zeros', 'np.zeros', (['(rows, cols, 3)'], {}), '((rows, cols, 3))\n', (2266, 2283), True, 'import numpy as np\n'), ((2419, 2448), 'numpy.dstack', 'np.dstack', (['(imbw, imbw, imbw)'], {}), '((imbw, imbw, imbw))\n', (2428, 2448), True, 'import numpy as np\n'), ((2467, 2491), 'skimage.color.rgb2hsv', 'color.rgb2hsv', (['img_color'], {}), '(img_color)\n', (2480, 2491), False, 'from skimage import color\n'), ((2517, 2542), 'skimage.color.rgb2hsv', 'color.rgb2hsv', (['color_mask'], {}), '(color_mask)\n', (2530, 2542), False, 'from skimage import color\n'), ((2726, 2748), 'skimage.color.hsv2rgb', 'color.hsv2rgb', (['img_hsv'], {}), '(img_hsv)\n', (2739, 2748), False, 'from skimage import color\n'), ((1776, 1823), 'numpy.logical_and', 'np.logical_and', (['hue', '(image_sat > self.threshold)'], {}), '(hue, image_sat > self.threshold)\n', (1790, 1823), True, 'import numpy as np\n'), ((1895, 1932), 
'numpy.logical_and', 'np.logical_and', (['hue', '(image_sat > 0.79)'], {}), '(hue, image_sat > 0.79)\n', (1909, 1932), True, 'import numpy as np\n'), ((524, 550), 'os.listdir', 'os.listdir', (['self.inputpath'], {}), '(self.inputpath)\n', (534, 550), False, 'import os\n'), ((850, 892), 'PIL.Image.open', 'Image.open', (['(self.inputpath + os.sep + file)'], {}), '(self.inputpath + os.sep + file)\n', (860, 892), False, 'from PIL import Image\n')] |
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder, FunctionTransformer, StandardScaler, MinMaxScaler
from sklearn.linear_model import LogisticRegressionCV, LinearRegression, RidgeCV, LassoCV
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from interpret.utils import unify_data, autogen_schema
from interpret.glassbox.ebm.ebm import EBMPreprocessor
from .base import MyGAMPlotMixinBase, MyCommonBase
from .EncodingBase import LabelEncodingRegressorMixin, LabelEncodingClassifierMixin, OnehotEncodingRegressorMixin, OnehotEncodingClassifierMixin
from .utils import my_interpolate
class MyTransformMixin(object):
    """Identity transform hook: subclasses override to preprocess X."""
    def transform(self, X):
        # Default behaviour: hand the input back untouched.
        return X
class MyTransformClassifierMixin(MyTransformMixin):
    """Classifier mixin that applies ``transform`` before ``predict_proba``."""
    def predict_proba(self, X):
        transformed = self.transform(X)
        return super().predict_proba(transformed)
class MyTransformRegressionMixin(MyTransformMixin):
    """Regressor mixin that applies ``transform`` before ``predict``."""
    def predict(self, X):
        transformed = self.transform(X)
        return super().predict(transformed)
class MyStandardizedTransformMixin(object):
    """Mixin that z-scores inputs with a StandardScaler fitted during ``fit``."""
    def __init__(self, *args, **kwargs):
        # Only meant to be combined with one of the transform mixins.
        assert isinstance(self, (MyTransformClassifierMixin, MyTransformRegressionMixin))
        super().__init__(*args, **kwargs)
        self.scaler = StandardScaler()
    def fit(self, X, y):
        scaled = self.scaler.fit_transform(X)
        return super().fit(scaled, y)
    def transform(self, X):
        scaled = self.scaler.transform(X)
        return super().transform(scaled)
class MyMaxMinTransformMixin(object):
    """Mixin that rescales inputs to [-1, 1] with a MinMaxScaler fitted during ``fit``."""
    def __init__(self, *args, **kwargs):
        # Only meant to be combined with one of the transform mixins.
        assert isinstance(self, (MyTransformClassifierMixin, MyTransformRegressionMixin))
        super().__init__(*args, **kwargs)
        self.scaler = MinMaxScaler(feature_range=(-1, 1))
    def fit(self, X, y):
        scaled = self.scaler.fit_transform(X)
        return super().fit(scaled, y)
    def transform(self, X):
        scaled = self.scaler.transform(X)
        return super().transform(scaled)
class MyEBMPreprocessorTransformMixin(object):
    """Mixin that bins features with interpret's EBMPreprocessor before fit/transform."""
    def __init__(self, binning='uniform', **kwargs):
        # Only meant to be combined with one of the transform mixins.
        assert isinstance(self, (MyTransformClassifierMixin, MyTransformRegressionMixin))
        super().__init__(**kwargs)
        # Learned in fit(); feature_types/schema stay None unless set externally.
        self.prepro_feature_names = None
        self.feature_types = None
        self.schema = None
        self.binning = binning
    def fit(self, X, y):
        # unify_data normalizes X/y and infers feature names.
        X, y, self.prepro_feature_names, _ = unify_data(
            X, y
        )
        # Build preprocessor
        self.schema_ = self.schema
        if self.schema_ is None:
            # No explicit schema: derive one from the data and feature metadata.
            self.schema_ = autogen_schema(
                X, feature_names=self.prepro_feature_names, feature_types=self.feature_types
            )
        self.preprocessor_ = EBMPreprocessor(schema=self.schema_, binning=self.binning)
        self.preprocessor_.fit(X)
        X = self.preprocessor_.transform(X)
        return super().fit(X, y)
    def transform(self, X):
        # Re-unify with the feature names/types recorded during fit().
        X, _, _, _ = unify_data(X, None, self.prepro_feature_names, self.feature_types)
        X = self.preprocessor_.transform(X)
        return super().transform(X)
class MyMarginalizedTransformMixin(object):
    """Mixin that replaces each feature value with the mean label seen for
    that value during fit (marginal / target encoding).

    ``X_mapping`` maps column index -> pd.Series (raw value -> mean label).
    Values unseen at transform time are handled by interpolating the fitted
    mapping via ``my_interpolate``.
    """
    def __init__(self, *args, **kwargs):
        # Only meant to be combined with one of the transform mixins.
        assert isinstance(self, (MyTransformClassifierMixin, MyTransformRegressionMixin))
        super().__init__(*args, **kwargs)
        self.X_mapping = {}
    def fit(self, X, y):
        # BUG FIX: always build a fresh frame -- the original added the
        # temporary 'label' column in place on a caller-supplied DataFrame.
        X = pd.DataFrame(X)
        original_columns = X.columns
        X['label'] = y
        for col_idx, col in enumerate(original_columns):
            # Mean label per distinct raw value of this column.
            self.X_mapping[col_idx] = X.groupby(col).label.mean()
        X = X.drop('label', axis=1)
        X = self._transform(X)
        return super().fit(X, y)
    def transform(self, X):
        X = self._transform(X)
        return super().transform(X)
    def _transform(self, X):
        assert len(self.X_mapping) > 0
        if isinstance(X, pd.DataFrame):
            X = X.values
        # BUG FIX: np.float was removed in NumPy 1.24; builtin float is float64.
        new_X = np.empty(X.shape, dtype=float)
        for col_idx in range(X.shape[1]):
            x_unique = np.unique(X[:, col_idx])  # np.unique returns sorted values
            x_map = self.X_mapping[col_idx]
            if len(x_map) != len(x_unique) or np.any(x_map.index != x_unique):
                # Values unseen during fit: interpolate the mapping onto them.
                new_y = my_interpolate(x_map.index, x_map.values, x_unique)
                x_map = pd.Series(new_y, index=x_unique)
            new_X[:, col_idx] = x_map[X[:, col_idx]].values
        return new_X
class MyIndicatorTransformMixin(object):
    """Mixin that one-hot encodes columns with more than two distinct values."""
    def fit(self, X, y):
        if isinstance(X, np.ndarray):
            X = pd.DataFrame(X)
        # Boolean mask: True for columns treated as categorical (>2 levels).
        categorical_features = X.nunique() > 2
        if np.any(categorical_features):
            self.enc = ColumnTransformer([
                ('onehot', OneHotEncoder(handle_unknown='error'), categorical_features)
            ], remainder='passthrough')
        else:
            # Nothing to encode: identity transformer keeps the pipeline uniform.
            self.enc = FunctionTransformer()
        X = self.enc.fit_transform(X.values)
        return super().fit(X, y)
    def transform(self, X):
        encoded = self.enc.transform(X)
        return super().transform(encoded)
''' ----------------------- Transformations Mixin Class Ends ----------------- '''
# Somehow we need to set all the arguments on the parameter lists __init__ to avoid the error
class MyLogisticRegressionCVBase(LogisticRegressionCV):
    """LogisticRegressionCV preconfigured with the project's default hyper-parameters."""
    def __init__(self, Cs=12, cv=5, penalty='l2', random_state=1377, solver='lbfgs', max_iter=3000, n_jobs=-1, **kwargs):
        # All defaults are forwarded explicitly so sklearn's clone/get_params works.
        super().__init__(Cs=Cs, cv=cv, penalty=penalty, random_state=random_state, solver=solver,
                         max_iter=max_iter, n_jobs=n_jobs, **kwargs)
    def _my_predict_logodds(self, X):
        # Decision scores computed on the (mixin-)transformed inputs.
        return self.decision_function(self.transform(X))
class MyLinearRegressionCVBase(RidgeCV):
    """RidgeCV searching 12 alphas log-spaced over [1e-3, 1e3] by default."""
    def __init__(self, alphas=None, **kwargs):
        # Avoid an ndarray default argument (evaluated once at definition
        # time and shared by every call). ``None`` keeps existing callers'
        # behaviour: the same default grid is built per instantiation.
        if alphas is None:
            alphas = np.logspace(-3, 3, 12)
        super().__init__(alphas, **kwargs)
class MyLogisticRegressionCV(OnehotEncodingClassifierMixin, MyGAMPlotMixinBase, MyStandardizedTransformMixin, MyTransformClassifierMixin, MyLogisticRegressionCVBase):
    """One-hot encoded, standardized logistic regression CV."""
    pass
class MyLinearRegressionRidgeCV(OnehotEncodingRegressorMixin, MyGAMPlotMixinBase, MyStandardizedTransformMixin, MyTransformRegressionMixin, MyLinearRegressionCVBase):
    """One-hot encoded, standardized ridge regression CV."""
    pass
class MyMarginalLogisticRegressionCV(LabelEncodingClassifierMixin, MyGAMPlotMixinBase, MyEBMPreprocessorTransformMixin, MyMarginalizedTransformMixin, MyTransformClassifierMixin, MyLogisticRegressionCVBase):
    """Logistic regression CV on EBM-binned, marginally encoded features."""
    pass
class MyMarginalLinearRegressionCV(LabelEncodingRegressorMixin, MyGAMPlotMixinBase, MyEBMPreprocessorTransformMixin, MyMarginalizedTransformMixin, MyTransformRegressionMixin, MyLinearRegressionCVBase):
    """Ridge regression CV on EBM-binned, marginally encoded features."""
    pass
class MyIndicatorLogisticRegressionCV(LabelEncodingClassifierMixin, MyGAMPlotMixinBase, MyEBMPreprocessorTransformMixin, MyIndicatorTransformMixin, MyTransformClassifierMixin, MyLogisticRegressionCVBase):
    """Logistic regression CV on EBM-binned, indicator-encoded features."""
    pass
class MyIndicatorLinearRegressionCV(LabelEncodingRegressorMixin, MyGAMPlotMixinBase, MyEBMPreprocessorTransformMixin, MyIndicatorTransformMixin, MyTransformRegressionMixin, MyLinearRegressionCVBase):
    """Ridge regression CV on EBM-binned, indicator-encoded features."""
    pass
class MyRandomForestClassifier(LabelEncodingClassifierMixin, MyCommonBase, RandomForestClassifier):
    """Random forest classifier baseline; not an additive (GAM) model."""
    @property
    def is_GAM(self):
        return False
class MyRandomForestRegressor(LabelEncodingRegressorMixin, MyCommonBase, RandomForestRegressor):
    """Random forest regressor baseline; not an additive (GAM) model."""
    @property
    def is_GAM(self):
        return False
| [
"pandas.Series",
"interpret.glassbox.ebm.ebm.EBMPreprocessor",
"numpy.unique",
"interpret.utils.autogen_schema",
"sklearn.preprocessing.OneHotEncoder",
"numpy.any",
"interpret.utils.unify_data",
"sklearn.preprocessing.StandardScaler",
"numpy.empty",
"pandas.DataFrame",
"sklearn.preprocessing.Fun... | [((1308, 1324), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1322, 1324), False, 'from sklearn.preprocessing import OneHotEncoder, FunctionTransformer, StandardScaler, MinMaxScaler\n'), ((1762, 1797), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(-1, 1)'}), '(feature_range=(-1, 1))\n', (1774, 1797), False, 'from sklearn.preprocessing import OneHotEncoder, FunctionTransformer, StandardScaler, MinMaxScaler\n'), ((2432, 2448), 'interpret.utils.unify_data', 'unify_data', (['X', 'y'], {}), '(X, y)\n', (2442, 2448), False, 'from interpret.utils import unify_data, autogen_schema\n'), ((2749, 2807), 'interpret.glassbox.ebm.ebm.EBMPreprocessor', 'EBMPreprocessor', ([], {'schema': 'self.schema_', 'binning': 'self.binning'}), '(schema=self.schema_, binning=self.binning)\n', (2764, 2807), False, 'from interpret.glassbox.ebm.ebm import EBMPreprocessor\n'), ((2970, 3036), 'interpret.utils.unify_data', 'unify_data', (['X', 'None', 'self.prepro_feature_names', 'self.feature_types'], {}), '(X, None, self.prepro_feature_names, self.feature_types)\n', (2980, 3036), False, 'from interpret.utils import unify_data, autogen_schema\n'), ((4055, 4088), 'numpy.empty', 'np.empty', (['X.shape'], {'dtype': 'np.float'}), '(X.shape, dtype=np.float)\n', (4063, 4088), True, 'import numpy as np\n'), ((5846, 5868), 'numpy.logspace', 'np.logspace', (['(-3)', '(3)', '(12)'], {}), '(-3, 3, 12)\n', (5857, 5868), True, 'import numpy as np\n'), ((2596, 2693), 'interpret.utils.autogen_schema', 'autogen_schema', (['X'], {'feature_names': 'self.prepro_feature_names', 'feature_types': 'self.feature_types'}), '(X, feature_names=self.prepro_feature_names, feature_types=\n self.feature_types)\n', (2610, 2693), False, 'from interpret.utils import unify_data, autogen_schema\n'), ((3487, 3502), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (3499, 3502), True, 'import pandas as pd\n'), ((4648, 4663), 
'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (4660, 4663), True, 'import pandas as pd\n'), ((4727, 4755), 'numpy.any', 'np.any', (['categorical_features'], {}), '(categorical_features)\n', (4733, 4755), True, 'import numpy as np\n'), ((4780, 4801), 'sklearn.preprocessing.FunctionTransformer', 'FunctionTransformer', ([], {}), '()\n', (4799, 4801), False, 'from sklearn.preprocessing import OneHotEncoder, FunctionTransformer, StandardScaler, MinMaxScaler\n'), ((4162, 4186), 'numpy.unique', 'np.unique', (['X[:, col_idx]'], {}), '(X[:, col_idx])\n', (4171, 4186), True, 'import numpy as np\n'), ((4278, 4309), 'numpy.any', 'np.any', (['(x_map.index != x_unique)'], {}), '(x_map.index != x_unique)\n', (4284, 4309), True, 'import numpy as np\n'), ((4411, 4443), 'pandas.Series', 'pd.Series', (['new_y'], {'index': 'x_unique'}), '(new_y, index=x_unique)\n', (4420, 4443), True, 'import pandas as pd\n'), ((4886, 4923), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""error"""'}), "(handle_unknown='error')\n", (4899, 4923), False, 'from sklearn.preprocessing import OneHotEncoder, FunctionTransformer, StandardScaler, MinMaxScaler\n')] |
from itertools import repeat
from typing import Any, Tuple, Optional, List, Union
import numpy as np
from needlestack.apis import tensors_pb2
from needlestack.apis import indices_pb2
from needlestack.exceptions import SerializationError, DeserializationError
TYPE_TO_ENUM = {
"float16": tensors_pb2.NDArray.FLOAT16,
"float32": tensors_pb2.NDArray.FLOAT32,
"float64": tensors_pb2.NDArray.FLOAT64,
"int8": tensors_pb2.NDArray.INT8,
"int16": tensors_pb2.NDArray.INT16,
"int32": tensors_pb2.NDArray.INT32,
"int64": tensors_pb2.NDArray.INT64,
}
ENUM_TO_TYPE = {v: k for k, v in TYPE_TO_ENUM.items()}
def ndarray_to_proto(
    X: Any, dtype: Optional[str] = None, shape: Optional[Tuple] = None
) -> tensors_pb2.NDArray:
    """Transforms a Python n-dimension array into a protobuf

    Args:
        X: ND Array
        dtype: Explicit datatype for number
        shape: Explicit shape for nd array

    Raises:
        SerializationError: if a list lacks dtype/shape, the shape
            mismatches, the dtype is unsupported, or X is not an array.
    """
    proto = tensors_pb2.NDArray()
    if isinstance(X, list):
        # Lists carry no type/shape metadata, so both must be supplied.
        if dtype is None:
            raise SerializationError("Serializing list needs dtype")
        if shape is None:
            raise SerializationError("Serializing list needs shape")
        X = np.array(X, dtype=dtype)
        if X.shape != shape:
            raise SerializationError("Shape mismatch")
    # A converted list falls through into the ndarray branch below.
    if isinstance(X, np.ndarray):
        # Cast only when an explicit, supported dtype differs from the array's.
        if dtype and X.dtype.name != dtype:
            if dtype in TYPE_TO_ENUM:
                X = X.astype(dtype)
            else:
                raise SerializationError(f"{dtype} dtype not supported")
        dtype_enum = TYPE_TO_ENUM.get(X.dtype.name)
        if dtype_enum is None:
            raise SerializationError(f"{X.dtype.name} dtype not yet supported")
        proto.dtype = dtype_enum
        proto.shape.extend(X.shape)
        # Raw bytes; proto_to_ndarray restores them with np.frombuffer.
        proto.numpy_content = X.tobytes()
        return proto
    else:
        raise SerializationError("Unsupported NDArray")
def proto_to_ndarray(proto: tensors_pb2.NDArray) -> np.ndarray:
    """Rebuild a numpy array from its protobuf form.

    Args:
        proto: Protobuf for nd array

    Raises:
        DeserializationError: if the shape or every value field is missing.
    """
    dtype = ENUM_TO_TYPE.get(proto.dtype)
    if not proto.shape:
        raise DeserializationError("Missing attribute shape to convert to ndarray")
    # Fast path: raw bytes with a known dtype.
    if proto.numpy_content and dtype:
        return np.frombuffer(proto.numpy_content, dtype=dtype).reshape(*proto.shape)
    # Typed value lists, checked in the same priority order as before;
    # each carries its own fallback dtype when none is encoded.
    fallbacks = (
        ("float_val", "float32"),
        ("double_val", "float64"),
        ("int_val", "int32"),
        ("long_val", "int64"),
    )
    for attr, default_dtype in fallbacks:
        values = getattr(proto, attr)
        if values:
            return np.array(values, dtype=dtype or default_dtype).reshape(*proto.shape)
    raise DeserializationError("Missing value attribute to convert to ndarray")
def metadata_list_to_proto(
    ids: List[str],
    fields_list: List[Tuple],
    fieldtypes: Optional[Tuple[str]] = None,
    fieldnames: Optional[Tuple[str]] = None,
) -> List[indices_pb2.Metadata]:
    """Serialize many items' metadata at once.

    Args:
        ids: List of ids for items
        fields_list: List of tuples of field values, one per item
        fieldtypes: Optional tuple of value types shared by every item
        fieldnames: Optional tuple of value names shared by every item
    """
    return [
        metadata_to_proto(item_id, item_fields, fieldtypes, fieldnames)
        for item_id, item_fields in zip(ids, fields_list)
    ]
def metadata_to_proto(
    id: str,
    fields: Tuple,
    fieldtypes: Optional[Tuple[str]] = None,
    fieldnames: Optional[Tuple[str]] = None,
) -> indices_pb2.Metadata:
    """Serialize one item's metadata fields, skipping None values.

    Args:
        id: ID for item
        fields: Tuple of primitive python values
        fieldtypes: Optional tuple of types for values
        fieldnames: Optional tuple of names for values
    """
    # Falsy (None or empty) fieldtypes/fieldnames fall back to per-field
    # None markers, matching the original truthiness-based behaviour.
    type_iter = fieldtypes or repeat(None, len(fields))
    name_iter = fieldnames or repeat(None, len(fields))
    metadata_fields = []
    for value, ftype, fname in zip(fields, type_iter, name_iter):
        if value is None:
            continue
        metadata_fields.append(metadata_field_to_proto(value, ftype, fname))
    return indices_pb2.Metadata(id=id, fields=metadata_fields)
TYPE_TO_FIELD_TYPE = {str: "string", float: "double", int: "long", bool: "bool"}
def metadata_field_to_proto(
    field: Union[str, int, float, bool],
    fieldtype: Optional[str] = None,
    fieldname: Optional[str] = None,
) -> indices_pb2.MetadataField:
    """Serialize one python value into a metadata field proto.

    Args:
        field: Primitive python value
        fieldtype: Explicit type to serialize the field
        fieldname: Optional name for this metadata field
    """
    proto = indices_pb2.MetadataField(name=fieldname)
    fieldtype = fieldtype if fieldtype else TYPE_TO_FIELD_TYPE.get(type(field))
    if fieldtype is None:
        raise SerializationError(f"Fieldtype {type(field)} not serializable.")
    # (fieldtype tag, required python type, proto attribute) dispatch table;
    # exactly one tag can match, so order does not matter.
    dispatch = (
        ("string", str, "string_val"),
        ("double", float, "double_val"),
        ("float", float, "float_val"),
        ("long", int, "long_val"),
        ("int", int, "int_val"),
        ("bool", bool, "bool_val"),
    )
    for expected_tag, py_type, attr in dispatch:
        if fieldtype == expected_tag and isinstance(field, py_type):
            setattr(proto, attr, field)
            break
    else:
        raise SerializationError(
            f"Fieldtype {fieldtype} and primative {type(field)} not serializable."
        )
    return proto
| [
"needlestack.exceptions.SerializationError",
"needlestack.apis.tensors_pb2.NDArray",
"needlestack.apis.indices_pb2.Metadata",
"needlestack.exceptions.DeserializationError",
"numpy.array",
"needlestack.apis.indices_pb2.MetadataField",
"numpy.frombuffer"
] | [((948, 969), 'needlestack.apis.tensors_pb2.NDArray', 'tensors_pb2.NDArray', ([], {}), '()\n', (967, 969), False, 'from needlestack.apis import tensors_pb2\n'), ((4380, 4431), 'needlestack.apis.indices_pb2.Metadata', 'indices_pb2.Metadata', ([], {'id': 'id', 'fields': 'metadata_fields'}), '(id=id, fields=metadata_fields)\n', (4400, 4431), False, 'from needlestack.apis import indices_pb2\n'), ((4936, 4977), 'needlestack.apis.indices_pb2.MetadataField', 'indices_pb2.MetadataField', ([], {'name': 'fieldname'}), '(name=fieldname)\n', (4961, 4977), False, 'from needlestack.apis import indices_pb2\n'), ((1201, 1225), 'numpy.array', 'np.array', (['X'], {'dtype': 'dtype'}), '(X, dtype=dtype)\n', (1209, 1225), True, 'import numpy as np\n'), ((1873, 1914), 'needlestack.exceptions.SerializationError', 'SerializationError', (['"""Unsupported NDArray"""'], {}), "('Unsupported NDArray')\n", (1891, 1914), False, 'from needlestack.exceptions import SerializationError, DeserializationError\n'), ((2165, 2234), 'needlestack.exceptions.DeserializationError', 'DeserializationError', (['"""Missing attribute shape to convert to ndarray"""'], {}), "('Missing attribute shape to convert to ndarray')\n", (2185, 2234), False, 'from needlestack.exceptions import SerializationError, DeserializationError\n'), ((1043, 1093), 'needlestack.exceptions.SerializationError', 'SerializationError', (['"""Serializing list needs dtype"""'], {}), "('Serializing list needs dtype')\n", (1061, 1093), False, 'from needlestack.exceptions import SerializationError, DeserializationError\n'), ((1138, 1188), 'needlestack.exceptions.SerializationError', 'SerializationError', (['"""Serializing list needs shape"""'], {}), "('Serializing list needs shape')\n", (1156, 1188), False, 'from needlestack.exceptions import SerializationError, DeserializationError\n'), ((1273, 1309), 'needlestack.exceptions.SerializationError', 'SerializationError', (['"""Shape mismatch"""'], {}), "('Shape mismatch')\n", (1291, 1309), 
False, 'from needlestack.exceptions import SerializationError, DeserializationError\n'), ((1655, 1716), 'needlestack.exceptions.SerializationError', 'SerializationError', (['f"""{X.dtype.name} dtype not yet supported"""'], {}), "(f'{X.dtype.name} dtype not yet supported')\n", (1673, 1716), False, 'from needlestack.exceptions import SerializationError, DeserializationError\n'), ((1503, 1553), 'needlestack.exceptions.SerializationError', 'SerializationError', (['f"""{dtype} dtype not supported"""'], {}), "(f'{dtype} dtype not supported')\n", (1521, 1553), False, 'from needlestack.exceptions import SerializationError, DeserializationError\n'), ((2289, 2336), 'numpy.frombuffer', 'np.frombuffer', (['proto.numpy_content'], {'dtype': 'dtype'}), '(proto.numpy_content, dtype=dtype)\n', (2302, 2336), True, 'import numpy as np\n'), ((2435, 2473), 'numpy.array', 'np.array', (['proto.float_val'], {'dtype': 'dtype'}), '(proto.float_val, dtype=dtype)\n', (2443, 2473), True, 'import numpy as np\n'), ((2573, 2612), 'numpy.array', 'np.array', (['proto.double_val'], {'dtype': 'dtype'}), '(proto.double_val, dtype=dtype)\n', (2581, 2612), True, 'import numpy as np\n'), ((2923, 2992), 'needlestack.exceptions.DeserializationError', 'DeserializationError', (['"""Missing value attribute to convert to ndarray"""'], {}), "('Missing value attribute to convert to ndarray')\n", (2943, 2992), False, 'from needlestack.exceptions import SerializationError, DeserializationError\n'), ((2707, 2743), 'numpy.array', 'np.array', (['proto.int_val'], {'dtype': 'dtype'}), '(proto.int_val, dtype=dtype)\n', (2715, 2743), True, 'import numpy as np\n'), ((2839, 2876), 'numpy.array', 'np.array', (['proto.long_val'], {'dtype': 'dtype'}), '(proto.long_val, dtype=dtype)\n', (2847, 2876), True, 'import numpy as np\n')] |
import math
import numpy
#Generates audio for a single channel
class channel:
    """Generates audio frames for a single playback channel.

    ``samples`` are duck-typed sample objects exposing ``length`` (frame
    count) and ``pcm_data`` (indexable PCM values) -- inferred from usage;
    confirm against the sample loader.
    """
    def __init__(self, samples, frame_rate_period):
        self.samples = samples
        self.frame_rate_period = frame_rate_period
        self.current_frame = 0
        self.sample = None  # currently playing sample, if any
        self.period = 0
        self.nyquist_frequency = 0
        self.frame_rate = 1.0 / frame_rate_period
    def play_note(self, sampleId, period, effect):
        """Play ``self.samples[sampleId]`` at frequency 1/``period`` with ``effect``.

        With ``sampleId <= -1`` or ``period <= 0`` only the effect changes.
        """
        if sampleId > -1 and period > 0:  # start playing a brand new note
            self.current_frame = 0
            self.sample = self.samples[sampleId]
            self.period = period
            self.nyquist_frequency = 1.0 / period / 2.0
            self.effect = effect
            # Per-output-frame step through the sample, in sample frames.
            self.frame_rate_scale = self.frame_rate_period / self.period
        else:
            # only changing effect
            self.effect = effect
    def get_frames(self, duration):
        """Return ``duration`` seconds of float32 audio frames for this channel.

        Frames past the end of the sample stay zero (no repeat support yet).
        """
        requested_frames = math.floor(duration / self.frame_rate_period)
        buffer = numpy.zeros(requested_frames, dtype=numpy.float32)
        if self.sample is None:
            return buffer  # nothing to play
        if self.current_frame >= self.sample.length:
            return buffer  # finished playing this sample
        last_index = self.sample.length - 1
        for i in range(requested_frames):
            frac, whole = math.modf(self.current_frame)
            whole = int(whole)
            # Linear interpolation between neighbouring PCM frames.
            # BUG FIX: clamp the upper neighbour -- the original read
            # pcm_data[whole + 1] and raised IndexError for fractional
            # positions inside the last frame of the sample.
            upper = min(whole + 1, last_index)
            buffer[i] = (1.0 - frac) * self.sample.pcm_data[whole] + frac * self.sample.pcm_data[upper]
            self.current_frame += self.frame_rate_scale
            if self.current_frame >= self.sample.length:
                break
        return buffer
| [
"math.modf",
"numpy.zeros",
"math.floor"
] | [((1112, 1157), 'math.floor', 'math.floor', (['(duration / self.frame_rate_period)'], {}), '(duration / self.frame_rate_period)\n', (1122, 1157), False, 'import math\n'), ((1175, 1225), 'numpy.zeros', 'numpy.zeros', (['requested_frames'], {'dtype': 'numpy.float32'}), '(requested_frames, dtype=numpy.float32)\n', (1186, 1225), False, 'import numpy\n'), ((1590, 1619), 'math.modf', 'math.modf', (['self.current_frame'], {}), '(self.current_frame)\n', (1599, 1619), False, 'import math\n')] |
import argparse
from datetime import datetime
import json
import numpy as np
from pathlib import Path
import time
from tqdm import tqdm
import warnings
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
from data import data_loaders
from model import RandLANet
from utils.tools import Config as cfg
from utils.metrics import accuracy, intersection_over_union
def evaluate(model, loader, criterion, device):
    """Run one validation pass over ``loader``.

    Returns:
        (mean loss, per-class accuracies, per-class IoUs), with the metric
        arrays nan-mean-reduced over all validation batches.
    """
    model.eval()
    losses, accuracies, ious = [], [], []
    with torch.no_grad():
        for points, labels in tqdm(loader, desc="Validation", leave=False):
            points = points.to(device)
            labels = labels.to(device)
            scores = model(points)
            losses.append(criterion(scores, labels).cpu().item())
            accuracies.append(accuracy(scores, labels))
            ious.append(intersection_over_union(scores, labels))
    return (
        np.mean(losses),
        np.nanmean(np.array(accuracies), axis=0),
        np.nanmean(np.array(ious), axis=0),
    )
def train(args):
    """Train RandLA-Net, validating each epoch and checkpointing periodically.

    ``args`` carries the CLI options defined in ``__main__`` (dataset
    location/sampling, optimizer hyper-parameters, logging directories,
    device, checkpoint frequency). Metrics are streamed to TensorBoard
    under ``args.logs_dir / args.name``.
    """
    train_path = args.dataset / args.train_dir
    val_path = args.dataset / args.val_dir
    logs_dir = args.logs_dir / args.name
    logs_dir.mkdir(exist_ok=True, parents=True)
    num_classes = len(cfg.class_weights)
    train_loader, val_loader = data_loaders(
        args.dataset,
        args.dataset_sampling,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=True,
    )
    # Infer the input feature dimension from the first batch.
    d_in = next(iter(train_loader))[0].size(-1)
    model = RandLANet(
        d_in,
        num_classes,
        num_neighbors=args.neighbors,
        decimation=args.decimation,
        device=args.gpu,
    )
    print("Computing weights...", end="\t")
    samples_per_class = np.array(cfg.class_weights)
    n_samples = torch.tensor(cfg.class_weights, dtype=torch.float, device=args.gpu)
    ratio_samples = n_samples / n_samples.sum()
    # Inverse-frequency class weights; the +0.02 floor keeps rare classes bounded.
    weights = 1 / (ratio_samples + 0.02)
    print("Done.")
    print("Weights:", weights)
    criterion = nn.CrossEntropyLoss(weight=weights)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.adam_lr)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, args.scheduler_gamma)
    first_epoch = 1
    if args.load:
        # Resume from the lexicographically latest checkpoint in the run dir.
        path = max(list((args.logs_dir / args.load).glob("*.pth")))
        print(f"Loading {path}...")
        checkpoint = torch.load(path)
        first_epoch = checkpoint["epoch"] + 1
        model.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        scheduler.load_state_dict(checkpoint["scheduler_state_dict"])
    with SummaryWriter(logs_dir) as writer:
        for epoch in range(first_epoch, args.epochs + 1):
            print(f"=== EPOCH {epoch:d}/{args.epochs:d} ===")
            t0 = time.time()
            # Train
            model.train()
            # metrics
            losses = []
            accuracies = []
            ious = []
            # iterate over dataset
            for points, labels in tqdm(train_loader, desc="Training", leave=False):
                points = points.to(args.gpu)
                labels = labels.to(args.gpu)
                optimizer.zero_grad()
                scores = model(points)
                logp = torch.distributions.utils.probs_to_logits(
                    scores, is_binary=False
                )
                loss = criterion(logp, labels)
                # logpy = torch.gather(logp, 1, labels)
                # loss = -(logpy).mean()
                loss.backward()
                optimizer.step()
                losses.append(loss.cpu().item())
                accuracies.append(accuracy(scores, labels))
                ious.append(intersection_over_union(scores, labels))
            scheduler.step()
            # Reduce per-batch metrics to per-class means (ignoring NaNs).
            accs = np.nanmean(np.array(accuracies), axis=0)
            ious = np.nanmean(np.array(ious), axis=0)
            val_loss, val_accs, val_ious = evaluate(
                model, val_loader, criterion, args.gpu
            )
            loss_dict = {"Training loss": np.mean(losses), "Validation loss": val_loss}
            acc_dicts = [
                {"Training accuracy": acc, "Validation accuracy": val_acc}
                for acc, val_acc in zip(accs, val_accs)
            ]
            iou_dicts = [
                {"Training accuracy": iou, "Validation accuracy": val_iou}
                for iou, val_iou in zip(ious, val_ious)
            ]
            t1 = time.time()
            d = t1 - t0
            # Display results
            for k, v in loss_dict.items():
                print(f"{k}: {v:.7f}", end="\t")
            print()
            print(
                "Accuracy ",
                *[f"{i:>5d}" for i in range(num_classes)],
                " OA",
                sep=" | ",
            )
            print(
                "Training: ",
                *[f"{acc:.3f}" if not np.isnan(acc) else " nan" for acc in accs],
                sep=" | ",
            )
            print(
                "Validation: ",
                *[f"{acc:.3f}" if not np.isnan(acc) else " nan" for acc in val_accs],
                sep=" | ",
            )
            print(
                "IoU ",
                *[f"{i:>5d}" for i in range(num_classes)],
                " mIoU",
                sep=" | ",
            )
            print(
                "Training: ",
                *[f"{iou:.3f}" if not np.isnan(iou) else " nan" for iou in ious],
                sep=" | ",
            )
            print(
                "Validation: ",
                *[f"{iou:.3f}" if not np.isnan(iou) else " nan" for iou in val_ious],
                sep=" | ",
            )
            print(f"Time elapsed: {d}")
            # send results to tensorboard
            writer.add_scalars("Loss", loss_dict, epoch)
            for i in range(num_classes):
                writer.add_scalars(f"Per-class accuracy/{i+1:02d}", acc_dicts[i], epoch)
                writer.add_scalars(f"Per-class IoU/{i+1:02d}", iou_dicts[i], epoch)
            writer.add_scalars("Per-class accuracy/Overall", acc_dicts[-1], epoch)
            writer.add_scalars("Per-class IoU/Mean IoU", iou_dicts[-1], epoch)
            if epoch % args.save_freq == 0:
                torch.save(
                    dict(
                        epoch=epoch,
                        model_state_dict=model.state_dict(),
                        optimizer_state_dict=optimizer.state_dict(),
                        scheduler_state_dict=scheduler.state_dict(),
                    ),
                    args.logs_dir / args.name / f"checkpoint_{epoch:02d}.pth",
                )
if __name__ == "__main__":
    """Parse program arguments"""
    parser = argparse.ArgumentParser(
        prog="RandLA-Net", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # Argument groups only organise --help output.
    base = parser.add_argument_group("Base options")
    expr = parser.add_argument_group("Experiment parameters")
    param = parser.add_argument_group("Hyperparameters")
    dirs = parser.add_argument_group("Storage directories")
    misc = parser.add_argument_group("Miscellaneous")
    base.add_argument(
        "--dataset",
        type=Path,
        help="location of the dataset",
        default="datasets/s3dis/subsampled",
    )
    expr.add_argument("--epochs", type=int, help="number of epochs", default=50)
    expr.add_argument("--load", type=str, help="model to load", default="")
    param.add_argument(
        "--adam_lr", type=float, help="learning rate of the optimizer", default=1e-2
    )
    param.add_argument("--batch_size", type=int, help="batch size", default=1)
    param.add_argument(
        "--decimation",
        type=int,
        help="ratio the point cloud is divided by at each layer",
        default=4,
    )
    param.add_argument(
        "--dataset_sampling",
        type=str,
        help="how dataset is sampled",
        default="active_learning",
        choices=["active_learning", "naive"],
    )
    param.add_argument(
        "--neighbors",
        type=int,
        help="number of neighbors considered by k-NN",
        default=16,
    )
    param.add_argument(
        "--scheduler_gamma",
        type=float,
        help="gamma of the learning rate scheduler",
        default=0.95,
    )
    dirs.add_argument(
        "--test_dir",
        type=str,
        help="location of the test set in the dataset dir",
        default="test",
    )
    dirs.add_argument(
        "--train_dir",
        type=str,
        help="location of the training set in the dataset dir",
        default="train",
    )
    dirs.add_argument(
        "--val_dir",
        type=str,
        help="location of the validation set in the dataset dir",
        default="val",
    )
    dirs.add_argument(
        "--logs_dir", type=Path, help="path to tensorboard logs", default="runs"
    )
    misc.add_argument(
        "--gpu", type=int, help="which GPU to use (-1 for CPU)", default=0
    )
    misc.add_argument("--name", type=str, help="name of the experiment", default=None)
    misc.add_argument(
        "--num_workers", type=int, help="number of threads for loading data", default=0
    )
    misc.add_argument(
        "--save_freq", type=int, help="frequency of saving checkpoints", default=10
    )
    args = parser.parse_args()
    # Resolve the requested GPU index to a torch.device, falling back to CPU.
    if args.gpu >= 0:
        if torch.cuda.is_available():
            args.gpu = torch.device(f"cuda:{args.gpu:d}")
        else:
            warnings.warn(
                "CUDA is not available on your machine. Running the algorithm on CPU."
            )
            args.gpu = torch.device("cpu")
    else:
        args.gpu = torch.device("cpu")
    # Default run name: the resumed run's name if --load was given, else a timestamp.
    if args.name is None:
        if args.load:
            args.name = args.load
        else:
            args.name = datetime.now().strftime("%Y-%m-%d_%H:%M")
    t0 = time.time()
    train(args)
    t1 = time.time()
    d = t1 - t0
    print(f"Done. Time elapsed: {d}")
| [
"torch.nn.CrossEntropyLoss",
"numpy.array",
"torch.cuda.is_available",
"numpy.mean",
"torch.utils.tensorboard.SummaryWriter",
"argparse.ArgumentParser",
"model.RandLANet",
"torch.distributions.utils.probs_to_logits",
"warnings.warn",
"torch.optim.lr_scheduler.ExponentialLR",
"numpy.isnan",
"ti... | [((1348, 1477), 'data.data_loaders', 'data_loaders', (['args.dataset', 'args.dataset_sampling'], {'batch_size': 'args.batch_size', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(args.dataset, args.dataset_sampling, batch_size=args.\n batch_size, num_workers=args.num_workers, pin_memory=True)\n', (1360, 1477), False, 'from data import data_loaders\n'), ((1582, 1690), 'model.RandLANet', 'RandLANet', (['d_in', 'num_classes'], {'num_neighbors': 'args.neighbors', 'decimation': 'args.decimation', 'device': 'args.gpu'}), '(d_in, num_classes, num_neighbors=args.neighbors, decimation=args.\n decimation, device=args.gpu)\n', (1591, 1690), False, 'from model import RandLANet\n'), ((1802, 1829), 'numpy.array', 'np.array', (['cfg.class_weights'], {}), '(cfg.class_weights)\n', (1810, 1829), True, 'import numpy as np\n'), ((1847, 1914), 'torch.tensor', 'torch.tensor', (['cfg.class_weights'], {'dtype': 'torch.float', 'device': 'args.gpu'}), '(cfg.class_weights, dtype=torch.float, device=args.gpu)\n', (1859, 1914), False, 'import torch\n'), ((2071, 2106), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'weight': 'weights'}), '(weight=weights)\n', (2090, 2106), True, 'import torch.nn as nn\n'), ((2194, 2265), 'torch.optim.lr_scheduler.ExponentialLR', 'torch.optim.lr_scheduler.ExponentialLR', (['optimizer', 'args.scheduler_gamma'], {}), '(optimizer, args.scheduler_gamma)\n', (2232, 2265), False, 'import torch\n'), ((6852, 6955), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""RandLA-Net"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(prog='RandLA-Net', formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n", (6875, 6955), False, 'import argparse\n'), ((9974, 9985), 'time.time', 'time.time', ([], {}), '()\n', (9983, 9985), False, 'import time\n'), ((10011, 10022), 'time.time', 'time.time', ([], {}), '()\n', (10020, 10022), False, 'import time\n'), ((521, 536), 'torch.no_grad', 'torch.no_grad', ([], 
{}), '()\n', (534, 536), False, 'import torch\n'), ((568, 612), 'tqdm.tqdm', 'tqdm', (['loader'], {'desc': '"""Validation"""', 'leave': '(False)'}), "(loader, desc='Validation', leave=False)\n", (572, 612), False, 'from tqdm import tqdm\n'), ((959, 974), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (966, 974), True, 'import numpy as np\n'), ((2430, 2446), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (2440, 2446), False, 'import torch\n'), ((2705, 2728), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['logs_dir'], {}), '(logs_dir)\n', (2718, 2728), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((9482, 9507), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9505, 9507), False, 'import torch\n'), ((9781, 9800), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (9793, 9800), False, 'import torch\n'), ((995, 1015), 'numpy.array', 'np.array', (['accuracies'], {}), '(accuracies)\n', (1003, 1015), True, 'import numpy as np\n'), ((1045, 1059), 'numpy.array', 'np.array', (['ious'], {}), '(ious)\n', (1053, 1059), True, 'import numpy as np\n'), ((2877, 2888), 'time.time', 'time.time', ([], {}), '()\n', (2886, 2888), False, 'import time\n'), ((3102, 3150), 'tqdm.tqdm', 'tqdm', (['train_loader'], {'desc': '"""Training"""', 'leave': '(False)'}), "(train_loader, desc='Training', leave=False)\n", (3106, 3150), False, 'from tqdm import tqdm\n'), ((4556, 4567), 'time.time', 'time.time', ([], {}), '()\n', (4565, 4567), False, 'import time\n'), ((9532, 9566), 'torch.device', 'torch.device', (['f"""cuda:{args.gpu:d}"""'], {}), "(f'cuda:{args.gpu:d}')\n", (9544, 9566), False, 'import torch\n'), ((9593, 9683), 'warnings.warn', 'warnings.warn', (['"""CUDA is not available on your machine. Running the algorithm on CPU."""'], {}), "(\n 'CUDA is not available on your machine. 
Running the algorithm on CPU.')\n", (9606, 9683), False, 'import warnings\n'), ((9732, 9751), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (9744, 9751), False, 'import torch\n'), ((847, 871), 'utils.metrics.accuracy', 'accuracy', (['scores', 'labels'], {}), '(scores, labels)\n', (855, 871), False, 'from utils.metrics import accuracy, intersection_over_union\n'), ((897, 936), 'utils.metrics.intersection_over_union', 'intersection_over_union', (['scores', 'labels'], {}), '(scores, labels)\n', (920, 936), False, 'from utils.metrics import accuracy, intersection_over_union\n'), ((3344, 3410), 'torch.distributions.utils.probs_to_logits', 'torch.distributions.utils.probs_to_logits', (['scores'], {'is_binary': '(False)'}), '(scores, is_binary=False)\n', (3385, 3410), False, 'import torch\n'), ((3900, 3920), 'numpy.array', 'np.array', (['accuracies'], {}), '(accuracies)\n', (3908, 3920), True, 'import numpy as np\n'), ((3960, 3974), 'numpy.array', 'np.array', (['ious'], {}), '(ious)\n', (3968, 3974), True, 'import numpy as np\n'), ((4150, 4165), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (4157, 4165), True, 'import numpy as np\n'), ((3744, 3768), 'utils.metrics.accuracy', 'accuracy', (['scores', 'labels'], {}), '(scores, labels)\n', (3752, 3768), False, 'from utils.metrics import accuracy, intersection_over_union\n'), ((3798, 3837), 'utils.metrics.intersection_over_union', 'intersection_over_union', (['scores', 'labels'], {}), '(scores, labels)\n', (3821, 3837), False, 'from utils.metrics import accuracy, intersection_over_union\n'), ((9922, 9936), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9934, 9936), False, 'from datetime import datetime\n'), ((5002, 5015), 'numpy.isnan', 'np.isnan', (['acc'], {}), '(acc)\n', (5010, 5015), True, 'import numpy as np\n'), ((5178, 5191), 'numpy.isnan', 'np.isnan', (['acc'], {}), '(acc)\n', (5186, 5191), True, 'import numpy as np\n'), ((5536, 5549), 'numpy.isnan', 'np.isnan', (['iou'], 
{}), '(iou)\n', (5544, 5549), True, 'import numpy as np\n'), ((5712, 5725), 'numpy.isnan', 'np.isnan', (['iou'], {}), '(iou)\n', (5720, 5725), True, 'import numpy as np\n')] |
import random
import cv2
import numpy as np
import imgaug.augmenters as iaa
from utils.bbox import quad_2_rbox, rbox_2_quad, points_2_thetas, thetas_2_points
class HSV(object):
    """Randomly jitter the saturation and brightness of a BGR image.

    With probability ``p`` the image is converted to HSV, the S and V
    channels are scaled by gains drawn from ``1 +- saturation`` and
    ``1 +- brightness``, and the result is written back into ``img`` in
    place.  Labels and landmarks pass through untouched.
    """

    def __init__(self, saturation=0, brightness=0, p=0.):
        self.saturation = saturation
        self.brightness = brightness
        self.p = p

    def __call__(self, img, labels, landmarks, mode=None):
        if random.random() < self.p:
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            sat = hsv[:, :, 1].astype(np.float32)
            val = hsv[:, :, 2].astype(np.float32)
            sat_gain = random.uniform(-1, 1) * self.saturation + 1
            val_gain = random.uniform(-1, 1) * self.brightness + 1
            sat *= sat_gain
            val *= val_gain
            # Clip only when the gain could push channel values above 255.
            hsv[:, :, 1] = sat if sat_gain < 1 else sat.clip(None, 255)
            hsv[:, :, 2] = val if val_gain < 1 else val.clip(None, 255)
            # Convert back to BGR directly into the caller's buffer.
            cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR, dst=img)
        return img, labels, landmarks
class HSV_pos(object):
    """HSV jitter that only ever brightens.

    Saturation gain is sampled from ``1 +- saturation`` as in ``HSV``,
    but the brightness gain uses ``random.uniform(0, 1)`` so it lies in
    ``[1, 1 + brightness]`` -- this variant never darkens the image.
    """
    def __init__(self, saturation=0, brightness=0, p=0.):
        self.saturation = saturation
        self.brightness = brightness
        self.p = p
    def __call__(self, img, labels, landmarks, mode=None):
        # Apply with probability p; labels/landmarks pass through untouched.
        if random.random() < self.p:
            img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            S = img_hsv[:, :, 1].astype(np.float32)
            V = img_hsv[:, :, 2].astype(np.float32)
            a = random.uniform(-1, 1) * self.saturation + 1
            # b >= 1 always, so the V channel is always clipped below.
            b = random.uniform(0, 1) * self.brightness + 1
            S *= a
            V *= b
            # Clip only when the gain can push values above 255.
            img_hsv[:, :, 1] = S if a < 1 else S.clip(None, 255)
            img_hsv[:, :, 2] = V if b < 1 else V.clip(None, 255)
            # Convert back to BGR in place (writes into the caller's array).
            cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)
        return img, labels, landmarks
class Blur(object):
    """Gaussian blur with a sigma sampled from [0, sigma], applied with probability p."""

    def __init__(self, sigma=0, p=0.):
        self.sigma = sigma
        self.p = p

    def __call__(self, img, labels, landmarks, mode=None):
        if random.random() < self.p:
            img = iaa.GaussianBlur(sigma=(0, self.sigma)).augment_image(img)
        return img, labels, landmarks
class Grayscale(object):
    """Randomly blend the image towards grayscale."""
    # NOTE(review): the lower bound of the alpha range is drawn once at
    # construction time, so every call of one instance shares the same
    # range [alpha, 1.0] -- confirm this is intended rather than
    # re-sampling the bound per call.
    def __init__(self, grayscale=0., p=0.):
        self.alpha = random.uniform(grayscale, 1.0)
        self.p = p
    def __call__(self, img, labels, landmarks, mode=None):
        # iaa.Grayscale samples the blend strength in [self.alpha, 1.0]
        # on each call; labels/landmarks pass through untouched.
        if random.random() < self.p:
            gray_aug = iaa.Grayscale(alpha=(self.alpha, 1.0))
            img = gray_aug.augment_image(img)
        return img, labels, landmarks
class Gamma(object):
    """Random gamma correction: out = (img / max) ** g * max, g in 1 +- intensity."""

    def __init__(self, intensity=0, p=0.):
        self.intensity = intensity
        self.p = p

    def __call__(self, img, labels, landmarks, mode=None):
        if random.random() < self.p:
            gamma = random.uniform(1 - self.intensity, 1 + self.intensity)
            peak = np.max(img)
            # Normalise to [0, 1], apply the power curve, rescale to uint8.
            img = np.uint8(np.power(img / float(peak), gamma) * peak)
        return img, labels, landmarks
class Noise(object):
    """Additive Gaussian noise with std sampled from [0, intensity * 255]."""

    def __init__(self, intensity=0, p=0.):
        self.intensity = intensity
        self.p = p

    def __call__(self, img, labels, landmarks, mode=None):
        if random.random() < self.p:
            img = iaa.AdditiveGaussianNoise(scale=(0, self.intensity * 255)).augment_image(img)
        return img, labels, landmarks
class Sharpen(object):
    """Random sharpening; lightness is sampled from 1 +- intensity."""

    def __init__(self, intensity=0, p=0.):
        self.intensity = intensity
        self.p = p

    def __call__(self, img, labels, landmarks, mode=None):
        if random.random() < self.p:
            op = iaa.Sharpen(alpha=(0.0, 1.0), lightness=(1 - self.intensity, 1 + self.intensity))
            img = op.augment_image(img)
        return img, labels, landmarks
class Contrast(object):
    """Random linear contrast change with gain sampled from 1 +- intensity."""

    def __init__(self, intensity=0, p=0.):
        self.intensity = intensity
        self.p = p

    def __call__(self, img, labels, landmarks, mode=None):
        if random.random() < self.p:
            op = iaa.contrast.LinearContrast((1 - self.intensity, 1 + self.intensity))
            img = op.augment_image(img)
        return img, labels, landmarks
class HorizontalFlip(object):
    """Mirror the image left-right with probability ``p`` and remap boxes.

    Box coordinates are adjusted in place according to ``mode``
    ('cxywha', 'xyxyxyxy' or 'xywha'); landmarks are remapped only in
    'xywha' mode.
    """

    def __init__(self, p=0.):
        self.p = p

    def __call__(self, img, labels, landmarks, mode=None):
        if random.random() < self.p:
            img = np.fliplr(img)
            width = img.shape[1]
            if mode == 'cxywha':
                # mirror the centre x and negate the angle
                labels[:, 1] = width - labels[:, 1]
                labels[:, 5] = -labels[:, 5]
            elif mode == 'xyxyxyxy':
                # mirror every corner x of the quadrilateral
                labels[:, [0, 2, 4, 6]] = width - labels[:, [0, 2, 4, 6]]
            elif mode == 'xywha':
                labels[:, 0] = width - labels[:, 0]
                labels[:, -1] = -labels[:, -1]
                landmarks[:, 0] = width - landmarks[:, 0]
                landmarks[:, [-2, -1]] = -landmarks[:, [-2, -1]]
        return img, labels, landmarks
class VerticalFlip(object):
    """Mirror the image top-bottom with probability ``p`` and remap boxes."""

    def __init__(self, p=0.):
        self.p = p

    def __call__(self, img, labels, landmarks, mode=None):
        if random.random() < self.p:
            img = np.flipud(img)
            height = img.shape[0]
            if mode == 'cxywha':
                labels[:, 2] = height - labels[:, 2]
                labels[:, 5] = -labels[:, 5]
            elif mode == 'xyxyxyxy':
                labels[:, [1, 3, 5, 7]] = height - labels[:, [1, 3, 5, 7]]
            elif mode == 'xywha':
                labels[:, 1] = height - labels[:, 1]
                labels[:, -1] = -labels[:, -1]
                landmarks[:, 1] = height - landmarks[:, 1]
                # Reflect the two angle columns across the horizontal axis:
                # positive angles map to 180 - a, negative ones to -180 - a,
                # and exactly-zero angles stay zero (both masks are False).
                for col in (-1, -2):
                    ang = landmarks[:, col]
                    landmarks[:, col] = (180.0 - ang) * (ang > 0) + (-180.0 - ang) * (ang < 0)
        return img, labels, landmarks
class Affine(object):
    """Random affine warp (rotation/translation/scale/shear) of image and boxes.

    Landmarks are always converted to point form before the warp and
    back to theta form afterwards; boxes are additionally converted
    rbox -> quad -> rbox when ``mode == 'xywha'``.
    """

    def __init__(self, degree=0., translate=0., scale=0., shear=0., p=0.):
        self.degree = degree
        self.translate = translate
        self.scale = scale
        self.shear = shear
        self.p = p

    def __call__(self, img, labels, landmarks, mode=None):
        if random.random() < self.p:
            as_rbox = mode == 'xywha'
            if as_rbox:
                labels = rbox_2_quad(labels, mode='xywha')
            landmarks = thetas_2_points(landmarks)
            img, labels, landmarks = random_affine(
                img, labels, landmarks,
                degree=self.degree, translate=self.translate,
                scale=self.scale, shear=self.shear)
            if as_rbox:
                labels = quad_2_rbox(labels, mode='xywha')
            landmarks = points_2_thetas(landmarks)
        return img, labels, landmarks
class Augment(object):
    """Compose a sequence of augmentations, each applied with its own probability.

    ``probs`` may be a single probability shared by all augmentations or
    a list with one probability per augmentation.  Each augmentation is
    called as ``aug(img, labels, landmarks, box_mode)`` and must return
    the updated triple.
    """

    def __init__(self, augmentations, probs=1, box_mode=None):
        self.augmentations = augmentations
        self.probs = probs
        self.mode = box_mode

    def __call__(self, img, labels, landmarks):
        per_step = type(self.probs) == list
        for idx, transform in enumerate(self.augmentations):
            prob = self.probs[idx] if per_step else self.probs
            if random.random() < prob:
                img, labels, landmarks = transform(img, labels, landmarks, self.mode)
        return img, labels, landmarks
def random_affine(img, targets1=None, targets2=None, degree=10, translate=.1, scale=.1, shear=10):
    """Apply a random rotation/scale/translation to ``img`` and its annotations.

    Parameters
    ----------
    img : ndarray
        Image to warp (H x W [x C]).
    targets1 : ndarray, optional
        N x 8 quadrilateral boxes (x1 y1 ... x4 y4) in pixel coordinates.
    targets2 : ndarray, optional
        N x 6 landmark points (x1 y1 x2 y2 x3 y3) in pixel coordinates.
    degree, translate, scale, shear : float
        Sampling ranges for the affine parameters.
        NOTE(review): ``shear`` is accepted but never used -- confirm.

    Returns
    -------
    (warped image, transformed targets1, transformed targets2)
    """
    if targets1 is None:
        targets1 = []
    if targets2 is None:
        targets2 = []
    border = 0
    height = img.shape[0] + border * 2
    width = img.shape[1] + border * 2
    # Rotation + isotropic scale about the image centre.
    R = np.eye(3)
    a = random.uniform(-degree, degree)
    s = random.uniform(1 - scale, 1 + scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
    # Random translation.
    T = np.eye(3)
    T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border
    T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border
    M = T @ R
    imw = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_AREA,
                         borderValue=(128, 128, 128))
    # BUG FIX: transform x and y from the *original* coordinates.  The
    # previous code overwrote the x columns first and then derived the new
    # y columns from the already-transformed x values, corrupting every
    # warped box and landmark.  Integer-array indexing yields copies, so
    # x1/y1 keep the pre-transform values.
    x1 = targets1[:, [0, 2, 4, 6]]
    y1 = targets1[:, [1, 3, 5, 7]]
    targets1[:, [0, 2, 4, 6]] = x1 * M[0, 0] + y1 * M[0, 1] + M[0, 2]
    targets1[:, [1, 3, 5, 7]] = x1 * M[1, 0] + y1 * M[1, 1] + M[1, 2]
    x2 = targets2[:, [0, 2, 4]]
    y2 = targets2[:, [1, 3, 5]]
    targets2[:, [0, 2, 4]] = x2 * M[0, 0] + y2 * M[0, 1] + M[0, 2]
    targets2[:, [1, 3, 5]] = x2 * M[1, 0] + y2 * M[1, 1] + M[1, 2]
    # Clamp quadrilateral corners to the image bounds.
    for x in range(0, 8, 2):
        targets1[:, x] = targets1[:, x].clip(0, width)
    for y in range(1, 8, 2):
        targets1[:, y] = targets1[:, y].clip(0, height)
    return imw, targets1, targets2
| [
"imgaug.augmenters.Grayscale",
"random.uniform",
"numpy.eye",
"cv2.warpAffine",
"imgaug.augmenters.AdditiveGaussianNoise",
"imgaug.augmenters.contrast.LinearContrast",
"numpy.flipud",
"imgaug.augmenters.GaussianBlur",
"numpy.fliplr",
"utils.bbox.quad_2_rbox",
"numpy.max",
"utils.bbox.points_2_... | [((8121, 8130), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (8127, 8130), True, 'import numpy as np\n'), ((8139, 8170), 'random.uniform', 'random.uniform', (['(-degree)', 'degree'], {}), '(-degree, degree)\n', (8153, 8170), False, 'import random\n'), ((8179, 8215), 'random.uniform', 'random.uniform', (['(1 - scale)', '(1 + scale)'], {}), '(1 - scale, 1 + scale)\n', (8193, 8215), False, 'import random\n'), ((8228, 8319), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', ([], {'angle': 'a', 'center': '(img.shape[1] / 2, img.shape[0] / 2)', 'scale': 's'}), '(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2\n ), scale=s)\n', (8251, 8319), False, 'import cv2\n'), ((8323, 8332), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (8329, 8332), True, 'import numpy as np\n'), ((8509, 8613), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M[:2]'], {'dsize': '(width, height)', 'flags': 'cv2.INTER_AREA', 'borderValue': '(128, 128, 128)'}), '(img, M[:2], dsize=(width, height), flags=cv2.INTER_AREA,\n borderValue=(128, 128, 128))\n', (8523, 8613), False, 'import cv2\n'), ((2244, 2274), 'random.uniform', 'random.uniform', (['grayscale', '(1.0)'], {}), '(grayscale, 1.0)\n', (2258, 2274), False, 'import random\n'), ((401, 416), 'random.random', 'random.random', ([], {}), '()\n', (414, 416), False, 'import random\n'), ((449, 485), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (461, 485), False, 'import cv2\n'), ((890, 939), 'cv2.cvtColor', 'cv2.cvtColor', (['img_hsv', 'cv2.COLOR_HSV2BGR'], {'dst': 'img'}), '(img_hsv, cv2.COLOR_HSV2BGR, dst=img)\n', (902, 939), False, 'import cv2\n'), ((1225, 1240), 'random.random', 'random.random', ([], {}), '()\n', (1238, 1240), False, 'import random\n'), ((1273, 1309), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (1285, 1309), False, 'import cv2\n'), ((1713, 1762), 'cv2.cvtColor', 'cv2.cvtColor', (['img_hsv', 
'cv2.COLOR_HSV2BGR'], {'dst': 'img'}), '(img_hsv, cv2.COLOR_HSV2BGR, dst=img)\n', (1725, 1762), False, 'import cv2\n'), ((1979, 1994), 'random.random', 'random.random', ([], {}), '()\n', (1992, 1994), False, 'import random\n'), ((2028, 2067), 'imgaug.augmenters.GaussianBlur', 'iaa.GaussianBlur', ([], {'sigma': '(0, self.sigma)'}), '(sigma=(0, self.sigma))\n', (2044, 2067), True, 'import imgaug.augmenters as iaa\n'), ((2365, 2380), 'random.random', 'random.random', ([], {}), '()\n', (2378, 2380), False, 'import random\n'), ((2414, 2452), 'imgaug.augmenters.Grayscale', 'iaa.Grayscale', ([], {'alpha': '(self.alpha, 1.0)'}), '(alpha=(self.alpha, 1.0))\n', (2427, 2452), True, 'import imgaug.augmenters as iaa\n'), ((2728, 2743), 'random.random', 'random.random', ([], {}), '()\n', (2741, 2743), False, 'import random\n'), ((2771, 2825), 'random.uniform', 'random.uniform', (['(1 - self.intensity)', '(1 + self.intensity)'], {}), '(1 - self.intensity, 1 + self.intensity)\n', (2785, 2825), False, 'import random\n'), ((3136, 3151), 'random.random', 'random.random', ([], {}), '()\n', (3149, 3151), False, 'import random\n'), ((3186, 3244), 'imgaug.augmenters.AdditiveGaussianNoise', 'iaa.AdditiveGaussianNoise', ([], {'scale': '(0, self.intensity * 255)'}), '(scale=(0, self.intensity * 255))\n', (3211, 3244), True, 'import imgaug.augmenters as iaa\n'), ((3523, 3538), 'random.random', 'random.random', ([], {}), '()\n', (3536, 3538), False, 'import random\n'), ((3575, 3661), 'imgaug.augmenters.Sharpen', 'iaa.Sharpen', ([], {'alpha': '(0.0, 1.0)', 'lightness': '(1 - self.intensity, 1 + self.intensity)'}), '(alpha=(0.0, 1.0), lightness=(1 - self.intensity, 1 + self.\n intensity))\n', (3586, 3661), True, 'import imgaug.augmenters as iaa\n'), ((3938, 3953), 'random.random', 'random.random', ([], {}), '()\n', (3951, 3953), False, 'import random\n'), ((3991, 4060), 'imgaug.augmenters.contrast.LinearContrast', 'iaa.contrast.LinearContrast', (['(1 - self.intensity, 1 + self.intensity)'], 
{}), '((1 - self.intensity, 1 + self.intensity))\n', (4018, 4060), True, 'import imgaug.augmenters as iaa\n'), ((4301, 4316), 'random.random', 'random.random', ([], {}), '()\n', (4314, 4316), False, 'import random\n'), ((4345, 4359), 'numpy.fliplr', 'np.fliplr', (['img'], {}), '(img)\n', (4354, 4359), True, 'import numpy as np\n'), ((5061, 5076), 'random.random', 'random.random', ([], {}), '()\n', (5074, 5076), False, 'import random\n'), ((5105, 5119), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (5114, 5119), True, 'import numpy as np\n'), ((6235, 6250), 'random.random', 'random.random', ([], {}), '()\n', (6248, 6250), False, 'import random\n'), ((8347, 8384), 'random.uniform', 'random.uniform', (['(-translate)', 'translate'], {}), '(-translate, translate)\n', (8361, 8384), False, 'import random\n'), ((8423, 8460), 'random.uniform', 'random.uniform', (['(-translate)', 'translate'], {}), '(-translate, translate)\n', (8437, 8460), False, 'import random\n'), ((6318, 6351), 'utils.bbox.rbox_2_quad', 'rbox_2_quad', (['labels'], {'mode': '"""xywha"""'}), "(labels, mode='xywha')\n", (6329, 6351), False, 'from utils.bbox import quad_2_rbox, rbox_2_quad, points_2_thetas, thetas_2_points\n'), ((6380, 6406), 'utils.bbox.thetas_2_points', 'thetas_2_points', (['landmarks'], {}), '(landmarks)\n', (6395, 6406), False, 'from utils.bbox import quad_2_rbox, rbox_2_quad, points_2_thetas, thetas_2_points\n'), ((6703, 6736), 'utils.bbox.quad_2_rbox', 'quad_2_rbox', (['labels'], {'mode': '"""xywha"""'}), "(labels, mode='xywha')\n", (6714, 6736), False, 'from utils.bbox import quad_2_rbox, rbox_2_quad, points_2_thetas, thetas_2_points\n'), ((6765, 6791), 'utils.bbox.points_2_thetas', 'points_2_thetas', (['landmarks'], {}), '(landmarks)\n', (6780, 6791), False, 'from utils.bbox import quad_2_rbox, rbox_2_quad, points_2_thetas, thetas_2_points\n'), ((6839, 6865), 'utils.bbox.thetas_2_points', 'thetas_2_points', (['landmarks'], {}), '(landmarks)\n', (6854, 6865), False, 'from 
utils.bbox import quad_2_rbox, rbox_2_quad, points_2_thetas, thetas_2_points\n'), ((7165, 7191), 'utils.bbox.points_2_thetas', 'points_2_thetas', (['landmarks'], {}), '(landmarks)\n', (7180, 7191), False, 'from utils.bbox import quad_2_rbox, rbox_2_quad, points_2_thetas, thetas_2_points\n'), ((7674, 7689), 'random.random', 'random.random', ([], {}), '()\n', (7687, 7689), False, 'import random\n'), ((606, 627), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (620, 627), False, 'import random\n'), ((666, 687), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (680, 687), False, 'import random\n'), ((1430, 1451), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (1444, 1451), False, 'import random\n'), ((1490, 1510), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1504, 1510), False, 'import random\n'), ((2894, 2905), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (2900, 2905), True, 'import numpy as np\n'), ((2874, 2885), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (2880, 2885), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from models import slip
import viability as vibly

# Physical parameters of the spring-loaded inverted pendulum (SLIP) model.
p = {'mass': 80.0, 'stiffness': 8200.0, 'spring_resting_length': 1.0,
     'gravity': 9.81, 'angle_of_attack': 1/5*np.pi,
     'actuator_resting_length': 0}
# Initial state; reset_leg places the foot according to the angle of attack.
x0 = np.array([0, 0.85, 5.5, 0, 0, 0, 0])
x0 = slip.reset_leg(x0, p)
p['x0'] = x0
p['total_energy'] = slip.compute_total_energy(x0, p)
# Configure the Poincare (step-to-step) map with model, state and the
# state/action <-> full-state conversion helpers.
p_map = slip.p_map
p_map.p = p
p_map.x = x0
p_map.sa2xp = slip.sa2xp
p_map.xp2s = slip.xp2s

# State grid (apex height) and action grid (angle of attack), wrapped in
# 1-tuples as expected by the viability routines.
s_grid = np.linspace(0.1, 1, 91)
s_grid = (s_grid[:-1],)
a_grid = (np.linspace(-10/180*np.pi, 70/180*np.pi, 81),)

grids = {'states': s_grid, 'actions': a_grid}
# Transition map over the grid and the set of failing state-action pairs.
Q_map, Q_F = vibly.compute_Q_map(grids, p_map)
# Q_map, Q_F = vibly.parcompute_Q_map(grids, p_map)
# Viable state-action set and viable states, plus safety measures.
Q_V, S_V = vibly.compute_QV(Q_map, grids)
S_M = vibly.project_Q2S(Q_V, grids, proj_opt=np.mean)
Q_M = vibly.map_S2Q(Q_map, S_M, s_grid, Q_V=Q_V)

################################################################################
# save data as pickle
################################################################################
import pickle
filename = '../data/dynamics/slip_map' + '.pickle'
data2save = {"grids": grids, "Q_map": Q_map, "Q_F": Q_F, "Q_V": Q_V,
             "Q_M": Q_M, "S_M": S_M, "p": p, "x0": x0}
outfile = open(filename, 'wb')
pickle.dump(data2save, outfile)
outfile.close()

# to load this data, do:
# infile = open(filename, 'rb')
# data = pickle.load(infile)
# infile.close()

################################################################################
# basic visualization
################################################################################
# Heat map of the transition map over the state/action grid.
plt.imshow(Q_map, origin='lower')
plt.show()
| [
"matplotlib.pyplot.imshow",
"pickle.dump",
"models.slip.reset_leg",
"viability.project_Q2S",
"viability.map_S2Q",
"viability.compute_QV",
"numpy.array",
"numpy.linspace",
"viability.compute_Q_map",
"models.slip.compute_total_energy",
"matplotlib.pyplot.show"
] | [((264, 300), 'numpy.array', 'np.array', (['[0, 0.85, 5.5, 0, 0, 0, 0]'], {}), '([0, 0.85, 5.5, 0, 0, 0, 0])\n', (272, 300), True, 'import numpy as np\n'), ((306, 327), 'models.slip.reset_leg', 'slip.reset_leg', (['x0', 'p'], {}), '(x0, p)\n', (320, 327), False, 'from models import slip\n'), ((361, 393), 'models.slip.compute_total_energy', 'slip.compute_total_energy', (['x0', 'p'], {}), '(x0, p)\n', (386, 393), False, 'from models import slip\n'), ((496, 519), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1)', '(91)'], {}), '(0.1, 1, 91)\n', (507, 519), True, 'import numpy as np\n'), ((660, 693), 'viability.compute_Q_map', 'vibly.compute_Q_map', (['grids', 'p_map'], {}), '(grids, p_map)\n', (679, 693), True, 'import viability as vibly\n'), ((757, 787), 'viability.compute_QV', 'vibly.compute_QV', (['Q_map', 'grids'], {}), '(Q_map, grids)\n', (773, 787), True, 'import viability as vibly\n'), ((794, 841), 'viability.project_Q2S', 'vibly.project_Q2S', (['Q_V', 'grids'], {'proj_opt': 'np.mean'}), '(Q_V, grids, proj_opt=np.mean)\n', (811, 841), True, 'import viability as vibly\n'), ((848, 890), 'viability.map_S2Q', 'vibly.map_S2Q', (['Q_map', 'S_M', 's_grid'], {'Q_V': 'Q_V'}), '(Q_map, S_M, s_grid, Q_V=Q_V)\n', (861, 890), True, 'import viability as vibly\n'), ((1297, 1328), 'pickle.dump', 'pickle.dump', (['data2save', 'outfile'], {}), '(data2save, outfile)\n', (1308, 1328), False, 'import pickle\n'), ((1634, 1667), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Q_map'], {'origin': '"""lower"""'}), "(Q_map, origin='lower')\n", (1644, 1667), True, 'import matplotlib.pyplot as plt\n'), ((1668, 1678), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1676, 1678), True, 'import matplotlib.pyplot as plt\n'), ((554, 606), 'numpy.linspace', 'np.linspace', (['(-10 / 180 * np.pi)', '(70 / 180 * np.pi)', '(81)'], {}), '(-10 / 180 * np.pi, 70 / 180 * np.pi, 81)\n', (565, 606), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 6 16:55:49 2020
@author: <NAME>
"""
# Package Area
import numpy as np
import matplotlib.pyplot as plt
from testCases import *
import sklearn
import sklearn.datasets
import sklearn.linear_model
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets
np.random.seed(1)

# Load the "flower" toy dataset: X holds the 2-D points, Y their 0/1 labels.
X,Y = load_planar_dataset() # X is (2, m) points and Y is (1, m) labels
plt.scatter(X[0,:],X[1,:],c=Y,s=40,cmap=plt.cm.Spectral)
shape_X = X.shape # shape of the raw data
shape_Y = Y.shape # shape of the labels
m= Y.shape[1] # number of examples (400 for this dataset)
print("The dimension for X is "+str(shape_X))
print("THe dimension for Y is "+str(shape_Y))
print("The dataset has total data "+ str(m))

# Baseline: logistic regression cannot separate this non-linear dataset.
clf = sklearn.linear_model.LogisticRegressionCV()
clf.fit(X.T,Y.T)

plot_decision_boundary(lambda x: clf.predict(x),X,Y)
plt.title("Logistic Regression")
LR_predictions = clf.predict(X.T)
# Accuracy = (true positives + true negatives) / total, as a percentage.
print("The accuracy for logistic regression is %d" %float((np.dot(Y,LR_predictions)+\
                                                           np.dot(1-Y,1-LR_predictions))/float(Y.size)*100)+"% right point" )
def layer_sizes(X, Y):
    """Return (n_x, n_h, n_y): input, hidden and output layer widths."""
    input_units = X.shape[0]   # one unit per input feature row
    hidden_units = 4           # fixed-size hidden layer
    output_units = Y.shape[0]  # one unit per label row
    return (input_units, hidden_units, output_units)
def initialize_parameters(n_x, n_h, n_y):
    """Create small random weights and zero biases for the 2-layer network.

    Args:
        n_x: input layer size.
        n_h: hidden layer size.
        n_y: output layer size.

    Returns:
        dict with keys 'w1' (n_h, n_x), 'b1' (n_h, 1), 'w2' (n_y, n_h)
        and 'b2' (n_y, 1).
    """
    np.random.seed(2)  # fixed seed keeps runs reproducible
    # Weights are scaled by 0.01 so tanh starts in its linear regime.
    parameters = {
        "w1": np.random.randn(n_h, n_x) * 0.01,
        "b1": np.zeros(shape=(n_h, 1)),
        "w2": np.random.randn(n_y, n_h) * 0.01,
        "b2": np.zeros(shape=(n_y, 1)),
    }
    for key, shape in (("w1", (n_h, n_x)), ("b1", (n_h, 1)),
                       ("w2", (n_y, n_h)), ("b2", (n_y, 1))):
        assert parameters[key].shape == shape
    return parameters
def forward_propagation(X, parameters):
    """One forward pass: tanh hidden layer followed by a sigmoid output.

    Args:
        X: (n_x, m) input matrix.
        parameters: dict with 'w1', 'b1', 'w2', 'b2'.

    Returns:
        (A2, cache): A2 is the (1, m) output activation; cache holds the
        intermediate Z1/A1/Z2/A2 needed by backpropagation.
    """
    w1, b1 = parameters["w1"], parameters["b1"]
    w2, b2 = parameters["w2"], parameters["b2"]
    Z1 = np.dot(w1, X) + b1
    A1 = np.tanh(Z1)       # hidden-layer activation
    Z2 = np.dot(w2, A1) + b2
    A2 = sigmoid(Z2)       # output probability per example
    assert A2.shape == (1, X.shape[1])
    cache = {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}
    return (A2, cache)
def compute_cost(A2, Y, parameters):
    """Cross-entropy cost of predictions A2 against binary labels Y.

    Args:
        A2: (1, m) sigmoid outputs in (0, 1).
        Y: (1, m) labels in {0, 1}.
        parameters: parameter dict; its 'w1'/'w2' entries are read but do
            not enter the cost.

    Returns:
        The scalar cost as a plain float.
    """
    m = Y.shape[1]
    w1, w2 = parameters["w1"], parameters["w2"]  # kept: validates the dict keys
    log_likelihood = Y * np.log(A2) + (1 - Y) * np.log(1 - A2)
    cost = float(np.squeeze(-np.sum(log_likelihood) / m))
    assert isinstance(cost, float)
    return cost
def backward_propagation(parameters, cache, X, Y):
    """Backward pass for the tanh/sigmoid 2-layer network.

    Args:
        parameters: dict with 'w1' and 'w2' (only 'w2' enters the math).
        cache: dict with the forward activations 'A1' and 'A2'.
        X: (n_x, m) inputs.
        Y: (1, m) labels.

    Returns:
        dict of gradients 'dw1', 'db1', 'dw2', 'db2' averaged over the
        m training examples.
    """
    m = X.shape[1]
    w1 = parameters["w1"]  # read for dict validation; unused below
    w2 = parameters["w2"]
    A1, A2 = cache["A1"], cache["A2"]
    # Output layer: dZ2 = A2 - Y follows from sigmoid + cross-entropy.
    dZ2 = A2 - Y
    dw2 = (1 / m) * np.dot(dZ2, A1.T)
    db2 = (1 / m) * np.sum(dZ2, axis=1, keepdims=True)
    # Hidden layer: (1 - A1**2) is the derivative of tanh.
    dZ1 = np.dot(w2.T, dZ2) * (1 - np.power(A1, 2))
    dw1 = (1 / m) * np.dot(dZ1, X.T)
    db1 = (1 / m) * np.sum(dZ1, axis=1, keepdims=True)
    return {"dw1": dw1, "db1": db1, "dw2": dw2, "db2": db2}
def update_parameters(parameters, grads, learning_rate=1.2):
    """One gradient-descent step: p <- p - learning_rate * dp.

    Args:
        parameters: dict with 'w1', 'b1', 'w2', 'b2'.
        grads: dict with 'dw1', 'db1', 'dw2', 'db2'.
        learning_rate: step size.

    Returns:
        A new parameter dict with the same keys.
    """
    updated = {}
    for name in ("w1", "b1", "w2", "b2"):
        updated[name] = parameters[name] - learning_rate * grads["d" + name]
    return updated
def nn_model(X, Y, n_h, num_iterations, print_cost=False):
    """Train the 2-layer network with full-batch gradient descent.

    Args:
        X: (n_x, m) inputs.
        Y: (1, m) labels.
        n_h: hidden layer size.
        num_iterations: number of gradient-descent steps.
        print_cost: if True, print the cost every 1000 iterations.

    Returns:
        The trained parameter dict.
    """
    np.random.seed(3)
    n_x = layer_sizes(X, Y)[0]
    n_y = layer_sizes(X, Y)[2]
    parameters = initialize_parameters(n_x, n_h, n_y)
    for step in range(num_iterations):
        A2, cache = forward_propagation(X, parameters)
        cost = compute_cost(A2, Y, parameters)
        grads = backward_propagation(parameters, cache, X, Y)
        parameters = update_parameters(parameters, grads, learning_rate=0.5)
        if print_cost and step % 1000 == 0:
            print("The", step, "iteration cost is " + str(cost))
    return parameters
def predict(parameters, X):
    """Threshold the network output at 0.5 to obtain 0/1 class predictions."""
    probabilities, _ = forward_propagation(X, parameters)
    # np.round maps probabilities >= 0.5 to 1 and the rest to 0.
    return np.round(probabilities)
#--------------------MAIN FUNCTION--------------------------
# Train the 2-layer network on the planar dataset and report accuracy.
parameters = nn_model(X,Y,n_h = 4, num_iterations=10000, print_cost=True)
plot_decision_boundary(lambda x: predict(parameters,x.T),X,Y)
plt.title("Decision Boundary for hidden layer size" +str(4))
predictions = predict(parameters,X)
# Accuracy = (true positives + true negatives) / total, as a percentage.
print('The accuracy is %d' %float((np.dot(Y,predictions.T)+np.dot(1-Y,1-predictions.T))/float(Y.size)*100)+'%')
#-------------------END OF MAIN FUNCTION------------------------

# Sweep the hidden-layer size and plot each decision boundary in a grid.
plt.figure(figsize=(16,32))
hidden_layer_sizes = [1,2,3,4,5,20,50] # hidden-layer sizes to compare
for i,n_h in enumerate(hidden_layer_sizes):
    plt.subplot(5,2,i+1)
    plt.title('Hidden Layer of size %d' %n_h)
    parameters = nn_model(X,Y,n_h,num_iterations=5000)
    plot_decision_boundary(lambda x:predict(parameters,x.T),X,Y)
    predictions = predict(parameters,X)
    accuracy = float((np.dot(Y,predictions.T)+np.dot(1-Y,1-predictions.T))\
              /float(Y.size)*100)
    print("THE NUMBER OF HIDDEN LAYER:{} Accuracy:{} %".format(n_h,accuracy))
| [
"planar_utils.load_planar_dataset",
"numpy.power",
"numpy.log",
"numpy.tanh",
"numpy.squeeze",
"numpy.sum",
"sklearn.linear_model.LogisticRegressionCV",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"planar_utils.sigmoid",
"numpy.random.randn",
... | [((366, 383), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (380, 383), True, 'import numpy as np\n'), ((417, 438), 'planar_utils.load_planar_dataset', 'load_planar_dataset', ([], {}), '()\n', (436, 438), False, 'from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets\n'), ((469, 531), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[0, :]', 'X[1, :]'], {'c': 'Y', 's': '(40)', 'cmap': 'plt.cm.Spectral'}), '(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral)\n', (480, 531), True, 'import matplotlib.pyplot as plt\n'), ((806, 849), 'sklearn.linear_model.LogisticRegressionCV', 'sklearn.linear_model.LogisticRegressionCV', ([], {}), '()\n', (847, 849), False, 'import sklearn\n'), ((921, 953), 'matplotlib.pyplot.title', 'plt.title', (['"""Logistic Regression"""'], {}), "('Logistic Regression')\n", (930, 953), True, 'import matplotlib.pyplot as plt\n'), ((5039, 5067), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 32)'}), '(figsize=(16, 32))\n', (5049, 5067), True, 'import matplotlib.pyplot as plt\n'), ((1444, 1461), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (1458, 1461), True, 'import numpy as np\n'), ((1510, 1534), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_h, 1)'}), '(shape=(n_h, 1))\n', (1518, 1534), True, 'import numpy as np\n'), ((1582, 1606), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_y, 1)'}), '(shape=(n_y, 1))\n', (1590, 1606), True, 'import numpy as np\n'), ((2082, 2093), 'numpy.tanh', 'np.tanh', (['Z1'], {}), '(Z1)\n', (2089, 2093), True, 'import numpy as np\n'), ((2131, 2142), 'planar_utils.sigmoid', 'sigmoid', (['Z2'], {}), '(Z2)\n', (2138, 2142), False, 'from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets\n'), ((3766, 3783), 'numpy.random.seed', 'np.random.seed', (['(3)'], {}), '(3)\n', (3780, 3783), True, 'import numpy as np\n'), ((4520, 4532), 'numpy.round', 'np.round', (['A2'], {}), '(A2)\n', (4528, 
4532), True, 'import numpy as np\n'), ((5183, 5207), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(2)', '(i + 1)'], {}), '(5, 2, i + 1)\n', (5194, 5207), True, 'import matplotlib.pyplot as plt\n'), ((5208, 5250), 'matplotlib.pyplot.title', 'plt.title', (["('Hidden Layer of size %d' % n_h)"], {}), "('Hidden Layer of size %d' % n_h)\n", (5217, 5250), True, 'import matplotlib.pyplot as plt\n'), ((1471, 1496), 'numpy.random.randn', 'np.random.randn', (['n_h', 'n_x'], {}), '(n_h, n_x)\n', (1486, 1496), True, 'import numpy as np\n'), ((1543, 1568), 'numpy.random.randn', 'np.random.randn', (['n_y', 'n_h'], {}), '(n_y, n_h)\n', (1558, 1568), True, 'import numpy as np\n'), ((2054, 2067), 'numpy.dot', 'np.dot', (['w1', 'X'], {}), '(w1, X)\n', (2060, 2067), True, 'import numpy as np\n'), ((2103, 2117), 'numpy.dot', 'np.dot', (['w2', 'A1'], {}), '(w2, A1)\n', (2109, 2117), True, 'import numpy as np\n'), ((2556, 2572), 'numpy.squeeze', 'np.squeeze', (['cost'], {}), '(cost)\n', (2566, 2572), True, 'import numpy as np\n'), ((2850, 2867), 'numpy.dot', 'np.dot', (['dZ2', 'A1.T'], {}), '(dZ2, A1.T)\n', (2856, 2867), True, 'import numpy as np\n'), ((2886, 2920), 'numpy.sum', 'np.sum', (['dZ2'], {'axis': '(1)', 'keepdims': '(True)'}), '(dZ2, axis=1, keepdims=True)\n', (2892, 2920), True, 'import numpy as np\n'), ((2941, 2958), 'numpy.dot', 'np.dot', (['w2.T', 'dZ2'], {}), '(w2.T, dZ2)\n', (2947, 2958), True, 'import numpy as np\n'), ((2993, 3009), 'numpy.dot', 'np.dot', (['dZ1', 'X.T'], {}), '(dZ1, X.T)\n', (2999, 3009), True, 'import numpy as np\n'), ((3025, 3059), 'numpy.sum', 'np.sum', (['dZ1'], {'axis': '(1)', 'keepdims': '(True)'}), '(dZ1, axis=1, keepdims=True)\n', (3031, 3059), True, 'import numpy as np\n'), ((2457, 2467), 'numpy.log', 'np.log', (['A2'], {}), '(A2)\n', (2463, 2467), True, 'import numpy as np\n'), ((2491, 2505), 'numpy.log', 'np.log', (['(1 - A2)'], {}), '(1 - A2)\n', (2497, 2505), True, 'import numpy as np\n'), ((2518, 2534), 'numpy.sum', 'np.sum', 
(['logprobs'], {}), '(logprobs)\n', (2524, 2534), True, 'import numpy as np\n'), ((2960, 2975), 'numpy.power', 'np.power', (['A1', '(2)'], {}), '(A1, 2)\n', (2968, 2975), True, 'import numpy as np\n'), ((5432, 5456), 'numpy.dot', 'np.dot', (['Y', 'predictions.T'], {}), '(Y, predictions.T)\n', (5438, 5456), True, 'import numpy as np\n'), ((5456, 5488), 'numpy.dot', 'np.dot', (['(1 - Y)', '(1 - predictions.T)'], {}), '(1 - Y, 1 - predictions.T)\n', (5462, 5488), True, 'import numpy as np\n'), ((1048, 1073), 'numpy.dot', 'np.dot', (['Y', 'LR_predictions'], {}), '(Y, LR_predictions)\n', (1054, 1073), True, 'import numpy as np\n'), ((1082, 1115), 'numpy.dot', 'np.dot', (['(1 - Y)', '(1 - LR_predictions)'], {}), '(1 - Y, 1 - LR_predictions)\n', (1088, 1115), True, 'import numpy as np\n'), ((4894, 4918), 'numpy.dot', 'np.dot', (['Y', 'predictions.T'], {}), '(Y, predictions.T)\n', (4900, 4918), True, 'import numpy as np\n'), ((4918, 4950), 'numpy.dot', 'np.dot', (['(1 - Y)', '(1 - predictions.T)'], {}), '(1 - Y, 1 - predictions.T)\n', (4924, 4950), True, 'import numpy as np\n')] |
import numpy as np
from collections import Counter
from hela.math.preprocessing import clean_document, clean_corpus
from hela.math.levenshtein import replace
from hela.math.score_info import SimilarityInfo
from typing import Sequence, Tuple
def build_matrix(documents: Sequence[str]) -> Tuple[np.ndarray, Sequence[str]]:
    """Build the tf_idf matrix, returned along with the vocabulary for the matrix.

    Args:
        documents: A list of string documents
    Returns:
        A tuple (X, vocab) as the tf_idf matrix (documents x terms) and the
        vocabulary as a list
    """
    word_count = [Counter(doc.split()) for doc in documents]
    vocab = list({word for wc in word_count for word in wc})
    # Term-frequency matrix: one row per document, one column per vocab term.
    tf = np.array([[wc.get(term, 0) for term in vocab] for wc in word_count])
    # Document frequency: in how many documents each term occurs (>= 1 by
    # construction, so the division below is safe).
    df = np.where(tf > 0, 1, 0).sum(axis=0)
    # FIX: idf = log(n_documents / df). The original divided by the vocabulary
    # size (tf.shape[1]) instead of the number of documents (tf.shape[0]),
    # which skews the per-term weights.
    idf = np.log(tf.shape[0] / df)
    return tf * idf, vocab
def normalize(X: np.ndarray) -> np.ndarray:
    """Scale every row vector of *X* to unit Euclidean length.

    Args:
        X: A two-dimensional numpy array
    Returns:
        A numpy matrix in which every row has norm 1
    """
    # keepdims keeps the norms as a column so broadcasting divides row-wise.
    row_norms = np.linalg.norm(X, axis=1, keepdims=True)
    return X / row_norms
def cosine_similarity(X: np.ndarray, Y: np.ndarray) -> np.ndarray:
    """Calculates the cosine similarity based on two numpy matrices.

    Args:
        X: A one or two -dimensional numpy array
        Y: A two-dimensional numpy array
    Returns:
        The cosine similarity of each pair between the two matrices.
        Will have length equal to X.shape[0] * Y.shape[0]
    """
    # Promote a bare vector to a one-row matrix so the math below is uniform.
    X = X.reshape(1, -1) if X.ndim == 1 else X
    # With unit-length rows, plain dot products ARE the cosine similarities.
    X_unit = normalize(X)
    Y_unit = normalize(Y)
    return np.matmul(X_unit, Y_unit.T).flatten()
def find_similar_occurrences(corpus: Sequence[str], min_similarity: float = .75) -> Sequence[SimilarityInfo]:
    """Finds similar occurences of documents in a corpus.

    Args:
        corpus: A list of string documents
        min_similarity: The minimum required similarity [0 to 1]
    Returns:
        A list of similarity info objects with similarity score >= threshold,
        sorted by descending score. Each (i, j) pair is reported only once.
    """
    documents, tokenized_documents = clean_corpus(corpus)
    X, _ = build_matrix(tokenized_documents)
    # Pairwise similarity of every document against every other: an n x n grid.
    cs = cosine_similarity(X, X).reshape(X.shape[0], -1)
    similarities, match_indices = [], []
    for i, row in enumerate(cs):
        # Indices of all documents similar enough to document i.
        for idx in np.argwhere(row >= min_similarity).flatten():
            # If i == idx that means it is its own match
            # if (idx, i) is in match_indices that means we found the reverse match already
            if i != idx and (idx, i) not in match_indices:
                match_indices.append((i, idx))
                similarities.append(SimilarityInfo(
                    score=round(row[idx], 3),
                    match_idx=idx,
                    match_string=documents[idx],
                    target_idx=i,
                    target_string=documents[i]
                ))
    # Negative key sorts highest score first.
    return sorted(similarities, key=lambda x: -x.score)
def sort(query_str: str, corpus: Sequence[str], fuzzy: bool = True) -> Sequence[SimilarityInfo]:
    """Sort matches based on cosine similarity between query string and corpus.

    Args:
        query_str: A string of one or more search terms
        corpus: A list of documents as strings
        fuzzy: Whether the search terms (query_str) should fuzzily adapt to vocabulary
    Returns:
        Similarity info objects for every document, best match first
    Raises:
        ValueError: if none of the (possibly fuzzily replaced) query terms
            occur in the corpus vocabulary
    """
    # Term frequencies of the cleaned query string.
    query_dict = Counter(clean_document(query_str).split())
    documents, tokenized_documents = clean_corpus(corpus)
    X, vocab = build_matrix(tokenized_documents)
    if fuzzy:
        # Replace each out-of-vocabulary term with its closest vocabulary word.
        # NOTE(review): if two distinct query terms map to the same vocab word,
        # the later count overwrites the earlier one instead of summing --
        # confirm this is intended.
        query_dict = {
            doc if doc in vocab else replace(doc, vocab): occurrences
            for doc, occurrences in query_dict.items()
        }
    # Embed the query into the same term space as the corpus matrix.
    query_arr = np.array([query_dict.get(x, 0) for x in vocab])
    if sum(query_arr) == 0:
        raise ValueError(f'No matching strings found between vocabulary and query string: "{query_str}"')
    sims = cosine_similarity(query_arr, X)
    return [
        SimilarityInfo(
            score=sims[idx],
            match_idx=idx,
            match_string=documents[idx]
        )
        # Argsorting on -sims to get highest similarity first
        for idx in np.argsort(-sims)
    ]
| [
"hela.math.preprocessing.clean_document",
"numpy.where",
"hela.math.preprocessing.clean_corpus",
"numpy.argsort",
"numpy.array",
"hela.math.score_info.SimilarityInfo",
"numpy.argwhere",
"hela.math.levenshtein.replace",
"numpy.linalg.norm"
] | [((758, 772), 'numpy.array', 'np.array', (['arrs'], {}), '(arrs)\n', (766, 772), True, 'import numpy as np\n'), ((2128, 2148), 'hela.math.preprocessing.clean_corpus', 'clean_corpus', (['corpus'], {}), '(corpus)\n', (2140, 2148), False, 'from hela.math.preprocessing import clean_document, clean_corpus\n'), ((3566, 3586), 'hela.math.preprocessing.clean_corpus', 'clean_corpus', (['corpus'], {}), '(corpus)\n', (3578, 3586), False, 'from hela.math.preprocessing import clean_document, clean_corpus\n'), ((4071, 4146), 'hela.math.score_info.SimilarityInfo', 'SimilarityInfo', ([], {'score': 'sims[idx]', 'match_idx': 'idx', 'match_string': 'documents[idx]'}), '(score=sims[idx], match_idx=idx, match_string=documents[idx])\n', (4085, 4146), False, 'from hela.math.score_info import SimilarityInfo\n'), ((4274, 4291), 'numpy.argsort', 'np.argsort', (['(-sims)'], {}), '(-sims)\n', (4284, 4291), True, 'import numpy as np\n'), ((1107, 1132), 'numpy.linalg.norm', 'np.linalg.norm', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (1121, 1132), True, 'import numpy as np\n'), ((2344, 2378), 'numpy.argwhere', 'np.argwhere', (['(row >= min_similarity)'], {}), '(row >= min_similarity)\n', (2355, 2378), True, 'import numpy as np\n'), ((3494, 3519), 'hela.math.preprocessing.clean_document', 'clean_document', (['query_str'], {}), '(query_str)\n', (3508, 3519), False, 'from hela.math.preprocessing import clean_document, clean_corpus\n'), ((3710, 3729), 'hela.math.levenshtein.replace', 'replace', (['doc', 'vocab'], {}), '(doc, vocab)\n', (3717, 3729), False, 'from hela.math.levenshtein import replace\n'), ((803, 824), 'numpy.where', 'np.where', (['(X > 0)', '(1)', '(0)'], {}), '(X > 0, 1, 0)\n', (811, 824), True, 'import numpy as np\n')] |
'''
Pybindings for I3SimConstants.ShowerParameters, e.g.:
from icecube.sim_services import I3SimConstants
ShowerParameters = I3SimConstants.ShowerParameters
Are not available in older icecube meta projects.
This is a python implementation and copy of:
https://code.icecube.wisc.edu/projects/icecube/browser/IceCube/projects/
sim-services/trunk/private/sim-services/I3SimConstants.cxx
ToDo:
Once pybindings are available in icecube metaprojects, deprecate this
pyhton copy to ensure that code is not being duplicated in different
areas.
'''
import numpy as np
from icecube.icetray import I3Units
from icecube import dataclasses
from icecube.icetray.i3logging import log_warn
class ShowerParameters(object):
    """Python copy of ``sim_services.I3SimConstants.ShowerParameters``.

    On construction, derives the longitudinal shower-profile parameters
    ``a`` and ``b`` and the electromagnetic scaling factors ``emScale`` /
    ``emScaleSigma`` for the given particle type and energy, mirroring the
    C++ implementation referenced in the module docstring.
    """
    def __init__(self, particle_type, energy,
                 density=0.9216*(I3Units.g/I3Units.cm3)):
        """
        Args:
            particle_type: a ``dataclasses.I3Particle.ParticleType`` value.
            energy: shower energy (I3Units energy units; treated as GeV in
                the low-energy check at the end of this method).
            density: medium density; default is the density of ice.

        Raises:
            ValueError: if the particle type is neither an electromagnetic
                nor a hadronic shower (e.g. a bare muon or tau).
        """
        # initalize variables
        self.a = 0.
        self.b = 0.
        self.emScale = 1.
        self.emScaleSigma = 0.
        # protect against extremely low energies
        # NB: while Equation 4.11 of Leif's Masters' thesis is written in terms
        # of log10, we use the natural log here and divide the energy-scaling
        # coefficients (beta) below by ln(10) to compensate
        self.logE = max(0., np.log(energy))
        # Radiation length rescaled from the reference density to ``density``.
        self.Lrad = 0.358*(I3Units.g/I3Units.cm3)/density
        # NOTE(review): EMinus appears three times in this list; the duplicates
        # are harmless for membership testing but look like copy-paste slips.
        self.isElectron = particle_type in [
            dataclasses.I3Particle.ParticleType.EMinus,
            dataclasses.I3Particle.ParticleType.EPlus,
            dataclasses.I3Particle.ParticleType.Brems,
            dataclasses.I3Particle.ParticleType.DeltaE,
            dataclasses.I3Particle.ParticleType.PairProd,
            dataclasses.I3Particle.ParticleType.Gamma,
            # Pi0 decays to 2 gammas and produce EM showers
            dataclasses.I3Particle.ParticleType.Pi0,
            dataclasses.I3Particle.ParticleType.EMinus,
            dataclasses.I3Particle.ParticleType.EMinus,
        ]
        self.isHadron = particle_type in [
            dataclasses.I3Particle.ParticleType.Hadrons,
            dataclasses.I3Particle.ParticleType.Neutron,
            dataclasses.I3Particle.ParticleType.PiPlus,
            dataclasses.I3Particle.ParticleType.PiMinus,
            dataclasses.I3Particle.ParticleType.K0_Long,
            dataclasses.I3Particle.ParticleType.KPlus,
            dataclasses.I3Particle.ParticleType.KMinus,
            dataclasses.I3Particle.ParticleType.PPlus,
            dataclasses.I3Particle.ParticleType.PMinus,
            dataclasses.I3Particle.ParticleType.K0_Short,
            dataclasses.I3Particle.ParticleType.Eta,
            dataclasses.I3Particle.ParticleType.Lambda,
            dataclasses.I3Particle.ParticleType.SigmaPlus,
            dataclasses.I3Particle.ParticleType.Sigma0,
            dataclasses.I3Particle.ParticleType.SigmaMinus,
            dataclasses.I3Particle.ParticleType.Xi0,
            dataclasses.I3Particle.ParticleType.XiMinus,
            dataclasses.I3Particle.ParticleType.OmegaMinus,
            dataclasses.I3Particle.ParticleType.NeutronBar,
            dataclasses.I3Particle.ParticleType.LambdaBar,
            dataclasses.I3Particle.ParticleType.SigmaMinusBar,
            dataclasses.I3Particle.ParticleType.Sigma0Bar,
            dataclasses.I3Particle.ParticleType.SigmaPlusBar,
            dataclasses.I3Particle.ParticleType.Xi0Bar,
            dataclasses.I3Particle.ParticleType.XiPlusBar,
            dataclasses.I3Particle.ParticleType.OmegaPlusBar,
            dataclasses.I3Particle.ParticleType.DPlus,
            dataclasses.I3Particle.ParticleType.DMinus,
            dataclasses.I3Particle.ParticleType.D0,
            dataclasses.I3Particle.ParticleType.D0Bar,
            dataclasses.I3Particle.ParticleType.DsPlus,
            dataclasses.I3Particle.ParticleType.DsMinusBar,
            dataclasses.I3Particle.ParticleType.LambdacPlus,
            dataclasses.I3Particle.ParticleType.WPlus,
            dataclasses.I3Particle.ParticleType.WMinus,
            dataclasses.I3Particle.ParticleType.Z0,
            dataclasses.I3Particle.ParticleType.NuclInt,
        ]
        self.isMuon = particle_type in [
            dataclasses.I3Particle.ParticleType.MuMinus,
            dataclasses.I3Particle.ParticleType.MuPlus,
        ]
        self.isTau = particle_type in [
            dataclasses.I3Particle.ParticleType.TauMinus,
            dataclasses.I3Particle.ParticleType.TauPlus,
        ]
        if ((not self.isHadron) and (not self.isElectron) and (not self.isMuon)
                and (not self.isTau)):
            # if we don't know it but it has a pdg code,
            # it is probably a hadron..
            self.isHadron = True
            # Added safety check: throw error in this case to make sure nothing
            # weird is happenning unkowingly
            # raise ValueError('Unkown particle type {!r}'.format(particle_type))
            log_warn(
                'Unkown particle type {!r}. Assuming this is a hadron!'.format(
                    particle_type)
            )
        if self.isElectron:
            # EM showers: a/b from the per-type longitudinal-profile fits.
            if particle_type == dataclasses.I3Particle.ParticleType.EPlus:
                self.a = 2.00035+0.63190*self.logE
                self.b = self.Lrad/0.63008
            elif particle_type in [
                dataclasses.I3Particle.ParticleType.Gamma,
                dataclasses.I3Particle.ParticleType.Pi0,  # gamma, pi0
            ]:
                self.a = 2.83923+0.58209*self.logE
                self.b = self.Lrad/0.64526
            else:
                self.a = 2.01849+0.63176*self.logE
                self.b = self.Lrad/0.63207
        elif self.isHadron:
            # Hadronic showers additionally get an EM-equivalent scaling
            # (emScale) and its relative spread (emScaleSigma).
            self.E0 = 0.
            self.m = 0.
            self.f0 = 1.
            self.rms0 = 0.
            self.gamma = 0.
            if particle_type == dataclasses.I3Particle.ParticleType.PiMinus:
                self.a = 1.69176636+0.40803489 * self.logE
                self.b = self.Lrad / 0.34108075
                self.E0 = 0.19826506
                self.m = 0.16218006
                self.f0 = 0.31859323
                self.rms0 = 0.94033488
                self.gamma = 1.35070162
            elif particle_type == dataclasses.I3Particle.ParticleType.K0_Long:
                self.a = 1.95948974+0.34934666 * self.logE
                self.b = self.Lrad / 0.34535151
                self.E0 = 0.21687243
                self.m = 0.16861530
                self.f0 = 0.27724987
                self.rms0 = 1.00318874
                self.gamma = 1.37528605
            elif particle_type == dataclasses.I3Particle.ParticleType.PPlus:
                self.a = 1.47495778+0.40450398 * self.logE
                self.b = self.Lrad / 0.35226706
                self.E0 = 0.29579368
                self.m = 0.19373018
                self.f0 = 0.02455403
                self.rms0 = 1.01619344
                self.gamma = 1.45477346
            elif particle_type == dataclasses.I3Particle.ParticleType.Neutron:
                self.a = 1.57739060+0.40631102 * self.logE
                self.b = self.Lrad / 0.35269455
                self.E0 = 0.66725124
                self.m = 0.19263595
                self.f0 = 0.17559033
                self.rms0 = 1.01414337
                self.gamma = 1.45086895
            elif particle_type == dataclasses.I3Particle.ParticleType.PMinus:
                self.a = 1.92249171+0.33701751 * self.logE
                self.b = self.Lrad / 0.34969748
                self.E0 = 0.29579368
                self.m = 0.19373018
                self.f0 = 0.02455403
                self.rms0 = 1.01094637
                self.gamma = 1.50438415
            else:
                # Generic hadron fallback (also used for unknown types above).
                self.a = 1.58357292+0.41886807 * self.logE
                self.b = self.Lrad / 0.33833116
                self.E0 = 0.18791678
                self.m = 0.16267529
                self.f0 = 0.30974123
                self.rms0 = 0.95899551
                self.gamma = 1.35589541
            # Clamp so np.log(e) >= 1 below.
            e = max(2.71828183, energy)
            self.emScale = 1. - pow(e/self.E0, -self.m)*(1.-self.f0)
            self.emScaleSigma = \
                self.emScale*self.rms0*pow(np.log(e), -self.gamma)
        else:
            # Muons/taus do not themselves define a cascade profile.
            raise ValueError('Particle type {!r} is not a shower'.format(
                particle_type))
        if (energy < 1.*I3Units.GeV):
            self.b = 0.  # this sets the cascade length to 0
| [
"numpy.log"
] | [((1420, 1434), 'numpy.log', 'np.log', (['energy'], {}), '(energy)\n', (1426, 1434), True, 'import numpy as np\n'), ((8359, 8368), 'numpy.log', 'np.log', (['e'], {}), '(e)\n', (8365, 8368), True, 'import numpy as np\n')] |
import numpy as np
from PIL import Image
import glob
import torch
import torch.nn as nn
from torch.autograd import Variable
from random import randint
from torch.utils.data.dataset import Dataset
from pre_processing import *
from mean_std import *
Training_MEAN = 0.4911
Training_STDEV = 0.1658
class SEMDataTrain(Dataset):
    """Training dataset: loads an image/mask pair and applies random
    augmentation (flip, noise, brightness, elastic distortion, crop)."""

    def __init__(self, image_path, mask_path, in_size=572, out_size=388):
        """
        Args:
            image_path (str): the path where the images are located
            mask_path (str): the path where the masks are located
            in_size (int): padded input crop size fed to the model
            out_size (int): size of the model output / mask crop
        """
        # all file names
        self.mask_arr = glob.glob(str(mask_path) + "/*")
        self.image_arr = glob.glob(str(image_path) + str("/*"))
        self.in_size, self.out_size = in_size, out_size
        # Calculate len
        self.data_len = len(self.mask_arr)
        # calculate mean and stdev

    def __getitem__(self, index):
        """Get specific data corresponding to the index

        Args:
            index (int): index of the data

        Returns:
            Tensor: specific data on index which is converted to Tensor
        """
        """
        # GET IMAGE
        """
        single_image_name = self.image_arr[index]
        img_as_img = Image.open(single_image_name)
        # img_as_img.show()
        img_as_np = np.asarray(img_as_img)
        # Augmentation
        # flip {0: vertical, 1: horizontal, 2: both, 3: none}
        flip_num = randint(0, 3)
        img_as_np = flip(img_as_np, flip_num)
        # Noise: randint(0, 1) == 1 -> Gaussian noise, == 0 -> uniform noise
        if randint(0, 1):
            # Gaussian_noise
            gaus_sd, gaus_mean = randint(0, 20), 0
            img_as_np = add_gaussian_noise(img_as_np, gaus_mean, gaus_sd)
        else:
            # uniform_noise
            l_bound, u_bound = randint(-20, 0), randint(0, 20)
            img_as_np = add_uniform_noise(img_as_np, l_bound, u_bound)
        # Brightness: shift all pixels by a random offset in [-20, 20]
        pix_add = randint(-20, 20)
        img_as_np = change_brightness(img_as_np, pix_add)
        # Elastic distortion; ``seed`` is reused below so the mask gets the
        # identical deformation field.
        sigma = randint(6, 12)
        # sigma = 4, alpha = 34
        img_as_np, seed = add_elastic_transform(img_as_np, alpha=34, sigma=sigma, pad_size=20)
        # Crop the image at a random location (y_loc/x_loc reused for the mask)
        img_height, img_width = img_as_np.shape[0], img_as_np.shape[1]
        pad_size = int((self.in_size - self.out_size)/2)
        img_as_np = np.pad(img_as_np, pad_size, mode="symmetric")
        y_loc, x_loc = randint(0, img_height-self.out_size), randint(0, img_width-self.out_size)
        img_as_np = cropping(img_as_np, crop_size=self.in_size, dim1=y_loc, dim2=x_loc)
        '''
        # Sanity Check for image
        img1 = Image.fromarray(img_as_np)
        img1.show()
        '''
        # Normalize the image to [0, 1]
        img_as_np = normalization2(img_as_np, max=1, min=0)
        img_as_np = np.expand_dims(img_as_np, axis=0)  # add channel dimension
        img_as_tensor = torch.from_numpy(img_as_np).float()  # Convert numpy array to tensor
        """
        # GET MASK
        """
        single_mask_name = self.mask_arr[index]
        msk_as_img = Image.open(single_mask_name)
        # msk_as_img.show()
        msk_as_np = np.asarray(msk_as_img)
        # flip the mask with respect to image (same flip_num as above)
        msk_as_np = flip(msk_as_np, flip_num)
        # elastic_transform of mask with respect to image
        # sigma = 4, alpha = 34, seed = from image transformation
        msk_as_np, _ = add_elastic_transform(
            msk_as_np, alpha=34, sigma=sigma, seed=seed, pad_size=20)
        msk_as_np = approximate_image(msk_as_np)  # images only with 0 and 255
        # Crop the mask at the same random location as the image
        msk_as_np = cropping(msk_as_np, crop_size=self.out_size, dim1=y_loc, dim2=x_loc)
        '''
        # Sanity Check for mask
        img2 = Image.fromarray(msk_as_np)
        img2.show()
        '''
        # Normalize mask to only 0 and 1
        msk_as_np = msk_as_np/255
        # msk_as_np = np.expand_dims(msk_as_np, axis=0) # add additional dimension
        msk_as_tensor = torch.from_numpy(msk_as_np).long()  # Convert numpy array to tensor
        return (img_as_tensor, msk_as_tensor)

    def __len__(self):
        """
        Returns:
            length (int): number of mask files (assumed equal to image count)
        """
        return self.data_len
class SEMDataVal(Dataset):
    """Validation dataset: returns four deterministic padded crops per image
    plus the cropped mask and the original full-size mask (no augmentation)."""

    def __init__(self, image_path, mask_path, in_size=572, out_size=388):
        '''
        Args:
            image_path = path where validation images are located
            mask_path = path where validation masks are located
        '''
        # paths to all images and masks
        self.mask_arr = glob.glob(str(mask_path) + str("/*"))
        self.image_arr = glob.glob(str(image_path) + str("/*"))
        self.in_size = in_size
        self.out_size = out_size
        self.data_len = len(self.mask_arr)

    def __getitem__(self, index):
        """Get specific data corresponding to the index

        Args:
            index : an integer variable that calls (indext)th image in the
                    path
        Returns:
            Tensor: 4 cropped data on index which is converted to Tensor
        """
        single_image = self.image_arr[index]
        img_as_img = Image.open(single_image)
        # img_as_img.show()
        # Convert the image into numpy array
        img_as_np = np.asarray(img_as_img)
        # Make 4 cropped image (in numpy array form) using values calculated above
        # Cropped images will also have paddings to fit the model.
        pad_size = int((self.in_size - self.out_size)/2)
        img_as_np = np.pad(img_as_np, pad_size, mode="symmetric")
        img_as_np = multi_cropping(img_as_np,
                                   crop_size=self.in_size,
                                   crop_num1=2, crop_num2=2)
        # Empty list that will be filled in with arrays converted to tensor
        processed_list = []
        for array in img_as_np:
            # SANITY CHECK: SEE THE CROPPED & PADDED IMAGES
            #array_image = Image.fromarray(array)
            # Normalize the cropped arrays
            img_to_add = normalization2(array, max=1, min=0)
            # Convert normalized array into tensor
            processed_list.append(img_to_add)
        img_as_tensor = torch.Tensor(processed_list)
        # return tensor of 4 cropped images
        # top left, top right, bottom left, bottom right respectively.
        """
        # GET MASK
        """
        single_mask_name = self.mask_arr[index]
        msk_as_img = Image.open(single_mask_name)
        # msk_as_img.show()
        msk_as_np = np.asarray(msk_as_img)
        # Crop the mask the same way, then normalize to only 0 and 1
        msk_as_np = multi_cropping(msk_as_np,
                                   crop_size=self.out_size,
                                   crop_num1=2, crop_num2=2)
        msk_as_np = msk_as_np/255
        # msk_as_np = np.expand_dims(msk_as_np, axis=0) # add additional dimension
        msk_as_tensor = torch.from_numpy(msk_as_np).long()  # Convert numpy array to tensor
        # Keep the uncropped mask as well, for full-image evaluation.
        original_msk = torch.from_numpy(np.asarray(msk_as_img))
        return (img_as_tensor, msk_as_tensor, original_msk)

    def __len__(self):
        return self.data_len
class SEMDataTest(Dataset):
    """Test dataset: yields four padded, normalized crops per image
    (no masks are available for the test split)."""

    def __init__(self, image_path, in_size=572, out_size=388):
        '''
        Args:
            image_path = path where test images are located
            in_size = padded crop size fed to the model
            out_size = size of the model output
        '''
        self.image_arr = glob.glob(str(image_path) + "/*")
        self.in_size = in_size
        self.out_size = out_size
        self.data_len = len(self.image_arr)

    def __getitem__(self, index):
        '''Return four crops of image *index* stacked into one tensor.

        The crops cover the top-left, top-right, bottom-left and
        bottom-right quadrants respectively.
        '''
        image = Image.open(self.image_arr[index])
        pixels = np.asarray(image)
        # Mirror-pad so the in_size crops line up with out_size predictions.
        pad_size = int((self.in_size - self.out_size) / 2)
        pixels = np.pad(pixels, pad_size, mode="symmetric")
        crops = multi_cropping(pixels,
                               crop_size=self.in_size,
                               crop_num1=2, crop_num2=2)
        # Normalize every crop into [0, 1] before stacking into a tensor.
        normalized = [normalization2(crop, max=1, min=0) for crop in crops]
        return torch.Tensor(normalized)

    def __len__(self):
        return self.data_len
if __name__ == "__main__":
    # Smoke test: build each dataset split and fetch one training sample
    # (expects the ../data directory layout to exist).
    SEM_train = SEMDataTrain(
        '../data/train/images', '../data/train/masks')
    # BUG FIX: SEMDataTest's signature is (image_path, in_size, out_size);
    # the original passed '../data/test/masks' as the second positional
    # argument, silently setting in_size to a string.
    SEM_test = SEMDataTest('../data/test/images/')
    SEM_val = SEMDataVal('../data/val/images', '../data/val/masks')
    imag_1, msk = SEM_train.__getitem__(0)
| [
"PIL.Image.open",
"numpy.asarray",
"torch.Tensor",
"torch.from_numpy",
"numpy.expand_dims",
"numpy.pad",
"random.randint"
] | [((1295, 1324), 'PIL.Image.open', 'Image.open', (['single_image_name'], {}), '(single_image_name)\n', (1305, 1324), False, 'from PIL import Image\n'), ((1373, 1395), 'numpy.asarray', 'np.asarray', (['img_as_img'], {}), '(img_as_img)\n', (1383, 1395), True, 'import numpy as np\n'), ((1501, 1514), 'random.randint', 'randint', (['(0)', '(3)'], {}), '(0, 3)\n', (1508, 1514), False, 'from random import randint\n'), ((1636, 1649), 'random.randint', 'randint', (['(0)', '(1)'], {}), '(0, 1)\n', (1643, 1649), False, 'from random import randint\n'), ((2021, 2037), 'random.randint', 'randint', (['(-20)', '(20)'], {}), '(-20, 20)\n', (2028, 2037), False, 'from random import randint\n'), ((2166, 2180), 'random.randint', 'randint', (['(6)', '(12)'], {}), '(6, 12)\n', (2173, 2180), False, 'from random import randint\n'), ((2482, 2527), 'numpy.pad', 'np.pad', (['img_as_np', 'pad_size'], {'mode': '"""symmetric"""'}), "(img_as_np, pad_size, mode='symmetric')\n", (2488, 2527), True, 'import numpy as np\n'), ((2942, 2975), 'numpy.expand_dims', 'np.expand_dims', (['img_as_np'], {'axis': '(0)'}), '(img_as_np, axis=0)\n', (2956, 2975), True, 'import numpy as np\n'), ((3210, 3238), 'PIL.Image.open', 'Image.open', (['single_mask_name'], {}), '(single_mask_name)\n', (3220, 3238), False, 'from PIL import Image\n'), ((3287, 3309), 'numpy.asarray', 'np.asarray', (['msk_as_img'], {}), '(msk_as_img)\n', (3297, 3309), True, 'import numpy as np\n'), ((5299, 5323), 'PIL.Image.open', 'Image.open', (['single_image'], {}), '(single_image)\n', (5309, 5323), False, 'from PIL import Image\n'), ((5417, 5439), 'numpy.asarray', 'np.asarray', (['img_as_img'], {}), '(img_as_img)\n', (5427, 5439), True, 'import numpy as np\n'), ((5668, 5713), 'numpy.pad', 'np.pad', (['img_as_np', 'pad_size'], {'mode': '"""symmetric"""'}), "(img_as_np, pad_size, mode='symmetric')\n", (5674, 5713), True, 'import numpy as np\n'), ((6356, 6384), 'torch.Tensor', 'torch.Tensor', (['processed_list'], {}), '(processed_list)\n', 
(6368, 6384), False, 'import torch\n'), ((6615, 6643), 'PIL.Image.open', 'Image.open', (['single_mask_name'], {}), '(single_mask_name)\n', (6625, 6643), False, 'from PIL import Image\n'), ((6692, 6714), 'numpy.asarray', 'np.asarray', (['msk_as_img'], {}), '(msk_as_img)\n', (6702, 6714), True, 'import numpy as np\n'), ((8144, 8168), 'PIL.Image.open', 'Image.open', (['single_image'], {}), '(single_image)\n', (8154, 8168), False, 'from PIL import Image\n'), ((8262, 8284), 'numpy.asarray', 'np.asarray', (['img_as_img'], {}), '(img_as_img)\n', (8272, 8284), True, 'import numpy as np\n'), ((8363, 8408), 'numpy.pad', 'np.pad', (['img_as_np', 'pad_size'], {'mode': '"""symmetric"""'}), "(img_as_np, pad_size, mode='symmetric')\n", (8369, 8408), True, 'import numpy as np\n'), ((9054, 9082), 'torch.Tensor', 'torch.Tensor', (['processed_list'], {}), '(processed_list)\n', (9066, 9082), False, 'import torch\n'), ((2551, 2589), 'random.randint', 'randint', (['(0)', '(img_height - self.out_size)'], {}), '(0, img_height - self.out_size)\n', (2558, 2589), False, 'from random import randint\n'), ((2589, 2626), 'random.randint', 'randint', (['(0)', '(img_width - self.out_size)'], {}), '(0, img_width - self.out_size)\n', (2596, 2626), False, 'from random import randint\n'), ((7175, 7197), 'numpy.asarray', 'np.asarray', (['msk_as_img'], {}), '(msk_as_img)\n', (7185, 7197), True, 'import numpy as np\n'), ((1713, 1727), 'random.randint', 'randint', (['(0)', '(20)'], {}), '(0, 20)\n', (1720, 1727), False, 'from random import randint\n'), ((1878, 1893), 'random.randint', 'randint', (['(-20)', '(0)'], {}), '(-20, 0)\n', (1885, 1893), False, 'from random import randint\n'), ((1895, 1909), 'random.randint', 'randint', (['(0)', '(20)'], {}), '(0, 20)\n', (1902, 1909), False, 'from random import randint\n'), ((3028, 3055), 'torch.from_numpy', 'torch.from_numpy', (['img_as_np'], {}), '(img_as_np)\n', (3044, 3055), False, 'import torch\n'), ((4140, 4167), 'torch.from_numpy', 'torch.from_numpy', 
(['msk_as_np'], {}), '(msk_as_np)\n', (4156, 4167), False, 'import torch\n'), ((7067, 7094), 'torch.from_numpy', 'torch.from_numpy', (['msk_as_np'], {}), '(msk_as_np)\n', (7083, 7094), False, 'import torch\n')] |
import os
import numpy as np
import torch
from torch.optim import lr_scheduler
import torch.nn as nn
import torchvision.transforms as transforms
import time
from sklearn.metrics import confusion_matrix, cohen_kappa_score, accuracy_score
import wandb
from dataset.ucmayo4 import UCMayo4
from utils.metrics import get_mean_sensitivity_specificity
from utils import provider
from utils.provider import get_test_results_regression, get_regression_accuracy_with_boundaries, \
setup_reproducability, get_dataset_mean_and_std, write_metric_results_to_file, get_batch_size_for_model
from sklearn.metrics import classification_report, precision_recall_fscore_support
import argparse
# --- CLI arguments and global training configuration -------------------------
setup_reproducability(35)  # fixed seed (35); presumably seeds all RNGs -- see utils.provider
parser = argparse.ArgumentParser(description="Arguments for the training.")
parser.add_argument("--CV_fold_path", type=str, required=True, help="location of train-val folds and test set.")
parser.add_argument("--test_set_path", type=str, required=True, help="location of the test set.")
parser.add_argument("--model_name", type=str, default="ResNet18",
                    choices=["ResNet18", "ResNet50", "VGG16_bn", "DenseNet121", "Inception_v3", "MobileNet_v3_large"],
                    help="Name of the CNN architecture.")
parser.add_argument("--optimizer", type=str, choices=["Adam", "SGD"], default="Adam",
                    help="Name of the optimization function.")
parser.add_argument("-lr", "--learning_rate", type=float, default=0.0002, help="learning rate.")
parser.add_argument("-wd", "--weight_decay", type=float, default=0., help="weight decay.")
parser.add_argument("-est", "--early_stopping_threshold", type=int, default=25,
                    help="early stopping threshold to terminate training.")
parser.add_argument("--num_epoch", type=int, default=200, help="Max number of epochs to train.")
parser.add_argument("--use_lrscheduling", choices=["True", "False"], default="True",
                    help="if given, training does not use LR scheduling.")
parser.add_argument("-lrsp", "--LRscheduling_patience", type=int, default=15,
                    help="learning rate scheduling patience to decrease learning rate.")
parser.add_argument("-lrsf", "--LRscheduling_factor", type=float, default=0.2,
                    help="learning rate scheduling scaling factor when decrease learning rate.")
parser.add_argument("--use_pretrained_weights", choices=["True", "False"], default="True",
                    help="if True, weights start from pretrained weights on imagenet dataset.")
parser.add_argument("--enable_wandb", choices=["True", "False"], default="True",
                    help="if True, logs training details into wandb platform. Wandb settings should be performed before using this option.")
args = parser.parse_args()
# Echo the parsed configuration into the job log.
for k, v in vars(args).items():
    print(k, ":", v)
# --- Module-level globals read by the training/evaluation functions below ----
model_name = args.model_name
batch_size = get_batch_size_for_model(model_name)  # per-architecture batch size
optimizer_name = args.optimizer
use_lrscheduling = args.use_lrscheduling == "True"  # string flag -> bool
learning_rate = args.learning_rate
weight_decay = args.weight_decay
best_threshold = 0.0001  # relative improvement required to count as a new best
num_epoch = args.num_epoch
num_worker = 4  # DataLoader worker processes
early_stopping_thresh = args.early_stopping_threshold
lr_scheduler_patience = args.LRscheduling_patience
lrs_factor = args.LRscheduling_factor
num_classes = 1  # single regression output head
real_num_classes = 4  # regression output is bucketed into 4 classes for metrics
use_multiGPU = False
use_weighted_sampler = True  # use a weighted random sampler for the train split
pretrained_weights = args.use_pretrained_weights == "True"
enable_wandb = args.enable_wandb == "True"
# Checkpoints are written under ./weights; create it once up front.
print("\nCreate weights directory for checkpoints!")
dirName = "weights"
try:
    os.makedirs(dirName)
    print("Directory ", dirName, " Created ")
except FileExistsError:
    print("Directory ", dirName, " already exists")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("device: ", device)
def train_inception(model, device, train_loader, criterion, optimizer):
    """Run one training epoch for an Inception-v3 regression model.

    Inception v3 emits a main and an auxiliary head during training; the
    auxiliary loss is weighted by 0.4 and added to the main loss.

    Returns:
        tuple: (mean loss over batches, accuracy over the whole dataset).
    """
    model.train()
    running_loss = 0.0
    n_correct = 0
    for inputs, labels in train_loader:
        inputs = inputs.to(device)
        labels = labels.to(device).float()
        main_out, aux_out = model(inputs)
        main_out.squeeze_(1)
        aux_out.squeeze_(1)
        # Combined objective: main head plus 0.4 * auxiliary head.
        batch_loss = criterion(main_out, labels) + 0.4 * criterion(aux_out, labels)
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()
        # Bucket the continuous output into classes at 0.5 / 1.5 / 2.5.
        predicted = get_regression_accuracy_with_boundaries(main_out, labels, [0.5, 1.5, 2.5])
        n_correct += predicted.eq(labels).sum().item()
        running_loss += batch_loss.item()
    return running_loss / len(train_loader), n_correct / len(train_loader.dataset)
def train(model, device, train_loader, criterion, optimizer):
    """Run one training epoch for a single-head regression model.

    Returns:
        tuple: (mean loss over batches, accuracy over the whole dataset).
    """
    model.train()
    epoch_loss = 0.0
    n_correct = 0
    for batch, labels in train_loader:
        batch = batch.to(device)
        labels = labels.to(device).float()
        preds = model(batch)
        preds.squeeze_(1)
        batch_loss = criterion(preds, labels)
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()
        # Map continuous predictions onto classes at 0.5 / 1.5 / 2.5 for accuracy.
        bucketed = get_regression_accuracy_with_boundaries(preds, labels, [0.5, 1.5, 2.5])
        n_correct += bucketed.eq(labels).sum().item()
        epoch_loss += batch_loss.item()
    return epoch_loss / len(train_loader), n_correct / len(train_loader.dataset)
def validation(model, device, val_loader, criterion):
    """Evaluate the model on the validation split without gradient tracking.

    Returns:
        tuple: (mean loss over batches, accuracy over the whole dataset).
    """
    model.eval()
    total_loss = 0.0
    hits = 0
    with torch.no_grad():
        for inputs, labels in val_loader:
            inputs, labels = inputs.to(device), labels.to(device).float()
            preds = model(inputs)
            preds.squeeze_(1)
            # Bucket the continuous output into classes at 0.5 / 1.5 / 2.5.
            bucketed = get_regression_accuracy_with_boundaries(preds, labels, [0.5, 1.5, 2.5])
            hits += bucketed.eq(labels).sum().item()
            total_loss += criterion(preds, labels).item()
    return total_loss / len(val_loader), hits / len(val_loader.dataset)
def get_test_set_results(id, test_dir, normalize):
    """Evaluate fold `id`'s best checkpoint on the held-out test set.

    Loads weights/best_R_<model_name>_<id>.pth.tar and runs regression
    inference with class boundaries at 0.5 / 1.5 / 2.5.

    Returns:
        tuple: (y_true, y_pred, r_true, r_pred) as produced by
        get_test_results_regression.
    """
    # Inception v3 requires 299x299 inputs; other backbones take images as-is.
    steps = [transforms.ToTensor(), normalize]
    if model_name == "Inception_v3":
        steps.insert(0, transforms.Resize((299, 299)))
    test_transform = transforms.Compose(steps)
    test_loader = torch.utils.data.DataLoader(
        UCMayo4(test_dir, transform=test_transform),
        batch_size=1, shuffle=False, num_workers=num_worker, pin_memory=True)
    net = provider.initialize_model(model_name, False, num_classes)
    checkpoint = "weights/best_R_" + model_name + '_' + str(id) + '.pth.tar'
    net.load_state_dict(torch.load(checkpoint))
    net.to(device)
    return get_test_results_regression(net, test_loader, device, [0.5, 1.5, 2.5])
def run_experiment(experiment_id: int, train_dir: str, val_dir: str, normalize, best_acc=0, early_stop_counter=0):
    """Train one cross-validation fold and checkpoint the best model.

    Builds the train/val loaders, trains for up to `num_epoch` epochs with
    optional LR scheduling and early stopping, and saves the weights with
    the highest validation accuracy to
    weights/best_R_<model_name>_<experiment_id>.pth.tar.

    Reads many module-level globals (model_name, batch_size, optimizer_name,
    enable_wandb, ...) set by the CLI block above.
    """
    # Inception v3 requires 299x299 inputs; other backbones use images as-is.
    if model_name == "Inception_v3":
        train_transform = transforms.Compose([transforms.RandomHorizontalFlip(),
                                            transforms.RandomRotation((-180, 180)),
                                            transforms.Resize((299, 299)),
                                            transforms.ToTensor(),
                                            normalize])
    else:
        train_transform = transforms.Compose([transforms.RandomHorizontalFlip(),
                                            transforms.RandomRotation((-180, 180)),
                                            transforms.ToTensor(),
                                            normalize])
    train_dataset = UCMayo4(train_dir, transform=train_transform)
    # Weighted sampling replaces shuffling (DataLoader forbids both at once).
    if use_weighted_sampler:
        weighted_sampler = provider.weighted_random_sampler(train_dataset)
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=False,
                                                   sampler=weighted_sampler,
                                                   num_workers=num_worker,
                                                   pin_memory=True)
    else:
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                                   num_workers=num_worker,
                                                   pin_memory=True)
    # Validation: no augmentation, only resize (if needed) + tensor + normalize.
    if model_name == "Inception_v3":
        val_transform = transforms.Compose([transforms.Resize((299, 299)),
                                            transforms.ToTensor(),
                                            normalize])
    else:
        val_transform = transforms.Compose([transforms.ToTensor(),
                                            normalize])
    val_dataset = UCMayo4(val_dir, transform=val_transform)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_worker,
                                             pin_memory=True)
    print("Standard model is running!")
    model = provider.initialize_model(model_name, pretrained_weights, num_classes)
    if use_multiGPU:
        if torch.cuda.device_count() > 1:
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            model = nn.DataParallel(model)
    model.to(device)
    experiment_signature = "R " + model_name + " ID=" + str(experiment_id) + " lr=" + str(
        learning_rate) + " reg=" + str(weight_decay) + " bs=" + str(
        batch_size)
    print("model: " + experiment_signature + " worker: " + str(num_worker))
    if optimizer_name == "Adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    elif optimizer_name == "SGD":
        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
    else:
        raise Exception("Undefined optimizer name")
    # Reduce LR when validation accuracy (mode="max") plateaus.
    if use_lrscheduling:
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode="max", factor=lrs_factor,
                                       patience=lr_scheduler_patience,
                                       threshold=best_threshold,
                                       verbose=False)
    criterion = nn.MSELoss()  # regression objective on the Mayo score
    for epoch in range(num_epoch):
        if model_name == "Inception_v3":
            train_loss, train_accuracy = train_inception(model, device, train_loader, criterion, optimizer)
        else:
            train_loss, train_accuracy = train(model, device, train_loader, criterion, optimizer)
        val_loss, val_accuracy = validation(model, device, val_loader, criterion)
        if use_lrscheduling:
            scheduler.step(val_accuracy)
        if enable_wandb:
            wandb.log(
                {"epoch" : epoch + 1,
                 "lr" : optimizer.param_groups[0]['lr'],
                 'train loss': train_loss,
                 'val loss' : val_loss,
                 'train acc' : train_accuracy,
                 'val acc' : val_accuracy})
        # New best only when improvement exceeds the relative threshold;
        # otherwise bump the early-stopping counter.
        if val_accuracy > best_acc * (1 + best_threshold):
            early_stop_counter = 0
            best_acc = val_accuracy
            if enable_wandb:
                wandb.run.summary["best accuracy"] = best_acc
            torch.save(model.state_dict(), "weights/best_R_" + model_name + '_' + str(experiment_id) + '.pth.tar')
        else:
            early_stop_counter += 1
            if early_stop_counter >= early_stopping_thresh:
                print("Early stopping at: " + str(epoch))
                break
    print("Experiment: " + str(experiment_id) + ", best validation set accuracy: " + str(best_acc * 100))
experiment_start_time = time.time()
if __name__ == "__main__":
    # One wandb group id shared by every fold of this CV run.
    if enable_wandb:
        group_id = wandb.util.generate_id()
    CV_fold_path = args.CV_fold_path
    # Fold directories are named "fold*"; sort for a deterministic order.
    CV_fold_folders = [x for x in os.listdir(CV_fold_path) if x.startswith("fold")]
    CV_fold_folders = sorted(CV_fold_folders)
    number_of_experiments = len(CV_fold_folders)
    # Per-fold metric accumulators: 4-class metrics ...
    kappa_scores = []
    weighted_kappa_scores = []
    accuracies = []
    sensitivities = []
    specificities = []
    macro_precisions = []
    macro_recalls = []
    macro_f1s = []
    class_precisions = np.zeros([number_of_experiments, real_num_classes])
    class_recalls = np.zeros([number_of_experiments, real_num_classes])
    class_f1s = np.zeros([number_of_experiments, real_num_classes])
    # ... and binary remission metrics (suffix "_r").
    precisions_r = []
    recalls_r = []
    f1s_r = []
    kappa_scores_r = []
    accuracies_r = []
    sensitivities_r = []
    specificities_r = []
    for i in range(number_of_experiments):
        id = i + 1  # folds are reported 1-based
        print("\n--------------> Starting Fold " + str(id))
        if enable_wandb:
            wandb.init(project="ulcerative-colitis-classification",
                       group=model_name + "_CV_R" + "_" + group_id,
                       save_code=True,
                       reinit=True)
            wandb.run.name = "epoch_" + str(id)
            wandb.run.save()
            config = wandb.config
            config.exp = os.path.basename(__file__)[:-3]  # script name without ".py"
            config.model = model_name
            config.dataset = "final_dataset"
            config.lr = learning_rate
            config.wd = weight_decay
            config.bs = batch_size
            config.num_worker = num_worker
            config.optimizer = optimizer_name
        train_dir = os.path.join(CV_fold_path, CV_fold_folders[i], "train")
        val_dir = os.path.join(CV_fold_path, CV_fold_folders[i], "val")
        test_dir = args.test_set_path
        # Normalization statistics come from this fold's training split only.
        channel_means, channel_stds = get_dataset_mean_and_std(train_dir)
        normalize = transforms.Normalize(mean=channel_means,
                                         std=channel_stds)
        run_experiment(id, train_dir, val_dir, normalize)
        y_true, y_pred, r_true, r_pred = get_test_set_results(id, test_dir, normalize)
        prf1_4classes = precision_recall_fscore_support(y_true, y_pred, average=None, labels=[0, 1, 2, 3])
        prf1_remission = precision_recall_fscore_support(r_true, r_pred, average="binary")
        # NOTE(review): both confusion matrices are computed but never used or logged.
        cm_4class = confusion_matrix(y_true, y_pred)
        cm_remission = confusion_matrix(r_true, r_pred)
        class_precisions[i] = prf1_4classes[0]
        macro_precision = prf1_4classes[0].mean()
        macro_precisions.append(macro_precision)
        class_recalls[i] = prf1_4classes[1]
        macro_recall = prf1_4classes[1].mean()
        macro_recalls.append(macro_recall)
        class_f1s[i] = prf1_4classes[2]
        macro_f1 = prf1_4classes[2].mean()
        macro_f1s.append(macro_f1)
        # 4-class analysis
        all_kappa_score = cohen_kappa_score(y_true, y_pred)
        kappa_scores.append(all_kappa_score)
        all_kappa_score_weighted = cohen_kappa_score(y_true, y_pred, weights="quadratic")
        weighted_kappa_scores.append(all_kappa_score_weighted)
        accuracy = accuracy_score(y_true, y_pred)
        accuracies.append(accuracy)
        mean_sensitivity, mean_specificity = get_mean_sensitivity_specificity(y_true, y_pred)
        sensitivities.append(mean_sensitivity), specificities.append(mean_specificity)
        # Remission analysis
        remission_kappa_score = cohen_kappa_score(r_true, r_pred)
        kappa_scores_r.append(remission_kappa_score)
        accuracy_r = accuracy_score(r_true, r_pred)
        accuracies_r.append(accuracy_r)
        precisions_r.append(prf1_remission[0])
        recalls_r.append(prf1_remission[1])
        f1s_r.append(prf1_remission[2])
        # Per-class recall of class "0" / "1" serves as sensitivity / specificity.
        cr_r = classification_report(r_true, r_pred, output_dict=True)
        sensitivities_r.append(cr_r["0"]["recall"]), specificities_r.append(cr_r["1"]["recall"])
        if enable_wandb:
            wandb.run.summary["m_precision"] = macro_precision
            wandb.run.summary["m_recall"] = macro_recall
            wandb.run.summary["m_f1"] = macro_f1
            wandb.run.summary["kappa"] = all_kappa_score
            wandb.run.summary["qw_kappa"] = all_kappa_score_weighted
            wandb.run.summary["accuracy"] = accuracy
            wandb.run.summary["m_sensitivity"] = mean_sensitivity
            wandb.run.summary["m_specificity"] = mean_specificity
            wandb.run.summary["remission_accuracy"] = accuracy_r
            wandb.run.summary["remission_kappa"] = remission_kappa_score
            wandb.run.summary["remission_precision"] = prf1_remission[0]
            wandb.run.summary["remission_recall"] = prf1_remission[1]
            wandb.run.summary["remission_f1"] = prf1_remission[2]
            wandb.run.summary["remission_sensitivity"] = cr_r["0"]["recall"]
            wandb.run.summary["remission_specificity"] = cr_r["1"]["recall"]
            # After the last fold, dump the aggregated CV metrics to a file in the run dir.
            if id == number_of_experiments:
                write_metric_results_to_file(wandb.run.dir, accuracies, kappa_scores, weighted_kappa_scores,
                                             sensitivities, specificities,
                                             macro_precisions,
                                             macro_recalls, macro_f1s, class_precisions, class_recalls, class_f1s,
                                             accuracies_r, kappa_scores_r, sensitivities_r, specificities_r,
                                             precisions_r,
                                             recalls_r, f1s_r)
            wandb.run.finish()
| [
"utils.provider.get_test_results_regression",
"wandb.log",
"sklearn.metrics.classification_report",
"torch.cuda.device_count",
"wandb.init",
"torch.nn.MSELoss",
"torch.cuda.is_available",
"os.listdir",
"argparse.ArgumentParser",
"utils.metrics.get_mean_sensitivity_specificity",
"utils.provider.w... | [((679, 704), 'utils.provider.setup_reproducability', 'setup_reproducability', (['(35)'], {}), '(35)\n', (700, 704), False, 'from utils.provider import get_test_results_regression, get_regression_accuracy_with_boundaries, setup_reproducability, get_dataset_mean_and_std, write_metric_results_to_file, get_batch_size_for_model\n'), ((715, 781), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Arguments for the training."""'}), "(description='Arguments for the training.')\n", (738, 781), False, 'import argparse\n'), ((2863, 2899), 'utils.provider.get_batch_size_for_model', 'get_batch_size_for_model', (['model_name'], {}), '(model_name)\n', (2887, 2899), False, 'from utils.provider import get_test_results_regression, get_regression_accuracy_with_boundaries, setup_reproducability, get_dataset_mean_and_std, write_metric_results_to_file, get_batch_size_for_model\n'), ((11974, 11985), 'time.time', 'time.time', ([], {}), '()\n', (11983, 11985), False, 'import time\n'), ((3532, 3552), 'os.makedirs', 'os.makedirs', (['dirName'], {}), '(dirName)\n', (3543, 3552), False, 'import os\n'), ((6456, 6499), 'dataset.ucmayo4.UCMayo4', 'UCMayo4', (['test_dir'], {'transform': 'test_transform'}), '(test_dir, transform=test_transform)\n', (6463, 6499), False, 'from dataset.ucmayo4 import UCMayo4\n'), ((6518, 6633), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': 'num_worker', 'pin_memory': '(True)'}), '(test_dataset, batch_size=1, shuffle=False,\n num_workers=num_worker, pin_memory=True)\n', (6545, 6633), False, 'import torch\n'), ((6689, 6746), 'utils.provider.initialize_model', 'provider.initialize_model', (['model_name', '(False)', 'num_classes'], {}), '(model_name, False, num_classes)\n', (6714, 6746), False, 'from utils import provider\n'), ((6905, 6977), 'utils.provider.get_test_results_regression', 'get_test_results_regression', (['model', 
'test_loader', 'device', '[0.5, 1.5, 2.5]'], {}), '(model, test_loader, device, [0.5, 1.5, 2.5])\n', (6932, 6977), False, 'from utils.provider import get_test_results_regression, get_regression_accuracy_with_boundaries, setup_reproducability, get_dataset_mean_and_std, write_metric_results_to_file, get_batch_size_for_model\n'), ((7870, 7915), 'dataset.ucmayo4.UCMayo4', 'UCMayo4', (['train_dir'], {'transform': 'train_transform'}), '(train_dir, transform=train_transform)\n', (7877, 7915), False, 'from dataset.ucmayo4 import UCMayo4\n'), ((8989, 9030), 'dataset.ucmayo4.UCMayo4', 'UCMayo4', (['val_dir'], {'transform': 'val_transform'}), '(val_dir, transform=val_transform)\n', (8996, 9030), False, 'from dataset.ucmayo4 import UCMayo4\n'), ((9048, 9172), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_worker', 'pin_memory': '(True)'}), '(val_dataset, batch_size=batch_size, shuffle=\n False, num_workers=num_worker, pin_memory=True)\n', (9075, 9172), False, 'import torch\n'), ((9266, 9336), 'utils.provider.initialize_model', 'provider.initialize_model', (['model_name', 'pretrained_weights', 'num_classes'], {}), '(model_name, pretrained_weights, num_classes)\n', (9291, 9336), False, 'from utils import provider\n'), ((10506, 10518), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (10516, 10518), True, 'import torch.nn as nn\n'), ((12508, 12559), 'numpy.zeros', 'np.zeros', (['[number_of_experiments, real_num_classes]'], {}), '([number_of_experiments, real_num_classes])\n', (12516, 12559), True, 'import numpy as np\n'), ((12580, 12631), 'numpy.zeros', 'np.zeros', (['[number_of_experiments, real_num_classes]'], {}), '([number_of_experiments, real_num_classes])\n', (12588, 12631), True, 'import numpy as np\n'), ((12648, 12699), 'numpy.zeros', 'np.zeros', (['[number_of_experiments, real_num_classes]'], {}), '([number_of_experiments, real_num_classes])\n', (12656, 12699), 
True, 'import numpy as np\n'), ((3710, 3735), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3733, 3735), False, 'import torch\n'), ((4343, 4415), 'utils.provider.get_regression_accuracy_with_boundaries', 'get_regression_accuracy_with_boundaries', (['output', 'target', '[0.5, 1.5, 2.5]'], {}), '(output, target, [0.5, 1.5, 2.5])\n', (4382, 4415), False, 'from utils.provider import get_test_results_regression, get_regression_accuracy_with_boundaries, setup_reproducability, get_dataset_mean_and_std, write_metric_results_to_file, get_batch_size_for_model\n'), ((5062, 5134), 'utils.provider.get_regression_accuracy_with_boundaries', 'get_regression_accuracy_with_boundaries', (['output', 'target', '[0.5, 1.5, 2.5]'], {}), '(output, target, [0.5, 1.5, 2.5])\n', (5101, 5134), False, 'from utils.provider import get_test_results_regression, get_regression_accuracy_with_boundaries, setup_reproducability, get_dataset_mean_and_std, write_metric_results_to_file, get_batch_size_for_model\n'), ((5464, 5479), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5477, 5479), False, 'import torch\n'), ((7973, 8020), 'utils.provider.weighted_random_sampler', 'provider.weighted_random_sampler', (['train_dataset'], {}), '(train_dataset)\n', (8005, 8020), False, 'from utils import provider\n'), ((8044, 8196), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'sampler': 'weighted_sampler', 'num_workers': 'num_worker', 'pin_memory': '(True)'}), '(train_dataset, batch_size=batch_size, shuffle=\n False, sampler=weighted_sampler, num_workers=num_worker, pin_memory=True)\n', (8071, 8196), False, 'import torch\n'), ((8378, 8503), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': 'num_worker', 'pin_memory': '(True)'}), '(train_dataset, batch_size=batch_size, shuffle=\n True, 
num_workers=num_worker, pin_memory=True)\n', (8405, 8503), False, 'import torch\n'), ((10191, 10340), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'mode': '"""max"""', 'factor': 'lrs_factor', 'patience': 'lr_scheduler_patience', 'threshold': 'best_threshold', 'verbose': '(False)'}), "(optimizer, mode='max', factor=lrs_factor,\n patience=lr_scheduler_patience, threshold=best_threshold, verbose=False)\n", (10221, 10340), False, 'from torch.optim import lr_scheduler\n'), ((12053, 12077), 'wandb.util.generate_id', 'wandb.util.generate_id', ([], {}), '()\n', (12075, 12077), False, 'import wandb\n'), ((13685, 13740), 'os.path.join', 'os.path.join', (['CV_fold_path', 'CV_fold_folders[i]', '"""train"""'], {}), "(CV_fold_path, CV_fold_folders[i], 'train')\n", (13697, 13740), False, 'import os\n'), ((13759, 13812), 'os.path.join', 'os.path.join', (['CV_fold_path', 'CV_fold_folders[i]', '"""val"""'], {}), "(CV_fold_path, CV_fold_folders[i], 'val')\n", (13771, 13812), False, 'import os\n'), ((13890, 13925), 'utils.provider.get_dataset_mean_and_std', 'get_dataset_mean_and_std', (['train_dir'], {}), '(train_dir)\n', (13914, 13925), False, 'from utils.provider import get_test_results_regression, get_regression_accuracy_with_boundaries, setup_reproducability, get_dataset_mean_and_std, write_metric_results_to_file, get_batch_size_for_model\n'), ((13946, 14004), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'channel_means', 'std': 'channel_stds'}), '(mean=channel_means, std=channel_stds)\n', (13966, 14004), True, 'import torchvision.transforms as transforms\n'), ((14217, 14303), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_true', 'y_pred'], {'average': 'None', 'labels': '[0, 1, 2, 3]'}), '(y_true, y_pred, average=None, labels=[0, 1,\n 2, 3])\n', (14248, 14303), False, 'from sklearn.metrics import classification_report, precision_recall_fscore_support\n'), 
((14325, 14390), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['r_true', 'r_pred'], {'average': '"""binary"""'}), "(r_true, r_pred, average='binary')\n", (14356, 14390), False, 'from sklearn.metrics import classification_report, precision_recall_fscore_support\n'), ((14412, 14444), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (14428, 14444), False, 'from sklearn.metrics import confusion_matrix, cohen_kappa_score, accuracy_score\n'), ((14468, 14500), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['r_true', 'r_pred'], {}), '(r_true, r_pred)\n', (14484, 14500), False, 'from sklearn.metrics import confusion_matrix, cohen_kappa_score, accuracy_score\n'), ((14956, 14989), 'sklearn.metrics.cohen_kappa_score', 'cohen_kappa_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (14973, 14989), False, 'from sklearn.metrics import confusion_matrix, cohen_kappa_score, accuracy_score\n'), ((15071, 15125), 'sklearn.metrics.cohen_kappa_score', 'cohen_kappa_score', (['y_true', 'y_pred'], {'weights': '"""quadratic"""'}), "(y_true, y_pred, weights='quadratic')\n", (15088, 15125), False, 'from sklearn.metrics import confusion_matrix, cohen_kappa_score, accuracy_score\n'), ((15209, 15239), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (15223, 15239), False, 'from sklearn.metrics import confusion_matrix, cohen_kappa_score, accuracy_score\n'), ((15322, 15370), 'utils.metrics.get_mean_sensitivity_specificity', 'get_mean_sensitivity_specificity', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (15354, 15370), False, 'from utils.metrics import get_mean_sensitivity_specificity\n'), ((15520, 15553), 'sklearn.metrics.cohen_kappa_score', 'cohen_kappa_score', (['r_true', 'r_pred'], {}), '(r_true, r_pred)\n', (15537, 15553), False, 'from sklearn.metrics import confusion_matrix, cohen_kappa_score, accuracy_score\n'), 
((15629, 15659), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['r_true', 'r_pred'], {}), '(r_true, r_pred)\n', (15643, 15659), False, 'from sklearn.metrics import confusion_matrix, cohen_kappa_score, accuracy_score\n'), ((15848, 15903), 'sklearn.metrics.classification_report', 'classification_report', (['r_true', 'r_pred'], {'output_dict': '(True)'}), '(r_true, r_pred, output_dict=True)\n', (15869, 15903), False, 'from sklearn.metrics import classification_report, precision_recall_fscore_support\n'), ((5734, 5806), 'utils.provider.get_regression_accuracy_with_boundaries', 'get_regression_accuracy_with_boundaries', (['output', 'target', '[0.5, 1.5, 2.5]'], {}), '(output, target, [0.5, 1.5, 2.5])\n', (5773, 5806), False, 'from utils.provider import get_test_results_regression, get_regression_accuracy_with_boundaries, setup_reproducability, get_dataset_mean_and_std, write_metric_results_to_file, get_batch_size_for_model\n'), ((9370, 9395), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (9393, 9395), False, 'import torch\n'), ((9488, 9510), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (9503, 9510), True, 'import torch.nn as nn\n'), ((11007, 11187), 'wandb.log', 'wandb.log', (["{'epoch': epoch + 1, 'lr': optimizer.param_groups[0]['lr'], 'train loss':\n train_loss, 'val loss': val_loss, 'train acc': train_accuracy,\n 'val acc': val_accuracy}"], {}), "({'epoch': epoch + 1, 'lr': optimizer.param_groups[0]['lr'],\n 'train loss': train_loss, 'val loss': val_loss, 'train acc':\n train_accuracy, 'val acc': val_accuracy})\n", (11016, 11187), False, 'import wandb\n'), ((12150, 12174), 'os.listdir', 'os.listdir', (['CV_fold_path'], {}), '(CV_fold_path)\n', (12160, 12174), False, 'import os\n'), ((13014, 13147), 'wandb.init', 'wandb.init', ([], {'project': '"""ulcerative-colitis-classification"""', 'group': "(model_name + '_CV_R' + '_' + group_id)", 'save_code': '(True)', 'reinit': '(True)'}), 
"(project='ulcerative-colitis-classification', group=model_name +\n '_CV_R' + '_' + group_id, save_code=True, reinit=True)\n", (13024, 13147), False, 'import wandb\n'), ((13273, 13289), 'wandb.run.save', 'wandb.run.save', ([], {}), '()\n', (13287, 13289), False, 'import wandb\n'), ((17664, 17682), 'wandb.run.finish', 'wandb.run.finish', ([], {}), '()\n', (17680, 17682), False, 'import wandb\n'), ((6145, 6174), 'torchvision.transforms.Resize', 'transforms.Resize', (['(299, 299)'], {}), '((299, 299))\n', (6162, 6174), True, 'import torchvision.transforms as transforms\n'), ((6221, 6242), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6240, 6242), True, 'import torchvision.transforms as transforms\n'), ((6356, 6377), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6375, 6377), True, 'import torchvision.transforms as transforms\n'), ((7221, 7254), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (7252, 7254), True, 'import torchvision.transforms as transforms\n'), ((7302, 7340), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(-180, 180)'], {}), '((-180, 180))\n', (7327, 7340), True, 'import torchvision.transforms as transforms\n'), ((7388, 7417), 'torchvision.transforms.Resize', 'transforms.Resize', (['(299, 299)'], {}), '((299, 299))\n', (7405, 7417), True, 'import torchvision.transforms as transforms\n'), ((7465, 7486), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7484, 7486), True, 'import torchvision.transforms as transforms\n'), ((7602, 7635), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (7633, 7635), True, 'import torchvision.transforms as transforms\n'), ((7683, 7721), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(-180, 180)'], {}), '((-180, 180))\n', (7708, 7721), True, 'import torchvision.transforms as 
transforms\n'), ((7769, 7790), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7788, 7790), True, 'import torchvision.transforms as transforms\n'), ((8683, 8712), 'torchvision.transforms.Resize', 'transforms.Resize', (['(299, 299)'], {}), '((299, 299))\n', (8700, 8712), True, 'import torchvision.transforms as transforms\n'), ((8758, 8779), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (8777, 8779), True, 'import torchvision.transforms as transforms\n'), ((8891, 8912), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (8910, 8912), True, 'import torchvision.transforms as transforms\n'), ((9432, 9457), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (9455, 9457), False, 'import torch\n'), ((13350, 13376), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (13366, 13376), False, 'import os\n'), ((17075, 17397), 'utils.provider.write_metric_results_to_file', 'write_metric_results_to_file', (['wandb.run.dir', 'accuracies', 'kappa_scores', 'weighted_kappa_scores', 'sensitivities', 'specificities', 'macro_precisions', 'macro_recalls', 'macro_f1s', 'class_precisions', 'class_recalls', 'class_f1s', 'accuracies_r', 'kappa_scores_r', 'sensitivities_r', 'specificities_r', 'precisions_r', 'recalls_r', 'f1s_r'], {}), '(wandb.run.dir, accuracies, kappa_scores,\n weighted_kappa_scores, sensitivities, specificities, macro_precisions,\n macro_recalls, macro_f1s, class_precisions, class_recalls, class_f1s,\n accuracies_r, kappa_scores_r, sensitivities_r, specificities_r,\n precisions_r, recalls_r, f1s_r)\n', (17103, 17397), False, 'from utils.provider import get_test_results_regression, get_regression_accuracy_with_boundaries, setup_reproducability, get_dataset_mean_and_std, write_metric_results_to_file, get_batch_size_for_model\n')] |
import random
import sys
from PIL import Image, ImageDraw, ImageFont
import numpy as np

# Windows fonts mounted under WSL.
FONT_PATH = "/mnt/c/Windows/Fonts/"
# Candidate fonts; "NIS_R10N.ttc" is listed twice so it is chosen twice as often.
fonts = []
fonts.append("NIS_R10N.ttc")
fonts.append("NIS_R10N.ttc")
fonts.append("NIS_SAI8N.ttc")
num_images = 10000
#num_images = 100
images = np.empty((num_images, 28, 28), dtype=np.uint8)
KANAS = "あいうえおかきくけこ"  # first 10 hiragana (a-row and ka-row)
chars = [ord(c) for c in list(KANAS)]
for i in range(num_images):
    # Render one random kana on a 28x28 greyscale canvas (MNIST-like format).
    img = Image.new('L', (28, 28))
    draw = ImageDraw.Draw(img)
    f = FONT_PATH+random.choice(fonts)
    draw.font = ImageFont.truetype(f, 26)
    theta = random.randint(-5, 5)  # small random rotation, in degrees
    kana = chr(random.choice(chars))
    x = random.randint(0, 2)  # small random positional jitter
    y = random.randint(0, 2)
    draw.text((x, y), kana, (255))
    img = img.rotate(theta)
    nim = np.array(img)
    nim = nim.reshape((28, 28))
    # Bug fix: was `images[i:] = nim`, which broadcast the sample into every
    # remaining slot on each iteration (O(n^2) redundant copies). Assigning a
    # single slot produces the identical final array.
    images[i] = nim
    sys.stdout.write('\r>> Generating image %d/%d' % (i + 1, num_images))
    sys.stdout.flush()
    # Keep the first 100 renders as PNGs for visual inspection.
    if i < 100:
        filename = "test%04d.png" % i
        img.save(filename)
np.save("hiragana", images)
print(images.shape)
| [
"random.choice",
"PIL.Image.new",
"PIL.ImageFont.truetype",
"numpy.array",
"PIL.ImageDraw.Draw",
"numpy.empty",
"sys.stdout.flush",
"random.randint",
"numpy.save",
"sys.stdout.write"
] | [((273, 319), 'numpy.empty', 'np.empty', (['(num_images, 28, 28)'], {'dtype': 'np.uint8'}), '((num_images, 28, 28), dtype=np.uint8)\n', (281, 319), True, 'import numpy as np\n'), ((1004, 1031), 'numpy.save', 'np.save', (['"""hiragana"""', 'images'], {}), "('hiragana', images)\n", (1011, 1031), True, 'import numpy as np\n'), ((419, 443), 'PIL.Image.new', 'Image.new', (['"""L"""', '(28, 28)'], {}), "('L', (28, 28))\n", (428, 443), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((455, 474), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (469, 474), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((530, 555), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['f', '(26)'], {}), '(f, 26)\n', (548, 555), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((568, 589), 'random.randint', 'random.randint', (['(-5)', '(5)'], {}), '(-5, 5)\n', (582, 589), False, 'import random\n'), ((635, 655), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (649, 655), False, 'import random\n'), ((664, 684), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (678, 684), False, 'import random\n'), ((758, 771), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (766, 771), True, 'import numpy as np\n'), ((829, 898), 'sys.stdout.write', 'sys.stdout.write', (["('\\r>> Generating image %d/%d' % (i + 1, num_images))"], {}), "('\\r>> Generating image %d/%d' % (i + 1, num_images))\n", (845, 898), False, 'import sys\n'), ((903, 921), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (919, 921), False, 'import sys\n'), ((493, 513), 'random.choice', 'random.choice', (['fonts'], {}), '(fonts)\n', (506, 513), False, 'import random\n'), ((605, 625), 'random.choice', 'random.choice', (['chars'], {}), '(chars)\n', (618, 625), False, 'import random\n')] |
#!/usr/bin/env python
# Copyright (C) 2015 by
# <NAME> <<EMAIL>>
# All rights reserved.
# BSD license.
import itertools
import numpy as np
import networkx as nx
from typedecorator import params, returns
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__all__ = ['nsim_hs03']
@params(G1=nx.DiGraph, G2=nx.DiGraph, max_iter=int, eps=float)
def nsim_hs03(G1, G2, max_iter=100, eps=1e-4):
    """Compute node-similarity scores between two directed graphs.

    Iterative similarity measure of Heymans & Singh [1]: an M x N score
    matrix (M = |G2| nodes, N = |G1| nodes) is repeatedly updated from the
    dense adjacency matrices of both graphs and normalized by its Frobenius
    norm, until two successive iterates agree element-wise within ``eps``
    or ``max_iter`` iterations have been performed.

    Args:
        G1: First directed graph (its nodes index the rows of the result).
        G2: Second directed graph (its nodes index the columns of the result).
        max_iter: Maximum number of fixed-point iterations.
        eps: Absolute tolerance used in the convergence test (np.allclose).

    Returns:
        N x M matrix of similarity scores (transpose of the internal M x N
        iterate).

    References
    ----------
    [1] <NAME>., <NAME>. Deriving phylogenetic trees from the similarity
        analysis of metabolic pathways. Bioinformatics, 2003
    """
    N = len(G1.nodes())
    M = len(G2.nodes())
    # Dense adjacency matrices; Ia/Ib are all-ones matrices, so (Ia-A) and
    # (Ib-B) mark the absent edges (complement, self-loops included).
    A = nx.adjacency_matrix(G1).todense()
    B = nx.adjacency_matrix(G2).todense()
    Ia = np.ones((N, N))
    Ib = np.ones((M, M))
    nsim_prev = np.zeros((M, N))
    nsim = np.ones((M, N))
    for i in range(max_iter):
        if np.allclose(nsim, nsim_prev, atol=eps):
            break
        nsim_prev = np.copy(nsim)
        # Update step: terms matching edge/edge and non-edge/non-edge pairs
        # are added, terms for edge/non-edge mismatches are subtracted.
        nsim = \
            np.dot(np.dot(B, nsim_prev), A.T) + \
            np.dot(np.dot(B.T, nsim_prev), A) + \
            np.dot(np.dot((Ib-B), nsim_prev), (Ia-A).T) + \
            np.dot(np.dot((Ib-B).T, nsim_prev), (Ia-A)) - \
            np.dot(np.dot(B, nsim_prev), (Ia-A).T) - \
            np.dot(np.dot(B.T, nsim_prev), (Ia-A)) - \
            np.dot(np.dot((Ib-B), nsim_prev), A.T) - \
            np.dot(np.dot((Ib-B).T, nsim_prev), A)
        # Frobenius normalization keeps the iteration bounded
        fnorm = np.linalg.norm(nsim, ord='fro')
        nsim = nsim / fnorm
    # NOTE(review): this message is printed even when max_iter is reached
    # without convergence, and 'i' is undefined if max_iter == 0 — confirm
    # intended behavior.
    print("Converge after %d iterations (eps=%f)." % (i, eps))
    return nsim.T
if __name__ == '__main__':
    # Demo run on the example graphs from Fig. 1.2 of paper BVD04.
    edges_g1 = [(1, 2), (2, 1), (1, 3), (4, 1), (2, 3), (3, 2), (4, 3)]
    edges_g2 = [(1, 4), (1, 3), (3, 1), (6, 1), (6, 4), (6, 3), (3, 6), (2, 4), (2, 6), (3, 5)]
    G1 = nx.DiGraph()
    G1.add_edges_from(edges_g1)
    G2 = nx.DiGraph()
    G2.add_edges_from(edges_g2)
    # Print the resulting similarity matrix
    print(nsim_hs03(G1, G2))
| [
"numpy.copy",
"numpy.allclose",
"numpy.ones",
"networkx.adjacency_matrix",
"networkx.DiGraph",
"numpy.zeros",
"numpy.dot",
"numpy.linalg.norm",
"typedecorator.params"
] | [((277, 338), 'typedecorator.params', 'params', ([], {'G1': 'nx.DiGraph', 'G2': 'nx.DiGraph', 'max_iter': 'int', 'eps': 'float'}), '(G1=nx.DiGraph, G2=nx.DiGraph, max_iter=int, eps=float)\n', (283, 338), False, 'from typedecorator import params, returns\n'), ((707, 722), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (714, 722), True, 'import numpy as np\n'), ((732, 747), 'numpy.ones', 'np.ones', (['(M, M)'], {}), '((M, M))\n', (739, 747), True, 'import numpy as np\n'), ((765, 781), 'numpy.zeros', 'np.zeros', (['(M, N)'], {}), '((M, N))\n', (773, 781), True, 'import numpy as np\n'), ((793, 808), 'numpy.ones', 'np.ones', (['(M, N)'], {}), '((M, N))\n', (800, 808), True, 'import numpy as np\n'), ((1636, 1648), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1646, 1648), True, 'import networkx as nx\n'), ((1732, 1744), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1742, 1744), True, 'import networkx as nx\n'), ((851, 889), 'numpy.allclose', 'np.allclose', (['nsim', 'nsim_prev'], {'atol': 'eps'}), '(nsim, nsim_prev, atol=eps)\n', (862, 889), True, 'import numpy as np\n'), ((930, 943), 'numpy.copy', 'np.copy', (['nsim'], {}), '(nsim)\n', (937, 943), True, 'import numpy as np\n'), ((1414, 1445), 'numpy.linalg.norm', 'np.linalg.norm', (['nsim'], {'ord': '"""fro"""'}), "(nsim, ord='fro')\n", (1428, 1445), True, 'import numpy as np\n'), ((622, 645), 'networkx.adjacency_matrix', 'nx.adjacency_matrix', (['G1'], {}), '(G1)\n', (641, 645), True, 'import networkx as nx\n'), ((664, 687), 'networkx.adjacency_matrix', 'nx.adjacency_matrix', (['G2'], {}), '(G2)\n', (683, 687), True, 'import networkx as nx\n'), ((1365, 1394), 'numpy.dot', 'np.dot', (['(Ib - B).T', 'nsim_prev'], {}), '((Ib - B).T, nsim_prev)\n', (1371, 1394), True, 'import numpy as np\n'), ((1310, 1335), 'numpy.dot', 'np.dot', (['(Ib - B)', 'nsim_prev'], {}), '(Ib - B, nsim_prev)\n', (1316, 1335), True, 'import numpy as np\n'), ((1255, 1277), 'numpy.dot', 'np.dot', (['B.T', 'nsim_prev'], {}), 
'(B.T, nsim_prev)\n', (1261, 1277), True, 'import numpy as np\n'), ((1200, 1220), 'numpy.dot', 'np.dot', (['B', 'nsim_prev'], {}), '(B, nsim_prev)\n', (1206, 1220), True, 'import numpy as np\n'), ((1140, 1169), 'numpy.dot', 'np.dot', (['(Ib - B).T', 'nsim_prev'], {}), '((Ib - B).T, nsim_prev)\n', (1146, 1169), True, 'import numpy as np\n'), ((1080, 1105), 'numpy.dot', 'np.dot', (['(Ib - B)', 'nsim_prev'], {}), '(Ib - B, nsim_prev)\n', (1086, 1105), True, 'import numpy as np\n'), ((980, 1000), 'numpy.dot', 'np.dot', (['B', 'nsim_prev'], {}), '(B, nsim_prev)\n', (986, 1000), True, 'import numpy as np\n'), ((1030, 1052), 'numpy.dot', 'np.dot', (['B.T', 'nsim_prev'], {}), '(B.T, nsim_prev)\n', (1036, 1052), True, 'import numpy as np\n')] |
"""Handling of GNSS broadcast orbits
Description:
------------
The module includes a class for handling apriori GNSS broadcast orbits.
Example:
from where import apriori
# Get broadcast ephemeris object
    brdc = apriori.get('orbit', rundate=rundate, station=station, system=system, apriori_orbit='broadcast')
# Write calculated Dataset to file
brdc.dset.write()
"""
# Standard library imports
from datetime import timedelta
from typing import Dict, List, Union
# External library imports
import numpy as np
import pandas as pd
# Midgard imports
from midgard.collections import enums
from midgard.dev import plugins
from midgard.files import dependencies
from midgard.math.constant import constant
from midgard.math.unit import Unit
from midgard.parsers import rinex_nav
# Where imports
from where.data import dataset3 as dataset
from where import cleaners
from where.apriori import orbit
from where.lib import config
from where.lib import log
from where.lib import rotation
# Earth's gravitational constant GM per GNSS, looked up by reference frame.
# Keys are RINEX system identifiers: C=BeiDou, E=Galileo, G=GPS, I=IRNSS, J=QZSS.
# Presumably in [m^3/s^2] — actual units are defined by midgard's constant table.
GM = {
    "C": constant.get("GM", source="cgcs2000"),
    "E": constant.get("GM", source="gtrf"),
    "G": constant.get("GM", source="wgs84"),
    "I": constant.get("GM", source="wgs84"),
    "J": constant.get("GM", source="jgs"),
}
# Earth's rotation rate per GNSS (same system identifiers as GM above).
# Presumably in [rad/s] — actual units are defined by midgard's constant table.
OMEGA = {
    "C": constant.get("omega", source="cgcs2000"),
    "E": constant.get("omega", source="gtrf"),
    "G": constant.get("omega", source="wgs84"),
    "I": constant.get("omega", source="wgs84"),
    "J": constant.get("omega", source="jgs"),
}
@plugins.register
class BroadcastOrbit(orbit.AprioriOrbit):
"""A class for representing apriori broadcast orbits
RINEX navigation files can be read and edited. In addition GNSS satellite position, velocities, satellite clock
correction and relativistic clock corrections can be calculated for given observation epochs based on broadcast
ephemeris.
Attributes:
dset (Dataset): Dataset object, which includes satellite position and velocity, satellite clock
correction and relativistic clock correction for each observation epoch
dset_edit (Dataset): Dataset object, which includes edited broadcast ephemeris navigation messages
dset_raw (Dataset): Dataset object, which includes broadcast ephemeris navigation messages read from RINEX
file
file_key (str): Key to the broadcast orbit file defined in files.conf file.
name (str): Apriori orbit name
system (tuple): GNSS system identifiers
Methods:
relativistic_clock_correction(): Determine relativistic clock correction due to orbit eccentricity
satellite_clock_correction(): Determine satellite clock correction
unhealthy_satellites(): Get unhealthy satellites based on RINEX navigation file information
_calculate(): Calculate broadcast ephemeris and satellite clock correction for given observation
epochs
_edit(): Edit RINEX navigation file data and save it in a Dataset
_read(): Read RINEX navigation file data and save it in a Dataset
"""
name = "broadcast"
def __init__(self, rundate, system, station=None, file_key=None, file_path=None, day_offset=1):
"""Set up a new BroadcastOrbit object, does not parse any data
TODO: Remove dependency on rundate, use time to read correct files. (What to do with dataset?)
Args:
rundate (date): Date of model run.
station (str): 4 character station identifier.
system (tuple): List with GNSS system string codes (G, E, R, etc.).
file_key (str): Key to the broadcast orbit file defined in files.conf file.
file_path (pathlib.PosixPath): File path to broadcast orbit file.
day_offset (int): Day offset used to calculate the number of days to read.
"""
super().__init__(rundate=rundate)
self.system = system
self.file_key = "gnss_rinex_nav_{system}" if file_key is None else file_key
self.file_path = file_path
self.day_offset = day_offset
# TODO hjegei: Should it be not enough to 'station' in _dset_raw?
self._dset_raw.vars["station"] = station.lower()
self._dset_raw.vars["STATION"] = self._dset_raw.vars["station"].upper()
self._dset_edit.vars["station"] = station.lower()
self._dset_edit.vars["STATION"] = self._dset_raw.vars["station"].upper()
    def _read(self, dset_raw):
        """Read RINEX navigation file data and save it in a Dataset

        Note that beside the given day also the navigation files from the day before and the day after is read
        (controlled by ``self.day_offset``).

        One RINEX navigation file is normally written for each GNSS. The navigation file extension depends on the GNSS
        (GPS: *.n, Galileo: *.l, GLONASS: *.g, ...). Therefore we have defined for each GNSS the navigation file
        name in the Where configuration file `files.conf`. In addition mixed navigation files exists, which includes
        navigation messages of different GNSS. We use following file keys in `files.conf`:

            =========  ==================
             System     File key
            =========  ==================
             Galileo    gnss_rinex_nav_E
             GLONASS    gnss_rinex_nav_R
             GPS        gnss_rinex_nav_G
             Mixed      gnss_rinex_nav_M
            =========  ==================

        Depending on the configuration options `systems` and `use_mixed_brdc_file` following navigation files are read:

            ======================  ==================  =======================================
             Option                  File key            What kind of navigation file is read?
            ======================  ==================  =======================================
             systems = G             gnss_rinex_nav_G    Only the GPS navigation file
             systems = G E           gnss_rinex_nav_G    GPS and Galileo navigation files
                                     gnss_rinex_nav_E
             use_mixed_brdc_file     gnss_rinex_nav_M    Mixed GNSS navigation file
            ======================  ==================  =======================================

        Args:
            dset_raw (Dataset):  Dataset representing raw data from RINEX navigation file
        """
        # Start reading at rundate - day_offset
        date_to_read = dset_raw.analysis["rundate"] - timedelta(days=self.day_offset)
        use_mixed_brdc_file = config.tech.get("use_mixed_brdc_file", default=False).bool
        # With a mixed file a single 'M' key replaces the per-system file keys
        systems = {"M"} if use_mixed_brdc_file == True else self.system
        file_paths = list()
        # Loop over days to read (rundate - day_offset ... rundate + day_offset)
        while date_to_read <= dset_raw.analysis["rundate"] + timedelta(days=self.day_offset):
            date = date_to_read.strftime("%Y-%m-%d")
            meta = dict()
            for sys in systems:
                # An explicitly given file path overrides the files.conf lookup
                if self.file_path is None:
                    file_path = config.files.path(
                        self.file_key.format(system=sys), file_vars=config.date_vars(date_to_read)
                    )
                else:
                    file_path = self.file_path
                log.debug(f"Parse broadcast orbit file {file_path}")
                # Generate temporary Dataset with orbit file data
                dset_temp = dataset.Dataset(
                    rundate=date_to_read, pipeline=dset_raw.vars["pipeline"], stage="temporary"
                )
                parser = rinex_nav.get_rinex2_or_rinex3(file_path)
                dset_temp.update_from(parser.as_dataset())
                file_paths.append(str(parser.file_path))
                dependencies.add(str(parser.file_path), label=self.file_key.format(system=sys))  # Used for output writing
                # Extend Dataset dset_raw with temporary Dataset
                if dset_raw.num_obs:
                    # Merge meta data information
                    # TODO: Handle meta data information correctly. Meta data information based on different GNSS navigation
                    #       message files has to be merged correctly together. What to do with 'sat_sys' and 'leap_seconds'?
                    if date in dict(dset_raw.meta).keys():
                        for key in ["iono_para", "time_sys_corr"]:
                            dset_temp.meta[key].update(dset_raw.meta[date][key])
                    dset_raw.extend(dset_temp, meta_key=date)
                else:
                    # First file read: nest the parsed meta under the date key
                    # Add date key to meta TODO: Better solution?
                    meta.setdefault(date, dict()).update(dset_temp.meta)
                    for k in dict(dset_temp["meta"]).keys():
                        del dset_temp.meta[k]
                    dset_temp.meta.update(meta)
                    dset_raw.update_from(dset_temp)
            date_to_read += timedelta(days=1)
        dset_raw.meta.add("file_path", file_paths, section="parser")
        # Galileo health flags are derived once per raw dataset
        if "E" in dset_raw.unique("system"):
            self._galileo_signal_health_status()
    def _edit(self, dset_edit: "Dataset") -> "Dataset":
        """Edit RINEX navigation file data and save it in a Dataset

        First the navigation messages are sorted after the satellite and time of transmission. Afterwards duplicated
        navigation messages for a satellite are removed, whereby the first occurrence of the navigation message is
        kept. The satellite navigation epoch (time of clock (toc)) and the IODE navigation number is used for
        indication of a duplicated epoch. The meaning of the IODE navigation number depends on the GNSS:

            - GPS:      Ephemeris issue of data (IODE)
            - Galileo:  Issue of Data of the NAV batch (IODnav)
            - QZSS:     Ephemeris issue of data (IODE)
            - BeiDou:   Age of Data Ephemeris (AODE)
            - IRNSS:    Issue of Data, Ephemeris and Clock (IODEC)

        It can be defined with the configuration option 'navigation_message_type', what kind of navigation message type
        should be used in the analysis. Other navigation message types are removed from the broadcast ephemeris. For
        example for Galileo INAV or FNAV navigation messages can be chosen.

        Args:
            dset_edit (Dataset):  Dataset representing edited data from RINEX navigation file
        """
        # TODO: This does not work. -> Can not find remover ignore_duplicated_navigation_messages().
        # cleaners.removers.ignore_duplicated_navigation_messages(dset_edit) #MURKS
        # Generate Pandas frame for all navigation message entries
        nav = self.dset_raw.as_dataframe()
        # Filter frame
        nav_filtered = nav.sort_values(by=["satellite", "time", "transmission_time"])
        # Remove duplicated navigation message entries (IODEs)
        # TODO: Maybe it should be a configuration option, how to filter duplicated epochs. Keep first and last.
        idx = nav_filtered.duplicated(
            subset=["satellite", "time", "iode", "nav_type", "transmission_time"], keep="first"
        )
        # Duplicates are only collected here for logging purposes
        nav_duplicates = nav_filtered[["satellite", "time", "iode", "nav_type"]][idx]
        with pd.option_context("display.max_rows", None, "display.max_columns", 5):
            log.info(f"Remove {len(nav_duplicates)} duplicated navigation message entries.")
            log.debug(f"List of duplicated navigation messages: \n{nav_duplicates}")
        nav_filtered = nav_filtered.drop_duplicates(
            subset=["satellite", "time", "iode", "nav_type", "transmission_time"], keep="first"
        )
        # Remove navigation message types, which are not needed
        nav_type = config.tech.get("navigation_message_type", default="").dict
        if nav_type:
            for sys, type_ in nav_type.items():
                # Reduce a full system name to its one-letter RINEX identifier
                sys = sys[0] if len(sys) > 0 else sys
                # Overwrite message type definition if only 'INAV' or 'FNAV' is specified
                if type_ == "INAV":
                    type_ = ["INAV_E1", "INAV_E5b", "INAV_E1E5b"]
                elif type_ == "FNAV":
                    type_ = ["FNAV_E5a"]
                # '@sys'/'@type_' reference the local Python variables in the pandas query
                remove_nav_type = nav_filtered.query("system == @sys and nav_type != @type_")
                if not remove_nav_type.empty:
                    log.info(
                        f"Remove {', '.join(set(remove_nav_type.nav_type))!r} navigation messages of GNSS {sys!r}"
                    )
                    # concat + drop_duplicates(keep=False) implements a set difference:
                    # rows appearing in both frames are removed
                    nav_filtered = pd.concat([nav_filtered, remove_nav_type]).drop_duplicates(keep=False)
        if nav_filtered.empty:
            log.fatal(f"No navigation messages available for GNSS: {','.join(self.dset_raw.unique('system'))} ")
        # TODO hjegei: Possible future ...
        # dset_edit.copy_from(self.dset_raw)
        # dset_edit.reorder(nav_filtered.index.values)
        # Convert edited fields to Dataset
        nav_np = nav_filtered.values
        fields = nav_filtered.columns
        dset_edit.vars["orbit"] = self.name
        dset_edit.num_obs = nav_filtered.shape[0]
        dset_edit.meta.update(self.dset_raw.meta)
        for idx, field in enumerate(fields):
            if field.startswith("time_") or field.startswith("transmission_time_") or field.startswith("toe_"):
                continue  # Skip unnecessary fields
            elif field in ["time", "transmission_time", "toe"]:
                dset_edit.add_time(field, val=nav_np[:, idx], scale="gps", fmt="datetime")
            elif field in ["nav_type", "satellite", "system"]:
                dset_edit.add_text(field, val=nav_np[:, idx])
            else:
                # Keep the unit of the raw field, if one was defined by the parser
                unit = self.dset_raw.unit(field)[0] if self.dset_raw.unit(field) else None
                dset_edit.add_float(field, val=nav_np[:, idx].astype(float), unit=unit)
    def _calculate(self, dset_out: "Dataset", dset_in: "Dataset", time: str = "time") -> None:
        """Calculate broadcast ephemeris and satellite clock correction for given observation epochs

        As a first step observations are removed from unavailable satellites, unhealthy satellites and for exceeding
        the validity length of navigation records. The input Dataset contains observation epochs for which the
        broadcast ephemeris and satellite clock correction should be determined.

        Args:
            dset_out: Output Dataset representing calculated broadcast ephemeris with following fields:

                ========================  ===============  =======  ========================================================
                 Field                     Type             Unit     Description
                ========================  ===============  =======  ========================================================
                 gnss_satellite_clock      numpy.ndarray     m       Satellite clock correction
                 gnss_relativistic_clock   numpy.ndarray     m       Relativistic clock correction due to orbit eccentricity
                 sat_posvel                PosVelTable       m       Satellite position and velocity
                 satellite                 numpy.ndarray             Satellite numbers
                 system                    numpy.ndarray             GNSS identifiers
                 time                      TimeTable                 Observation epochs
                 used_iode                 numpy.ndarray             IODE of selected broadcast ephemeris block
                 used_transmission_time    TimeTable                 Transmission time of selected broadcast ephemeris block
                 used_toe                  TimeTable                 Time of ephemeris (TOE) of selected broadcast ephemeris
                                                                     block
                ========================  ===============  =======  ========================================================

            dset_in:  Input Dataset containing model data for which broadcast ephemeris should be determined.
            time:     Define time fields to be used. It can be for example 'time' or 'sat_time'. 'time' is related to
                      observation time and 'sat_time' to satellite transmission time.
        """
        # Clean orbits by removing unavailable satellites, unhealthy satellites and checking validity length of
        # navigation records
        cleaners.apply_remover("gnss_clean_orbit", dset_in, orbit_flag="broadcast")
        not_implemented_sys = set(dset_in.system) - set("EG")
        if not_implemented_sys:
            log.warn(
                f"At the moment Where can provide broadcast ephemeris for GNSS 'E' and 'G', "
                f"but not for {', '.join(not_implemented_sys)}."
            )
            cleaners.apply_remover("gnss_ignore_system", dset_in, systems=not_implemented_sys)
        log.info(
            f"Calculating satellite position/velocity (broadcast) based on RINEX navigation file "
            f"{', '.join(self.dset_edit.meta['parser']['file_path'])}"
        )
        # Get correct navigation block for given observations times by determining the indices to broadcast ephemeris
        # Dataset
        dset_brdc_idx = self._get_brdc_block_idx(dset_in, time=time)
        # Loop over all observations
        # TODO: Generation of vectorized solution, if possible?
        # BUG: Use of GPSSEC does not work for GPS WEEK crossovers. MJD * Unit.day2second() would a better solution.
        #      The problem is that use of GPSSEC compared to MJD * Unit.day2second() is not consistent!!!!
        sat_pos = np.zeros((dset_in.num_obs, 3))
        sat_vel = np.zeros((dset_in.num_obs, 3))
        for obs_idx, (time_gpsweek, time_gpssec, brdc_idx, sys) in enumerate(
            # zip(dset_in[time].gps.gpsweek, dset_in[time].gps.gpssec, dset_brdc_idx, dset_in.system)
            zip(dset_in[time].gps.gps_ws.week, dset_in[time].gps.gps_ws.seconds, dset_brdc_idx, dset_in.system)
        ):
            # TODO: get_row() function needed for brdc -> brdc.get_row(kk)
            sat_pos[obs_idx], sat_vel[obs_idx] = self._get_satellite_position_velocity(
                time_gpsweek, time_gpssec, brdc_idx, sys
            )
            # +DEBUG
            # print("DEBUG: {} obs_idx: {:>5d} brdc_idx: {:>5d} toc: {:>5.0f} {:>6.0f} toe: {:>6.0f} trans_time: {:>6.0f}"
            #       " tk: {:>16.10f} iode: {:>3d} sqrt_a: {:>17.10f} sat_pos: {:>21.10f} {:>21.10f} {:>21.10f} "
            #       "sat_vel: {:>17.10f} {:>17.10f} {:>17.10f} sat_clk_bias: {:>17.10f}, sat_clk_drft: {:>17.10f} "
            #       ''.format(self.dset_edit.satellite[brdc_idx], obs_idx, brdc_idx,
            #                 dset_in[time].gps.jd_frac[obs_idx] * 86400,
            #                 dset_in[time].gps.gps_ws.seconds[obs_idx],
            #                 self.dset_edit.toe.gps.gps_ws.seconds[brdc_idx],
            #                 self.dset_edit.transmission_time.gps.gps_ws.seconds[brdc_idx],
            #                 dset_in[time].gps.jd_frac[obs_idx]-self.dset_edit.toe.gps.gps_ws.seconds[brdc_idx],
            #                 int(self.dset_edit.iode[brdc_idx]),
            #                 self.dset_edit.sqrt_a[brdc_idx],
            #                 sat_pos[obs_idx][0], sat_pos[obs_idx][1], sat_pos[obs_idx][2],
            #                 sat_vel[obs_idx][0], sat_vel[obs_idx][1], sat_vel[obs_idx][2],
            #                 self.dset_edit.sat_clock_bias[brdc_idx],
            #                 self.dset_edit.sat_clock_drift[brdc_idx],)
            # )
            # -DEBUG
        # Copy fields from model data Dataset
        dset_out.num_obs = dset_in.num_obs
        dset_out.add_text("satellite", val=dset_in.satellite)
        dset_out.add_text("system", val=dset_in.system)
        dset_out.add_time("time", val=dset_in[time])
        dset_out.vars["orbit"] = self.name
        # Add time field
        # MURKS, TODO: How it works to initialize time field with a Time object?
        dset_out.add_time("used_transmission_time", val=self.dset_edit.transmission_time[dset_brdc_idx])
        dset_out.add_time("used_toe", val=self.dset_edit.toe[dset_brdc_idx])
        # Add float fields (group delays, only those available in the navigation file)
        for field in ["bgd_e1_e5a", "bgd_e1_e5b", "tgd", "tgd_b1_b3", "tgd_b2_b3"]:
            if field in self.dset_edit.fields:
                dset_out.add_float(field, val=self.dset_edit[field][dset_brdc_idx])
        dset_out.add_float(
            "gnss_relativistic_clock", val=self.relativistic_clock_correction(sat_pos, sat_vel), unit="meter"
        )
        dset_out.add_float(
            "gnss_satellite_clock", val=self.satellite_clock_correction(dset_in, time=time), unit="meter"
        )
        dset_out.add_float("used_iode", val=self.dset_edit.iode[dset_brdc_idx])
        # Add satellite position and velocity to Dataset (stacked as one 6-column array)
        dset_out.add_posvel("sat_posvel", time=dset_out.time, system="trs", val=np.hstack((sat_pos, sat_vel)))
        # +DEBUG
        # for num_obs  in range(0, dset_out.num_obs):
        #    print('DEBUG: ', dset_out.satellite[num_obs],
        #                     dset_out.time.gps.datetime[num_obs],
        #                     dset_out.time.gps.mjd_frac[num_obs]*24*3600,
        #                     dset_out.gnss_satellite_clock[num_obs],
        #                     dset_out.gnss_relativistic_clock[num_obs])
        # -DEBUG
def get_ephemeris_field(self, field, dset: "Dataset", time: str = "time") -> np.ndarray:
"""Get given ephemeris field values for belonging navigation records
Args:
field: Field name of navigation record.
dset: A Dataset containing model data.
time: Define time fields to be used. It can be for example 'time' or 'sat_time'. 'time' is related to
observation time and 'sat_time' to satellite transmission time.
Returns:
Values for given field
"""
# Get correct navigation block for given observations times by determining the indices to broadcast ephemeris
# Dataset
dset_brdc_idx = self._get_brdc_block_idx(dset)
return self.dset_edit[field][dset_brdc_idx]
def satellite_clock_correction(self, dset: "Dataset", time: str = "time") -> np.ndarray:
"""Determine satellite clock correction
The satellite clock correction is based on Section 20.3.3.3.3.1 in :cite:`is-gps-200h`.
Args:
dset: A Dataset containing model data.
time: Define time fields to be used. It can be for example 'time' or 'sat_time'. 'time' is related to
observation time and 'sat_time' to satellite transmission time.
Returns:
GNSS satellite clock corrections for each observation in [m] (Note: without relativistic orbit eccentricity
correction)
"""
# Get correct navigation block for given observations times by determining the indices to broadcast ephemeris
# Dataset
dset_brdc_idx = self._get_brdc_block_idx(dset)
# Elapsed time referred to clock data reference epoch toc in [s]
# BUG: Use of GPSSEC does not work for GPS WEEK crossovers. MJD * Unit.day2second() would a better solution. The
# problem is that use of GPSSEC compared to MJD * Unit.day2second() is not consistent!!!!
gpsweek_diff = (
dset[time].gps.gps_ws.week - self._add_dim(self.dset_edit.time.gps.gps_ws.week)[dset_brdc_idx]
) * Unit.week2second
tk = dset[time].gps.gps_ws.seconds - self._add_dim(self.dset_edit.time.gps.gps_ws.seconds)[dset_brdc_idx] + gpsweek_diff
return (
self.dset_edit.sat_clock_bias[dset_brdc_idx]
+ self.dset_edit.sat_clock_drift[dset_brdc_idx] * tk
+ self.dset_edit.sat_clock_drift_rate[dset_brdc_idx] * tk ** 2
) * constant.c
def satellite_clock_rate_correction(self, dset: "Dataset", time: str = "time") -> np.ndarray:
"""Determine satellite clock rate correction
The satellite clock rate correction is based on Section 6.2.6.2 in :cite:`zhang2007`.
Args:
dset: A Dataset containing model data.
time: Define time fields to be used. It can be for example 'time' or 'sat_time'. 'time' is related to
observation time and 'sat_time' to satellite transmission time.
Returns:
GNSS satellite clock rate corrections for each observation in [m]
"""
# Get correct navigation block for given observations times by determining the indices to broadcast ephemeris
# Dataset
dset_brdc_idx = self._get_brdc_block_idx(dset)
# Elapsed time referred to clock data reference epoch toc in [s]
# BUG: Use of GPSSEC does not work for GPS WEEK crossovers. MJD * Unit.day2second() would a better solution. The
# problem is that use of GPSSEC compared to MJD * Unit.day2second() is not consistent!!!!
gpsweek_diff = (
dset[time].gps.gps_ws.week - self.dset_edit.time.gps.gps_ws.week[dset_brdc_idx]
) * Unit.week2second
tk = dset[time].gps.gps_ws.seconds - self.dset_edit.time.gps.gps_ws.seconds[dset_brdc_idx] + gpsweek_diff
return (
self.dset_edit.sat_clock_drift[dset_brdc_idx]
+ 2 * self.dset_edit.sat_clock_drift_rate[dset_brdc_idx] * tk
) * constant.c
def add_satellite_clock_parameter(self, dset: "Dataset", time: str = "time") -> np.ndarray:
"""Add satellite clock parameters to dataset
Args:
dset: A Dataset containing model data.
time: Define time fields to be used. It can be for example 'time' or 'sat_time'. 'time' is related to
observation time and 'sat_time' to satellite transmission time.
"""
# Get correct navigation block for given observations times by determining the indices to broadcast ephemeris
# Dataset
dset_brdc_idx = self._get_brdc_block_idx(dset, time=time)
# Add satellite clock parameters to dataset
dset.add_float("sat_clock_bias", val=self.dset_edit.sat_clock_bias[dset_brdc_idx], write_level="analysis")
dset.add_float("sat_clock_drift", val=self.dset_edit.sat_clock_drift[dset_brdc_idx], write_level="analysis")
dset.add_float(
"sat_clock_drift_rate",
val=self.dset_edit.sat_clock_drift_rate[dset_brdc_idx],
write_level="analysis",
)
def _galileo_signal_health_status(self):
"""Determine Galileo signal health status and add sis_status_<signal> fields to Dataset
The determination of the signal health status is defined for Galileo in Section 2.3 of :cite:`galileo-os-sdd`.
The signal health status is indicated by different numbers:
| Number | Status | Description |
|--------|-----------------|-----------------------------------------------------------|
| 0 | Healthy | |
| 1 | Marginal (SISA) | Marginal status based on signal-in-space accuracy (SISA). |
| 2 | Marginal (DVS) | Marginal status based on data validity status. |
| 3 | Unhealthy | |
Args:
dset: A Dataset containing model data.
"""
data = dict()
idx = self.dset_raw.filter(system="E")
for signal in ["e1", "e5a", "e5b"]:
field = "sis_status_" + signal
data = np.zeros(self.dset_raw.num_obs)
data_tmp = np.zeros(len(self.dset_raw.system[idx]))
# Check Signal-in-space Accuracy (SISA)
idx_obs = np.logical_or(self.dset_raw.sv_accuracy[idx] < 0, self.dset_raw.sv_accuracy[idx] >= 126)
data_tmp[idx_obs] = 1
# Check Data Validity Status (DVS)
idx_obs = self.dset_raw["dvs_" + signal][idx] == True
data_tmp[idx_obs] = 2
# Check Signal Health Status (SHS)
idx_obs = self.dset_raw["shs_" + signal][idx] == True
data_tmp[idx_obs] = 3
data[idx] = data_tmp
self.dset_raw.add_float(field, val=data)
    def signal_health_status(self, dset: "Dataset", frequencies: Union[None, List] = None) -> np.ndarray:
        """Determine signal health status

        How the signal health status has to be handled depends on GNSS, used observation type and navigation message
        type. The determination of the signal health status is defined for:

            | System   | Type  | Section       | Reference                          |
            |----------|-------|---------------|------------------------------------|
            | GPS      | LNAV  | 20.3.3.3.1.4  | :cite:`is-gps-200h`                |
            |          | CNAV  | 30.3.3.1.1.2  | :cite:`is-gps-200h`                |
            | Galileo  | all   | 2.3           | :cite:`galileo-os-sdd`             |

        The signal health status is indicated by different numbers:

            | Number | Status          | Description                                                                     |
            |--------|-----------------|---------------------------------------------------------------------------------|
            |      0 | Healthy         |                                                                                 |
            |      1 | Marginal (SISA) | Defined only for Galileo and describes marginal status based on signal-in-space |
            |        |                 | accuracy (SISA)                                                                 |
            |      2 | Marginal (DVS)  | Defined only for Galileo and describes marginal status based on data validity   |
            |        |                 | status.                                                                         |
            |      3 | Unhealthy       |                                                                                 |

        TODO: Add handling of BeiDou, QZSS and IRNSS signal health status.

        NOTE(review): the result array is created with dtype=bool, so the status numbers 1/2/3 described
        above are collapsed to True — confirm whether callers rely on the boolean behavior or on the
        numeric statuses.

        Args:
            dset:        A Dataset containing model data.
            frequencies: Optional list of Galileo frequency names overriding the frequencies derived
                         from configuration and observation types.

        Returns:
            Array with GNSS signal health status for each observation
        """
        signal_health_status = np.zeros(dset.num_obs, dtype=bool)
        nav_type = config.tech.get("navigation_message_type", default="").dict
        # Get correct navigation block for given observations times by determining the indices to broadcast ephemeris
        # Dataset
        dset_brdc_idx = self._get_brdc_block_idx(dset)
        for sys in dset.unique("system"):
            idx = dset.filter(system=sys)
            data_tmp = np.zeros(len(dset.system[idx]), dtype=bool)
            # TODO: If several editors are used can it happen, that GNSS is not given in dset.meta["obstypes"], but
            #       observations still existing.
            if "obstypes" in dset.meta.keys():
                if sys not in dset.meta["obstypes"].keys():
                    continue
            if sys in ["C", "G", "I", "J", "R"]:
                # Any positive SV health flag marks the record as unhealthy
                idx_obs = self.dset_edit.sv_health[dset_brdc_idx][idx] > 0
                data_tmp[idx_obs] = 3
            elif sys == "E":
                # Get frequencies by keeping a selection of E1 (1), E5a (5) and E5b (7) frequencies
                # TODO: It has to be checked, if following selection of frequencies is correct?
                if frequencies is None:
                    if nav_type["E"] == "FNAV":
                        frequencies = ["E5a"]
                    elif nav_type["E"] == "INAV":
                        keep_freq = {"1", "7"}
                        if dset.vars["pipeline"] == "sisre":
                            frequencies = config.tech.frequencies.dict["E"].split("_")
                        elif dset.vars["pipeline"] == "gnss":
                            freq_numbers = (
                                set([t[1] for t in dset.meta["obstypes"]["E"]]) & keep_freq
                            )  # 1: E1, 5: E5a, 7: E5b
                            frequencies = [enums.gnss_num2freq_E["f" + num] for num in freq_numbers]
                        else:
                            frequencies = ["E1", "E5b"]
                # NOTE(review): data_tmp is overwritten on each pass, so only the status of the last
                # frequency in 'frequencies' takes effect — confirm whether the per-frequency statuses
                # should be combined instead.
                for freq in frequencies:
                    data_tmp = self.dset_edit["sis_status_" + freq.lower()][dset_brdc_idx][idx]
            else:
                log.fatal(f"GNSS '{sys}' is not defined.")
            signal_health_status[idx] = data_tmp
        # +DEBUG
        ## Add signal health status to dataset
        # if "signal_health_status" in dset.fields:
        #    dset.signal_health_status[:] = signal_health_status
        # else:
        #    dset.add_float("signal_health_status", val=signal_health_status, write_level="analysis"))
        # -DEBUG
        return signal_health_status
def total_group_delay(self, dset: "Dataset") -> np.ndarray:
"""Determine total group delay
What kind of total group delay has to be applied depends on GNSS, used observation type and navigation message
type. The determination of the total group delay is defined for GPS in Section 20.3.3.3.3.2 in
:cite:`is-gps-200h` and for Galileo in Section 5.1.5 in :cite:`galileo-os-sis-icd`.
TODO: Add handling of BeiDou, QZSS and IRNSS TGDs.
Args:
dset: A Dataset containing model data.
Returns:
GNSS total group delay for each observation in [m]
"""
# Get correct navigation block for given observations times by determining the indices to broadcast ephemeris
# Dataset
dset_brdc_idx = self._get_brdc_block_idx(dset)
for sys, obstype in dset.meta["obstypes"].items():
idx = dset.filter(system=sys)
if len(obstype) != 1:
log.warn("Total group delay can only be applied for single-frequency solutions.")
return np.zeros(dset.num_obs)
freq = obstype[0][1]
if sys == "G":
f_L1 = enums.gnss_freq_G.L1
f_L2 = enums.gnss_freq_G.L2
if freq == "1":
tgd = self.dset_edit.tgd[dset_brdc_idx][idx] * constant.c
elif freq == "2":
tgd = f_L1 ** 2 / f_L2 ** 2 * self.dset_edit.tgd[dset_brdc_idx][idx] * constant.c
elif sys == "E":
nav_type = config.tech.navigation_message_type.dict["E"]
f_E1 = enums.gnss_freq_E.E1
f_E5a = enums.gnss_freq_E.E5a
f_E5b = enums.gnss_freq_E.E5b
if nav_type.startswith("FNAV"):
if freq == "1":
tgd = self.dset_edit.bgd_e1_e5a[dset_brdc_idx][idx] * constant.c
elif freq == "5":
tgd = f_E1 ** 2 / f_E5a ** 2 * self.dset_edit.bgd_e1_e5a[dset_brdc_idx][idx] * constant.c
elif nav_type.startswith("INAV"):
if freq == "1":
tgd = self.dset_edit.bgd_e1_e5b[dset_brdc_idx][idx] * constant.c
elif freq == "7":
tgd = f_E1 ** 2 / f_E5b ** 2 * self.dset_edit.bgd_e1_e5b[dset_brdc_idx][idx] * constant.c
return tgd
def _add_dim(self, array: np.ndarray) -> np.ndarray:
"""Add dimension to 0 dimensional array
If dimension of Numpy array is zero, then a dimension is added.
Args:
array: Numpy array with 0 or higher dimension
Returns:
Numpy array with at least 1 dimension
"""
if type(array) == np.ndarray: # Handling of numpy arrays
output = np.expand_dims(array, axis=0) if array.ndim == 0 else array
else: # Handling of scalar values
output = np.expand_dims(array, axis=0)
return output
def _get_brdc_block_idx(self, dset: "Dataset", time: str = "time") -> List[int]:
    """Get GNSS broadcast ephemeris block indices for given observation epochs

    The indices relate the observation epoch to the correct set of broadcast ephemeris. First the time difference
    between the observation epoch and a selected time is calculated to determine the correct broadcast ephemeris
    block. The selected time can be either the navigation epoch (time of clock (TOC)), the time of ephemeris (TOE)
    or the transmission time. Afterwards the broadcast block is selected with the smallest time difference.
    Following option can be chosen for configuration file option 'brdc_block_nearest_to':

    ============================== ================================================================================
     Option                         Description
    ============================== ================================================================================
     toc                            Broadcast block for given observation epoch is selected nearest to navigation
                                    epoch (time of clock (TOC)).
     toc:positive                   Same as 'toc' option, but the difference between observation epoch and TOC has
                                    to be positive.
     toe                            Broadcast block for given observation epoch is selected nearest to time of
                                    ephemeris (TOE).
     toe:positive                   Same as 'toe' option, but the difference between observation epoch and TOE has
                                    to be positive.
     transmission_time              Broadcast block for given observation epoch is selected nearest to transmission
                                    time.
     transmission_time:positive     Same as 'transmission_time' option, but the difference between observation
                                    epoch and transmission time has to be positive.
    ============================== ================================================================================

    Args:
        dset:   A Dataset containing model data.
        time:   Define time fields to be used. It can be for example 'time' or 'sat_time'. 'time' is related to
                observation time and 'sat_time' to satellite transmission time.

    Returns:
        Broadcast ephemeris block indices for given observation epochs.
    """
    brdc_block_nearest_to_options = [
        "toc",
        "toc:positive",
        "toe",
        "toe:positive",
        "transmission_time",
        "transmission_time:positive",
    ]
    brdc_idx = list()

    # Get configuration option, split into (time key, optional 'positive' flag)
    brdc_block_nearest_to = config.tech.get("brdc_block_nearest_to", default="toe:positive").str.rsplit(":", 1)
    if ":".join(brdc_block_nearest_to) not in brdc_block_nearest_to_options:
        log.fatal(
            f"Unknown value {':'.join(brdc_block_nearest_to)!r} for configuration option 'brdc_block_nearest_to'. "
            f"The following values can be selected: {', '.join(brdc_block_nearest_to_options)}"
        )
    time_key = brdc_block_nearest_to[0]
    positive = True if "positive" in brdc_block_nearest_to else False
    log.debug(f"Broadcast block is selected nearest to '{'+' if positive else '+/-'}{time_key}' time.")

    # Time of clock (TOC) is 'time' field
    if time_key == "toc":
        time_key = "time"

    # Check if broadcast orbits are available; observations of missing satellites are removed from 'dset'
    not_available_sat = sorted(set(dset.satellite) - set(self.dset_edit.satellite))
    if not_available_sat:
        log.warn(
            f"The following satellites are not given in apriori broadcast orbit file "
            f"{', '.join(self.dset_edit.meta['parser']['file_path'])}: {', '.join(not_available_sat)}"
        )
        cleaners.apply_remover("ignore_satellite", dset, satellites=not_available_sat)

    # Determine broadcast ephemeris block index for a given satellite and observation epoch
    for sat, obs_epoch in zip(dset.satellite, self._add_dim(dset[time].gps.mjd)):
        idx = self.dset_edit.filter(satellite=sat)
        # Time difference in [days] between observation epoch and the selected broadcast time field
        diff = obs_epoch - self._add_dim(self.dset_edit[time_key].gps.mjd)[idx]
        if positive:
            # Penalize negative differences so only broadcast epochs at/before the observation are chosen
            nearest_idx = np.array([99999 if v < 0 else v for v in diff]).argmin()
        else:
            nearest_idx = np.array([abs(diff)]).argmin()
        # Map the index within the satellite-filtered subset back to the full broadcast dataset index
        brdc_idx.append(idx.nonzero()[0][nearest_idx])

    return brdc_idx
def _get_corrected_broadcast_ephemeris(
    self, t_sat_gpsweek: float, t_sat_gpssec: float, idx: int, sys: str
) -> Dict[str, float]:
    """Apply correction for broadcast ephemeris for given time tk

    Following equations are based on Table 20-IV. in :cite:`is-gps-200h`.

    Args:
        t_sat_gpsweek (float):  GPS week of satellite transmission time.
        t_sat_gpssec (float):   GPS seconds of satellite transmission.
        idx (int):              Index for broadcast ephemeris dataset valid for observation time of receiver
        sys (str):              GNSS identifier

    Returns:
        dict: Selected and prepared broadcast ephemeris dictionary with following entries:

        =============== ====== =============================================================
         Keys            Unit   Description
        =============== ====== =============================================================
         a               m      Semimajor axis
         E               rad    Eccentric anomaly
         i               rad    Inclination
         lambda_         rad    Instantaneous Greenwich longitude of the ascending node
         n               rad/s  Corrected mean motion
         r               m      Orbit radius
         tk              s      Elapsed time referred to ephemeris reference epoch
         u               rad    Argument of latitude
         vega            rad    True anomaly
        =============== ====== =============================================================
    """
    # BUG: Use of GPSSEC does not work for GPS WEEK crossovers. MJD * Unit.day2second() would be a better
    # solution. The problem is that use of GPSSEC compared to MJD * Unit.day2second() is not consistent!!!!
    gpsweek_diff = (t_sat_gpsweek - self._add_dim(self.dset_edit.toe.gps.gps_ws.week)[idx]) * Unit.week2second
    toe = self._add_dim(self.dset_edit.toe.gps.gps_ws.seconds)[idx]  # Ephemeris reference epoch in [s]
    tk = t_sat_gpssec - toe + gpsweek_diff  # Elapsed time referred to ephemeris reference epoch in [s]

    # Determine corrected Keplerian elements
    e = self.dset_edit.e[idx]  # Eccentricity of the orbit
    a = self.dset_edit.sqrt_a[idx] ** 2  # Semimajor axis in [m]
    omega = self.dset_edit.omega[idx]  # Argument of perigee in [rad]
    n0 = np.sqrt(GM[sys] / a ** 3)  # Mean motion of Keplerian orbit in [rad/s]
    n = n0 + self.dset_edit.delta_n[idx]  # Corrected mean motion in [rad/s]
    M = self.dset_edit.m0[idx] + n * tk  # Mean anomaly in [rad]
    E = self._get_eccentric_anomaly(M, e)  # Eccentric anomaly in [rad] (Newton iteration)
    vega = np.arctan2(np.sqrt(1.0 - e ** 2) * np.sin(E), np.cos(E) - e)  # True anomaly in [rad]
    u0 = omega + vega  # Initial argument of latitude
    lambda_ = self.dset_edit.Omega[idx] + (self.dset_edit.Omega_dot[idx] - OMEGA[sys]) * tk - OMEGA[sys] * toe
    # Instantaneous Greenwich longitude of the ascending node in [rad]

    # Determine argument of latitude, orbit radius and inclination
    # (harmonic corrections with the broadcast cus/cuc, crs/crc, cis/cic coefficients)
    sin2u0 = np.sin(2 * u0)
    cos2u0 = np.cos(2 * u0)
    du = self.dset_edit.cus[idx] * sin2u0 + self.dset_edit.cuc[idx] * cos2u0
    dr = self.dset_edit.crs[idx] * sin2u0 + self.dset_edit.crc[idx] * cos2u0
    di = self.dset_edit.cis[idx] * sin2u0 + self.dset_edit.cic[idx] * cos2u0
    u = u0 + du  # Argument of latitude in [rad]
    r = a * (1.0 - e * np.cos(E)) + dr  # Orbit radius in [m]
    i = self.dset_edit.i0[idx] + self.dset_edit.idot[idx] * tk + di  # Inclination in [rad]

    return {"a": a, "E": E, "i": i, "lambda_": lambda_, "n": n, "r": r, "tk": tk, "u": u, "vega": vega}
@staticmethod
def _get_eccentric_anomaly(M, e):
r""" Computes the eccentric anomaly for elliptic orbits
Newton's method used for determination of eccentric anomaly E as shown in Eq. 2.42 in :cite:`montenbruck2012`.
Args:
M (float): Mean anomaly in [rad].
e (float): Eccentricity of the orbit in the interval [0, 1].
Returns:
Float: Eccentric anomaly in [rad].
Example:
>>> _get_eccentric_anomaly(0.00817235075205, 0.00223578442819)
0.0081906631043202408
"""
num_iter = 0
f = 1
max_iter = 10
limit = 10e-14 # TODO: Should limit be related to machine accuracy np.finfo(float)?
# For small eccentriciy (e < 0.8) is it enough to start with E = M. For high eccentric orbits (e > 0.8) the
# iteration should start with E = PI to avoid convergence problems during the iteration (see p. 24 in [1]).
if e < 0.8:
E = M
else:
E = np.pi
while np.fabs(f) > limit:
f = E - e * np.sin(E) - M
E = E - f / (1.0 - e * np.cos(E))
num_iter += 1
if num_iter == max_iter:
log.fatal(f"Convergence problem by determination of eccentric anomaly (max_iter = {max_iter})")
return E
def _get_satellite_position_vector(self, bdict):
    """Determine satellite position vector in Earth centered Earth fixed (ECEF) coordinate system.

    Following equations are based on Table 20-IV. in :cite:`is-gps-200h`. The broadcast ephemeris
    dictionary ``bdict`` has to provide the entries 'r' (orbit radius in [m]), 'u' (argument of latitude
    in [rad]), 'lambda_' (instantaneous Greenwich longitude of the ascending node in [rad]) and 'i'
    (inclination in [rad]) as prepared by ``_get_corrected_broadcast_ephemeris``.

    Args:
        bdict (dict):  Selected and prepared broadcast ephemeris dictionary.

    Returns:
        None. Following entries are added to the broadcast ephemeris dictionary:

        =============== ====== ==================================================================================
         Keys            Unit   Description
        =============== ====== ==================================================================================
         r_ecef          m      3-dimensional numpy array with satellite position in Earth centered Earth fixed
                                (ECEF) coordinate system
         r_orb           m      3-dimensional numpy array with satellite position in orbital coordinate system
        =============== ====== ==================================================================================
    """
    # TODO: What should be done for GEO satellites (e.g. see in gLAB function getPositionBRDC() in model.c)?
    radius = bdict["r"]
    arg_lat = bdict["u"]

    # Transformation from spherical to cartesian orbital coordinate system
    r_orb = np.array([radius * np.cos(arg_lat), radius * np.sin(arg_lat), 0.0])

    # Transformation from cartesian orbital to Earth centered Earth fixed (ECEF) geocentric equatorial
    # coordinate system
    r_ecef = rotation.R3(-bdict["lambda_"]).dot(rotation.R1(-bdict["i"])).dot(r_orb.T)

    bdict.update({"r_ecef": r_ecef, "r_orb": r_orb})
def _get_satellite_position_velocity(self, t_sat_gpsweek, t_sat_gpssec, idx, sys):
    """Determine satellite position and velocity vector based on broadcast ephemeris

    Position and velocity are determined for the given observation epoch (satellite transmission time)
    in the Earth-centered, Earth-fixed (ECEF) coordinate system.

    Args:
        t_sat_gpsweek (float):  GPS week of satellite transmission time.
        t_sat_gpssec (float):   GPS seconds of satellite transmission.
        idx (int):              Index for broadcast ephemeris dataset valid for observation time of receiver.
        sys (str):              GNSS identifier

    Returns:
        tuple: Satellite position vector and satellite velocity vector in the ECEF coordinate system
        in [m] and [m/s].
    """
    # TODO: get_row() from dataset based on idx

    # Correct the broadcast ephemeris for the given transmission epoch
    ephemeris = self._get_corrected_broadcast_ephemeris(t_sat_gpsweek, t_sat_gpssec, idx, sys)

    # The helpers below add the 'r_ecef' and 'v_ecef' entries to the ephemeris dictionary in place
    self._get_satellite_position_vector(ephemeris)
    self._get_satellite_velocity_vector(idx, ephemeris, sys)

    return ephemeris["r_ecef"], ephemeris["v_ecef"]
def _get_satellite_velocity_vector(self, idx, bdict, sys):
    """Determine satellite velocity vector in Earth centered Earth fixed (ECEF) coordinate system.

    Following equations are described in :cite:`remondi2004`.

    Args:
        idx (int):     Index for broadcast ephemeris dataset valid for observation time of receiver.
        bdict (dict):  Selected and prepared broadcast ephemeris dictionary with following entries

        =============== ====== ==================================================================================
         Keys            Unit   Description
        =============== ====== ==================================================================================
         a               m      Semimajor axis
         E               rad    Eccentric anomaly
         i               rad    Inclination
         lambda_         rad    Instantaneous Greenwich longitude of the ascending node
         n               rad/s  Corrected mean motion
         r               m      Orbit radius
         r_orb           m      3-dimensional numpy array with satellite position vector in orbital coordinate
                                system
         r_ecef          m      3-dimensional numpy array with satellite position vector in Earth centered
                                Earth-fixed coordinate system
         tk              s      Elapsed time referred to ephemeris reference epoch
         u               rad    Argument of latitude
         vega            rad    True anomaly
        =============== ====== ==================================================================================

    Returns:
        dict: Following entries are added to broadcast ephemeris dictionary

        =============== ====== ==================================================================================
         Keys            Unit   Description
        =============== ====== ==================================================================================
         v_ecef          m      3-dimensional numpy array with satellite velocity vector in Earth centered
                                Earth-fixed coordinate system
         v_orb           m      3-dimensional numpy array with satellite velocity vector in orbital coordinate
                                system
        =============== ====== ==================================================================================
    """
    # Determine time derivatives of Keplerian elements
    M_dot = bdict["n"]  # Time derivative of mean anomaly in [rad/s]
    E_dot = M_dot / (
        1.0 - self.dset_edit.e[idx] * np.cos(bdict["E"])
    )  # Time derivative of eccentric anomaly in [rad/s]
    v_dot = (
        (1.0 + self.dset_edit.e[idx] * np.cos(bdict["vega"]))
        * E_dot
        * np.sin(bdict["E"])
        / ((1.0 - self.dset_edit.e[idx] * np.cos(bdict["E"])) * np.sin(bdict["vega"]))
    )
    # Time derivative of true anomaly in [rad/s]
    u0_dot = v_dot  # Time derivative of initial argument of latitude in [rad/s]
    lambda_dot = self.dset_edit.Omega_dot[idx] - OMEGA[sys]  # Time derivative of instantaneous Greenwich longitude
    # of the ascending node in [rad]

    # Determine time derivatives of argument of latitude, orbit radius and inclination
    # (derivatives of the harmonic corrections with the broadcast cus/cuc, crs/crc, cis/cic coefficients)
    sin2u = np.sin(2 * bdict["u"])
    cos2u = np.cos(2 * bdict["u"])
    du_dot = 2 * u0_dot * (self.dset_edit.cus[idx] * cos2u - self.dset_edit.cuc[idx] * sin2u)
    dr_dot = 2 * u0_dot * (self.dset_edit.crs[idx] * cos2u - self.dset_edit.crc[idx] * sin2u)
    di_dot = 2 * u0_dot * (self.dset_edit.cis[idx] * cos2u - self.dset_edit.cic[idx] * sin2u)
    u_dot = u0_dot + du_dot  # Time derivative of argument of latitude in [rad/s]
    r_dot = bdict["a"] * self.dset_edit.e[idx] * E_dot * np.sin(bdict["E"]) + dr_dot
    # Time derivative of orbit radius in [m/s]
    i_dot = self.dset_edit.idot[idx] + di_dot  # Time derivative of inclination in [rad/s]

    # Determine satellite velocity vector in the orbital plane coordinate system
    v_orb = np.array(
        [
            r_dot * np.cos(bdict["u"]) - bdict["r"] * u_dot * np.sin(bdict["u"]),
            r_dot * np.sin(bdict["u"]) + bdict["r"] * u_dot * np.cos(bdict["u"]),
            0.0,
        ]
    )

    # Satellite velocity vector in Earth centered Earth-fixed coordinate system: rotation of the orbital
    # velocity into ECEF including the terms for the rotating ascending node ('lambda_dot') and the
    # changing inclination ('i_dot')
    x_dot = (
        v_orb[0] * np.cos(bdict["lambda_"])
        - v_orb[1] * np.cos(bdict["i"]) * np.sin(bdict["lambda_"])
        + bdict["r_orb"][1] * i_dot * np.sin(bdict["i"]) * np.sin(bdict["lambda_"])
        - bdict["r_ecef"][1] * lambda_dot
    )
    y_dot = (
        v_orb[0] * np.sin(bdict["lambda_"])
        + v_orb[1] * np.cos(bdict["i"]) * np.cos(bdict["lambda_"])
        - bdict["r_orb"][1] * i_dot * np.sin(bdict["i"]) * np.cos(bdict["lambda_"])
        + bdict["r_ecef"][0] * lambda_dot
    )
    z_dot = v_orb[1] * np.sin(bdict["i"]) + bdict["r_orb"][1] * i_dot * np.cos(bdict["i"])
    v_ecef = np.array([x_dot, y_dot, z_dot])

    bdict.update({"v_ecef": v_ecef, "v_orb": v_orb})
def _get_relativistic_clock_correction(self, t_sat_gpsweek, t_sat_gpssec, idx, sys):
    """Determine relativistic clock correction due to orbit eccentricity.

    The correction is determined for the given observation epoch (satellite transmission time) based on
    Section 20.3.3.3.3.1 in :cite:`is-gps-200h`.

    Args:
        t_sat_gpsweek (float):  GPS week of satellite transmission time.
        t_sat_gpssec (float):   GPS seconds of satellite transmission.
        idx (int):              Index for broadcast ephemeris dataset valid for observation time of receiver
        sys (str):              GNSS identifier

    Returns:
        float64: Relativistic orbit eccentricity correction in [m]
    """
    # Correct the broadcast ephemeris for the given transmission epoch
    ephemeris = self._get_corrected_broadcast_ephemeris(t_sat_gpsweek, t_sat_gpssec, idx, sys)

    # Relativistic orbit eccentricity correction in [m]:  -2/c * sqrt(a * GM) * e * sin(E)
    eccentricity = self.dset_edit.e[idx]
    return -2 / constant.c * np.sqrt(ephemeris["a"] * GM[sys]) * eccentricity * np.sin(ephemeris["E"])
| [
"numpy.sqrt",
"numpy.hstack",
"pandas.option_context",
"where.lib.log.fatal",
"numpy.array",
"where.lib.log.warn",
"numpy.sin",
"datetime.timedelta",
"midgard.math.constant.constant.get",
"where.cleaners.apply_remover",
"where.data.dataset3.Dataset",
"where.lib.rotation.R1",
"where.lib.confi... | [((1053, 1090), 'midgard.math.constant.constant.get', 'constant.get', (['"""GM"""'], {'source': '"""cgcs2000"""'}), "('GM', source='cgcs2000')\n", (1065, 1090), False, 'from midgard.math.constant import constant\n'), ((1101, 1134), 'midgard.math.constant.constant.get', 'constant.get', (['"""GM"""'], {'source': '"""gtrf"""'}), "('GM', source='gtrf')\n", (1113, 1134), False, 'from midgard.math.constant import constant\n'), ((1145, 1179), 'midgard.math.constant.constant.get', 'constant.get', (['"""GM"""'], {'source': '"""wgs84"""'}), "('GM', source='wgs84')\n", (1157, 1179), False, 'from midgard.math.constant import constant\n'), ((1190, 1224), 'midgard.math.constant.constant.get', 'constant.get', (['"""GM"""'], {'source': '"""wgs84"""'}), "('GM', source='wgs84')\n", (1202, 1224), False, 'from midgard.math.constant import constant\n'), ((1235, 1267), 'midgard.math.constant.constant.get', 'constant.get', (['"""GM"""'], {'source': '"""jgs"""'}), "('GM', source='jgs')\n", (1247, 1267), False, 'from midgard.math.constant import constant\n'), ((1315, 1355), 'midgard.math.constant.constant.get', 'constant.get', (['"""omega"""'], {'source': '"""cgcs2000"""'}), "('omega', source='cgcs2000')\n", (1327, 1355), False, 'from midgard.math.constant import constant\n'), ((1366, 1402), 'midgard.math.constant.constant.get', 'constant.get', (['"""omega"""'], {'source': '"""gtrf"""'}), "('omega', source='gtrf')\n", (1378, 1402), False, 'from midgard.math.constant import constant\n'), ((1413, 1450), 'midgard.math.constant.constant.get', 'constant.get', (['"""omega"""'], {'source': '"""wgs84"""'}), "('omega', source='wgs84')\n", (1425, 1450), False, 'from midgard.math.constant import constant\n'), ((1461, 1498), 'midgard.math.constant.constant.get', 'constant.get', (['"""omega"""'], {'source': '"""wgs84"""'}), "('omega', source='wgs84')\n", (1473, 1498), False, 'from midgard.math.constant import constant\n'), ((1509, 1544), 'midgard.math.constant.constant.get', 
'constant.get', (['"""omega"""'], {'source': '"""jgs"""'}), "('omega', source='jgs')\n", (1521, 1544), False, 'from midgard.math.constant import constant\n'), ((16343, 16418), 'where.cleaners.apply_remover', 'cleaners.apply_remover', (['"""gnss_clean_orbit"""', 'dset_in'], {'orbit_flag': '"""broadcast"""'}), "('gnss_clean_orbit', dset_in, orbit_flag='broadcast')\n", (16365, 16418), False, 'from where import cleaners\n'), ((17555, 17585), 'numpy.zeros', 'np.zeros', (['(dset_in.num_obs, 3)'], {}), '((dset_in.num_obs, 3))\n', (17563, 17585), True, 'import numpy as np\n'), ((17604, 17634), 'numpy.zeros', 'np.zeros', (['(dset_in.num_obs, 3)'], {}), '((dset_in.num_obs, 3))\n', (17612, 17634), True, 'import numpy as np\n'), ((30301, 30335), 'numpy.zeros', 'np.zeros', (['dset.num_obs'], {'dtype': 'bool'}), '(dset.num_obs, dtype=bool)\n', (30309, 30335), True, 'import numpy as np\n'), ((39430, 39539), 'where.lib.log.debug', 'log.debug', (['f"""Broadcast block is selected nearest to \'{\'+\' if positive else \'+/-\'}{time_key}\' time."""'], {}), '(\n f"Broadcast block is selected nearest to \'{\'+\' if positive else \'+/-\'}{time_key}\' time."\n )\n', (39439, 39539), False, 'from where.lib import log\n'), ((43194, 43219), 'numpy.sqrt', 'np.sqrt', (['(GM[sys] / a ** 3)'], {}), '(GM[sys] / a ** 3)\n', (43201, 43219), True, 'import numpy as np\n'), ((43930, 43944), 'numpy.sin', 'np.sin', (['(2 * u0)'], {}), '(2 * u0)\n', (43936, 43944), True, 'import numpy as np\n'), ((43962, 43976), 'numpy.cos', 'np.cos', (['(2 * u0)'], {}), '(2 * u0)\n', (43968, 43976), True, 'import numpy as np\n'), ((53773, 53795), 'numpy.sin', 'np.sin', (["(2 * bdict['u'])"], {}), "(2 * bdict['u'])\n", (53779, 53795), True, 'import numpy as np\n'), ((53812, 53834), 'numpy.cos', 'np.cos', (["(2 * bdict['u'])"], {}), "(2 * bdict['u'])\n", (53818, 53834), True, 'import numpy as np\n'), ((55555, 55586), 'numpy.array', 'np.array', (['[x_dot, y_dot, z_dot]'], {}), '([x_dot, y_dot, z_dot])\n', (55563, 55586), 
True, 'import numpy as np\n'), ((6561, 6592), 'datetime.timedelta', 'timedelta', ([], {'days': 'self.day_offset'}), '(days=self.day_offset)\n', (6570, 6592), False, 'from datetime import timedelta\n'), ((6623, 6676), 'where.lib.config.tech.get', 'config.tech.get', (['"""use_mixed_brdc_file"""'], {'default': '(False)'}), "('use_mixed_brdc_file', default=False)\n", (6638, 6676), False, 'from where.lib import config\n'), ((8982, 8999), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (8991, 8999), False, 'from datetime import timedelta\n'), ((11283, 11352), 'pandas.option_context', 'pd.option_context', (['"""display.max_rows"""', 'None', '"""display.max_columns"""', '(5)'], {}), "('display.max_rows', None, 'display.max_columns', 5)\n", (11300, 11352), True, 'import pandas as pd\n'), ((11459, 11534), 'where.lib.log.debug', 'log.debug', (['f"""List of duplicated navigation messages: \n{nav_duplicates}"""'], {}), '(f"""List of duplicated navigation messages: \n{nav_duplicates}""")\n', (11468, 11534), False, 'from where.lib import log\n'), ((11776, 11830), 'where.lib.config.tech.get', 'config.tech.get', (['"""navigation_message_type"""'], {'default': '""""""'}), "('navigation_message_type', default='')\n", (11791, 11830), False, 'from where.lib import config\n'), ((16722, 16809), 'where.cleaners.apply_remover', 'cleaners.apply_remover', (['"""gnss_ignore_system"""', 'dset_in'], {'systems': 'not_implemented_sys'}), "('gnss_ignore_system', dset_in, systems=\n not_implemented_sys)\n", (16744, 16809), False, 'from where import cleaners\n'), ((27553, 27584), 'numpy.zeros', 'np.zeros', (['self.dset_raw.num_obs'], {}), '(self.dset_raw.num_obs)\n', (27561, 27584), True, 'import numpy as np\n'), ((27724, 27817), 'numpy.logical_or', 'np.logical_or', (['(self.dset_raw.sv_accuracy[idx] < 0)', '(self.dset_raw.sv_accuracy[idx] >= 126)'], {}), '(self.dset_raw.sv_accuracy[idx] < 0, self.dset_raw.sv_accuracy\n [idx] >= 126)\n', (27737, 27817), True, 'import numpy as 
np\n'), ((30355, 30409), 'where.lib.config.tech.get', 'config.tech.get', (['"""navigation_message_type"""'], {'default': '""""""'}), "('navigation_message_type', default='')\n", (30370, 30409), False, 'from where.lib import config\n'), ((35891, 35920), 'numpy.expand_dims', 'np.expand_dims', (['array'], {'axis': '(0)'}), '(array, axis=0)\n', (35905, 35920), True, 'import numpy as np\n'), ((40061, 40139), 'where.cleaners.apply_remover', 'cleaners.apply_remover', (['"""ignore_satellite"""', 'dset'], {'satellites': 'not_available_sat'}), "('ignore_satellite', dset, satellites=not_available_sat)\n", (40083, 40139), False, 'from where import cleaners\n'), ((45616, 45626), 'numpy.fabs', 'np.fabs', (['f'], {}), '(f)\n', (45623, 45626), True, 'import numpy as np\n'), ((56650, 56668), 'numpy.sin', 'np.sin', (["bdict['E']"], {}), "(bdict['E'])\n", (56656, 56668), True, 'import numpy as np\n'), ((6877, 6908), 'datetime.timedelta', 'timedelta', ([], {'days': 'self.day_offset'}), '(days=self.day_offset)\n', (6886, 6908), False, 'from datetime import timedelta\n'), ((7324, 7376), 'where.lib.log.debug', 'log.debug', (['f"""Parse broadcast orbit file {file_path}"""'], {}), "(f'Parse broadcast orbit file {file_path}')\n", (7333, 7376), False, 'from where.lib import log\n'), ((7472, 7568), 'where.data.dataset3.Dataset', 'dataset.Dataset', ([], {'rundate': 'date_to_read', 'pipeline': "dset_raw.vars['pipeline']", 'stage': '"""temporary"""'}), "(rundate=date_to_read, pipeline=dset_raw.vars['pipeline'],\n stage='temporary')\n", (7487, 7568), True, 'from where.data import dataset3 as dataset\n'), ((7628, 7669), 'midgard.parsers.rinex_nav.get_rinex2_or_rinex3', 'rinex_nav.get_rinex2_or_rinex3', (['file_path'], {}), '(file_path)\n', (7658, 7669), False, 'from midgard.parsers import rinex_nav\n'), ((20756, 20785), 'numpy.hstack', 'np.hstack', (['(sat_pos, sat_vel)'], {}), '((sat_pos, sat_vel))\n', (20765, 20785), True, 'import numpy as np\n'), ((33877, 33963), 'where.lib.log.warn', 
'log.warn', (['"""Total group delay can only be applied for single-frequency solutions."""'], {}), "(\n 'Total group delay can only be applied for single-frequency solutions.')\n", (33885, 33963), False, 'from where.lib import log\n'), ((33982, 34004), 'numpy.zeros', 'np.zeros', (['dset.num_obs'], {}), '(dset.num_obs)\n', (33990, 34004), True, 'import numpy as np\n'), ((35768, 35797), 'numpy.expand_dims', 'np.expand_dims', (['array'], {'axis': '(0)'}), '(array, axis=0)\n', (35782, 35797), True, 'import numpy as np\n'), ((43518, 43539), 'numpy.sqrt', 'np.sqrt', (['(1.0 - e ** 2)'], {}), '(1.0 - e ** 2)\n', (43525, 43539), True, 'import numpy as np\n'), ((43542, 43551), 'numpy.sin', 'np.sin', (['E'], {}), '(E)\n', (43548, 43551), True, 'import numpy as np\n'), ((43553, 43562), 'numpy.cos', 'np.cos', (['E'], {}), '(E)\n', (43559, 43562), True, 'import numpy as np\n'), ((45800, 45905), 'where.lib.log.fatal', 'log.fatal', (['f"""Convergence problem by determination of eccentric anomaly (max_iter = {max_iter})"""'], {}), "(\n f'Convergence problem by determination of eccentric anomaly (max_iter = {max_iter})'\n )\n", (45809, 45905), False, 'from where.lib import log\n'), ((53246, 53264), 'numpy.sin', 'np.sin', (["bdict['E']"], {}), "(bdict['E'])\n", (53252, 53264), True, 'import numpy as np\n'), ((53333, 53354), 'numpy.sin', 'np.sin', (["bdict['vega']"], {}), "(bdict['vega'])\n", (53339, 53354), True, 'import numpy as np\n'), ((54276, 54294), 'numpy.sin', 'np.sin', (["bdict['E']"], {}), "(bdict['E'])\n", (54282, 54294), True, 'import numpy as np\n'), ((55469, 55487), 'numpy.sin', 'np.sin', (["bdict['i']"], {}), "(bdict['i'])\n", (55475, 55487), True, 'import numpy as np\n'), ((55518, 55536), 'numpy.cos', 'np.cos', (["bdict['i']"], {}), "(bdict['i'])\n", (55524, 55536), True, 'import numpy as np\n'), ((32444, 32486), 'where.lib.log.fatal', 'log.fatal', (['f"""GNSS \'{sys}\' is not defined."""'], {}), '(f"GNSS \'{sys}\' is not defined.")\n', (32453, 32486), False, 'from 
where.lib import log\n'), ((38880, 38944), 'where.lib.config.tech.get', 'config.tech.get', (['"""brdc_block_nearest_to"""'], {'default': '"""toe:positive"""'}), "('brdc_block_nearest_to', default='toe:positive')\n", (38895, 38944), False, 'from where.lib import config\n'), ((48219, 48237), 'numpy.cos', 'np.cos', (["bdict['u']"], {}), "(bdict['u'])\n", (48225, 48237), True, 'import numpy as np\n'), ((48252, 48270), 'numpy.sin', 'np.sin', (["bdict['u']"], {}), "(bdict['u'])\n", (48258, 48270), True, 'import numpy as np\n'), ((48466, 48490), 'where.lib.rotation.R1', 'rotation.R1', (["(-bdict['i'])"], {}), "(-bdict['i'])\n", (48477, 48490), False, 'from where.lib import rotation\n'), ((53048, 53066), 'numpy.cos', 'np.cos', (["bdict['E']"], {}), "(bdict['E'])\n", (53054, 53066), True, 'import numpy as np\n'), ((55078, 55102), 'numpy.sin', 'np.sin', (["bdict['lambda_']"], {}), "(bdict['lambda_'])\n", (55084, 55102), True, 'import numpy as np\n'), ((55360, 55384), 'numpy.cos', 'np.cos', (["bdict['lambda_']"], {}), "(bdict['lambda_'])\n", (55366, 55384), True, 'import numpy as np\n'), ((56594, 56623), 'numpy.sqrt', 'np.sqrt', (["(bdict['a'] * GM[sys])"], {}), "(bdict['a'] * GM[sys])\n", (56601, 56623), True, 'import numpy as np\n'), ((40530, 40579), 'numpy.array', 'np.array', (['[(99999 if v < 0 else v) for v in diff]'], {}), '([(99999 if v < 0 else v) for v in diff])\n', (40538, 40579), True, 'import numpy as np\n'), ((44300, 44309), 'numpy.cos', 'np.cos', (['E'], {}), '(E)\n', (44306, 44309), True, 'import numpy as np\n'), ((45660, 45669), 'numpy.sin', 'np.sin', (['E'], {}), '(E)\n', (45666, 45669), True, 'import numpy as np\n'), ((48431, 48461), 'where.lib.rotation.R3', 'rotation.R3', (["(-bdict['lambda_'])"], {}), "(-bdict['lambda_'])\n", (48442, 48461), False, 'from where.lib import rotation\n'), ((53311, 53329), 'numpy.cos', 'np.cos', (["bdict['E']"], {}), "(bdict['E'])\n", (53317, 53329), True, 'import numpy as np\n'), ((54600, 54618), 'numpy.cos', 'np.cos', 
(["bdict['u']"], {}), "(bdict['u'])\n", (54606, 54618), True, 'import numpy as np\n'), ((54642, 54660), 'numpy.sin', 'np.sin', (["bdict['u']"], {}), "(bdict['u'])\n", (54648, 54660), True, 'import numpy as np\n'), ((54686, 54704), 'numpy.sin', 'np.sin', (["bdict['u']"], {}), "(bdict['u'])\n", (54692, 54704), True, 'import numpy as np\n'), ((54728, 54746), 'numpy.cos', 'np.cos', (["bdict['u']"], {}), "(bdict['u'])\n", (54734, 54746), True, 'import numpy as np\n'), ((54919, 54943), 'numpy.cos', 'np.cos', (["bdict['lambda_']"], {}), "(bdict['lambda_'])\n", (54925, 54943), True, 'import numpy as np\n'), ((54990, 55014), 'numpy.sin', 'np.sin', (["bdict['lambda_']"], {}), "(bdict['lambda_'])\n", (54996, 55014), True, 'import numpy as np\n'), ((55057, 55075), 'numpy.sin', 'np.sin', (["bdict['i']"], {}), "(bdict['i'])\n", (55063, 55075), True, 'import numpy as np\n'), ((55201, 55225), 'numpy.sin', 'np.sin', (["bdict['lambda_']"], {}), "(bdict['lambda_'])\n", (55207, 55225), True, 'import numpy as np\n'), ((55272, 55296), 'numpy.cos', 'np.cos', (["bdict['lambda_']"], {}), "(bdict['lambda_'])\n", (55278, 55296), True, 'import numpy as np\n'), ((55339, 55357), 'numpy.sin', 'np.sin', (["bdict['i']"], {}), "(bdict['i'])\n", (55345, 55357), True, 'import numpy as np\n'), ((7185, 7215), 'where.lib.config.date_vars', 'config.date_vars', (['date_to_read'], {}), '(date_to_read)\n', (7201, 7215), False, 'from where.lib import config\n'), ((12574, 12616), 'pandas.concat', 'pd.concat', (['[nav_filtered, remove_nav_type]'], {}), '([nav_filtered, remove_nav_type])\n', (12583, 12616), True, 'import pandas as pd\n'), ((45709, 45718), 'numpy.cos', 'np.cos', (['E'], {}), '(E)\n', (45715, 45718), True, 'import numpy as np\n'), ((53189, 53210), 'numpy.cos', 'np.cos', (["bdict['vega']"], {}), "(bdict['vega'])\n", (53195, 53210), True, 'import numpy as np\n'), ((54969, 54987), 'numpy.cos', 'np.cos', (["bdict['i']"], {}), "(bdict['i'])\n", (54975, 54987), True, 'import numpy as np\n'), ((55251, 
55269), 'numpy.cos', 'np.cos', (["bdict['i']"], {}), "(bdict['i'])\n", (55257, 55269), True, 'import numpy as np\n')] |
import math
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler, SequentialSampler
from sklearn.model_selection import StratifiedKFold
import dgl
def collate(samples):
    """Batch a list of (graph, label) pairs into one batched graph and a label tensor."""
    # 'samples (graph, label)'
    graphs, labels = map(list, zip(*samples))
    for graph in graphs:
        # Cast every node feature field to float so features batch consistently
        for field in graph.node_attr_schemes().keys():
            graph.ndata[field] = graph.ndata[field].float()
    return dgl.batch(graphs), torch.tensor(labels)
class GraphDataLoader():
    """Create train/validation DataLoaders over a graph classification dataset.

    The dataset is split either by stratified 10-fold cross validation
    ('fold10', selecting fold `fold_idx`) or by a shuffled random split ('rand').
    """

    def __init__(self, dataset, batch_size, device,
                 collate_fn=collate, seed=0, shuffle=True,
                 split_name='fold10', fold_idx=0, split_ratio=0.7):
        """Build train and validation loaders.

        Args:
            dataset:      Iterable of (graph, label) pairs.
            batch_size:   Number of samples per batch.
            device:       Device id; pin host memory only for CUDA devices (device >= 0).
            collate_fn:   Function batching a list of (graph, label) samples.
            seed:         Random seed for the split.
            shuffle:      Whether to shuffle before splitting.
            split_name:   'fold10' (stratified 10-fold) or 'rand' (ratio split).
            fold_idx:     Fold to use as validation set when split_name == 'fold10'.
            split_ratio:  Train fraction when split_name == 'rand'.
        """
        self.shuffle = shuffle
        self.seed = seed
        # Pin host memory only when loading onto a CUDA device (device >= 0)
        self.kwargs = {'pin_memory': True} if device >= 0 else {}

        labels = [l for _, l in dataset]

        if split_name == 'fold10':
            train_idx, valid_idx = self._split_fold10(
                labels, fold_idx, seed, shuffle
            )
        elif split_name == 'rand':
            train_idx, valid_idx = self._split_rand(
                labels, split_ratio, seed, shuffle
            )
        else:
            raise NotImplementedError()

        train_sampler = SubsetRandomSampler(train_idx)
        valid_sampler = SubsetRandomSampler(valid_idx)

        self.train_loader = DataLoader(
            dataset, sampler=train_sampler,
            batch_size=batch_size, collate_fn=collate_fn, **self.kwargs
        )
        self.valid_loader = DataLoader(
            dataset, sampler=valid_sampler,
            batch_size=batch_size, collate_fn=collate_fn, **self.kwargs
        )

    def train_valid_loader(self):
        """Return the (train_loader, valid_loader) pair."""
        return self.train_loader, self.valid_loader

    def _split_fold10(self, labels, fold_idx=0, seed=0, shuffle=True):
        """Return (train_idx, valid_idx) for fold `fold_idx` of a stratified 10-fold split."""
        # FIX: the original used `assert cond, print(...)`, which passes the return value of
        # print() -- None -- as the assertion message; use a plain message string instead.
        assert 0 <= fold_idx < 10, 'fold_idx must be from 0 to 9.'

        skf = StratifiedKFold(n_splits=10, shuffle=shuffle, random_state=seed)

        idx_list = []
        for idx in skf.split(np.zeros(len(labels)), labels):
            idx_list.append(idx)
        train_idx, valid_idx = idx_list[fold_idx]

        print(
            'train_set: test_set = %d : %d' % (len(train_idx), len(valid_idx))
        )

        return train_idx, valid_idx

    def _split_rand(self, labels, split_ratio=0.7, seed=0, shuffle=True):
        """Return (train_idx, valid_idx) after a shuffled `split_ratio` split."""
        num_entries = len(labels)
        indices = list(range(num_entries))
        # NOTE(review): this seeds and permutes the *global* numpy RNG; use a local
        # numpy Generator if global RNG state must stay untouched.
        np.random.seed(seed)
        np.random.shuffle(indices)
        split = int(math.floor(split_ratio * num_entries))
        train_idx, valid_idx = indices[:split], indices[split:]

        print(
            'train_set: test_set = %d : %d' % (len(train_idx), len(valid_idx))
        )

        return train_idx, valid_idx
if __name__ == '__main__':
    # Smoke test: build all ten cross-validation folds over the PROTEINS
    # graph-classification dataset and collect the resulting loader pairs.
    from Temp.dataset import GINDataset
    dataset = GINDataset(name='PROTEINS', self_loop=True, degree_as_nlabel=False)
    Loader_list = []
    for idx in range(10):
        # One (train, validation) loader pair per fold; the seed is fixed so
        # the fold assignment is reproducible across runs.
        train_loader, valid_loader = GraphDataLoader(
            dataset, batch_size=128, device=0, collate_fn=collate,
            seed=9, shuffle=True, split_name='fold10', fold_idx=idx
        ).train_valid_loader()
        Loader_list.append((train_loader, valid_loader))
    print(Loader_list)
| [
"torch.utils.data.sampler.SubsetRandomSampler",
"dgl.batch",
"numpy.random.shuffle",
"math.floor",
"sklearn.model_selection.StratifiedKFold",
"torch.tensor",
"numpy.random.seed",
"torch.utils.data.DataLoader",
"Temp.dataset.GINDataset"
] | [((475, 492), 'dgl.batch', 'dgl.batch', (['graphs'], {}), '(graphs)\n', (484, 492), False, 'import dgl\n'), ((507, 527), 'torch.tensor', 'torch.tensor', (['labels'], {}), '(labels)\n', (519, 527), False, 'import torch\n'), ((3033, 3100), 'Temp.dataset.GINDataset', 'GINDataset', ([], {'name': '"""PROTEINS"""', 'self_loop': '(True)', 'degree_as_nlabel': '(False)'}), "(name='PROTEINS', self_loop=True, degree_as_nlabel=False)\n", (3043, 3100), False, 'from Temp.dataset import GINDataset\n'), ((1341, 1371), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['train_idx'], {}), '(train_idx)\n', (1360, 1371), False, 'from torch.utils.data.sampler import SubsetRandomSampler, SequentialSampler\n'), ((1397, 1427), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['valid_idx'], {}), '(valid_idx)\n', (1416, 1427), False, 'from torch.utils.data.sampler import SubsetRandomSampler, SequentialSampler\n'), ((1461, 1568), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'sampler': 'train_sampler', 'batch_size': 'batch_size', 'collate_fn': 'collate_fn'}), '(dataset, sampler=train_sampler, batch_size=batch_size,\n collate_fn=collate_fn, **self.kwargs)\n', (1471, 1568), False, 'from torch.utils.data import DataLoader\n'), ((1633, 1740), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'sampler': 'valid_sampler', 'batch_size': 'batch_size', 'collate_fn': 'collate_fn'}), '(dataset, sampler=valid_sampler, batch_size=batch_size,\n collate_fn=collate_fn, **self.kwargs)\n', (1643, 1740), False, 'from torch.utils.data import DataLoader\n'), ((2067, 2131), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(10)', 'shuffle': 'shuffle', 'random_state': 'seed'}), '(n_splits=10, shuffle=shuffle, random_state=seed)\n', (2082, 2131), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((2615, 2635), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2629, 2635), True, 
'import numpy as np\n'), ((2645, 2671), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (2662, 2671), True, 'import numpy as np\n'), ((2695, 2732), 'math.floor', 'math.floor', (['(split_ratio * num_entries)'], {}), '(split_ratio * num_entries)\n', (2705, 2732), False, 'import math\n')] |
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import mnist
from tensorflow.examples.tutorials.mnist import input_data
import cdbn_backup as cdbn
""" --------------------------------------------
------------------- DATA -------------------
-------------------------------------------- """
class MNIST_HANDLER(object):
    """Hold the MNIST train/test splits in memory and serve mini-batches.

    Optionally whitens the data (``do_whiten``) with a matrix computed from
    the training-set covariance; batches then come from the whitened arrays.
    """

    def __init__(self, data):
        """
        :param data: an MNIST dataset object exposing ``train``/``test`` splits
            with ``num_examples`` and ``next_batch``
        """
        self.num_training_example = data.train.num_examples
        self.num_test_example = data.test.num_examples
        # Materialize both splits fully in memory.
        self.training_data, self.training_labels = data.train.next_batch(self.num_training_example)
        self.test_data, self.test_labels = data.test.next_batch(self.num_test_example)
        self.whiten = False
        # Start negative so the first requested batch begins near index 0
        # after the modulo update in next_batch.
        self.training_index = -20
        self.test_index = -20

    def _center(self, data, num_examples):
        """Return ``data`` with the per-pixel mean (over ``num_examples`` rows,
        784 columns) subtracted via broadcasting."""
        mean = np.sum(data, axis=0) / num_examples
        return data - mean

    def do_whiten(self):
        """Compute a whitening matrix from the training data and whiten both splits.

        The matrix is ``U diag(1/sqrt(S+eps)) V`` from the SVD of the training
        covariance; the same matrix is applied to the (centered) test data.
        """
        self.whiten = True
        centered_train = self._center(np.copy(self.training_data), self.num_training_example)
        covariance = np.dot(centered_train.T, centered_train) / self.num_training_example
        U, S, V = np.linalg.svd(covariance)
        epsilon = 1e-5  # guards the division for near-zero singular values
        lambda_square = np.diag(1. / np.sqrt(S + epsilon))
        self.whitening_mat = np.dot(np.dot(U, lambda_square), V)
        self.whitened_training_data = np.dot(centered_train, self.whitening_mat)
        centered_test = self._center(np.copy(self.test_data), self.num_test_example)
        self.whitened_test_data = np.dot(centered_test, self.whitening_mat)

    def next_batch(self, batch_size, type='train'):
        """Return the next ``batch_size`` (data, labels) rows of a split.

        The split cursor advances modulo the split size, wrapping around the
        end of the array when needed.

        :param batch_size: number of rows to return
        :param type: 'train' or 'test'
        :raises ValueError: on an unknown ``type`` (the original code crashed
            with an ``UnboundLocalError`` here)
        """
        if type == 'train':
            operand = self.whitened_training_data if self.whiten else self.training_data
            operand_bis = self.training_labels
            self.training_index = (batch_size + self.training_index) % self.num_training_example
            index = self.training_index
            number = self.num_training_example
        elif type == 'test':
            operand = self.whitened_test_data if self.whiten else self.test_data
            operand_bis = self.test_labels
            self.test_index = (batch_size + self.test_index) % self.num_test_example
            index = self.test_index
            number = self.num_test_example
        else:
            raise ValueError("type must be 'train' or 'test', got %r" % (type,))
        if index + batch_size > number:
            # Wrap around: stitch the tail of the array to its head.
            wrap = (index + batch_size) % number
            result = np.concatenate([operand[index:, :], operand[:wrap, :]])
            result_bis = np.concatenate([operand_bis[index:, :], operand_bis[:wrap, :]])
        else:
            result = operand[index:index + batch_size, :]
            result_bis = operand_bis[index:index + batch_size, :]
        return result, result_bis
mnist_dataset = MNIST_HANDLER(input_data.read_data_sets('data', one_hot=True))
#mnist_dataset.do_whiten()
sess = tf.Session()
""" ---------------------------------------------
------------------- MODEL -------------------
--------------------------------------------- """
my_cdbn = cdbn.CDBN('mnist_cdbn', 20, '/home/joel/Documents/git/Speech-Recognition-for-Broken-Speech/src/classify/Convolutional_Deep_Belief_Network/log', mnist_dataset, sess, verbosity = 2)
my_cdbn.add_layer('layer_1', fully_connected = False, v_height = 28, v_width = 28, v_channels = 1, f_height = 11, f_width = 11, f_number = 40,
init_biases_H = -3, init_biases_V = 0.01, init_weight_stddev = 0.01,
gaussian_unit = True, gaussian_variance = 0.2,
prob_maxpooling = True, padding = True,
learning_rate = 0.00005, learning_rate_decay = 0.5, momentum = 0.9, decay_step = 50000,
weight_decay = 2.0, sparsity_target = 0.003, sparsity_coef = 0.1)
my_cdbn.add_layer('layer_2', fully_connected = False, v_height = 14, v_width = 14, v_channels = 40, f_height = 7, f_width = 7, f_number = 40,
init_biases_H = -3, init_biases_V = 0.025, init_weight_stddev = 0.025,
gaussian_unit = False, gaussian_variance = 0.2,
prob_maxpooling = True, padding = True,
learning_rate = 0.0025, learning_rate_decay = 0.5, momentum = 0.9, decay_step = 50000,
weight_decay = 0.1, sparsity_target = 0.1, sparsity_coef = 0.1)
my_cdbn.add_layer('layer_3', fully_connected = True, v_height = 1, v_width = 1, v_channels = 40*7*7, f_height = 1, f_width = 1, f_number = 200,
init_biases_H = -3, init_biases_V = 0.025, init_weight_stddev = 0.025,
gaussian_unit = False, gaussian_variance = 0.2,
prob_maxpooling = False, padding = False,
learning_rate = 0.0025, learning_rate_decay = 0.5, momentum = 0.9, decay_step = 50000,
weight_decay = 0.1, sparsity_target = 0.1, sparsity_coef = 0.1)
my_cdbn.add_softmax_layer(10, 0.1)
my_cdbn.lock_cdbn()
""" ---------------------------------------------
------------------ TRAINING -----------------
--------------------------------------------- """
my_cdbn.manage_layers(['layer_1','layer_2','layer_3'],[],[10000,10000,10000], [1,1,1], 20000, restore_softmax = False, fine_tune = True)
my_cdbn.do_eval() | [
"numpy.copy",
"numpy.tile",
"numpy.reshape",
"numpy.sqrt",
"tensorflow.Session",
"cdbn_backup.CDBN",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"numpy.dot",
"numpy.sum",
"numpy.concatenate",
"numpy.linalg.svd"
] | [((3295, 3307), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3305, 3307), True, 'import tensorflow as tf\n'), ((3477, 3663), 'cdbn_backup.CDBN', 'cdbn.CDBN', (['"""mnist_cdbn"""', '(20)', '"""/home/joel/Documents/git/Speech-Recognition-for-Broken-Speech/src/classify/Convolutional_Deep_Belief_Network/log"""', 'mnist_dataset', 'sess'], {'verbosity': '(2)'}), "('mnist_cdbn', 20,\n '/home/joel/Documents/git/Speech-Recognition-for-Broken-Speech/src/classify/Convolutional_Deep_Belief_Network/log'\n , mnist_dataset, sess, verbosity=2)\n", (3486, 3663), True, 'import cdbn_backup as cdbn\n'), ((3212, 3259), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""data"""'], {'one_hot': '(True)'}), "('data', one_hot=True)\n", (3237, 3259), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((896, 923), 'numpy.copy', 'np.copy', (['self.training_data'], {}), '(self.training_data)\n', (903, 923), True, 'import numpy as np\n'), ((1040, 1080), 'numpy.tile', 'np.tile', (['mean', 'self.num_training_example'], {}), '(mean, self.num_training_example)\n', (1047, 1080), True, 'import numpy as np\n'), ((1106, 1156), 'numpy.reshape', 'np.reshape', (['mean', '(self.num_training_example, 784)'], {}), '(mean, (self.num_training_example, 784))\n', (1116, 1156), True, 'import numpy as np\n'), ((1340, 1365), 'numpy.linalg.svd', 'np.linalg.svd', (['covariance'], {}), '(covariance)\n', (1353, 1365), True, 'import numpy as np\n'), ((1543, 1584), 'numpy.dot', 'np.dot', (['centered_data', 'self.whitening_mat'], {}), '(centered_data, self.whitening_mat)\n', (1549, 1584), True, 'import numpy as np\n'), ((1615, 1638), 'numpy.copy', 'np.copy', (['self.test_data'], {}), '(self.test_data)\n', (1622, 1638), True, 'import numpy as np\n'), ((1751, 1787), 'numpy.tile', 'np.tile', (['mean', 'self.num_test_example'], {}), '(mean, self.num_test_example)\n', (1758, 1787), True, 'import numpy as np\n'), ((1813, 1859), 'numpy.reshape', 
'np.reshape', (['mean', '(self.num_test_example, 784)'], {}), '(mean, (self.num_test_example, 784))\n', (1823, 1859), True, 'import numpy as np\n'), ((1944, 1985), 'numpy.dot', 'np.dot', (['centered_data', 'self.whitening_mat'], {}), '(centered_data, self.whitening_mat)\n', (1950, 1985), True, 'import numpy as np\n'), ((950, 985), 'numpy.sum', 'np.sum', (['data_to_be_whitened'], {'axis': '(0)'}), '(data_to_be_whitened, axis=0)\n', (956, 985), True, 'import numpy as np\n'), ((1250, 1288), 'numpy.dot', 'np.dot', (['centered_data.T', 'centered_data'], {}), '(centered_data.T, centered_data)\n', (1256, 1288), True, 'import numpy as np\n'), ((1475, 1499), 'numpy.dot', 'np.dot', (['U', 'lambda_square'], {}), '(U, lambda_square)\n', (1481, 1499), True, 'import numpy as np\n'), ((1665, 1700), 'numpy.sum', 'np.sum', (['data_to_be_whitened'], {'axis': '(0)'}), '(data_to_be_whitened, axis=0)\n', (1671, 1700), True, 'import numpy as np\n'), ((2847, 2877), 'numpy.concatenate', 'np.concatenate', (['[part1, part2]'], {}), '([part1, part2])\n', (2861, 2877), True, 'import numpy as np\n'), ((2992, 3022), 'numpy.concatenate', 'np.concatenate', (['[part1, part2]'], {}), '([part1, part2])\n', (3006, 3022), True, 'import numpy as np\n'), ((1422, 1442), 'numpy.sqrt', 'np.sqrt', (['(S + epsilon)'], {}), '(S + epsilon)\n', (1429, 1442), True, 'import numpy as np\n')] |
from pathlib import Path
import json
from sklearn import manifold
from sklearn.decomposition import PCA
import os,inspect,sys
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
grandpadir = os.path.dirname(os.path.dirname(currentdir))
sys.path.insert(0, grandpadir)
from utils import helper_functions
from utils.pseudo_samples import PseudoSamplesMger
from utils.shared_names import *
import pandas as pd
import numpy as np
import os
import matplotlib.pylab as plt
from adjustText import adjust_text
class Visualizer:
    """Plot (pseudo-sample augmented) datasets in two dimensions.

    Inliers are drawn in light blue, detected outliers in red with their row
    index annotated.  Feature subsets with more than three dimensions are
    first projected to 2-D with each configured reduction method (PCA, t-SNE).
    """

    # Relative directory used when figures are written to disk.
    __save_dir = 'figures/vizualizations'

    # 2-D embedding methods tried when more than 3 features are plotted.
    __dim_reduction_methods = {
        'pca':
            {
                'method': PCA(n_components=2, random_state=0),
                'axis_labels': ['Principal Component 1', 'Principal Component 2']
            },
        'tsne':
            {
                'method': manifold.TSNE(n_components=2, init='pca', perplexity=40, random_state=0),
                'axis_labels': ['t-SNE Embedding 1', 't-SNE Embedding 2']
            }
    }

    def __init__(self, path_to_dir, metric_id):
        """
        :param path_to_dir: directory searched recursively for navigator files
        :param metric_id: id of the metric used to pick the best model
        """
        self.original_dataset = None
        self.path_to_dir = path_to_dir
        self.nav_files = None
        self.metric_id = metric_id

    def visualize(self, dims=None, original_data_viz=False):
        """Visualize the best pseudo-sample model of every navigator file.

        :param dims: if given, skip datasets with fewer than ``dims`` columns
        :param original_data_viz: also visualize the original (non-augmented)
            datasets afterwards
        """
        self.nav_files = self.__read_nav_files()
        self.nav_files = self.__sort_files_by_dim()
        self.__visualize_nav_files(dims)
        if original_data_viz:
            self.visualize_original_file(dims)

    def visualize_original_file(self, dims=None):
        """Visualize the best model trained on the original datasets only."""
        for dim, nav_file in self.nav_files.items():
            if dims is not None and dim < dims:
                continue
            self.read_original_dataset(nav_file)
            original_data = nav_file[FileKeys.navigator_original_data]
            best_model = helper_functions.get_best_model_original_data(original_data, self.metric_id, True)
            # Plot once restricted to the selected features, once on all
            # features (the last two columns — 'is_anomaly' and, presumably,
            # 'subspaces' — are excluded as metadata).
            self.__viz_coordinator(parent_dir=None, df=self.original_dataset, features=best_model['feature_selection']['features'], keep_only_given_features=True)
            self.__viz_coordinator(parent_dir=None, df=self.original_dataset, features=np.arange(0, self.original_dataset.shape[1] - 2),
                                   keep_only_given_features=False)

    def __visualize_nav_files(self, dims):
        """Plot the best pseudo-sample model of each navigator file."""
        for dim, nav_file in self.nav_files.items():
            if dims is not None and dim < dims:
                continue
            self.read_original_dataset(nav_file)
            ps_mger = PseudoSamplesMger(nav_file[FileKeys.navigator_pseudo_samples_key], self.metric_id, fs=True)
            best_model, k = ps_mger.get_best_model()
            print('Best k', k)
            dataset_path_of_best_k = ps_mger.get_info_field_of_k(k, FileKeys.navigator_pseudo_samples_data_path)
            df = pd.read_csv(dataset_path_of_best_k)
            print('Explained Boundary')
            self.__viz_coordinator(parent_dir=None, df=df, features=best_model['feature_selection']['features'], keep_only_given_features=True)
            print('Original Boundary')
            self.__viz_coordinator(parent_dir=None, df=df, features=np.arange(0, self.original_dataset.shape[1] - 2), keep_only_given_features=False)

    def read_original_dataset(self, nav_file):
        """Load the original (non-augmented) dataset referenced by ``nav_file``."""
        self.original_dataset = pd.read_csv(nav_file[FileKeys.navigator_original_dataset_path])

    def __read_nav_files(self):
        """Collect and parse every navigator JSON file under ``path_to_dir``."""
        nav_files = []
        nav_files_paths = helper_functions.get_files_recursively(self.path_to_dir, FileNames.navigator_fname)
        for f in nav_files_paths:
            with open(f) as json_file:
                nav_files.append(json.load(json_file))
        return nav_files

    def __sort_files_by_dim(self):
        """Return the navigator files keyed and sorted by dataset column count."""
        nav_files_sort_by_dim = {}
        for nfile in self.nav_files:
            data_dim = pd.read_csv(nfile[FileKeys.navigator_original_dataset_path]).shape[1]
            nav_files_sort_by_dim[data_dim] = nfile
        return dict(sorted(nav_files_sort_by_dim.items()))

    def __viz_coordinator(self, parent_dir, df, features, keep_only_given_features=True):
        """Dispatch to the right 2-D visualization based on the feature count."""
        # todo delete following line
        if keep_only_given_features:
            features = features[0:10] if len(features) > 10 else features
        if len(features) == 2:
            self.__viz_in_2d(df, features, features, None, parent_dir, 'viz2d.png')
        elif len(features) == 3:
            print('Visualization in 3d not ready yet!!!')
            pass
        elif len(features) > 3:
            # Project to 2-D with every configured reduction method.
            for method, data in Visualizer.__dim_reduction_methods.items():
                self.__viz_in_2d(df, features, data['axis_labels'], data['method'], parent_dir, method + '.png')
        else:
            assert False, len(features)

    def __viz_in_2d(self, df, features, axis_labels, transform_func, parent_dir, fname):
        """Scatter-plot ``df`` restricted to ``features`` in two dimensions.

        Pseudo samples (rows appended after the original dataset) are dropped
        before plotting; detected outliers are drawn in red and annotated
        with their row index.
        """
        pseudo_samples_indices = self.__get_artificial_point_indices(df)
        reduced_df = df.copy()
        if pseudo_samples_indices is not None and len(pseudo_samples_indices) > 0:
            reduced_df = df.drop(df.index[pseudo_samples_indices])
        # Draw order, colors and labels are computed on the real points only.
        zindex = self.__zindex_of_points(reduced_df)
        colors = self.__colors_of_points(reduced_df)
        texts = self.__texts_of_points(reduced_df)
        new_df = df.iloc[:, features]
        if transform_func is not None:
            new_df = transform_func.fit_transform(new_df)
        if pseudo_samples_indices is not None and len(pseudo_samples_indices) > 0:
            new_df = np.delete(new_df, pseudo_samples_indices, axis=0)
        if not isinstance(new_df, np.ndarray):
            # Bug fix: ``DataFrame.values`` is a property, not a method; the
            # original ``new_df.values()`` raised TypeError on the
            # no-transform (2-feature) path.
            new_df = new_df.values
        plt.scatter(new_df[zindex, 0], new_df[zindex, 1], c=colors[zindex], cmap='Spectral')
        annotations = []
        for i in zindex:
            if texts[i] != '':
                annotations.append(plt.text(new_df[i, 0], new_df[i, 1], texts[i], color='brown', fontweight='bold', size=9))
        adjust_text(annotations, autoalign='')
        plt.title("%s" % (fname.replace('.png', '')))
        plt.xlabel(axis_labels[0])
        plt.ylabel(axis_labels[1])
        # plt.savefig(Path(parent_dir, fname), dpi=300, bbox_inches='tight', pad_inches=0.1)
        plt.show()
        plt.clf()

    def __colors_of_points(self, df):
        """Return per-point colors: red for all detected outliers, else skyblue."""
        outliers = Visualizer.__get_outlier_indices(df)
        colors = np.full(df.shape[0], 'skyblue', dtype=object)
        fp = self.__get_false_positive_indices(outliers)
        tp = self.__get_true_outliers_indices(outliers)
        high_scored_points = np.concatenate((fp, tp)).astype(int)
        colors[high_scored_points] = 'r'
        return colors

    def __texts_of_points(self, df):
        """Return per-point annotation texts: the row index for outliers, '' else."""
        inliers = Visualizer.__get_inlier_indices(df)
        text = np.array([str(x) for x in range(df.shape[0])], dtype=object)
        text[inliers] = ''
        return text

    def __zindex_of_points(self, df):
        """Return a draw order with inliers first so outliers are plotted on top."""
        inliers = Visualizer.__get_inlier_indices(df)
        outliers = Visualizer.__get_outlier_indices(df)
        return np.array([*inliers, *outliers])

    def __get_false_positive_indices(self, detected_outliers):
        """Return detected outliers that are NOT outliers in the original data."""
        outlier_inds_original = Visualizer.__get_outlier_indices(self.original_dataset)
        return list(set(detected_outliers) - set(outlier_inds_original))

    def __get_true_outliers_indices(self, detected_outliers):
        """Return detected outliers that ARE outliers in the original data."""
        outlier_inds_original = Visualizer.__get_outlier_indices(self.original_dataset)
        return list(set(detected_outliers).intersection(set(outlier_inds_original)))

    @staticmethod
    def __get_inlier_indices(df):
        """Return the row indices with is_anomaly == 0."""
        return np.array(df.loc[df['is_anomaly'] == 0, 'is_anomaly'].index)

    @staticmethod
    def __get_outlier_indices(df):
        """Return the row indices with is_anomaly == 1."""
        return np.array(df.loc[df['is_anomaly'] == 1, 'is_anomaly'].index)

    def __get_artificial_point_indices(self, df):
        """Return the indices of pseudo samples (rows beyond the original data),
        or None when ``df`` is the original dataset itself."""
        if self.original_dataset.shape[0] == df.shape[0]:
            return None
        else:
            return np.arange(self.original_dataset.shape[0], df.shape[0])

    @staticmethod
    def __get_create_parent_output_dir(path_to_best_model_dir, metric):
        """Create (if needed) and return the output directory for figures."""
        parent_dir = Path(path_to_best_model_dir, Visualizer.__save_dir, metric)
        parent_dir.mkdir(parents=True, exist_ok=True)
        return parent_dir
| [
"sys.path.insert",
"utils.helper_functions.get_files_recursively",
"pandas.read_csv",
"numpy.array",
"matplotlib.pylab.show",
"numpy.arange",
"matplotlib.pylab.clf",
"pathlib.Path",
"sklearn.decomposition.PCA",
"utils.pseudo_samples.PseudoSamplesMger",
"numpy.delete",
"matplotlib.pylab.scatter... | [((271, 301), 'sys.path.insert', 'sys.path.insert', (['(0)', 'grandpadir'], {}), '(0, grandpadir)\n', (286, 301), False, 'import os, inspect, sys\n'), ((242, 269), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (257, 269), False, 'import os\n'), ((3326, 3389), 'pandas.read_csv', 'pd.read_csv', (['nav_file[FileKeys.navigator_original_dataset_path]'], {}), '(nav_file[FileKeys.navigator_original_dataset_path])\n', (3337, 3389), True, 'import pandas as pd\n'), ((3472, 3560), 'utils.helper_functions.get_files_recursively', 'helper_functions.get_files_recursively', (['self.path_to_dir', 'FileNames.navigator_fname'], {}), '(self.path_to_dir, FileNames.\n navigator_fname)\n', (3510, 3560), False, 'from utils import helper_functions\n'), ((5768, 5857), 'matplotlib.pylab.scatter', 'plt.scatter', (['new_df[zindex, 0]', 'new_df[zindex, 1]'], {'c': 'colors[zindex]', 'cmap': '"""Spectral"""'}), "(new_df[zindex, 0], new_df[zindex, 1], c=colors[zindex], cmap=\n 'Spectral')\n", (5779, 5857), True, 'import matplotlib.pylab as plt\n'), ((6067, 6105), 'adjustText.adjust_text', 'adjust_text', (['annotations'], {'autoalign': '""""""'}), "(annotations, autoalign='')\n", (6078, 6105), False, 'from adjustText import adjust_text\n'), ((6168, 6194), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['axis_labels[0]'], {}), '(axis_labels[0])\n', (6178, 6194), True, 'import matplotlib.pylab as plt\n'), ((6203, 6229), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['axis_labels[1]'], {}), '(axis_labels[1])\n', (6213, 6229), True, 'import matplotlib.pylab as plt\n'), ((6331, 6341), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (6339, 6341), True, 'import matplotlib.pylab as plt\n'), ((6350, 6359), 'matplotlib.pylab.clf', 'plt.clf', ([], {}), '()\n', (6357, 6359), True, 'import matplotlib.pylab as plt\n'), ((6472, 6517), 'numpy.full', 'np.full', (['df.shape[0]', '"""skyblue"""'], {'dtype': 'object'}), "(df.shape[0], 'skyblue', 
dtype=object)\n", (6479, 6517), True, 'import numpy as np\n'), ((7196, 7227), 'numpy.array', 'np.array', (['[*inliers, *outliers]'], {}), '([*inliers, *outliers])\n', (7204, 7227), True, 'import numpy as np\n'), ((7757, 7816), 'numpy.array', 'np.array', (["df.loc[df['is_anomaly'] == 0, 'is_anomaly'].index"], {}), "(df.loc[df['is_anomaly'] == 0, 'is_anomaly'].index)\n", (7765, 7816), True, 'import numpy as np\n'), ((7886, 7945), 'numpy.array', 'np.array', (["df.loc[df['is_anomaly'] == 1, 'is_anomaly'].index"], {}), "(df.loc[df['is_anomaly'] == 1, 'is_anomaly'].index)\n", (7894, 7945), True, 'import numpy as np\n'), ((8279, 8338), 'pathlib.Path', 'Path', (['path_to_best_model_dir', 'Visualizer.__save_dir', 'metric'], {}), '(path_to_best_model_dir, Visualizer.__save_dir, metric)\n', (8283, 8338), False, 'from pathlib import Path\n'), ((187, 209), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (207, 209), False, 'import os, inspect, sys\n'), ((687, 722), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)', 'random_state': '(0)'}), '(n_components=2, random_state=0)\n', (690, 722), False, 'from sklearn.decomposition import PCA\n'), ((878, 950), 'sklearn.manifold.TSNE', 'manifold.TSNE', ([], {'n_components': '(2)', 'init': '"""pca"""', 'perplexity': '(40)', 'random_state': '(0)'}), "(n_components=2, init='pca', perplexity=40, random_state=0)\n", (891, 950), False, 'from sklearn import manifold\n'), ((1840, 1926), 'utils.helper_functions.get_best_model_original_data', 'helper_functions.get_best_model_original_data', (['original_data', 'self.metric_id', '(True)'], {}), '(original_data, self.metric_id,\n True)\n', (1885, 1926), False, 'from utils import helper_functions\n'), ((2531, 2627), 'utils.pseudo_samples.PseudoSamplesMger', 'PseudoSamplesMger', (['nav_file[FileKeys.navigator_pseudo_samples_key]', 'self.metric_id'], {'fs': '(True)'}), '(nav_file[FileKeys.navigator_pseudo_samples_key], self.\n metric_id, fs=True)\n', (2548, 2627), False, 
'from utils.pseudo_samples import PseudoSamplesMger\n'), ((2837, 2872), 'pandas.read_csv', 'pd.read_csv', (['dataset_path_of_best_k'], {}), '(dataset_path_of_best_k)\n', (2848, 2872), True, 'import pandas as pd\n'), ((8112, 8166), 'numpy.arange', 'np.arange', (['self.original_dataset.shape[0]', 'df.shape[0]'], {}), '(self.original_dataset.shape[0], df.shape[0])\n', (8121, 8166), True, 'import numpy as np\n'), ((5626, 5675), 'numpy.delete', 'np.delete', (['new_df', 'pseudo_samples_indices'], {'axis': '(0)'}), '(new_df, pseudo_samples_indices, axis=0)\n', (5635, 5675), True, 'import numpy as np\n'), ((6660, 6684), 'numpy.concatenate', 'np.concatenate', (['(fp, tp)'], {}), '((fp, tp))\n', (6674, 6684), True, 'import numpy as np\n'), ((2173, 2221), 'numpy.arange', 'np.arange', (['(0)', '(self.original_dataset.shape[1] - 2)'], {}), '(0, self.original_dataset.shape[1] - 2)\n', (2182, 2221), True, 'import numpy as np\n'), ((3164, 3212), 'numpy.arange', 'np.arange', (['(0)', '(self.original_dataset.shape[1] - 2)'], {}), '(0, self.original_dataset.shape[1] - 2)\n', (3173, 3212), True, 'import numpy as np\n'), ((3662, 3682), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (3671, 3682), False, 'import json\n'), ((3840, 3900), 'pandas.read_csv', 'pd.read_csv', (['nfile[FileKeys.navigator_original_dataset_path]'], {}), '(nfile[FileKeys.navigator_original_dataset_path])\n', (3851, 3900), True, 'import pandas as pd\n'), ((5969, 6062), 'matplotlib.pylab.text', 'plt.text', (['new_df[i, 0]', 'new_df[i, 1]', 'texts[i]'], {'color': '"""brown"""', 'fontweight': '"""bold"""', 'size': '(9)'}), "(new_df[i, 0], new_df[i, 1], texts[i], color='brown', fontweight=\n 'bold', size=9)\n", (5977, 6062), True, 'import matplotlib.pylab as plt\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright: 2021, <NAME>
# License: MIT (https://opensource.org/licenses/MIT)
# Author: <NAME>, PhD student at University College London (UCL), research assistant at King's College
# London (KCL), supervised by:
# Dr <NAME>, PI of the Robotics and Vision in Medicine (RViM) lab in the School of Biomedical Engineering &
# Imaging Sciences (BMEIS) at King's College London (KCL)
# Prof <NAME>, consultant ophthalmic surgeon, Moorfields Eye Hospital, London UK
#
# This file is part of oflibnumpy
import unittest
import math
import cv2
import numpy as np
from scipy.ndimage import rotate, shift
import sys
sys.path.append('..')
from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, \
matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, \
points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, \
resize_flow, is_zero_flow, track_pts
from src.oflibnumpy.flow_class import Flow
class TestValidityChecks(unittest.TestCase):
    """Unit tests for the input-validation helpers."""

    def test_get_valid_ref(self):
        # Default and the two valid reference frames
        self.assertEqual(get_valid_ref(None), 't')
        for ref in ('s', 't'):
            self.assertEqual(get_valid_ref(ref), ref)
        # Wrong type, then wrong value
        with self.assertRaises(TypeError):
            get_valid_ref(0)
        with self.assertRaises(ValueError):
            get_valid_ref('test')

    def test_get_valid_padding(self):
        with self.assertRaises(TypeError):
            get_valid_padding(100)
        # Too many entries, non-integer entry, negative entry
        for bad_padding in ([10, 20, 30, 40, 50], [10., 20, 30, 40], [-10, 10, 10, 10]):
            with self.assertRaises(ValueError):
                get_valid_padding(bad_padding)

    def test_validate_shape(self):
        with self.assertRaises(TypeError):
            validate_shape('test')
        # Too many dims, negative dim, non-integer dim
        for bad_shape in ([10, 10, 10], [-1, 10], [10., 10]):
            with self.assertRaises(ValueError):
                validate_shape(bad_shape)

    def test_validate_flow_array(self):
        # A valid flow array is returned as float32
        self.assertEqual(validate_flow_array(np.zeros((10, 10, 2))).dtype, 'float32')
        with self.assertRaises(TypeError):  # wrong type
            validate_flow_array('test')
        # Wrong number of dimensions, wrong number of channels
        for bad_shape in ((10, 10), (10, 10, 3)):
            with self.assertRaises(ValueError):
                validate_flow_array(np.zeros(bad_shape))
        # NaN and Inf entries are rejected
        for bad_value in (np.NaN, np.Inf):
            f = np.zeros((10, 10, 2))
            f[0, 0] = bad_value
            with self.assertRaises(ValueError):
                validate_flow_array(f)
class TestMatrixFromTransforms(unittest.TestCase):
    # All numerical values calculated manually and independently
    def test_combined_transforms(self):
        # Shifting to the origin, rotating, and shifting back is equivalent
        # to a single rotation of 30 degrees about the point (100, 100).
        sequence = [
            ['translation', -100, -100],
            ['rotation', 0, 0, 30],
            ['translation', 100, 100],
        ]
        combined = matrix_from_transforms(sequence)
        expected = matrix_from_transform('rotation', [100, 100, 30])
        self.assertIsNone(np.testing.assert_equal(combined, expected))
class TestMatrixFromTransform(unittest.TestCase):
# All numerical values in desired_matrix calculated manually
def test_translation(self):
# Translation of 15 horizontally, 10 vertically
desired_matrix = np.eye(3)
transform = 'translation'
values = [15, 10]
desired_matrix[0, 2] = 15
desired_matrix[1, 2] = 10
self.assertIsNone(np.testing.assert_allclose(desired_matrix, matrix_from_transform(transform, values)))
def test_rotation(self):
# Rotation of 30 degrees counter-clockwise
desired_matrix = np.eye(3)
transform = 'rotation'
values = [0, 0, 30]
desired_matrix[:2, :2] = [[math.sqrt(3) / 2, .5], [-.5, math.sqrt(3) / 2]]
self.assertIsNone(np.testing.assert_allclose(desired_matrix, matrix_from_transform(transform, values)))
# Rotation of 45 degrees clockwise
desired_matrix = np.eye(3)
transform = 'rotation'
values = [0, 0, -45]
desired_matrix[:2, :2] = [[1 / math.sqrt(2), -1 / math.sqrt(2)], [1 / math.sqrt(2), 1 / math.sqrt(2)]]
self.assertIsNone(np.testing.assert_allclose(desired_matrix, matrix_from_transform(transform, values)))
def test_rotation_with_shift(self):
# Rotation of 30 degrees clockwise around point [10, 50] (hor, ver)
desired_matrix = np.eye(3)
transform = 'rotation'
values = [10, 50, -30]
desired_matrix[:2, :2] = [[math.sqrt(3) / 2, -.5], [.5, math.sqrt(3) / 2]]
desired_matrix[0, 2] = 26.3397459622
desired_matrix[1, 2] = 1.69872981078
self.assertIsNone(np.testing.assert_allclose(desired_matrix, matrix_from_transform(transform, values)))
# Rotation of 45 degrees counter-clockwise around point [-20, -30] (hor, ver)
desired_matrix = np.eye(3)
transform = 'rotation'
values = [-20, -30, 45]
desired_matrix[:2, :2] = [[1 / math.sqrt(2), 1 / math.sqrt(2)], [-1 / math.sqrt(2), 1 / math.sqrt(2)]]
desired_matrix[0, 2] = 15.3553390593
desired_matrix[1, 2] = -22.9289321881
self.assertIsNone(np.testing.assert_allclose(desired_matrix, matrix_from_transform(transform, values)))
def test_scaling(self):
# Scaling factor 0.8
desired_matrix = np.eye(3)
transform = 'scaling'
values = [0, 0, 0.8]
desired_matrix[0, 0] = 0.8
desired_matrix[1, 1] = 0.8
self.assertIsNone(np.testing.assert_allclose(desired_matrix, matrix_from_transform(transform, values)))
# Scaling factor 2
desired_matrix = np.eye(3)
transform = 'scaling'
values = [0, 0, 2]
desired_matrix[0, 0] = 2
desired_matrix[1, 1] = 2
self.assertIsNone(np.testing.assert_allclose(desired_matrix, matrix_from_transform(transform, values)))
def test_scaling_with_shift(self):
    """Check matrix_from_transform for scalings around an off-origin point."""
    # Scaling factor 0.8 around point [10, 50] (hor, ver)
    desired_matrix = np.eye(3)
    desired_matrix[0, 0] = 0.8
    desired_matrix[1, 1] = 0.8
    desired_matrix[0, 2] = 2
    desired_matrix[1, 2] = 10
    # numpy testing asserts raise on failure; no assertIsNone wrapper needed
    np.testing.assert_allclose(desired_matrix, matrix_from_transform('scaling', [10, 50, 0.8]))
    # Scaling factor 2 around point [20, 30] (hor, ver)
    desired_matrix = np.eye(3)
    desired_matrix[0, 0] = 2
    desired_matrix[1, 1] = 2
    desired_matrix[0, 2] = -20
    desired_matrix[1, 2] = -30
    np.testing.assert_allclose(desired_matrix, matrix_from_transform('scaling', [20, 30, 2]))
class TestFlowFromMatrix(unittest.TestCase):
    """Tests for flow_from_matrix.

    All numerical values were calculated manually and independently. The
    numpy.testing assert helpers raise AssertionError on mismatch and return
    None on success, so they are called directly instead of being wrapped in
    the redundant self.assertIsNone(...).
    """

    def test_identity(self):
        # No transformation, equals passing identity matrix, to 200 by 300 flow field
        shape = [200, 300]
        flow = flow_from_matrix(np.eye(3), shape)
        np.testing.assert_equal(flow, 0)
        np.testing.assert_equal(flow.shape[:2], shape)

    def test_translation(self):
        # Translation of 10 horizontally, 20 vertically, to 200 by 300 flow field
        shape = [200, 300]
        matrix = np.array([[1, 0, 10], [0, 1, 20], [0, 0, 1]])
        flow = flow_from_matrix(matrix, shape)
        np.testing.assert_equal(flow[..., 0], 10)
        np.testing.assert_equal(flow[..., 1], 20)
        np.testing.assert_equal(flow.shape[:2], shape)

    def test_rotation(self):
        # Rotation of 30 degrees counter-clockwise, to 200 by 300 flow field
        shape = [200, 300]
        matrix = np.array([[math.sqrt(3) / 2, .5, 0], [-.5, math.sqrt(3) / 2, 0], [0, 0, 1]])
        flow = flow_from_matrix(matrix, shape)
        np.testing.assert_equal(flow[0, 0], [0, 0])
        np.testing.assert_allclose(flow[0, 299], [-40.0584042685, -149.5])
        np.testing.assert_allclose(flow[199, 0], [99.5, -26.6609446469])
        np.testing.assert_equal(flow.shape[:2], shape)

    def test_rotation_with_shift(self):
        # Rotation of 30 degrees clockwise around point [10, 50] (hor, ver), to 200 by 300 flow field
        shape = [200, 300]
        matrix = np.array([[math.sqrt(3) / 2, -.5, 26.3397459622],
                           [.5, math.sqrt(3) / 2, 1.69872981078],
                           [0, 0, 1]])
        flow = flow_from_matrix(matrix, shape)
        np.testing.assert_array_almost_equal(flow[50, 10], [0, 0])
        np.testing.assert_allclose(flow[50, 299], [-38.7186583063, 144.5])
        np.testing.assert_allclose(flow[199, 10], [-74.5, -19.9622148361])
        np.testing.assert_equal(flow.shape[:2], shape)

    def test_scaling(self):
        # Scaling factor 0.8, to 200 by 300 flow field
        shape = [200, 300]
        matrix = np.array([[.8, 0, 0], [0, .8, 0], [0, 0, 1]])
        flow = flow_from_matrix(matrix, shape)
        np.testing.assert_equal(flow[0, 0], [0, 0])
        np.testing.assert_allclose(flow[0, 100], [-20, 0])
        np.testing.assert_allclose(flow[100, 0], [0, -20])
        np.testing.assert_equal(flow.shape[:2], shape)

    def test_scaling_with_shift(self):
        # Scaling factor 2 around point [20, 30] (hor, ver), to 200 by 300 flow field
        shape = [200, 300]
        matrix = np.array([[2, 0, -20], [0, 2, -30], [0, 0, 1]])
        flow = flow_from_matrix(matrix, shape)
        np.testing.assert_array_almost_equal(flow[30, 20], [0, 0])
        np.testing.assert_allclose(flow[30, 70], [50, 0])
        np.testing.assert_allclose(flow[80, 20], [0, 50])
        np.testing.assert_equal(flow.shape[:2], shape)
class TestBilinearInterpolation(unittest.TestCase):
    """Tests for bilinear_interpolation of flow vectors at given points."""

    def test_int_on_shift(self):
        # Integer points on a pure translation flow: interpolation is exact
        flow = flow_from_matrix(matrix_from_transform('translation', [10, 20]), (512, 512))
        pts = np.array([[200, 300], [1, 2]])
        desired_result = np.array([[20, 10], [20, 10]])
        actual_result = bilinear_interpolation(flow[..., ::-1], pts)
        # numpy testing asserts raise on failure; no assertIsNone wrapper needed
        np.testing.assert_equal(actual_result, desired_result)

    def test_float_on_rot(self):
        # Float points on a rotation flow: compare against exact rotated positions
        flow = flow_from_matrix(matrix_from_transform('rotation', [0, 0, 30]), (512, 512))
        pts = np.array([[20.5, 10.5], [8.3, 7.2], [120.4, 160.2]])
        desired_pts = [
            [12.5035207776, 19.343266740],
            [3.58801085141, 10.385382907],
            [24.1694586156, 198.93726969]
        ]
        desired_result = np.array(desired_pts) - pts
        actual_result = bilinear_interpolation(flow[..., ::-1], pts)
        # High tolerance needed as exact result is compared to an interpolated one
        np.testing.assert_allclose(actual_result, desired_result, atol=1e-1, rtol=1e-2)

    def test_invalid_input(self):
        # Points outside the valid flow area must raise IndexError
        flow = flow_from_matrix(matrix_from_transform('rotation', [0, 0, 30]), (512, 512))
        with self.assertRaises(IndexError):
            bilinear_interpolation(flow, np.array([[-1, 0], [10, 10]]))
        with self.assertRaises(IndexError):
            bilinear_interpolation(flow, np.array([[0, 0], [511.01, 10]]))
class TestApplyFlow(unittest.TestCase):
    """Tests for apply_flow, warping images with a flow field of either reference."""

    def test_rotation(self):
        img = cv2.imread('smudge.png', 0)
        for ref in ['t', 's']:
            flow = Flow.from_transforms([['rotation', 255.5, 255.5, -30]], img.shape[:2], ref).vecs
            control_img = rotate(img, -30, reshape=False)
            warped_img = apply_flow(flow, img, ref)
            # numpy testing asserts raise on failure; no assertIsNone wrapper needed.
            # Note: using SSIM would be a better measure of similarity here, but
            # wanted to avoid extra dependency
            np.testing.assert_allclose(control_img[200:300, 200:300], warped_img[200:300, 200:300],
                                       atol=20, rtol=0.05)

    def test_translation(self):
        img = cv2.imread('smudge.png')
        for ref in ['t', 's']:
            flow = Flow.from_transforms([['translation', 10, 20]], img.shape[:2], ref).vecs
            control_img = shift(img, [20, 10, 0])
            warped_img = apply_flow(flow, img, ref)
            np.testing.assert_equal(warped_img, control_img)

    def test_failed_apply(self):
        # Test failure cases
        img = cv2.imread('smudge.png')
        flow = Flow.from_transforms([['translation', 10, 20]], img.shape[:2], 't').vecs
        with self.assertRaises(TypeError):  # Target wrong type
            apply_flow(flow, 2, 't')
        with self.assertRaises(ValueError):  # Target ndim smaller than 2
            apply_flow(flow, img[0, :, 0], 't')
        with self.assertRaises(ValueError):  # Target ndim larger than 2
            apply_flow(flow, img[..., np.newaxis], 't')
        with self.assertRaises(ValueError):  # Target shape does not match flow
            apply_flow(flow, img[:10], 't')
        with self.assertRaises(TypeError):  # Mask wrong type
            apply_flow(flow, img, 't', mask=0)
        with self.assertRaises(TypeError):  # Mask values wrong type
            apply_flow(flow, img, 't', mask=img[..., 0])
        with self.assertRaises(ValueError):  # Mask wrong shape
            apply_flow(flow, img, 't', mask=img)
class TestPointsInsideArea(unittest.TestCase):
    """Tests for points_inside_area: membership of (ver, hor) points in a shape."""

    def test_points(self):
        shape = [10, 20]
        # Boundary points: valid rows are 0..9, valid columns 0..19
        pts = np.array([
            [-1, -1],
            [-1, 0],
            [0, -1],
            [0, 0],
            [9, 20],
            [9, 19],
            [10, 19],
            [10, 20]
        ])
        desired_array = [False, False, False, True, False, True, False, False]
        # numpy testing asserts raise on failure; no assertIsNone wrapper needed
        np.testing.assert_equal(points_inside_area(pts, shape), desired_array)
class TestThresholdVectors(unittest.TestCase):
    """Tests for threshold_vectors: zeroing of sub-threshold flow vectors."""

    def test_threshold(self):
        vecs = np.zeros((10, 1, 2))
        vecs[0, 0, 0] = -1e-5
        vecs[1, 0, 0] = 1e-4
        vecs[2, 0, 0] = -1e-3
        vecs[3, 0, 0] = 1
        # Behaviour should be identical whether magnitude or per-component
        # thresholding is used, since only one component is non-zero here.
        for use_mag in [True, False]:
            thresholded = threshold_vectors(vecs, threshold=1e-3, use_mag=use_mag)
            # numpy testing asserts raise on failure; no assertIsNone wrapper needed
            np.testing.assert_equal(thresholded[:4, 0, 0], [0, 0, -1e-3, 1])
            thresholded = threshold_vectors(vecs, threshold=1e-4, use_mag=use_mag)
            np.testing.assert_equal(thresholded[:4, 0, 0], [0, 1e-4, -1e-3, 1])
            thresholded = threshold_vectors(vecs, threshold=1e-5, use_mag=use_mag)
            np.testing.assert_equal(thresholded[:4, 0, 0], [-1e-5, 1e-4, -1e-3, 1])
class TestFromMatrix(unittest.TestCase):
    """Tests for from_matrix, constructing a flow field from a 3x3 matrix."""

    def test_from_matrix(self):
        # With reference 's', this simply corresponds to using flow_from_matrix, tested in test_utils.
        # With reference 't':
        # Rotation of 30 degrees clockwise around point [10, 50] (hor, ver)
        matrix = np.array([[math.sqrt(3) / 2, -.5, 26.3397459622],
                           [.5, math.sqrt(3) / 2, 1.69872981078],
                           [0, 0, 1]])
        shape = [200, 300]
        flow = from_matrix(matrix, shape, 't')
        # numpy testing asserts raise on failure; no assertIsNone wrapper needed
        np.testing.assert_array_almost_equal(flow[50, 10], [0, 0])
        np.testing.assert_allclose(flow[50, 299], [38.7186583063, 144.5])
        np.testing.assert_allclose(flow[199, 10], [-74.5, 19.9622148361])
        np.testing.assert_equal(flow.shape[:2], shape)

    def test_failed_from_matrix(self):
        with self.assertRaises(TypeError):  # Invalid matrix type
            from_matrix('test', [10, 10], 't')
        with self.assertRaises(ValueError):  # Invalid matrix shape
            from_matrix(np.eye(4), [10, 10], 't')
class TestFromTransforms(unittest.TestCase):
    """Tests for from_transforms, building a flow field from a transform list."""

    def test_from_transforms_rotation(self):
        shape = [200, 300]
        transforms = [['rotation', 10, 50, -30]]
        flow = from_transforms(transforms, shape, 't')
        # numpy testing asserts raise on failure; no assertIsNone wrapper needed
        np.testing.assert_array_almost_equal(flow[50, 10], [0, 0])
        np.testing.assert_allclose(flow[50, 299], [38.7186583063, 144.5])
        np.testing.assert_allclose(flow[199, 10], [-74.5, 19.9622148361])
        np.testing.assert_equal(flow.shape[:2], shape)

    def test_from_transforms_scaling(self):
        shape = [200, 300]
        transforms = [['scaling', 20, 30, 2]]
        flow = from_transforms(transforms, shape, 's')
        np.testing.assert_array_almost_equal(flow[30, 20], [0, 0])
        np.testing.assert_allclose(flow[30, 70], [50, 0])
        np.testing.assert_allclose(flow[80, 20], [0, 50])
        np.testing.assert_equal(flow.shape[:2], shape)

    def test_from_transforms_multiple_s(self):
        # Scaling around a point expressed as shift / scale at origin / shift back
        shape = [200, 300]
        transforms = [
            ['translation', -20, -30],
            ['scaling', 0, 0, 2],
            ['translation', 20, 30]
        ]
        flow = from_transforms(transforms, shape, 's')
        np.testing.assert_array_almost_equal(flow[30, 20], [0, 0])
        np.testing.assert_allclose(flow[30, 70], [50, 0])
        np.testing.assert_allclose(flow[80, 20], [0, 50])
        np.testing.assert_equal(flow.shape[:2], shape)

    def test_from_transforms_multiple_t(self):
        # Rotation around a point expressed as shift / rotate at origin / shift back
        shape = [200, 300]
        transforms = [
            ['translation', -10, -50],
            ['rotation', 0, 0, -30],
            ['translation', 10, 50]
        ]
        flow = from_transforms(transforms, shape, 't')
        np.testing.assert_array_almost_equal(flow[50, 10], [0, 0])
        np.testing.assert_allclose(flow[50, 299], [38.7186583063, 144.5])
        np.testing.assert_allclose(flow[199, 10], [-74.5, 19.9622148361])
        np.testing.assert_equal(flow.shape[:2], shape)

    def test_failed_from_transforms(self):
        shape = [200, 300]
        transforms = 'test'
        with self.assertRaises(TypeError):  # transforms not a list
            from_transforms(transforms, shape, 't')
        transforms = ['test']
        with self.assertRaises(TypeError):  # transform not a list
            from_transforms(transforms, shape, 't')
        transforms = [['translation', 20, 10], ['rotation']]
        with self.assertRaises(ValueError):  # transform missing information
            from_transforms(transforms, shape, 't')
        transforms = [['translation', 20, 10], ['rotation', 1]]
        with self.assertRaises(ValueError):  # transform with incomplete information
            from_transforms(transforms, shape, 't')
        transforms = [['translation', 20, 10], ['rotation', 1, 'test', 10]]
        with self.assertRaises(ValueError):  # transform with invalid information
            from_transforms(transforms, shape, 't')
        transforms = [['translation', 20, 10], ['test', 1, 1, 10]]
        with self.assertRaises(ValueError):  # transform type invalid
            from_transforms(transforms, shape, 't')
class TestFromKITTI(unittest.TestCase):
    """Tests for load_kitti, reading KITTI-format flow files (flow + validity)."""

    def test_load(self):
        output = load_kitti('kitti.png')
        self.assertIsInstance(output, np.ndarray)
        # Test fixture encodes an outer product of ranges in the first channel
        desired_flow = np.arange(0, 10)[:, np.newaxis] * np.arange(0, 20)[np.newaxis, :]
        # numpy testing asserts raise on failure; no assertIsNone wrapper needed
        np.testing.assert_equal(output[..., 0], desired_flow)
        np.testing.assert_equal(output[..., 1], 0)
        # Third channel is the validity mask: column 0 valid, column 10 invalid
        np.testing.assert_equal(output[:, 0, 2], 1)
        np.testing.assert_equal(output[:, 10, 2], 0)

    def test_failed_load(self):
        with self.assertRaises(ValueError):  # Wrong path
            load_kitti('test')
        with self.assertRaises(ValueError):  # Wrong flow shape
            load_kitti('kitti_wrong.png')
class TestFromSintel(unittest.TestCase):
    """Tests for load_sintel / load_sintel_mask, reading Sintel-format data."""

    def test_load_flow(self):
        f = load_sintel('sintel.flo')
        self.assertIsInstance(f, np.ndarray)
        # Test fixture encodes an outer product of ranges in the first channel
        desired_flow = np.arange(0, 10)[:, np.newaxis] * np.arange(0, 20)[np.newaxis, :]
        # numpy testing asserts raise on failure; no assertIsNone wrapper needed
        np.testing.assert_equal(f[..., 0], desired_flow)
        np.testing.assert_equal(f[..., 1], 0)

    def test_failed_load_flow(self):
        with self.assertRaises(TypeError):  # Path not a string
            load_sintel(0)
        with self.assertRaises(ValueError):  # Wrong tag
            load_sintel('sintel_wrong.flo')

    def test_load_mask(self):
        m = load_sintel_mask('sintel_invalid.png')
        self.assertIsInstance(m, np.ndarray)
        # Column 0 of the fixture is valid, column 10 invalid
        np.testing.assert_equal(m[:, 0], True)
        np.testing.assert_equal(m[:, 10], False)

    def test_failed_load_mask(self):
        with self.assertRaises(TypeError):  # Path not a string
            load_sintel_mask(0)
        with self.assertRaises(ValueError):  # File does not exist
            load_sintel_mask('test.png')
class TestResizeFlow(unittest.TestCase):
    """Tests for resize_flow: rescaling a flow field and its vector magnitudes."""

    def test_resize(self):
        shape = [20, 10]
        ref = 's'
        flow = Flow.from_transforms([['rotation', 30, 50, 30]], shape, ref).vecs
        # Different scalar scales
        scales = [.2, .5, 1, 1.5, 2, 10]
        for scale in scales:
            resized_flow = resize_flow(flow, scale)
            resized_shape = scale * np.array(shape)
            # numpy testing asserts raise on failure; no assertIsNone wrapper needed
            np.testing.assert_equal(resized_flow.shape[:2], resized_shape)
            np.testing.assert_allclose(resized_flow[0, 0], flow[0, 0] * scale, rtol=.1)
        # Scale list: [vertical, horizontal], so vectors scale by reversed order
        scale = [.5, 2]
        resized_flow = resize_flow(flow, scale)
        resized_shape = np.array(scale) * np.array(shape)
        np.testing.assert_equal(resized_flow.shape[:2], resized_shape)
        np.testing.assert_allclose(resized_flow[0, 0], flow[0, 0] * np.array(scale)[::-1], rtol=.1)
        # Scale tuple
        scale = (2, .5)
        resized_flow = resize_flow(flow, scale)
        resized_shape = np.array(scale) * np.array(shape)
        np.testing.assert_equal(resized_flow.shape[:2], resized_shape)
        np.testing.assert_allclose(resized_flow[0, 0], flow[0, 0] * np.array(scale)[::-1], rtol=.1)

    def test_resize_on_fields(self):
        # Check scaling is performed correctly based on the actual flow field
        ref = 't'
        flow_small = Flow.from_transforms([['rotation', 0, 0, 30]], (50, 80), ref).vecs
        flow_large = Flow.from_transforms([['rotation', 0, 0, 30]], (150, 240), ref).vecs
        flow_resized = resize_flow(flow_large, 1 / 3)
        np.testing.assert_allclose(flow_resized, flow_small, atol=1, rtol=.1)

    def test_failed_resize(self):
        flow = Flow.from_transforms([['rotation', 30, 50, 30]], [20, 10], 's').vecs
        with self.assertRaises(TypeError):  # Wrong shape type
            resize_flow(flow, 'test')
        with self.assertRaises(ValueError):  # Wrong shape values
            resize_flow(flow, ['test', 0])
        with self.assertRaises(ValueError):  # Wrong shape shape
            resize_flow(flow, [1, 2, 3])
        with self.assertRaises(ValueError):  # Shape is 0
            resize_flow(flow, 0)
        with self.assertRaises(ValueError):  # Shape below 0
            resize_flow(flow, -0.1)
class TestIsZeroFlow(unittest.TestCase):
    """Tests for is_zero_flow, with and without magnitude thresholding."""

    def test_is_zero_flow(self):
        field = np.zeros((10, 10, 2), 'float32')
        # Truly zero field: zero regardless of thresholding
        self.assertEqual(is_zero_flow(field, thresholded=True), True)
        self.assertEqual(is_zero_flow(field, thresholded=False), True)
        # Tiny horizontal component: only zero when thresholded
        field[:3, :, 0] = 1e-4
        self.assertEqual(is_zero_flow(field, thresholded=True), True)
        self.assertEqual(is_zero_flow(field, thresholded=False), False)
        # Vertical component above threshold: never zero
        field[:3, :, 1] = -1e-3
        self.assertEqual(is_zero_flow(field, thresholded=True), False)
        self.assertEqual(is_zero_flow(field, thresholded=False), False)
        # Large single vector: never zero
        field[0, 0] = 10
        self.assertEqual(is_zero_flow(field, thresholded=True), False)
        self.assertEqual(is_zero_flow(field, thresholded=False), False)

    def test_failed_is_zero_flow(self):
        with self.assertRaises(TypeError):  # Wrong thresholded type
            is_zero_flow(np.zeros((10, 10, 2)), 'test')
class TestTrackPts(unittest.TestCase):
    """Tests for track_pts, propagating points through 's' and 't' flow fields."""

    def test(self):
        f_s = Flow.from_transforms([['rotation', 0, 0, 30]], (512, 512), 's').vecs
        f_t = Flow.from_transforms([['rotation', 0, 0, 30]], (512, 512), 't').vecs
        pts = np.array([[20.5, 10.5], [8.3, 7.2], [120.4, 160.2]])
        desired_pts = [
            [12.5035207776, 19.343266740],
            [3.58801085141, 10.385382907],
            [24.1694586156, 198.93726969]
        ]
        pts_tracked_s = track_pts(f_s, 's', pts)
        # numpy testing asserts raise on failure; no assertIsNone wrapper needed.
        # High tolerance needed as exact result is compared to an interpolated one
        np.testing.assert_allclose(pts_tracked_s, desired_pts, atol=1e-1, rtol=1e-2)
        pts_tracked_s = track_pts(f_s, 's', pts, s_exact_mode=True)
        np.testing.assert_allclose(pts_tracked_s, desired_pts)
        pts_tracked_t = track_pts(f_t, 't', pts)
        np.testing.assert_allclose(pts_tracked_t, desired_pts, atol=1e-6, rtol=1e-6)
        pts_tracked_t = track_pts(f_t, 't', pts, int_out=True)
        np.testing.assert_equal(pts_tracked_t, np.round(desired_pts))
        self.assertEqual(pts_tracked_t.dtype, int)
        # Test tracking for 's' flow and int pts (checked via debugger)
        f = Flow.from_transforms([['translation', 10, 20]], (512, 512), 's').vecs
        pts = np.array([[20, 10], [8, 7]])
        desired_pts = [[40, 20], [28, 17]]
        pts_tracked_s = track_pts(f, 's', pts)
        np.testing.assert_equal(pts_tracked_s, desired_pts)

    def test_failed_track_pts(self):
        pts = np.array([[20, 10], [20, 10], [8, 7]])
        flow = np.zeros((10, 10, 2))
        with self.assertRaises(TypeError):  # Wrong pts type
            track_pts(flow, 's', pts='test')
        with self.assertRaises(ValueError):  # Wrong pts shape
            track_pts(flow, 's', pts=np.zeros((10, 10, 2)))
        with self.assertRaises(ValueError):  # Pts channel not of size 2
            track_pts(flow, 's', pts=pts.transpose())
        with self.assertRaises(TypeError):  # Wrong int_out type
            track_pts(flow, 's', pts, int_out='test')
        with self.assertRaises(TypeError):  # Wrong s_exact_mode type
            track_pts(flow, 's', pts, True, s_exact_mode='test')
if __name__ == '__main__':
    # Discover and run all test cases in this module when executed directly.
    unittest.main()
| [
"src.oflibnumpy.utils.is_zero_flow",
"numpy.testing.assert_equal",
"src.oflibnumpy.utils.flow_from_matrix",
"src.oflibnumpy.utils.apply_flow",
"src.oflibnumpy.utils.threshold_vectors",
"math.sqrt",
"src.oflibnumpy.utils.bilinear_interpolation",
"numpy.array",
"src.oflibnumpy.utils.matrix_from_transf... | [((664, 685), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (679, 685), False, 'import sys\n'), ((27748, 27763), 'unittest.main', 'unittest.main', ([], {}), '()\n', (27761, 27763), False, 'import unittest\n'), ((2297, 2318), 'numpy.zeros', 'np.zeros', (['(10, 10, 2)'], {}), '((10, 10, 2))\n', (2305, 2318), True, 'import numpy as np\n'), ((3385, 3419), 'src.oflibnumpy.utils.matrix_from_transforms', 'matrix_from_transforms', (['transforms'], {}), '(transforms)\n', (3407, 3419), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((3445, 3494), 'src.oflibnumpy.utils.matrix_from_transform', 'matrix_from_transform', (['"""rotation"""', '[100, 100, 30]'], {}), "('rotation', [100, 100, 30])\n", (3466, 3494), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((3807, 3816), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3813, 3816), True, 'import numpy as np\n'), ((4163, 4172), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4169, 4172), True, 'import numpy as np\n'), ((4496, 4505), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4502, 4505), True, 'import numpy as np\n'), ((4931, 4940), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4937, 4940), True, 'import numpy as np\n'), ((5400, 5409), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (5406, 5409), True, 'import numpy as np\n'), 
((5870, 5879), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (5876, 5879), True, 'import numpy as np\n'), ((6174, 6183), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (6180, 6183), True, 'import numpy as np\n'), ((6546, 6555), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (6552, 6555), True, 'import numpy as np\n'), ((6952, 6961), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (6958, 6961), True, 'import numpy as np\n'), ((7543, 7552), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (7549, 7552), True, 'import numpy as np\n'), ((7568, 7599), 'src.oflibnumpy.utils.flow_from_matrix', 'flow_from_matrix', (['matrix', 'shape'], {}), '(matrix, shape)\n', (7584, 7599), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((7893, 7938), 'numpy.array', 'np.array', (['[[1, 0, 10], [0, 1, 20], [0, 0, 1]]'], {}), '([[1, 0, 10], [0, 1, 20], [0, 0, 1]])\n', (7901, 7938), True, 'import numpy as np\n'), ((7954, 7985), 'src.oflibnumpy.utils.flow_from_matrix', 'flow_from_matrix', (['matrix', 'shape'], {}), '(matrix, shape)\n', (7970, 7985), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((8441, 8472), 'src.oflibnumpy.utils.flow_from_matrix', 'flow_from_matrix', (['matrix', 'shape'], {}), '(matrix, shape)\n', (8457, 8472), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, 
matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((9161, 9192), 'src.oflibnumpy.utils.flow_from_matrix', 'flow_from_matrix', (['matrix', 'shape'], {}), '(matrix, shape)\n', (9177, 9192), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((9669, 9716), 'numpy.array', 'np.array', (['[[0.8, 0, 0], [0, 0.8, 0], [0, 0, 1]]'], {}), '([[0.8, 0, 0], [0, 0.8, 0], [0, 0, 1]])\n', (9677, 9716), True, 'import numpy as np\n'), ((9730, 9761), 'src.oflibnumpy.utils.flow_from_matrix', 'flow_from_matrix', (['matrix', 'shape'], {}), '(matrix, shape)\n', (9746, 9761), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((10233, 10280), 'numpy.array', 'np.array', (['[[2, 0, -20], [0, 2, -30], [0, 0, 1]]'], {}), '([[2, 0, -20], [0, 2, -30], [0, 0, 1]])\n', (10241, 10280), True, 'import numpy as np\n'), ((10296, 10327), 'src.oflibnumpy.utils.flow_from_matrix', 'flow_from_matrix', (['matrix', 'shape'], {}), '(matrix, shape)\n', (10312, 10327), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, 
threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((10835, 10865), 'numpy.array', 'np.array', (['[[200, 300], [1, 2]]'], {}), '([[200, 300], [1, 2]])\n', (10843, 10865), True, 'import numpy as np\n'), ((10891, 10921), 'numpy.array', 'np.array', (['[[20, 10], [20, 10]]'], {}), '([[20, 10], [20, 10]])\n', (10899, 10921), True, 'import numpy as np\n'), ((10946, 10990), 'src.oflibnumpy.utils.bilinear_interpolation', 'bilinear_interpolation', (['flow[..., ::-1]', 'pts'], {}), '(flow[..., ::-1], pts)\n', (10968, 10990), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((11212, 11264), 'numpy.array', 'np.array', (['[[20.5, 10.5], [8.3, 7.2], [120.4, 160.2]]'], {}), '([[20.5, 10.5], [8.3, 7.2], [120.4, 160.2]])\n', (11220, 11264), True, 'import numpy as np\n'), ((11504, 11548), 'src.oflibnumpy.utils.bilinear_interpolation', 'bilinear_interpolation', (['flow[..., ::-1]', 'pts'], {}), '(flow[..., ::-1], pts)\n', (11526, 11548), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((12238, 12265), 'cv2.imread', 'cv2.imread', (['"""smudge.png"""', '(0)'], {}), "('smudge.png', 0)\n", (12248, 12265), False, 'import cv2\n'), ((12864, 12888), 'cv2.imread', 'cv2.imread', (['"""smudge.png"""'], {}), "('smudge.png')\n", (12874, 12888), False, 'import cv2\n'), ((13271, 13295), 
'cv2.imread', 'cv2.imread', (['"""smudge.png"""'], {}), "('smudge.png')\n", (13281, 13295), False, 'import cv2\n'), ((14323, 14412), 'numpy.array', 'np.array', (['[[-1, -1], [-1, 0], [0, -1], [0, 0], [9, 20], [9, 19], [10, 19], [10, 20]]'], {}), '([[-1, -1], [-1, 0], [0, -1], [0, 0], [9, 20], [9, 19], [10, 19], [\n 10, 20]])\n', (14331, 14412), True, 'import numpy as np\n'), ((14785, 14805), 'numpy.zeros', 'np.zeros', (['(10, 1, 2)'], {}), '((10, 1, 2))\n', (14793, 14805), True, 'import numpy as np\n'), ((16004, 16035), 'src.oflibnumpy.utils.from_matrix', 'from_matrix', (['matrix', 'shape', '"""t"""'], {}), "(matrix, shape, 't')\n", (16015, 16035), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((16836, 16875), 'src.oflibnumpy.utils.from_transforms', 'from_transforms', (['transforms', 'shape', '"""t"""'], {}), "(transforms, shape, 't')\n", (16851, 16875), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((17355, 17394), 'src.oflibnumpy.utils.from_transforms', 'from_transforms', (['transforms', 'shape', '"""s"""'], {}), "(transforms, shape, 's')\n", (17370, 17394), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, 
from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((17941, 17980), 'src.oflibnumpy.utils.from_transforms', 'from_transforms', (['transforms', 'shape', '"""s"""'], {}), "(transforms, shape, 's')\n", (17956, 17980), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((18530, 18569), 'src.oflibnumpy.utils.from_transforms', 'from_transforms', (['transforms', 'shape', '"""t"""'], {}), "(transforms, shape, 't')\n", (18545, 18569), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((20158, 20181), 'src.oflibnumpy.utils.load_kitti', 'load_kitti', (['"""kitti.png"""'], {}), "('kitti.png')\n", (20168, 20181), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((20928, 20953), 'src.oflibnumpy.utils.load_sintel', 'load_sintel', (['"""sintel.flo"""'], {}), "('sintel.flo')\n", (20939, 20953), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, 
apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((21502, 21540), 'src.oflibnumpy.utils.load_sintel_mask', 'load_sintel_mask', (['"""sintel_invalid.png"""'], {}), "('sintel_invalid.png')\n", (21518, 21540), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((22627, 22651), 'src.oflibnumpy.utils.resize_flow', 'resize_flow', (['flow', 'scale'], {}), '(flow, scale)\n', (22638, 22651), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((22989, 23013), 'src.oflibnumpy.utils.resize_flow', 'resize_flow', (['flow', 'scale'], {}), '(flow, scale)\n', (23000, 23013), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((23616, 23646), 'src.oflibnumpy.utils.resize_flow', 'resize_flow', (['flow_large', '(1 / 3)'], {}), '(flow_large, 1 / 3)\n', (23627, 23646), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, 
bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((24458, 24490), 'numpy.zeros', 'np.zeros', (['(10, 10, 2)', '"""float32"""'], {}), "((10, 10, 2), 'float32')\n", (24466, 24490), True, 'import numpy as np\n'), ((25547, 25599), 'numpy.array', 'np.array', (['[[20.5, 10.5], [8.3, 7.2], [120.4, 160.2]]'], {}), '([[20.5, 10.5], [8.3, 7.2], [120.4, 160.2]])\n', (25555, 25599), True, 'import numpy as np\n'), ((25786, 25810), 'src.oflibnumpy.utils.track_pts', 'track_pts', (['f_s', '"""s"""', 'pts'], {}), "(f_s, 's', pts)\n", (25795, 25810), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((26075, 26118), 'src.oflibnumpy.utils.track_pts', 'track_pts', (['f_s', '"""s"""', 'pts'], {'s_exact_mode': '(True)'}), "(f_s, 's', pts, s_exact_mode=True)\n", (26084, 26118), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((26225, 26249), 'src.oflibnumpy.utils.track_pts', 'track_pts', (['f_t', '"""t"""', 'pts'], {}), "(f_t, 't', pts)\n", (26234, 26249), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, 
from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((26431, 26469), 'src.oflibnumpy.utils.track_pts', 'track_pts', (['f_t', '"""t"""', 'pts'], {'int_out': '(True)'}), "(f_t, 't', pts, int_out=True)\n", (26440, 26469), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((26779, 26807), 'numpy.array', 'np.array', (['[[20, 10], [8, 7]]'], {}), '([[20, 10], [8, 7]])\n', (26787, 26807), True, 'import numpy as np\n'), ((26875, 26897), 'src.oflibnumpy.utils.track_pts', 'track_pts', (['f', '"""s"""', 'pts'], {}), "(f, 's', pts)\n", (26884, 26897), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((27029, 27067), 'numpy.array', 'np.array', (['[[20, 10], [20, 10], [8, 7]]'], {}), '([[20, 10], [20, 10], [8, 7]])\n', (27037, 27067), True, 'import numpy as np\n'), ((27083, 27104), 'numpy.zeros', 'np.zeros', (['(10, 10, 2)'], {}), '((10, 10, 2))\n', (27091, 27104), True, 'import numpy as np\n'), ((1207, 1226), 'src.oflibnumpy.utils.get_valid_ref', 'get_valid_ref', (['None'], {}), '(None)\n', (1220, 1226), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, 
load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((1258, 1276), 'src.oflibnumpy.utils.get_valid_ref', 'get_valid_ref', (['"""s"""'], {}), "('s')\n", (1271, 1276), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((1308, 1326), 'src.oflibnumpy.utils.get_valid_ref', 'get_valid_ref', (['"""t"""'], {}), "('t')\n", (1321, 1326), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((1388, 1404), 'src.oflibnumpy.utils.get_valid_ref', 'get_valid_ref', (['(0)'], {}), '(0)\n', (1401, 1404), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((1461, 1482), 'src.oflibnumpy.utils.get_valid_ref', 'get_valid_ref', (['"""test"""'], {}), "('test')\n", (1474, 1482), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((1577, 1599), 
'src.oflibnumpy.utils.get_valid_padding', 'get_valid_padding', (['(100)'], {}), '(100)\n', (1594, 1599), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((1656, 1695), 'src.oflibnumpy.utils.get_valid_padding', 'get_valid_padding', (['[10, 20, 30, 40, 50]'], {}), '([10, 20, 30, 40, 50])\n', (1673, 1695), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((1752, 1789), 'src.oflibnumpy.utils.get_valid_padding', 'get_valid_padding', (['[10.0, 20, 30, 40]'], {}), '([10.0, 20, 30, 40])\n', (1769, 1789), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((1845, 1881), 'src.oflibnumpy.utils.get_valid_padding', 'get_valid_padding', (['[-10, 10, 10, 10]'], {}), '([-10, 10, 10, 10])\n', (1862, 1881), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, 
is_zero_flow, track_pts\n'), ((1973, 1995), 'src.oflibnumpy.utils.validate_shape', 'validate_shape', (['"""test"""'], {}), "('test')\n", (1987, 1995), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((2052, 2080), 'src.oflibnumpy.utils.validate_shape', 'validate_shape', (['[10, 10, 10]'], {}), '([10, 10, 10])\n', (2066, 2080), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((2137, 2161), 'src.oflibnumpy.utils.validate_shape', 'validate_shape', (['[-1, 10]'], {}), '([-1, 10])\n', (2151, 2161), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((2218, 2244), 'src.oflibnumpy.utils.validate_shape', 'validate_shape', (['[10.0, 10]'], {}), '([10.0, 10])\n', (2232, 2244), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), 
((2454, 2481), 'src.oflibnumpy.utils.validate_flow_array', 'validate_flow_array', (['"""test"""'], {}), "('test')\n", (2473, 2481), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((2809, 2830), 'numpy.zeros', 'np.zeros', (['(10, 10, 2)'], {}), '((10, 10, 2))\n', (2817, 2830), True, 'import numpy as np\n'), ((2872, 2894), 'src.oflibnumpy.utils.validate_flow_array', 'validate_flow_array', (['f'], {}), '(f)\n', (2891, 2894), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((2969, 2990), 'numpy.zeros', 'np.zeros', (['(10, 10, 2)'], {}), '((10, 10, 2))\n', (2977, 2990), True, 'import numpy as np\n'), ((3032, 3054), 'src.oflibnumpy.utils.validate_flow_array', 'validate_flow_array', (['f'], {}), '(f)\n', (3051, 3054), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((3521, 3575), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual_matrix', 'desired_matrix'], {}), '(actual_matrix, desired_matrix)\n', (3544, 3575), True, 'import numpy as np\n'), ((7626, 7658), 'numpy.testing.assert_equal', 'np.testing.assert_equal', 
(['flow', '(0)'], {}), '(flow, 0)\n', (7649, 7658), True, 'import numpy as np\n'), ((7686, 7732), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['flow.shape[:2]', 'shape'], {}), '(flow.shape[:2], shape)\n', (7709, 7732), True, 'import numpy as np\n'), ((8012, 8053), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['flow[..., 0]', '(10)'], {}), '(flow[..., 0], 10)\n', (8035, 8053), True, 'import numpy as np\n'), ((8081, 8122), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['flow[..., 1]', '(20)'], {}), '(flow[..., 1], 20)\n', (8104, 8122), True, 'import numpy as np\n'), ((8150, 8196), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['flow.shape[:2]', 'shape'], {}), '(flow.shape[:2], shape)\n', (8173, 8196), True, 'import numpy as np\n'), ((8499, 8542), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['flow[0, 0]', '[0, 0]'], {}), '(flow[0, 0], [0, 0])\n', (8522, 8542), True, 'import numpy as np\n'), ((8570, 8636), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow[0, 299]', '[-40.0584042685, -149.5]'], {}), '(flow[0, 299], [-40.0584042685, -149.5])\n', (8596, 8636), True, 'import numpy as np\n'), ((8664, 8728), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow[199, 0]', '[99.5, -26.6609446469]'], {}), '(flow[199, 0], [99.5, -26.6609446469])\n', (8690, 8728), True, 'import numpy as np\n'), ((8756, 8802), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['flow.shape[:2]', 'shape'], {}), '(flow.shape[:2], shape)\n', (8779, 8802), True, 'import numpy as np\n'), ((9219, 9277), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['flow[50, 10]', '[0, 0]'], {}), '(flow[50, 10], [0, 0])\n', (9255, 9277), True, 'import numpy as np\n'), ((9305, 9371), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow[50, 299]', '[-38.7186583063, 144.5]'], {}), '(flow[50, 299], [-38.7186583063, 144.5])\n', (9331, 9371), True, 'import numpy 
as np\n'), ((9399, 9465), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow[199, 10]', '[-74.5, -19.9622148361]'], {}), '(flow[199, 10], [-74.5, -19.9622148361])\n', (9425, 9465), True, 'import numpy as np\n'), ((9493, 9539), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['flow.shape[:2]', 'shape'], {}), '(flow.shape[:2], shape)\n', (9516, 9539), True, 'import numpy as np\n'), ((9788, 9831), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['flow[0, 0]', '[0, 0]'], {}), '(flow[0, 0], [0, 0])\n', (9811, 9831), True, 'import numpy as np\n'), ((9859, 9909), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow[0, 100]', '[-20, 0]'], {}), '(flow[0, 100], [-20, 0])\n', (9885, 9909), True, 'import numpy as np\n'), ((9937, 9987), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow[100, 0]', '[0, -20]'], {}), '(flow[100, 0], [0, -20])\n', (9963, 9987), True, 'import numpy as np\n'), ((10015, 10061), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['flow.shape[:2]', 'shape'], {}), '(flow.shape[:2], shape)\n', (10038, 10061), True, 'import numpy as np\n'), ((10354, 10412), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['flow[30, 20]', '[0, 0]'], {}), '(flow[30, 20], [0, 0])\n', (10390, 10412), True, 'import numpy as np\n'), ((10440, 10489), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow[30, 70]', '[50, 0]'], {}), '(flow[30, 70], [50, 0])\n', (10466, 10489), True, 'import numpy as np\n'), ((10517, 10566), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow[80, 20]', '[0, 50]'], {}), '(flow[80, 20], [0, 50])\n', (10543, 10566), True, 'import numpy as np\n'), ((10594, 10640), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['flow.shape[:2]', 'shape'], {}), '(flow.shape[:2], shape)\n', (10617, 10640), True, 'import numpy as np\n'), ((10761, 10807), 'src.oflibnumpy.utils.matrix_from_transform', 
'matrix_from_transform', (['"""translation"""', '[10, 20]'], {}), "('translation', [10, 20])\n", (10782, 10807), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((11017, 11071), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual_result', 'desired_result'], {}), '(actual_result, desired_result)\n', (11040, 11071), True, 'import numpy as np\n'), ((11139, 11184), 'src.oflibnumpy.utils.matrix_from_transform', 'matrix_from_transform', (['"""rotation"""', '[0, 0, 30]'], {}), "('rotation', [0, 0, 30])\n", (11160, 11184), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((11452, 11473), 'numpy.array', 'np.array', (['desired_pts'], {}), '(desired_pts)\n', (11460, 11473), True, 'import numpy as np\n'), ((11575, 11653), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['actual_result', 'desired_result'], {'atol': '(0.1)', 'rtol': '(0.01)'}), '(actual_result, desired_result, atol=0.1, rtol=0.01)\n', (11601, 11653), True, 'import numpy as np\n'), ((11859, 11904), 'src.oflibnumpy.utils.matrix_from_transform', 'matrix_from_transform', (['"""rotation"""', '[0, 0, 30]'], {}), "('rotation', [0, 0, 30])\n", (11880, 11904), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, 
apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((12423, 12454), 'scipy.ndimage.rotate', 'rotate', (['img', '(-30)'], {'reshape': '(False)'}), '(img, -30, reshape=False)\n', (12429, 12454), False, 'from scipy.ndimage import rotate, shift\n'), ((12480, 12506), 'src.oflibnumpy.utils.apply_flow', 'apply_flow', (['flow', 'img', 'ref'], {}), '(flow, img, ref)\n', (12490, 12506), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((13038, 13061), 'scipy.ndimage.shift', 'shift', (['img', '[20, 10, 0]'], {}), '(img, [20, 10, 0])\n', (13043, 13061), False, 'from scipy.ndimage import rotate, shift\n'), ((13087, 13113), 'src.oflibnumpy.utils.apply_flow', 'apply_flow', (['flow', 'img', 'ref'], {}), '(flow, img, ref)\n', (13097, 13113), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((13311, 13378), 'src.oflibnumpy.flow_class.Flow.from_transforms', 'Flow.from_transforms', (["[['translation', 10, 20]]", 'img.shape[:2]', '"""t"""'], {}), "([['translation', 10, 20]], img.shape[:2], 't')\n", (13331, 13378), False, 'from src.oflibnumpy.flow_class import Flow\n'), ((13460, 13484), 'src.oflibnumpy.utils.apply_flow', 'apply_flow', (['flow', '(2)', '"""t"""'], {}), "(flow, 2, 't')\n", (13470, 13484), False, 'from src.oflibnumpy.utils import 
get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((13571, 13606), 'src.oflibnumpy.utils.apply_flow', 'apply_flow', (['flow', 'img[0, :, 0]', '"""t"""'], {}), "(flow, img[0, :, 0], 't')\n", (13581, 13606), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((13692, 13735), 'src.oflibnumpy.utils.apply_flow', 'apply_flow', (['flow', 'img[..., np.newaxis]', '"""t"""'], {}), "(flow, img[..., np.newaxis], 't')\n", (13702, 13735), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((13828, 13859), 'src.oflibnumpy.utils.apply_flow', 'apply_flow', (['flow', 'img[:10]', '"""t"""'], {}), "(flow, img[:10], 't')\n", (13838, 13859), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((13934, 13968), 'src.oflibnumpy.utils.apply_flow', 'apply_flow', (['flow', 'img', '"""t"""'], 
{'mask': '(0)'}), "(flow, img, 't', mask=0)\n", (13944, 13968), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((14050, 14094), 'src.oflibnumpy.utils.apply_flow', 'apply_flow', (['flow', 'img', '"""t"""'], {'mask': 'img[..., 0]'}), "(flow, img, 't', mask=img[..., 0])\n", (14060, 14094), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((14171, 14207), 'src.oflibnumpy.utils.apply_flow', 'apply_flow', (['flow', 'img', '"""t"""'], {'mask': 'img'}), "(flow, img, 't', mask=img)\n", (14181, 14207), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((14985, 15042), 'src.oflibnumpy.utils.threshold_vectors', 'threshold_vectors', (['vecs'], {'threshold': '(0.001)', 'use_mag': 'use_mag'}), '(vecs, threshold=0.001, use_mag=use_mag)\n', (15002, 15042), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, 
load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((15164, 15222), 'src.oflibnumpy.utils.threshold_vectors', 'threshold_vectors', (['vecs'], {'threshold': '(0.0001)', 'use_mag': 'use_mag'}), '(vecs, threshold=0.0001, use_mag=use_mag)\n', (15181, 15222), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((15346, 15403), 'src.oflibnumpy.utils.threshold_vectors', 'threshold_vectors', (['vecs'], {'threshold': '(1e-05)', 'use_mag': 'use_mag'}), '(vecs, threshold=1e-05, use_mag=use_mag)\n', (15363, 15403), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((16062, 16120), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['flow[50, 10]', '[0, 0]'], {}), '(flow[50, 10], [0, 0])\n', (16098, 16120), True, 'import numpy as np\n'), ((16148, 16213), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow[50, 299]', '[38.7186583063, 144.5]'], {}), '(flow[50, 299], [38.7186583063, 144.5])\n', (16174, 16213), True, 'import numpy as np\n'), ((16241, 16306), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow[199, 10]', '[-74.5, 19.9622148361]'], {}), '(flow[199, 10], [-74.5, 19.9622148361])\n', (16267, 16306), True, 'import numpy as np\n'), ((16334, 16380), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['flow.shape[:2]', 'shape'], {}), '(flow.shape[:2], 
shape)\n', (16357, 16380), True, 'import numpy as np\n'), ((16500, 16534), 'src.oflibnumpy.utils.from_matrix', 'from_matrix', (['"""test"""', '[10, 10]', '"""t"""'], {}), "('test', [10, 10], 't')\n", (16511, 16534), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((16902, 16960), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['flow[50, 10]', '[0, 0]'], {}), '(flow[50, 10], [0, 0])\n', (16938, 16960), True, 'import numpy as np\n'), ((16988, 17053), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow[50, 299]', '[38.7186583063, 144.5]'], {}), '(flow[50, 299], [38.7186583063, 144.5])\n', (17014, 17053), True, 'import numpy as np\n'), ((17081, 17146), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow[199, 10]', '[-74.5, 19.9622148361]'], {}), '(flow[199, 10], [-74.5, 19.9622148361])\n', (17107, 17146), True, 'import numpy as np\n'), ((17174, 17220), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['flow.shape[:2]', 'shape'], {}), '(flow.shape[:2], shape)\n', (17197, 17220), True, 'import numpy as np\n'), ((17421, 17479), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['flow[30, 20]', '[0, 0]'], {}), '(flow[30, 20], [0, 0])\n', (17457, 17479), True, 'import numpy as np\n'), ((17507, 17556), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow[30, 70]', '[50, 0]'], {}), '(flow[30, 70], [50, 0])\n', (17533, 17556), True, 'import numpy as np\n'), ((17584, 17633), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow[80, 20]', '[0, 50]'], {}), '(flow[80, 20], [0, 50])\n', (17610, 17633), True, 'import 
numpy as np\n'), ((17661, 17707), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['flow.shape[:2]', 'shape'], {}), '(flow.shape[:2], shape)\n', (17684, 17707), True, 'import numpy as np\n'), ((18007, 18065), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['flow[30, 20]', '[0, 0]'], {}), '(flow[30, 20], [0, 0])\n', (18043, 18065), True, 'import numpy as np\n'), ((18093, 18142), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow[30, 70]', '[50, 0]'], {}), '(flow[30, 70], [50, 0])\n', (18119, 18142), True, 'import numpy as np\n'), ((18170, 18219), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow[80, 20]', '[0, 50]'], {}), '(flow[80, 20], [0, 50])\n', (18196, 18219), True, 'import numpy as np\n'), ((18247, 18293), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['flow.shape[:2]', 'shape'], {}), '(flow.shape[:2], shape)\n', (18270, 18293), True, 'import numpy as np\n'), ((18596, 18654), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['flow[50, 10]', '[0, 0]'], {}), '(flow[50, 10], [0, 0])\n', (18632, 18654), True, 'import numpy as np\n'), ((18682, 18747), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow[50, 299]', '[38.7186583063, 144.5]'], {}), '(flow[50, 299], [38.7186583063, 144.5])\n', (18708, 18747), True, 'import numpy as np\n'), ((18775, 18840), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow[199, 10]', '[-74.5, 19.9622148361]'], {}), '(flow[199, 10], [-74.5, 19.9622148361])\n', (18801, 18840), True, 'import numpy as np\n'), ((18868, 18914), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['flow.shape[:2]', 'shape'], {}), '(flow.shape[:2], shape)\n', (18891, 18914), True, 'import numpy as np\n'), ((19095, 19134), 'src.oflibnumpy.utils.from_transforms', 'from_transforms', (['transforms', 'shape', '"""t"""'], {}), "(transforms, shape, 't')\n", (19110, 19134), False, 'from 
src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((19244, 19283), 'src.oflibnumpy.utils.from_transforms', 'from_transforms', (['transforms', 'shape', '"""t"""'], {}), "(transforms, shape, 't')\n", (19259, 19283), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((19434, 19473), 'src.oflibnumpy.utils.from_transforms', 'from_transforms', (['transforms', 'shape', '"""t"""'], {}), "(transforms, shape, 't')\n", (19449, 19473), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((19635, 19674), 'src.oflibnumpy.utils.from_transforms', 'from_transforms', (['transforms', 'shape', '"""t"""'], {}), "(transforms, shape, 't')\n", (19650, 19674), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((19845, 19884), 
'src.oflibnumpy.utils.from_transforms', 'from_transforms', (['transforms', 'shape', '"""t"""'], {}), "(transforms, shape, 't')\n", (19860, 19884), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((20034, 20073), 'src.oflibnumpy.utils.from_transforms', 'from_transforms', (['transforms', 'shape', '"""t"""'], {}), "(transforms, shape, 't')\n", (20049, 20073), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((20347, 20400), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['output[..., 0]', 'desired_flow'], {}), '(output[..., 0], desired_flow)\n', (20370, 20400), True, 'import numpy as np\n'), ((20428, 20470), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['output[..., 1]', '(0)'], {}), '(output[..., 1], 0)\n', (20451, 20470), True, 'import numpy as np\n'), ((20498, 20541), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['output[:, 0, 2]', '(1)'], {}), '(output[:, 0, 2], 1)\n', (20521, 20541), True, 'import numpy as np\n'), ((20569, 20613), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['output[:, 10, 2]', '(0)'], {}), '(output[:, 10, 2], 0)\n', (20592, 20613), True, 'import numpy as np\n'), ((20718, 20736), 'src.oflibnumpy.utils.load_kitti', 'load_kitti', (['"""test"""'], {}), "('test')\n", (20728, 20736), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, 
validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((20813, 20842), 'src.oflibnumpy.utils.load_kitti', 'load_kitti', (['"""kitti_wrong.png"""'], {}), "('kitti_wrong.png')\n", (20823, 20842), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((21114, 21162), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['f[..., 0]', 'desired_flow'], {}), '(f[..., 0], desired_flow)\n', (21137, 21162), True, 'import numpy as np\n'), ((21190, 21227), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['f[..., 1]', '(0)'], {}), '(f[..., 1], 0)\n', (21213, 21227), True, 'import numpy as np\n'), ((21343, 21357), 'src.oflibnumpy.utils.load_sintel', 'load_sintel', (['(0)'], {}), '(0)\n', (21354, 21357), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((21427, 21458), 'src.oflibnumpy.utils.load_sintel', 'load_sintel', (['"""sintel_wrong.flo"""'], {}), "('sintel_wrong.flo')\n", (21438, 21458), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, 
threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((21612, 21650), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['m[:, 0]', '(True)'], {}), '(m[:, 0], True)\n', (21635, 21650), True, 'import numpy as np\n'), ((21678, 21718), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['m[:, 10]', '(False)'], {}), '(m[:, 10], False)\n', (21701, 21718), True, 'import numpy as np\n'), ((21834, 21853), 'src.oflibnumpy.utils.load_sintel_mask', 'load_sintel_mask', (['(0)'], {}), '(0)\n', (21850, 21853), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((21933, 21961), 'src.oflibnumpy.utils.load_sintel_mask', 'load_sintel_mask', (['"""test.png"""'], {}), "('test.png')\n", (21949, 21961), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((22090, 22150), 'src.oflibnumpy.flow_class.Flow.from_transforms', 'Flow.from_transforms', (["[['rotation', 30, 50, 30]]", 'shape', 'ref'], {}), "([['rotation', 30, 50, 30]], shape, ref)\n", (22110, 22150), False, 'from src.oflibnumpy.flow_class import Flow\n'), ((22280, 22304), 'src.oflibnumpy.utils.resize_flow', 'resize_flow', (['flow', 'scale'], {}), '(flow, scale)\n', (22291, 22304), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, 
matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((22676, 22691), 'numpy.array', 'np.array', (['scale'], {}), '(scale)\n', (22684, 22691), True, 'import numpy as np\n'), ((22694, 22709), 'numpy.array', 'np.array', (['shape'], {}), '(shape)\n', (22702, 22709), True, 'import numpy as np\n'), ((22736, 22798), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['resized_flow.shape[:2]', 'resized_shape'], {}), '(resized_flow.shape[:2], resized_shape)\n', (22759, 22798), True, 'import numpy as np\n'), ((23038, 23053), 'numpy.array', 'np.array', (['scale'], {}), '(scale)\n', (23046, 23053), True, 'import numpy as np\n'), ((23056, 23071), 'numpy.array', 'np.array', (['shape'], {}), '(shape)\n', (23064, 23071), True, 'import numpy as np\n'), ((23098, 23160), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['resized_flow.shape[:2]', 'resized_shape'], {}), '(resized_flow.shape[:2], resized_shape)\n', (23121, 23160), True, 'import numpy as np\n'), ((23436, 23497), 'src.oflibnumpy.flow_class.Flow.from_transforms', 'Flow.from_transforms', (["[['rotation', 0, 0, 30]]", '(50, 80)', 'ref'], {}), "([['rotation', 0, 0, 30]], (50, 80), ref)\n", (23456, 23497), False, 'from src.oflibnumpy.flow_class import Flow\n'), ((23524, 23587), 'src.oflibnumpy.flow_class.Flow.from_transforms', 'Flow.from_transforms', (["[['rotation', 0, 0, 30]]", '(150, 240)', 'ref'], {}), "([['rotation', 0, 0, 30]], (150, 240), ref)\n", (23544, 23587), False, 'from src.oflibnumpy.flow_class import Flow\n'), ((23673, 23743), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['flow_resized', 'flow_small'], {'atol': '(1)', 'rtol': '(0.1)'}), '(flow_resized, flow_small, atol=1, rtol=0.1)\n', (23699, 23743), True, 'import numpy as np\n'), ((23794, 23857), 'src.oflibnumpy.flow_class.Flow.from_transforms', 
'Flow.from_transforms', (["[['rotation', 30, 50, 30]]", '[20, 10]', '"""s"""'], {}), "([['rotation', 30, 50, 30]], [20, 10], 's')\n", (23814, 23857), False, 'from src.oflibnumpy.flow_class import Flow\n'), ((23938, 23963), 'src.oflibnumpy.utils.resize_flow', 'resize_flow', (['flow', '"""test"""'], {}), "(flow, 'test')\n", (23949, 23963), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((24042, 24072), 'src.oflibnumpy.utils.resize_flow', 'resize_flow', (['flow', "['test', 0]"], {}), "(flow, ['test', 0])\n", (24053, 24072), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((24150, 24178), 'src.oflibnumpy.utils.resize_flow', 'resize_flow', (['flow', '[1, 2, 3]'], {}), '(flow, [1, 2, 3])\n', (24161, 24178), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((24249, 24269), 'src.oflibnumpy.utils.resize_flow', 'resize_flow', (['flow', '(0)'], {}), '(flow, 0)\n', (24260, 24269), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, 
flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((24343, 24366), 'src.oflibnumpy.utils.resize_flow', 'resize_flow', (['flow', '(-0.1)'], {}), '(flow, -0.1)\n', (24354, 24366), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((24516, 24552), 'src.oflibnumpy.utils.is_zero_flow', 'is_zero_flow', (['flow'], {'thresholded': '(True)'}), '(flow, thresholded=True)\n', (24528, 24552), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((24585, 24622), 'src.oflibnumpy.utils.is_zero_flow', 'is_zero_flow', (['flow'], {'thresholded': '(False)'}), '(flow, thresholded=False)\n', (24597, 24622), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((24686, 24722), 'src.oflibnumpy.utils.is_zero_flow', 'is_zero_flow', (['flow'], {'thresholded': '(True)'}), '(flow, thresholded=True)\n', (24698, 24722), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, 
validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((24755, 24792), 'src.oflibnumpy.utils.is_zero_flow', 'is_zero_flow', (['flow'], {'thresholded': '(False)'}), '(flow, thresholded=False)\n', (24767, 24792), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((24858, 24894), 'src.oflibnumpy.utils.is_zero_flow', 'is_zero_flow', (['flow'], {'thresholded': '(True)'}), '(flow, thresholded=True)\n', (24870, 24894), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((24928, 24965), 'src.oflibnumpy.utils.is_zero_flow', 'is_zero_flow', (['flow'], {'thresholded': '(False)'}), '(flow, thresholded=False)\n', (24940, 24965), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((25024, 25060), 'src.oflibnumpy.utils.is_zero_flow', 'is_zero_flow', (['flow'], {'thresholded': '(True)'}), '(flow, thresholded=True)\n', (25036, 
25060), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((25094, 25131), 'src.oflibnumpy.utils.is_zero_flow', 'is_zero_flow', (['flow'], {'thresholded': '(False)'}), '(flow, thresholded=False)\n', (25106, 25131), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((25381, 25444), 'src.oflibnumpy.flow_class.Flow.from_transforms', 'Flow.from_transforms', (["[['rotation', 0, 0, 30]]", '(512, 512)', '"""s"""'], {}), "([['rotation', 0, 0, 30]], (512, 512), 's')\n", (25401, 25444), False, 'from src.oflibnumpy.flow_class import Flow\n'), ((25464, 25527), 'src.oflibnumpy.flow_class.Flow.from_transforms', 'Flow.from_transforms', (["[['rotation', 0, 0, 30]]", '(512, 512)', '"""t"""'], {}), "([['rotation', 0, 0, 30]], (512, 512), 't')\n", (25484, 25527), False, 'from src.oflibnumpy.flow_class import Flow\n'), ((25837, 25912), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['pts_tracked_s', 'desired_pts'], {'atol': '(0.1)', 'rtol': '(0.01)'}), '(pts_tracked_s, desired_pts, atol=0.1, rtol=0.01)\n', (25863, 25912), True, 'import numpy as np\n'), ((26145, 26199), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['pts_tracked_s', 'desired_pts'], {}), '(pts_tracked_s, desired_pts)\n', (26171, 26199), True, 'import numpy as np\n'), ((26276, 26354), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['pts_tracked_t', 
'desired_pts'], {'atol': '(1e-06)', 'rtol': '(1e-06)'}), '(pts_tracked_t, desired_pts, atol=1e-06, rtol=1e-06)\n', (26302, 26354), True, 'import numpy as np\n'), ((26695, 26759), 'src.oflibnumpy.flow_class.Flow.from_transforms', 'Flow.from_transforms', (["[['translation', 10, 20]]", '(512, 512)', '"""s"""'], {}), "([['translation', 10, 20]], (512, 512), 's')\n", (26715, 26759), False, 'from src.oflibnumpy.flow_class import Flow\n'), ((26924, 26975), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['pts_tracked_s', 'desired_pts'], {}), '(pts_tracked_s, desired_pts)\n', (26947, 26975), True, 'import numpy as np\n'), ((27178, 27210), 'src.oflibnumpy.utils.track_pts', 'track_pts', (['flow', '"""s"""'], {'pts': '"""test"""'}), "(flow, 's', pts='test')\n", (27187, 27210), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((27538, 27579), 'src.oflibnumpy.utils.track_pts', 'track_pts', (['flow', '"""s"""', 'pts'], {'int_out': '"""test"""'}), "(flow, 's', pts, int_out='test')\n", (27547, 27579), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((27662, 27714), 'src.oflibnumpy.utils.track_pts', 'track_pts', (['flow', '"""s"""', 'pts', '(True)'], {'s_exact_mode': '"""test"""'}), "(flow, 's', pts, True, s_exact_mode='test')\n", (27671, 27714), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, 
validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((2344, 2366), 'src.oflibnumpy.utils.validate_flow_array', 'validate_flow_array', (['f'], {}), '(f)\n', (2363, 2366), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((2588, 2606), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (2596, 2606), True, 'import numpy as np\n'), ((2712, 2733), 'numpy.zeros', 'np.zeros', (['(10, 10, 3)'], {}), '((10, 10, 3))\n', (2720, 2733), True, 'import numpy as np\n'), ((4014, 4054), 'src.oflibnumpy.utils.matrix_from_transform', 'matrix_from_transform', (['transform', 'values'], {}), '(transform, values)\n', (4035, 4054), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((4384, 4424), 'src.oflibnumpy.utils.matrix_from_transform', 'matrix_from_transform', (['transform', 'values'], {}), '(transform, values)\n', (4405, 4424), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, 
load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((4746, 4786), 'src.oflibnumpy.utils.matrix_from_transform', 'matrix_from_transform', (['transform', 'values'], {}), '(transform, values)\n', (4767, 4786), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((5245, 5285), 'src.oflibnumpy.utils.matrix_from_transform', 'matrix_from_transform', (['transform', 'values'], {}), '(transform, values)\n', (5266, 5285), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((5744, 5784), 'src.oflibnumpy.utils.matrix_from_transform', 'matrix_from_transform', (['transform', 'values'], {}), '(transform, values)\n', (5765, 5784), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((6078, 6118), 'src.oflibnumpy.utils.matrix_from_transform', 'matrix_from_transform', (['transform', 'values'], {}), '(transform, values)\n', (6099, 6118), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, 
points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((6376, 6416), 'src.oflibnumpy.utils.matrix_from_transform', 'matrix_from_transform', (['transform', 'values'], {}), '(transform, values)\n', (6397, 6416), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((6823, 6863), 'src.oflibnumpy.utils.matrix_from_transform', 'matrix_from_transform', (['transform', 'values'], {}), '(transform, values)\n', (6844, 6863), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((7226, 7266), 'src.oflibnumpy.utils.matrix_from_transform', 'matrix_from_transform', (['transform', 'values'], {}), '(transform, values)\n', (7247, 7266), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((12003, 12032), 'numpy.array', 'np.array', (['[[-1, 0], [10, 10]]'], {}), '([[-1, 0], [10, 10]])\n', (12011, 12032), True, 'import numpy as np\n'), ((12119, 12151), 'numpy.array', 'np.array', (['[[0, 0], [511.01, 10]]'], {}), '([[0, 0], [511.01, 10]])\n', (12127, 12151), 
True, 'import numpy as np\n'), ((12316, 12391), 'src.oflibnumpy.flow_class.Flow.from_transforms', 'Flow.from_transforms', (["[['rotation', 255.5, 255.5, -30]]", 'img.shape[:2]', 'ref'], {}), "([['rotation', 255.5, 255.5, -30]], img.shape[:2], ref)\n", (12336, 12391), False, 'from src.oflibnumpy.flow_class import Flow\n'), ((12537, 12649), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['control_img[200:300, 200:300]', 'warped_img[200:300, 200:300]'], {'atol': '(20)', 'rtol': '(0.05)'}), '(control_img[200:300, 200:300], warped_img[200:\n 300, 200:300], atol=20, rtol=0.05)\n', (12563, 12649), True, 'import numpy as np\n'), ((12939, 13006), 'src.oflibnumpy.flow_class.Flow.from_transforms', 'Flow.from_transforms', (["[['translation', 10, 20]]", 'img.shape[:2]', 'ref'], {}), "([['translation', 10, 20]], img.shape[:2], ref)\n", (12959, 13006), False, 'from src.oflibnumpy.flow_class import Flow\n'), ((13144, 13192), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['warped_img', 'control_img'], {}), '(warped_img, control_img)\n', (13167, 13192), True, 'import numpy as np\n'), ((14643, 14673), 'src.oflibnumpy.utils.points_inside_area', 'points_inside_area', (['pts', 'shape'], {}), '(pts, shape)\n', (14661, 14673), False, 'from src.oflibnumpy.utils import get_valid_ref, get_valid_padding, validate_shape, validate_flow_array, matrix_from_transforms, matrix_from_transform, flow_from_matrix, bilinear_interpolation, apply_flow, points_inside_area, threshold_vectors, from_matrix, from_transforms, load_kitti, load_sintel, load_sintel_mask, resize_flow, is_zero_flow, track_pts\n'), ((15072, 15137), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['thresholded[:4, 0, 0]', '[0, 0, -0.001, 1]'], {}), '(thresholded[:4, 0, 0], [0, 0, -0.001, 1])\n', (15095, 15137), True, 'import numpy as np\n'), ((15251, 15321), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['thresholded[:4, 0, 0]', '[0, 0.0001, -0.001, 1]'], {}), '(thresholded[:4, 0, 0], 
[0, 0.0001, -0.001, 1])\n', (15274, 15321), True, 'import numpy as np\n'), ((15433, 15508), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['thresholded[:4, 0, 0]', '[-1e-05, 0.0001, -0.001, 1]'], {}), '(thresholded[:4, 0, 0], [-1e-05, 0.0001, -0.001, 1])\n', (15456, 15508), True, 'import numpy as np\n'), ((16627, 16636), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (16633, 16636), True, 'import numpy as np\n'), ((20255, 20271), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (20264, 20271), True, 'import numpy as np\n'), ((20289, 20305), 'numpy.arange', 'np.arange', (['(0)', '(20)'], {}), '(0, 20)\n', (20298, 20305), True, 'import numpy as np\n'), ((21022, 21038), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (21031, 21038), True, 'import numpy as np\n'), ((21056, 21072), 'numpy.arange', 'np.arange', (['(0)', '(20)'], {}), '(0, 20)\n', (21065, 21072), True, 'import numpy as np\n'), ((22341, 22356), 'numpy.array', 'np.array', (['shape'], {}), '(shape)\n', (22349, 22356), True, 'import numpy as np\n'), ((22387, 22449), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['resized_flow.shape[:2]', 'resized_shape'], {}), '(resized_flow.shape[:2], resized_shape)\n', (22410, 22449), True, 'import numpy as np\n'), ((22481, 22557), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['resized_flow[0, 0]', '(flow[0, 0] * scale)'], {'rtol': '(0.1)'}), '(resized_flow[0, 0], flow[0, 0] * scale, rtol=0.1)\n', (22507, 22557), True, 'import numpy as np\n'), ((25275, 25296), 'numpy.zeros', 'np.zeros', (['(10, 10, 2)'], {}), '((10, 10, 2))\n', (25283, 25296), True, 'import numpy as np\n'), ((26535, 26556), 'numpy.round', 'np.round', (['desired_pts'], {}), '(desired_pts)\n', (26543, 26556), True, 'import numpy as np\n'), ((4267, 4279), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (4276, 4279), False, 'import math\n'), ((4296, 4308), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (4305, 4308), False, 'import 
math\n'), ((4605, 4617), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (4614, 4617), False, 'import math\n'), ((4624, 4636), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (4633, 4636), False, 'import math\n'), ((4644, 4656), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (4653, 4656), False, 'import math\n'), ((4662, 4674), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (4671, 4674), False, 'import math\n'), ((5038, 5050), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (5047, 5050), False, 'import math\n'), ((5067, 5079), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (5076, 5079), False, 'import math\n'), ((5512, 5524), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (5521, 5524), False, 'import math\n'), ((5530, 5542), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (5539, 5542), False, 'import math\n'), ((5551, 5563), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (5560, 5563), False, 'import math\n'), ((5569, 5581), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (5578, 5581), False, 'import math\n'), ((27311, 27332), 'numpy.zeros', 'np.zeros', (['(10, 10, 2)'], {}), '((10, 10, 2))\n', (27319, 27332), True, 'import numpy as np\n'), ((8360, 8372), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (8369, 8372), False, 'import math\n'), ((8392, 8404), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (8401, 8404), False, 'import math\n'), ((9002, 9014), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (9011, 9014), False, 'import math\n'), ((9073, 9085), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (9082, 9085), False, 'import math\n'), ((15818, 15830), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (15827, 15830), False, 'import math\n'), ((15889, 15901), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (15898, 15901), False, 'import math\n'), ((22886, 22901), 'numpy.array', 'np.array', (['scale'], {}), '(scale)\n', (22894, 22901), True, 'import numpy as np\n'), ((23248, 23263), 'numpy.array', 'np.array', (['scale'], {}), 
'(scale)\n', (23256, 23263), True, 'import numpy as np\n')] |
from nolearn.lasagne import BatchIterator
import numpy as np
from skimage.io import imread
from joblib import Parallel, delayed
import os
def load_im_f(name):
    """Read the image file `name` and return it as a float32 CHW array.

    Pixel values are divided by 256 — maps 8-bit intensities into
    [0, 1) (assumes uint8 input; TODO confirm).
    """
    scaled = imread(name).astype(np.float32) / 256.
    return np.transpose(scaled, [2, 0, 1])
class LazyBatchIterator(BatchIterator):
    """Batch iterator that defers image loading to batch time.

    The incoming X holds file names; each batch is read from disk in
    parallel workers and stacked into a single numpy array only when
    the batch is actually requested.
    """
    def transform(self, Xb, yb):
        names, yb = super(LazyBatchIterator, self).transform(Xb, yb)
        # Load every file of the batch concurrently (one job per core).
        images = Parallel(n_jobs=-1)(delayed(load_im_f)(fname)
                                      for fname in names)
        return np.asarray(images), yb
| [
"numpy.asarray",
"joblib.Parallel",
"skimage.io.imread",
"joblib.delayed",
"numpy.transpose"
] | [((218, 245), 'numpy.transpose', 'np.transpose', (['im', '[2, 0, 1]'], {}), '(im, [2, 0, 1])\n', (230, 245), True, 'import numpy as np\n'), ((566, 580), 'numpy.asarray', 'np.asarray', (['Xb'], {}), '(Xb)\n', (576, 580), True, 'import numpy as np\n'), ((458, 477), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (466, 477), False, 'from joblib import Parallel, delayed\n'), ((170, 182), 'skimage.io.imread', 'imread', (['name'], {}), '(name)\n', (176, 182), False, 'from skimage.io import imread\n'), ((478, 496), 'joblib.delayed', 'delayed', (['load_im_f'], {}), '(load_im_f)\n', (485, 496), False, 'from joblib import Parallel, delayed\n')] |
# File path of cleaned_loan_data.csv is stored in path
# End-to-end loan-repayment classification script:
# load data -> EDA plots -> encode features -> decision tree -> grid search -> tree viz.
import pandas as pd
from sklearn.model_selection import train_test_split
# Target is 'paid.back.loan'; features are every column between the first
# column (dropped) and the target itself (last column, excluded by -1).
data = pd.read_csv(path)
y = data['paid.back.loan']
X = data.iloc[:, 1:-1]
print(X.shape)
# Fixed random_state keeps the 70/30 split reproducible.
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3, random_state=0)
import matplotlib.pyplot as plt
# Class-balance bar chart of the training target.
fully_paid = y_train.value_counts()
plt.bar(fully_paid.index, fully_paid)
import numpy as np
from sklearn.preprocessing import LabelEncoder
# 'int.rate' arrives as a percentage string like '11.5%'; drop the '%' and
# rescale to a fraction. NOTE(review): '%' is replaced by a space rather than
# removed — astype(float) tolerates the trailing blank, so the value is the same.
X_train['int.rate'] = X_train['int.rate'].str.replace('%', ' ')
X_train['int.rate'] = (X_train['int.rate'].astype(float)) / 100
X_test['int.rate'] = X_test['int.rate'].str.replace('%', ' ')
X_test['int.rate'] = (X_test['int.rate'].astype(float)) / 100
# Split columns by dtype for the EDA plots below.
# NOTE(review): num_df is taken from X_train but cat_df from X_test — only the
# column names are reused later so the plots still work, but this looks
# unintentional; confirm whether X_train was meant for cat_df as well.
num_df = X_train.select_dtypes(include=['number'])
cat_df = X_test.select_dtypes(include=['object'])
print(cat_df)
import seaborn as sns
# One boxplot per numeric feature, grouped by the target.
# NOTE(review): hard-codes exactly 9 numeric columns — verify against the dataset.
cols = num_df.columns
fig ,axes = plt.subplots(nrows=9, ncols=1, figsize=(15,10))
for i in range(0,9) :
    sns.boxplot(x=y_train, y=num_df[cols[i]], ax=axes[i])
# 2x2 grid of count plots for the first four categorical features.
cols = cat_df.columns
fig ,axes = plt.subplots(nrows=2, ncols=2)
for i in range(0,2) :
    for j in range(0,2) :
        sns.countplot(x=X_train[cols[i*2+j]], hue=y_train, ax=axes[i,j])
from sklearn.tree import DecisionTreeClassifier
# Label-encode every categorical column; the encoder is fit on train and the
# same fitted encoder transforms test, so both share one label mapping.
# NOTE(review): transform() raises on labels unseen during fit — acceptable
# only if the test split introduces no new categories.
for i in cat_df.columns :
    X_train[i].fillna('NA', inplace=True)
    le = LabelEncoder()
    X_train[i] = le.fit_transform(X_train[i])
    X_test[i].fillna('NA', inplace=True)
    X_test[i] = le.transform(X_test[i])
# Binarise the target for the classifier.
y_train.replace({'No': 0, 'Yes': 1}, inplace=True)
y_test.replace({'No': 0, 'Yes': 1}, inplace=True)
# Baseline: an unconstrained decision tree scored on the held-out set.
model = DecisionTreeClassifier(random_state=0)
model.fit(X_train, y_train)
acc = model.score(X_test, y_test)
from sklearn.model_selection import GridSearchCV
# Tune tree depth and minimum leaf size with 5-fold CV to curb overfitting.
parameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)}
model_2 = DecisionTreeClassifier(random_state=0)
p_tree = GridSearchCV(estimator=model_2, param_grid=parameter_grid, cv=5)
p_tree.fit(X_train, y_train)
acc_2 = p_tree.score(X_test, y_test)
from io import StringIO
from sklearn.tree import export_graphviz
from sklearn import tree
from sklearn import metrics
from IPython.display import Image
import pydotplus
# Render the tuned tree to a PNG via graphviz and display it with matplotlib.
dot_data = export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None, feature_names=X.columns, filled = True, class_names=['loan_paid_back_yes','loan_paid_back_no'])
graph_big = pydotplus.graph_from_dot_data(dot_data)
# NOTE(review): user_data_dir must be defined by the hosting environment.
img_path = user_data_dir+'/file.png'
graph_big.write_png(img_path)
plt.figure(figsize=(20,15))
plt.imshow(plt.imread(img_path))
plt.axis('off')
plt.show()
| [
"sklearn.model_selection.GridSearchCV",
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.imread",
"seaborn.boxplot",
"sklearn.tree.export_graphviz",
"matplotlib.pyplot.bar",
"matplotlib.py... | [((136, 153), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (147, 153), True, 'import pandas as pd\n'), ((251, 304), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(X, y, test_size=0.3, random_state=0)\n', (267, 304), False, 'from sklearn.model_selection import train_test_split\n'), ((372, 409), 'matplotlib.pyplot.bar', 'plt.bar', (['fully_paid.index', 'fully_paid'], {}), '(fully_paid.index, fully_paid)\n', (379, 409), True, 'import matplotlib.pyplot as plt\n'), ((901, 949), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(9)', 'ncols': '(1)', 'figsize': '(15, 10)'}), '(nrows=9, ncols=1, figsize=(15, 10))\n', (913, 949), True, 'import matplotlib.pyplot as plt\n'), ((1064, 1094), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)'}), '(nrows=2, ncols=2)\n', (1076, 1094), True, 'import matplotlib.pyplot as plt\n'), ((1593, 1631), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (1615, 1631), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1839, 1877), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (1861, 1877), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1887, 1951), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'model_2', 'param_grid': 'parameter_grid', 'cv': '(5)'}), '(estimator=model_2, param_grid=parameter_grid, cv=5)\n', (1899, 1951), False, 'from sklearn.model_selection import GridSearchCV\n'), ((2199, 2370), 'sklearn.tree.export_graphviz', 'export_graphviz', ([], {'decision_tree': 'p_tree.best_estimator_', 'out_file': 'None', 'feature_names': 'X.columns', 'filled': '(True)', 'class_names': "['loan_paid_back_yes', 'loan_paid_back_no']"}), "(decision_tree=p_tree.best_estimator_, out_file=None,\n 
feature_names=X.columns, filled=True, class_names=['loan_paid_back_yes',\n 'loan_paid_back_no'])\n", (2214, 2370), False, 'from sklearn.tree import export_graphviz\n'), ((2376, 2415), 'pydotplus.graph_from_dot_data', 'pydotplus.graph_from_dot_data', (['dot_data'], {}), '(dot_data)\n', (2405, 2415), False, 'import pydotplus\n'), ((2483, 2511), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 15)'}), '(figsize=(20, 15))\n', (2493, 2511), True, 'import matplotlib.pyplot as plt\n'), ((2544, 2559), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2552, 2559), True, 'import matplotlib.pyplot as plt\n'), ((2560, 2570), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2568, 2570), True, 'import matplotlib.pyplot as plt\n'), ((975, 1028), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': 'y_train', 'y': 'num_df[cols[i]]', 'ax': 'axes[i]'}), '(x=y_train, y=num_df[cols[i]], ax=axes[i])\n', (986, 1028), True, 'import seaborn as sns\n'), ((1342, 1356), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1354, 1356), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1775, 1791), 'numpy.arange', 'np.arange', (['(3)', '(10)'], {}), '(3, 10)\n', (1784, 1791), True, 'import numpy as np\n'), ((2522, 2542), 'matplotlib.pyplot.imread', 'plt.imread', (['img_path'], {}), '(img_path)\n', (2532, 2542), True, 'import matplotlib.pyplot as plt\n'), ((1151, 1220), 'seaborn.countplot', 'sns.countplot', ([], {'x': 'X_train[cols[i * 2 + j]]', 'hue': 'y_train', 'ax': 'axes[i, j]'}), '(x=X_train[cols[i * 2 + j]], hue=y_train, ax=axes[i, j])\n', (1164, 1220), True, 'import seaborn as sns\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.