code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A shared experiment class for inferring 2D objects with multiple cortical
columns
"""
import abc
import collections
import random
import numpy as np
from htmresearch.algorithms.apical_tiebreak_temporal_memory import (
ApicalTiebreakPairMemory)
from htmresearch.algorithms.location_modules import (
BodyToSpecificObjectModule2D, SensorToBodyModule2D,
SensorToSpecificObjectModule)
from htmresearch.algorithms.column_pooler import ColumnPooler
from htmresearch.frameworks.layers.sensor_placement import greedySensorPositions
class CorticalColumn(object):
  """
  Container for the layers of a single cortical column, together with the
  SDRs recorded during learning that are later used to classify activity
  and to verify that inference is correct.
  """

  def __init__(self, inputLayer, objectLayer, sensorToBodyModules,
               sensorToSpecificObjectModules):
    self.inputLayer = inputLayer
    self.objectLayer = objectLayer
    self.sensorToBodyModules = sensorToBodyModules
    self.sensorToSpecificObjectModules = sensorToSpecificObjectModules

    # Recorded SDRs, keyed as shown, e.g.
    #   (objectName, (top, left)) -> [0, 26, 54, 77, 101, ...]
    self.sensorLocationRepresentations = {}
    #   (objectName, (top, left), featureName) -> [0, 26, 54, 77, 101, ...]
    self.inputRepresentations = {}
    #   objectName -> [14, 19, 54, 107, 201, ...]
    self.objectRepresentations = {}

  def getSensorToSpecificObjectSDR(self):
    """
    Concatenate the active cells of every sensor-to-specific-object module
    into a single SDR, offsetting each module's cell indices by the total
    cell count of the modules before it.
    """
    offset = 0
    pieces = [np.array([], dtype="uint32")]
    for module in self.sensorToSpecificObjectModules:
      pieces.append(module.activeCells + offset)
      offset += module.cellCount
    return np.concatenate(pieces)

  def getAllCellActivity(self):
    """
    Snapshot of this column's current activity, as comparable sets, used to
    detect when the network has settled.
    """
    moduleActivity = tuple(set(module.activeCells)
                           for module in self.sensorToSpecificObjectModules)
    return (set(self.objectLayer.getActiveCells()),
            set(self.inputLayer.getActiveCells()),
            moduleActivity)
class MultiColumn2DExperiment(object):
  """
  The experiment code organized into a class.

  Builds ``numCorticalColumns`` cortical columns plus a shared set of
  body-to-specific-object modules, then drives learning and inference of 2D
  objects through repeated sensory touches.
  """

  def __init__(self, objects, objectPlacements, featureNames, locationConfigs,
               numCorticalColumns, worldDimensions, featureW=15,
               cellsPerColumn=32):
    """
    @param objects (dict)
    objectName -> list of feature dicts with keys "name", "top", "left",
    "width", "height"

    @param objectPlacements (dict)
    objectName -> (top, left) placement of the object in the world

    @param featureNames (iterable)
    Names of all possible features

    @param locationConfigs (list of dict)
    One config per location module; each must contain "cellDimensions"

    @param numCorticalColumns (int)
    Number of cortical columns / sensors

    @param worldDimensions (tuple)
    Dimensions of the 2D world

    @param featureW (int)
    Number of active bits in each feature SDR

    @param cellsPerColumn (int)
    Cells per minicolumn in the input layer
    """
    self.objects = objects
    self.objectPlacements = objectPlacements
    self.numCorticalColumns = numCorticalColumns
    self.worldDimensions = worldDimensions
    self.locationConfigs = locationConfigs

    # A fixed random SDR (featureW of 150 bits) per (column, featureName).
    self.features = dict(
      ((iCol, k), np.array(sorted(random.sample(xrange(150), featureW)),
                           dtype="uint32"))
      for k in featureNames
      for iCol in xrange(numCorticalColumns))

    self.corticalColumns = []
    for _ in xrange(numCorticalColumns):
      inputLayer = ApicalTiebreakPairMemory(**{
        "columnCount": 150,
        "cellsPerColumn": cellsPerColumn,
        "initialPermanence": 1.0,
        "basalInputSize": sum(
          np.prod(config["cellDimensions"])
          for config in locationConfigs),
        "apicalInputSize": 4096,
        "seed": random.randint(0, 2048)})

      # Bug fix: the original dict literal listed "initialProximalPermanence"
      # twice; duplicate keys in a dict literal silently keep only the last
      # value, so the repeated entry was dead code and has been removed.
      objectLayer = ColumnPooler(**{
        "inputWidth": 150 * cellsPerColumn,
        "initialProximalPermanence": 1.0,
        "lateralInputWidths": [4096] * (numCorticalColumns - 1),
        "seed": random.randint(0, 2048)})

      sensorToBodyModules = [SensorToBodyModule2D(**config)
                             for config in locationConfigs]

      sensorToSpecificObjectModules = [
        SensorToSpecificObjectModule(**{
          "cellDimensions": config["cellDimensions"],
          "anchorInputSize": inputLayer.numberOfCells(),
          "initialPermanence": 1.0,
          "seed": random.randint(0, 2048)})
        for config in locationConfigs]

      self.corticalColumns.append(
        CorticalColumn(inputLayer, objectLayer, sensorToBodyModules,
                       sensorToSpecificObjectModules))

    # One body-to-specific-object module per location config, shared across
    # columns, wired reciprocally to each column's sensor modules.
    self.bodyToSpecificObjectModules = []
    for iModule, config in enumerate(locationConfigs):
      module = BodyToSpecificObjectModule2D(config["cellDimensions"])
      pairedSensorModules = [c.sensorToSpecificObjectModules[iModule]
                             for c in self.corticalColumns]
      module.formReciprocalSynapses(pairedSensorModules)
      self.bodyToSpecificObjectModules.append(module)

    self.maxSettlingTime = 10

    self.monitors = {}
    self.nextMonitorToken = 1

  def addMonitor(self, monitor):
    """
    Subscribe to MultiColumn2DExperimentMonitor events.

    @param monitor (MultiColumn2DExperimentMonitor)
    An object that implements a set of monitor methods

    @return (object)
    An opaque object that can be used to refer to this monitor.
    """
    token = self.nextMonitorToken
    self.nextMonitorToken += 1
    self.monitors[token] = monitor
    return token

  def removeMonitor(self, monitorToken):
    """
    Unsubscribe from LocationExperiment events.

    @param monitorToken (object)
    The return value of addMonitor() from when this monitor was added
    """
    del self.monitors[monitorToken]

  def compute(self, egocentricLocationByColumn, featureSDRByColumn, learn):
    """
    Run one timestep of the whole network: sensor-to-body (L5b),
    sensor-to-specific-object metric update (L6a), input layer (L4), object
    layer (L2), sensory anchoring (L6a), and finally the subcortical
    body-to-specific-object modules.
    """
    # Layer 5B
    paramsByModuleByColumn = [
      [{"egocentricLocation": egocentricLocation}
       for _ in c.sensorToBodyModules]
      for c, egocentricLocation in zip(self.corticalColumns,
                                       egocentricLocationByColumn)]

    for c, paramsByModule in zip(self.corticalColumns, paramsByModuleByColumn):
      for module, params in zip(c.sensorToBodyModules, paramsByModule):
        module.compute(**params)

    for monitor in self.monitors.itervalues():
      monitor.afterSensorToBodyCompute(paramsByModuleByColumn)

    # Layer 6A
    paramsByModuleByColumn = [
      [{"sensorToBody": sensorToBody.activeCells,
        "bodyToSpecificObject": bodyToSpecificObject.activeCells}
       for sensorToBody, _, bodyToSpecificObject
       in zip(c.sensorToBodyModules,
              c.sensorToSpecificObjectModules,
              self.bodyToSpecificObjectModules)]
      for c in self.corticalColumns]

    for c, paramsByModule in zip(self.corticalColumns, paramsByModuleByColumn):
      for module, params in zip(c.sensorToSpecificObjectModules,
                                paramsByModule):
        module.metricCompute(**params)

    for monitor in self.monitors.itervalues():
      monitor.afterSensorMetricCompute(paramsByModuleByColumn)

    # Layer 4
    paramsByColumn = [
      {"activeColumns": featureSDR,
       "basalInput": c.getSensorToSpecificObjectSDR(),
       "apicalInput": c.objectLayer.getActiveCells(),
       "learn": learn}
      for c, featureSDR in zip(self.corticalColumns, featureSDRByColumn)]

    for c, params in zip(self.corticalColumns, paramsByColumn):
      c.inputLayer.compute(**params)

    for monitor in self.monitors.itervalues():
      monitor.afterInputCompute(paramsByColumn)

    # Layer 2: each column receives lateral input from every other column.
    paramsByColumn = [
      {"feedforwardInput": c.inputLayer.getActiveCells(),
       "feedforwardGrowthCandidates": c.inputLayer.getWinnerCells(),
       "lateralInputs": [c2.objectLayer.getActiveCells()
                         for i2, c2 in enumerate(self.corticalColumns)
                         if i2 != i],
       "learn": learn}
      for i, c in enumerate(self.corticalColumns)]

    for c, params in zip(self.corticalColumns, paramsByColumn):
      c.objectLayer.compute(**params)

    for monitor in self.monitors.itervalues():
      monitor.afterObjectCompute(paramsByColumn)

    # Layer 6A: anchor on the new L4 representation.
    paramsByColumn = [
      {"anchorInput": c.inputLayer.getActiveCells(),
       "learn": learn}
      for c in self.corticalColumns]

    for c, params in zip(self.corticalColumns, paramsByColumn):
      for module in c.sensorToSpecificObjectModules:
        module.anchorCompute(**params)

    for monitor in self.monitors.itervalues():
      monitor.afterSensorLocationAnchor(paramsByColumn)

    # Subcortical
    paramsByModule = [
      {"sensorToBodyByColumn": [
        c.sensorToBodyModules[iModule].activeCells
        for c in self.corticalColumns],
       "sensorToSpecificObjectByColumn": [
         c.sensorToSpecificObjectModules[iModule].activeCells
         for c in self.corticalColumns]}
      for iModule, module in enumerate(self.bodyToSpecificObjectModules)]

    for module, params in zip(self.bodyToSpecificObjectModules,
                              paramsByModule):
      module.compute(**params)

    for monitor in self.monitors.itervalues():
      monitor.afterBodyLocationAnchor(paramsByModule)

  def learnObjects(self, bodyPlacement):
    """
    Learn each provided object.

    This method simultaneously learns 4 sets of synapses:
    - sensor-to-specific-object -> input
    - input -> sensor-to-specific-object
    - input -> object
    - object -> input

    @param bodyPlacement (sequence of 2 ints)
    (top, left) world location of the body
    """
    for monitor in self.monitors.itervalues():
      monitor.afterBodyWorldLocationChanged(bodyPlacement)

    for objectName, objectFeatures in self.objects.iteritems():
      self.reset()
      objectPlacement = self.objectPlacements[objectName]

      for module in self.bodyToSpecificObjectModules:
        module.activateRandomLocation()

      for iFeatureStart in xrange(len(objectFeatures)):
        # Assign a contiguous run of features to the columns, wrapping
        # around the end of the feature list.
        featureIndexByColumn = np.mod(np.arange(iFeatureStart,
                                                 iFeatureStart +
                                                 self.numCorticalColumns),
                                       len(objectFeatures))
        featureByColumn = [objectFeatures[iFeature]
                           for iFeature in featureIndexByColumn]
        featureSDRByColumn = [self.features[(iCol, feature["name"])]
                              for iCol, feature in enumerate(featureByColumn)]

        # Each sensor sits at the center of its assigned feature.
        locationOnObjectByColumn = np.array(
          [[feature["top"] + feature["height"]/2,
            feature["left"] + feature["width"]/2]
           for feature in featureByColumn])
        worldLocationByColumn = objectPlacement + locationOnObjectByColumn
        egocentricLocationByColumn = worldLocationByColumn - bodyPlacement

        for monitor in self.monitors.itervalues():
          monitor.afterSensorWorldLocationChanged(worldLocationByColumn)

        # Compute twice so the representations can settle before recording.
        for t in xrange(2):
          for monitor in self.monitors.itervalues():
            monitor.beforeCompute(egocentricLocationByColumn,
                                  featureSDRByColumn,
                                  isRepeat=(t > 0))
          self.compute(egocentricLocationByColumn, featureSDRByColumn,
                       learn=True)

        # Record the learned representations for later classification.
        for iFeature, locationOnObject, c in zip(featureIndexByColumn,
                                                 locationOnObjectByColumn,
                                                 self.corticalColumns):
          locationOnObject = tuple(locationOnObject.tolist())
          c.sensorLocationRepresentations[(objectName, locationOnObject)] = (
            c.getSensorToSpecificObjectSDR())
          c.inputRepresentations[(objectName, locationOnObject,
                                  objectFeatures[iFeature]["name"])] = (
            c.inputLayer.getActiveCells())
          c.objectRepresentations[objectName] = c.objectLayer.getActiveCells()

  def isObjectClassified(self, objectName, minOverlap=30, maxActive=60):
    """
    Return True if every column's current object-layer activity overlaps the
    learned representation of ``objectName`` by at least ``minOverlap``
    cells while having at most ``maxActive`` active cells.
    """
    for c in self.corticalColumns:
      overlap = len(np.intersect1d(
        c.objectRepresentations[objectName],
        c.objectLayer.getActiveCells()))
      totalActive = len(c.objectLayer.getActiveCells())
      if overlap < minOverlap or totalActive > maxActive:
        return False
    return True

  def inferObjects(self, bodyPlacement, maxTouches=2):
    """
    Touch each object with multiple sensors twice.

    :returns: dict mapping the number of touches required to the number of
    objects that took that many touches to be uniquely inferred. The 'None'
    key is reserved for objects not recognized after `maxTouches` touches
    """
    for monitor in self.monitors.itervalues():
      monitor.afterBodyWorldLocationChanged(bodyPlacement)

    numTouchesRequired = collections.defaultdict(int)

    for objectName, objectFeatures in self.objects.iteritems():
      self.reset()
      objectPlacement = self.objectPlacements[objectName]

      featureIndexByColumnIterator = (
        greedySensorPositions(self.numCorticalColumns, len(objectFeatures)))

      for touch in xrange(maxTouches):
        # Choose where to place each sensor.
        featureIndexByColumn = featureIndexByColumnIterator.next()
        sensedFeatures = [objectFeatures[i] for i in featureIndexByColumn]
        featureSDRByColumn = [self.features[(iCol, feature["name"])]
                              for iCol, feature in enumerate(sensedFeatures)]
        worldLocationByColumn = np.array([
          [objectPlacement[0] + feature["top"] + feature["height"]/2,
           objectPlacement[1] + feature["left"] + feature["width"]/2]
          for feature in sensedFeatures])

        for monitor in self.monitors.itervalues():
          monitor.afterSensorWorldLocationChanged(worldLocationByColumn)

        egocentricLocationByColumn = worldLocationByColumn - bodyPlacement

        # Keep computing until activity stops changing or maxSettlingTime.
        prevCellActivity = None
        for t in xrange(self.maxSettlingTime):
          for monitor in self.monitors.itervalues():
            monitor.beforeCompute(egocentricLocationByColumn,
                                  featureSDRByColumn,
                                  isRepeat=(t > 0))
          self.compute(egocentricLocationByColumn, featureSDRByColumn,
                       learn=False)

          cellActivity = (
            tuple(c.getAllCellActivity()
                  for c in self.corticalColumns),
            tuple(set(module.activeCells)
                  for module in self.bodyToSpecificObjectModules))
          if cellActivity == prevCellActivity:
            # It settled. Cancel logging this timestep.
            for monitor in self.monitors.itervalues():
              monitor.clearUnflushedData()
            break
          else:
            prevCellActivity = cellActivity
            for monitor in self.monitors.itervalues():
              monitor.flush()

        # Check if the object is narrowed down
        if self.isObjectClassified(objectName):
          numTouchesRequired[touch + 1] += 1
          break
      else:
        numTouchesRequired[None] += 1

    return numTouchesRequired

  def reset(self):
    """Clear all layer, module, and monitor state between objects."""
    for c in self.corticalColumns:
      for module in c.sensorToSpecificObjectModules:
        module.reset()
      c.inputLayer.reset()
      c.objectLayer.reset()

    for module in self.bodyToSpecificObjectModules:
      module.reset()

    for monitor in self.monitors.itervalues():
      monitor.afterReset()
class MultiColumn2DExperimentMonitor(object):
  """
  Abstract base class for a MultiColumn2DExperiment monitor.

  Every hook is a no-op; subclasses override only the events they care
  about.
  """
  __metaclass__ = abc.ABCMeta  # Python 2 style ABC declaration

  def beforeCompute(self, egocentricLocationByColumn, featureSDRByColumn,
                    isRepeat):
    """Called before each compute step."""
    pass

  def afterReset(self):
    """Called after the experiment state is cleared."""
    pass

  def afterBodyWorldLocationChanged(self, bodyWorldLocation):
    """Called when the body moves in the world."""
    pass

  def afterSensorWorldLocationChanged(self, worldLocationByColumn):
    """Called when the sensors move in the world."""
    pass

  def afterSensorToBodyCompute(self, paramsByColumn):
    """Called after the sensor-to-body modules compute."""
    pass

  def afterSensorMetricCompute(self, paramsByColumn):
    """Called after the sensor-to-specific-object metric update."""
    pass

  def afterSensorLocationAnchor(self, paramsByColumn):
    """Called after the sensor-location modules anchor on sensory input."""
    pass

  def afterBodyLocationAnchor(self, params):
    """Called after the body-to-specific-object modules compute."""
    pass

  def afterInputCompute(self, paramsByColumn):
    """Called after the input layer computes."""
    pass

  def afterObjectCompute(self, paramsByColumn):
    """Called after the object layer computes."""
    pass

  def flush(self):
    """Commit any buffered data for the current timestep."""
    pass

  def clearUnflushedData(self):
    """Discard any buffered data for the current timestep."""
    pass
| [
"numpy.prod",
"numpy.arange",
"htmresearch.algorithms.location_modules.SensorToBodyModule2D",
"numpy.append",
"numpy.array",
"collections.defaultdict",
"random.randint",
"htmresearch.algorithms.location_modules.BodyToSpecificObjectModule2D"
] | [((2479, 2507), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""uint32"""'}), "([], dtype='uint32')\n", (2487, 2507), True, 'import numpy as np\n'), ((13192, 13220), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (13215, 13220), False, 'import collections\n'), ((2606, 2665), 'numpy.append', 'np.append', (['activeCells', '(module.activeCells + totalPrevCells)'], {}), '(activeCells, module.activeCells + totalPrevCells)\n', (2615, 2665), True, 'import numpy as np\n'), ((5079, 5133), 'htmresearch.algorithms.location_modules.BodyToSpecificObjectModule2D', 'BodyToSpecificObjectModule2D', (["config['cellDimensions']"], {}), "(config['cellDimensions'])\n", (5107, 5133), False, 'from htmresearch.algorithms.location_modules import BodyToSpecificObjectModule2D, SensorToBodyModule2D, SensorToSpecificObjectModule\n'), ((4404, 4434), 'htmresearch.algorithms.location_modules.SensorToBodyModule2D', 'SensorToBodyModule2D', ([], {}), '(**config)\n', (4424, 4434), False, 'from htmresearch.algorithms.location_modules import BodyToSpecificObjectModule2D, SensorToBodyModule2D, SensorToSpecificObjectModule\n'), ((10871, 10999), 'numpy.array', 'np.array', (["[[feature['top'] + feature['height'] / 2, feature['left'] + feature['width'\n ] / 2] for feature in featureByColumn]"], {}), "([[feature['top'] + feature['height'] / 2, feature['left'] + \n feature['width'] / 2] for feature in featureByColumn])\n", (10879, 10999), True, 'import numpy as np\n'), ((13886, 14059), 'numpy.array', 'np.array', (["[[objectPlacement[0] + feature['top'] + feature['height'] / 2, \n objectPlacement[1] + feature['left'] + feature['width'] / 2] for\n feature in sensedFeatures]"], {}), "([[objectPlacement[0] + feature['top'] + feature['height'] / 2, \n objectPlacement[1] + feature['left'] + feature['width'] / 2] for\n feature in sensedFeatures])\n", (13894, 14059), True, 'import numpy as np\n'), ((10348, 10413), 'numpy.arange', 'np.arange', (['iFeatureStart', '(iFeatureStart + 
self.numCorticalColumns)'], {}), '(iFeatureStart, iFeatureStart + self.numCorticalColumns)\n', (10357, 10413), True, 'import numpy as np\n'), ((4077, 4100), 'random.randint', 'random.randint', (['(0)', '(2048)'], {}), '(0, 2048)\n', (4091, 4100), False, 'import random\n'), ((4349, 4372), 'random.randint', 'random.randint', (['(0)', '(2048)'], {}), '(0, 2048)\n', (4363, 4372), False, 'import random\n'), ((4742, 4765), 'random.randint', 'random.randint', (['(0)', '(2048)'], {}), '(0, 2048)\n', (4756, 4765), False, 'import random\n'), ((3952, 3985), 'numpy.prod', 'np.prod', (["config['cellDimensions']"], {}), "(config['cellDimensions'])\n", (3959, 3985), True, 'import numpy as np\n')] |
import os
import tempfile
import subprocess
import logging
import uuid
import time
import socket
import numpy as np
import cclib
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import PeriodicTable
from rdkit.Chem.rdMolTransforms import GetBondLength
# Quiet cclib's parser chatter: WARNING (numeric level 30) and above only.
logging.getLogger("cclib").setLevel(logging.WARNING)
class GaussianRunner(object):
    """Drive one molecule through the embed -> CREST -> Gaussian pipeline.

    Given a SMILES string, embeds and MMFF-optimizes conformers with RDKit,
    refines the best conformer with CREST, writes and runs a Gaussian 16
    opt+freq job, parses the log with cclib, and archives the compressed
    input/log files under ``projectdir``.
    """
    def __init__(self, smiles, cid, type_, max_conformers=1000,
                 min_conformers=100, nprocs=18, mem='40GB',
                 scratchdir='/tmp/scratch',
                 projectdir='/projects/rlmolecule/svss/home/Projects-python/crest-bde/',
                 crest='~/bin/crest/crest',
                 crest_timeout=86400,
                 gaussian_timeout=86400):
        """ Class to handle the overall temporary directory management for
        running Gaussian on Eagle """
        # smiles: molecule to process; cid: identifier used in all filenames.
        # type_ == 'fragment' selects the radical route (stable=opt,
        # guess=mix) in write_gaussian_input_file; anything else gets a
        # plain opt+freq job.
        self.smiles = smiles
        self.cid = cid
        self.type_ = type_
        # Bounds on the number of MMFF conformers tried (see
        # optimize_molecule_mmff).
        self.max_conformers = max_conformers
        self.min_conformers = min_conformers
        # Gaussian resource directives (%nprocshared / %MEM).
        self.nprocs = nprocs
        self.mem = mem
        self.scratchdir = scratchdir
        self.projectdir = projectdir
        self.crest = crest
        # Per-subprocess wall-clock limits in seconds (default 24h each).
        self.crest_timeout = crest_timeout
        self.gaussian_timeout = gaussian_timeout
    def process(self):
        """Run the full pipeline inside a throwaway scratch directory.

        Returns (molblock string, enthalpy, free energy, SCF energy,
        path to the archived gzipped log file).
        """
        with tempfile.TemporaryDirectory(dir=self.scratchdir) as tmpdirname:
            print("starting SMILES {0} on host {1}".format(self.smiles, socket.gethostname()))
            mol, confId = self.optimize_molecule_mmff()
            # running crest
            self.run_crest(mol, confId, tmpdirname)
            self.write_gaussian_input_file(tmpdirname)
            # Run gaussian and time calculation
            gauss_start_time = time.time()
            self.run_gaussian(tmpdirname)
            gauss_run_time = time.time() - gauss_start_time
            print("python walltime for SMILES {0} on host {1}: {2}".format(
                self.smiles, socket.gethostname(), gauss_run_time))
            mol, enthalpy, freeenergy, scfenergy = self.parse_log_file()
            log = self.cleanup()
            molstr = Chem.MolToMolBlock(mol)
            return molstr, enthalpy, freeenergy, scfenergy, log
    def optimize_molecule_mmff(self):
        """ Embed a molecule in 3D space, optimizing a number of conformers and
        selecting the most stable
        """
        mol = Chem.MolFromSmiles(self.smiles)
        mol = Chem.rdmolops.AddHs(mol)
        # If the molecule is a radical; add a hydrogen so MMFF converges to a
        # reasonable structure
        # NOTE(review): with multiple radical centers only the last index is
        # remembered, so only that atom's hydrogen is removed below — confirm
        # inputs are mono-radicals.
        is_radical = False
        radical_index = None
        for i, atom in enumerate(mol.GetAtoms()):
            if atom.GetNumRadicalElectrons() != 0:
                is_radical = True
                radical_index = i
                atom.SetNumExplicitHs(atom.GetNumExplicitHs() + 1)
                atom.SetNumRadicalElectrons(0)
        # Use min < 3^n < max conformers, where n is the number of rotatable bonds
        NumRotatableBonds = AllChem.CalcNumRotatableBonds(mol)
        NumConformers = np.clip(3**NumRotatableBonds, self.min_conformers,
                                self.max_conformers)
        conformers = AllChem.EmbedMultipleConfs(
            mol, numConfs=int(NumConformers), pruneRmsThresh=0.2, randomSeed=1,
            useExpTorsionAnglePrefs=True, useBasicKnowledge=True)
        def optimize_conformer(conformer):
            # Minimize one conformer in place and return its MMFF94s energy.
            prop = AllChem.MMFFGetMoleculeProperties(mol, mmffVariant="MMFF94s")
            ff = AllChem.MMFFGetMoleculeForceField(mol, prop, confId=conformer)
            ff.Minimize()
            return float(ff.CalcEnergy())
        assert conformers, "Conformer embedding failed"
        if len(conformers) == 1:
            logging.critical(
                'Only 1 conformer for SMILES {}'.format(self.smiles))
            most_stable_conformer = conformers[0]
        else:
            conformer_energies = np.array(
                [optimize_conformer(conformer) for conformer in conformers])
            most_stable_conformer = conformer_energies.argmin()
        # If hydrogen was added; remove it before returning the final mol
        if is_radical:
            radical_atom = mol.GetAtomWithIdx(radical_index)
            radical_atom.SetNumExplicitHs(int(radical_atom.GetNumExplicitHs()) - 1)
            radical_atom.SetNumRadicalElectrons(1)
        return mol, int(most_stable_conformer)
    def run_crest(self, mol, confId, tmpdirname):
        """ Given an rdkit.Mol object with an optimized, minimum energy conformer
        ID, converting to .xyz and running crest in the scratch folder """
        # Random suffix keeps filenames unique across concurrent runs.
        self.run_hex = uuid.uuid4().hex[:6]
        self.xyz = tmpdirname + '/{0}_{1}.xyz'.format(self.cid, self.run_hex)
        self.crest_out = tmpdirname + '/{0}_{1}.crest.out'.format(self.cid, self.run_hex)
        # NOTE(review): this filename ends with a trailing '.' — looks like a
        # typo, but it is used consistently below, so it still works.
        self.best_xyz = tmpdirname + '/{0}_{1}_best.xyz.'.format(self.cid, self.run_hex)
        #writing to xyzfile
        rdkit.Chem.rdmolfiles.MolToXYZFile(mol, self.xyz, confId=confId)
        #running calc in temporary TemporaryDirectory : tmpdirname
        env = os.environ.copy()
        crest_cmd = "{0} {1} > {2}".format(self.crest,
                                           self.xyz, self.crest_out)
        subprocess.run(crest_cmd, shell=True, env=env,
                       timeout=self.crest_timeout)
        #crest outputs common name files such as crest_best.xyz. We have to move it
        #such that it can be accessed again to creat gjf file for gaussian
        # NOTE(review): 'crest_best.xyz' is resolved against the process's
        # current working directory, not tmpdirname — confirm the caller's cwd.
        subprocess.run(['mv', 'crest_best.xyz', self.best_xyz])
    def write_gaussian_input_file(self, tmpdirname):
        """ Given an best conformer after crest,
        write a gaussian input file using openbabel to the scratch folder """
        self.gjf = tmpdirname + '/{0}_{1}.gjf'.format(self.cid, self.run_hex)
        checkpoint_file = tmpdirname + '/{0}_{1}.chk'.format(self.cid, self.run_hex)
        if self.type_ == 'fragment':
            # Run stable=opt
            header1 = [
                '%chk={0}'.format(checkpoint_file),
                '%MEM={}'.format(self.mem),
                '%nprocshared={}'.format(self.nprocs),
                '# stable=opt M062X/Def2TZVP scf=(xqc,maxconventionalcycles=400)'
                ' nosymm guess=mix']
            # obabel converts the CREST geometry to a .gjf with this header.
            subprocess.run(
                ['obabel', '-ixyz', self.best_xyz, '-O', self.gjf, '-xk',
                 '\n'.join(header1)])
            with open(self.gjf, 'r') as f:
                inputs = f.read().split('\n')
            # Line 5 of the obabel output is the title card; line 7 holds the
            # charge/multiplicity pair — assumes obabel's fixed layout.
            inputs[5] = f'{self.cid}_{self.run_hex}'
            chg_mul = inputs[7]
            # Second job step: reuse the stabilized wavefunction and geometry
            # from the checkpoint, then optimize and run frequencies.
            inputs += [
                '--link1--',
                '%chk={0}'.format(checkpoint_file),
                '%MEM={}'.format(self.mem),
                '%nprocshared={}'.format(self.nprocs),
                '# opt freq M062X/Def2TZVP scf=(xqc,maxconventionalcycles=400)'
                ' nosymm guess=read geom=check\n',
                ' {0}_{1}\n'.format(self.cid, self.run_hex),
                chg_mul
            ]
        else:
            header1 = [
                '%MEM={}'.format(self.mem),
                '%nprocshared={}'.format(self.nprocs),
                '# opt freq M062X/Def2TZVP scf=(xqc,maxconventionalcycles=400) nosymm']
            subprocess.run(
                ['obabel', '-ixyz', self.best_xyz, '-O', self.gjf, '-xk',
                 '\n'.join(header1)])
            with open(self.gjf, 'r') as f:
                inputs = f.read().split('\n')
            # One fewer %-directive here, so the title card is at index 4.
            inputs[4] = f'{self.cid}_{self.run_hex}'
        with open(self.gjf, 'wt') as f:
            f.write('\n'.join(inputs))
        # debug -- keep a copy before running gaussian
        gjf_basename = os.path.basename(self.gjf)
        newgjf = self.projectdir + 'gjf_errors/' + gjf_basename
        subprocess.run(['cp', self.gjf, newgjf])
    def run_gaussian(self, tmpdirname):
        """ Run the given Guassian input file (with associated mol ID) """
        self.log = tmpdirname + '/{0}_{1}.log'.format(self.cid, self.run_hex)
        gaussian_cmd = "module load gaussian/G16C && g16 < {0} > {1}".format(
            self.gjf, self.log)
        # Give Gaussian a dedicated scratch dir so its temp files are removed
        # with the context manager.
        with tempfile.TemporaryDirectory(dir=tmpdirname) as gausstmp:
            env = os.environ.copy()
            env['GAUSS_SCRDIR'] = gausstmp
            subprocess.run(gaussian_cmd, shell=True, env=env,
                           timeout=self.gaussian_timeout)
    def parse_log_file(self):
        """ Parse the gaussian log file using cclib, return the optimized mol and
        enthalpy. """
        # Parse the output log with cclib, assert the optimization completed
        data = cclib.io.ccread(self.log)
        assert data.optdone, "Optimization not converged"
        if hasattr(data, 'vibfreqs'):  # single atoms don't have this property
            assert min(data.vibfreqs) >= 0, "Imaginary Frequency"
        # Create an RDKit Molecule from the SMILES string
        mol = Chem.MolFromSmiles(self.smiles)
        mol = AllChem.AddHs(mol)
        AllChem.EmbedMolecule(mol)
        conf = mol.GetConformer()
        # Sanity check: atom ordering/stoichiometry must match the log file.
        assert np.allclose(
            np.array([a.GetAtomicNum() for a in mol.GetAtoms()]),
            data.atomnos), "Stoichiometry check failed"
        # Correct the 3D positions of the atoms using the optimized geometry
        for i in range(conf.GetNumAtoms()):
            conf.SetAtomPosition(i, data.atomcoords[-1][i])
        pt = Chem.GetPeriodicTable()
        covalent_radii = lambda x: PeriodicTable.GetRcovalent(pt, x)
        # Check bond lengths
        for bond in mol.GetBonds():
            length = GetBondLength(
                mol.GetConformer(), bond.GetBeginAtomIdx(), bond.GetEndAtomIdx())
            # Allow 0.4 Angstrom slack over the sum of covalent radii.
            max_length = (covalent_radii(bond.GetBeginAtom().GetSymbol()) +
                          covalent_radii(bond.GetEndAtom().GetSymbol()) + 0.4)
            assert length <= max_length, "bond greater than maximum covalent length"
        # Set property fields of the molecule for final SDF export
        mol.SetProp("_Name", str(self.cid))
        mol.SetProp('SMILES', self.smiles)
        mol.SetDoubleProp('Enthalpy', data.enthalpy)
        # NOTE(review): dividing by 27.2114 presumably converts the final SCF
        # energy from eV (cclib's unit for scfenergies) to Hartree — confirm.
        return mol, data.enthalpy, data.freeenergy, data.scfenergies[-1] / 27.2114
    def cleanup(self):
        """ Compress files and store in /projects """
        log_basename = os.path.basename(self.log)
        gjf_basename = os.path.basename(self.gjf)
        newlog = self.projectdir + 'log/' + log_basename + '.gz'
        newgjf = self.projectdir + 'gjf/' + gjf_basename + '.gz'
        subprocess.run(['gzip', self.log, self.gjf])
        subprocess.run(['mv', self.log + '.gz', newlog])
        subprocess.run(['mv', self.gjf + '.gz', newgjf])
        return newlog
| [
"logging.getLogger",
"numpy.clip",
"rdkit.Chem.AllChem.CalcNumRotatableBonds",
"cclib.io.ccread",
"subprocess.run",
"rdkit.Chem.AllChem.MMFFGetMoleculeProperties",
"socket.gethostname",
"rdkit.Chem.AllChem.AddHs",
"rdkit.Chem.GetPeriodicTable",
"rdkit.Chem.PeriodicTable.GetRcovalent",
"rdkit.Che... | [((289, 315), 'logging.getLogger', 'logging.getLogger', (['"""cclib"""'], {}), "('cclib')\n", (306, 315), False, 'import logging\n'), ((2400, 2431), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['self.smiles'], {}), '(self.smiles)\n', (2418, 2431), False, 'from rdkit import Chem\n'), ((2446, 2470), 'rdkit.Chem.rdmolops.AddHs', 'Chem.rdmolops.AddHs', (['mol'], {}), '(mol)\n', (2465, 2470), False, 'from rdkit import Chem\n'), ((3034, 3068), 'rdkit.Chem.AllChem.CalcNumRotatableBonds', 'AllChem.CalcNumRotatableBonds', (['mol'], {}), '(mol)\n', (3063, 3068), False, 'from rdkit.Chem import AllChem\n'), ((3093, 3166), 'numpy.clip', 'np.clip', (['(3 ** NumRotatableBonds)', 'self.min_conformers', 'self.max_conformers'], {}), '(3 ** NumRotatableBonds, self.min_conformers, self.max_conformers)\n', (3100, 3166), True, 'import numpy as np\n'), ((4996, 5060), 'rdkit.Chem.rdmolfiles.MolToXYZFile', 'rdkit.Chem.rdmolfiles.MolToXYZFile', (['mol', 'self.xyz'], {'confId': 'confId'}), '(mol, self.xyz, confId=confId)\n', (5030, 5060), False, 'import rdkit\n'), ((5141, 5158), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (5156, 5158), False, 'import os\n'), ((5261, 5335), 'subprocess.run', 'subprocess.run', (['crest_cmd'], {'shell': '(True)', 'env': 'env', 'timeout': 'self.crest_timeout'}), '(crest_cmd, shell=True, env=env, timeout=self.crest_timeout)\n', (5275, 5335), False, 'import subprocess\n'), ((5527, 5582), 'subprocess.run', 'subprocess.run', (["['mv', 'crest_best.xyz', self.best_xyz]"], {}), "(['mv', 'crest_best.xyz', self.best_xyz])\n", (5541, 5582), False, 'import subprocess\n'), ((7726, 7752), 'os.path.basename', 'os.path.basename', (['self.gjf'], {}), '(self.gjf)\n', (7742, 7752), False, 'import os\n'), ((7825, 7865), 'subprocess.run', 'subprocess.run', (["['cp', self.gjf, newgjf]"], {}), "(['cp', self.gjf, newgjf])\n", (7839, 7865), False, 'import subprocess\n'), ((8672, 8697), 'cclib.io.ccread', 'cclib.io.ccread', (['self.log'], {}), 
'(self.log)\n', (8687, 8697), False, 'import cclib\n'), ((8974, 9005), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['self.smiles'], {}), '(self.smiles)\n', (8992, 9005), False, 'from rdkit import Chem\n'), ((9020, 9038), 'rdkit.Chem.AllChem.AddHs', 'AllChem.AddHs', (['mol'], {}), '(mol)\n', (9033, 9038), False, 'from rdkit.Chem import AllChem\n'), ((9047, 9073), 'rdkit.Chem.AllChem.EmbedMolecule', 'AllChem.EmbedMolecule', (['mol'], {}), '(mol)\n', (9068, 9073), False, 'from rdkit.Chem import AllChem\n'), ((9455, 9478), 'rdkit.Chem.GetPeriodicTable', 'Chem.GetPeriodicTable', ([], {}), '()\n', (9476, 9478), False, 'from rdkit import Chem\n'), ((10369, 10395), 'os.path.basename', 'os.path.basename', (['self.log'], {}), '(self.log)\n', (10385, 10395), False, 'import os\n'), ((10419, 10445), 'os.path.basename', 'os.path.basename', (['self.gjf'], {}), '(self.gjf)\n', (10435, 10445), False, 'import os\n'), ((10586, 10630), 'subprocess.run', 'subprocess.run', (["['gzip', self.log, self.gjf]"], {}), "(['gzip', self.log, self.gjf])\n", (10600, 10630), False, 'import subprocess\n'), ((10639, 10687), 'subprocess.run', 'subprocess.run', (["['mv', self.log + '.gz', newlog]"], {}), "(['mv', self.log + '.gz', newlog])\n", (10653, 10687), False, 'import subprocess\n'), ((10696, 10744), 'subprocess.run', 'subprocess.run', (["['mv', self.gjf + '.gz', newgjf]"], {}), "(['mv', self.gjf + '.gz', newgjf])\n", (10710, 10744), False, 'import subprocess\n'), ((1310, 1358), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {'dir': 'self.scratchdir'}), '(dir=self.scratchdir)\n', (1337, 1358), False, 'import tempfile\n'), ((1742, 1753), 'time.time', 'time.time', ([], {}), '()\n', (1751, 1753), False, 'import time\n'), ((2130, 2153), 'rdkit.Chem.MolToMolBlock', 'Chem.MolToMolBlock', (['mol'], {}), '(mol)\n', (2148, 2153), False, 'from rdkit import Chem\n'), ((3456, 3517), 'rdkit.Chem.AllChem.MMFFGetMoleculeProperties', 'AllChem.MMFFGetMoleculeProperties', (['mol'], 
{'mmffVariant': '"""MMFF94s"""'}), "(mol, mmffVariant='MMFF94s')\n", (3489, 3517), False, 'from rdkit.Chem import AllChem\n'), ((3535, 3597), 'rdkit.Chem.AllChem.MMFFGetMoleculeForceField', 'AllChem.MMFFGetMoleculeForceField', (['mol', 'prop'], {'confId': 'conformer'}), '(mol, prop, confId=conformer)\n', (3568, 3597), False, 'from rdkit.Chem import AllChem\n'), ((8187, 8230), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {'dir': 'tmpdirname'}), '(dir=tmpdirname)\n', (8214, 8230), False, 'import tempfile\n'), ((8262, 8279), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (8277, 8279), False, 'import os\n'), ((8335, 8420), 'subprocess.run', 'subprocess.run', (['gaussian_cmd'], {'shell': '(True)', 'env': 'env', 'timeout': 'self.gaussian_timeout'}), '(gaussian_cmd, shell=True, env=env, timeout=self.gaussian_timeout\n )\n', (8349, 8420), False, 'import subprocess\n'), ((9514, 9547), 'rdkit.Chem.PeriodicTable.GetRcovalent', 'PeriodicTable.GetRcovalent', (['pt', 'x'], {}), '(pt, x)\n', (9540, 9547), False, 'from rdkit.Chem import PeriodicTable\n'), ((1825, 1836), 'time.time', 'time.time', ([], {}), '()\n', (1834, 1836), False, 'import time\n'), ((4681, 4693), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4691, 4693), False, 'import uuid\n'), ((1447, 1467), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1465, 1467), False, 'import socket\n'), ((1961, 1981), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1979, 1981), False, 'import socket\n')] |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
from abc import ABCMeta, abstractmethod
from collections.abc import Iterable
from typing import Dict
from typing import Iterable as Iter
from typing import Union
import numpy as np
from ..core._imperative_rt.core2 import pop_scope, push_scope, set_option
from ..core.tensor.utils import set_convert_inputs
from ..tensor import Parameter, Tensor
from ..utils.deprecation import deprecated
class _RequiredParameter:
def __repr__(self):
return "<required parameter>"
required = _RequiredParameter()
class Optimizer(metaclass=ABCMeta):
    r"""Base class for all optimizers.
    Args:
        params: specifies what Tensors should be optimized.
        defaults: a dict of default parameters of Optimizer, like learning rate or momentum.
    """
    def __init__( # pylint: disable=too-many-branches
        self, params: Union[Iter[Parameter], dict], defaults: dict,
    ):
        # per-parameter optimizer state (e.g. momentum buffers), filled via _add_state
        self._state = dict()
        self._defaults = defaults
        # when True, step() temporarily disables automatic input type conversion
        self._disable_type_convert = False
        # normalize `params` into a list of parameter-group dicts
        if isinstance(params, (Parameter, dict)):
            params = [params]
        else:
            if not isinstance(params, Iterable):
                raise TypeError(
                    "params argument given to the optimizer should be "
                    "Parameter or dict, or Iterable of them"
                )
        self.param_groups = [] # type: list
        param_groups = list(params)
        if len(param_groups) == 0:
            raise ValueError("optimizer got an empty parameter list")
        # all entries must be of the same kind (all Parameters or all dicts)
        param_type = type(param_groups[0])
        for param in param_groups:
            if not isinstance(param, param_type):
                raise TypeError(
                    "types of params argument given to the optimizer shoud be same"
                )
        if not isinstance(param_groups[0], dict):
            param_groups = [{"params": param_groups}]
        for group in param_groups:
            self.add_param_group(group)
        # let the concrete optimizer allocate its per-group state
        for group in self.param_groups:
            self._create_state(group)
    def add_param_group(self, param_group: dict):
        r"""Add a param group to ``param_groups`` of the :class:`~megengine.optim.optimizer.Optimizer`.
        This can be useful when fine tuning a pre-trained network as frozen layers can be made
        trainable and added to the :class:`~megengine.optim.optimizer.Optimizer` as training progresses.
        Args:
            param_group: specifies what tensors should be optimized along with group.
        """
        assert isinstance(param_group, dict), "param group must be a dict"
        # normalize "params" to a list of Parameters
        if isinstance(param_group["params"], Parameter):
            param_group["params"] = [param_group["params"]]
        else:
            param_group["params"] = list(param_group["params"])
        for param in param_group["params"]:
            if not isinstance(param, Parameter):
                raise TypeError(
                    "optimizer can only optimize Parameters, but one of the params is "
                    + str(type(param))
                )
            # re-wrap the parameter storage as a no_cache tensor — presumably so
            # in-place updates bypass tensor caching; TODO confirm rationale
            param._reset(Tensor(param.numpy(), no_cache=True))
        # fill in defaults; entries marked `required` must be supplied explicitly
        for name, default in self._defaults.items():
            if default is required and name not in param_group:
                raise ValueError(
                    "parameter group didn't specify a value of "
                    "required optimization parameter " + name
                )
            param_group.setdefault(name, default)
        # a parameter may belong to at most one group (compared by object identity)
        param_set = set()
        for group in self.param_groups:
            param_set.update(set(map(id, group["params"])))
        assert param_set.isdisjoint(
            set(map(id, param_group["params"]))
        ), "some parameters appear in more than one parameter group"
        self.param_groups.append(param_group)
    def _add_state(self, param, state_name, initializer=None):
        """Allocate one named state tensor for ``param`` (float32 zeros by default)."""
        if initializer is None:
            initializer = np.zeros(param.shape, dtype=np.float32)
        state_dict = self._state.setdefault(param, {})
        assert state_name not in state_dict
        state = Tensor(initializer, no_cache=True)
        state_dict[state_name] = state
    @abstractmethod
    def _create_state(self, param_group):
        """Create optimizer state (e.g. momentum buffers) for one param group."""
        pass
    @abstractmethod
    def _updates(self, param_group):
        """Apply one update step to every parameter in ``param_group``."""
        pass
    def _get_params(self):
        """Return a flat list of all parameters across every param group."""
        params = []
        for group in self.param_groups:
            for param in group["params"]:
                params.append(param)
        return params
    def step(self):
        r"""Performs a single optimization step."""
        # set the global state `_enable_convert_inputs` to `False` to disable
        # the `convert_inputs` for param updates
        set_option("record_computing_path", 0)
        if self._disable_type_convert:
            backup = set_convert_inputs(False)
        for group in self.param_groups:
            if isinstance(group["params"], set):
                raise TypeError(
                    "optimized parameters need to be organized in ordered collections, "
                    "but the ordering of parameters in sets will change between runs. "
                    "Please use a list instead."
                )
            # scope markers group the update ops for profiling/tracing
            push_scope("step")
            self._updates(group)
            pop_scope("step")
        if self._disable_type_convert:
            # restore the global state `_enable_convert_inputs`
            set_convert_inputs(backup)
        set_option("record_computing_path", 1)
        return self
    @deprecated(version="1.0", reason="use clear_grad instead")
    def zero_grad(self):
        """Reset every existing gradient tensor to zero (deprecated)."""
        for param_group in self.param_groups:
            for param in param_group["params"]:
                if param.grad is not None:
                    param.grad.reset_zero()
    def clear_grad(self):
        r"""Set the grad attribute to None for all parameters."""
        for param_group in self.param_groups:
            push_scope("clear_grad")
            for param in param_group["params"]:
                param.grad = None
            pop_scope("clear_grad")
    def state_dict(self, keep_var=False) -> Dict:
        r"""Export the optimizer state.
        Return:
            optimizer state. Can be loaded by :meth:`load_state_dict`.
        """
        param_groups = []
        state = dict()
        # map each parameter object to a stable integer id used in the dump
        param2id = dict()
        cur_id = 0
        for group in self.param_groups:
            for param in group["params"]:
                if param not in param2id:
                    param2id[param] = cur_id
                    cur_id += 1
        for param, st in self._state.items():
            _st = copy.copy(st)
            if not keep_var:
                # convert state tensors to numpy arrays for serialization
                for k, v in st.items():
                    _st[k] = v.numpy()
            state[param2id[param]] = _st
        for group in self.param_groups:
            param_group = {k: v for k, v in group.items() if k != "params"}
            param_group["params"] = [param2id[param] for param in group["params"]]
            param_groups.append(param_group)
        return {"param_groups": param_groups, "state": state}
    def load_state_dict(self, state: dict):
        r"""Loads the optimizer state.
        Args:
            state: optimizer state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        if len(self.param_groups) != len(state["param_groups"]):
            raise ValueError(
                "loaded state dict has a different number of parameter groups"
            )
        for group_new, group_saved in zip(self.param_groups, state["param_groups"]):
            if len(group_new["params"]) != len(group_saved["params"]):
                raise ValueError(
                    "loaded state dict contains a parameter group that "
                    "doesn't match the size of optimizer's group"
                )
            for param_new, param_saved in zip(
                group_new["params"], group_saved["params"]
            ):
                p = param_new
                self._state[p] = state["state"][param_saved].copy()
                # re-wrap saved values as Tensors (detach existing ones)
                for k, v in self._state[p].items():
                    if isinstance(v, Tensor):
                        self._state[p][k] = v.detach()
                    else:
                        self._state[p][k] = Tensor(v)
            if set(group_new.keys()) != set(group_saved.keys()):
                raise ValueError(
                    "loaded state dict contains a parameter group that "
                    "doesn't match the keys of optimizer's group"
                )
            # copy hyper-parameters (lr, momentum, ...) from the saved group
            for key in group_new.keys():
                if key != "params":
                    group_new[key] = group_saved[key]
        if len(self._state.keys()) != len(state["state"].keys()):
            raise ValueError(
                "loaded state dict contains a state that doesn't match "
                "the size of optimizer's state"
            )
    def backward(self, loss):
        """Not supported on this base class; gradients come from autodiff.GradManager."""
        raise NotImplementedError("use autodiff.GradManager instead")
    def bcast_param(self):
        """Not supported on this base class; use the distributed helper instead."""
        raise NotImplementedError("use distributed.bcast_list_ instead")
| [
"numpy.zeros",
"copy.copy"
] | [((4311, 4350), 'numpy.zeros', 'np.zeros', (['param.shape'], {'dtype': 'np.float32'}), '(param.shape, dtype=np.float32)\n', (4319, 4350), True, 'import numpy as np\n'), ((6995, 7008), 'copy.copy', 'copy.copy', (['st'], {}), '(st)\n', (7004, 7008), False, 'import copy\n')] |
#!/usr/bin/env python
__author__ = '<NAME>'
#========================================================================
import os, sys
import copy
import time
import uuid
import pickle
import subprocess
import numpy as np
import tensorflow as tf
from gryffin.utilities import Logger
from gryffin.utilities.decorators import thread
#========================================================================
class DescriptorGenerator(Logger):
	"""Generates reduced descriptors for categorical features by delegating the
	optimization to a separate subprocess per feature and collecting its
	pickled results from the scratch directory."""
	eta = 1e-3
	max_iter = 10**3
	def __init__(self, config):
		self.config = config
		self.is_generating = False
		self.exec_name = '%s/descriptor_generator/generation_process.py' % self.config.get('home')
		# define registers
		self.auto_gen_descs = {}
		self.comp_corr_coeffs = {}
		self.gen_descs_cov = {}
		self.min_corrs = {}
		self.reduced_gen_descs = {}
		self.weights = {}
		self.sufficient_indices = {}
	@thread
	def single_generate(self, descs, objs, feature_index, result_dict = None):
		"""Run descriptor generation for one feature in a subprocess; store the
		results in the instance registers and in ``result_dict``."""
		# collect all relevant properties
		sim_dict = {}
		for prop in dir(self):
			if callable(getattr(self, prop)) or prop.startswith(('__', 'W', 'config')): continue
			sim_dict[prop] = getattr(self, prop)
		sim_dict['num_samples'] = descs.shape[0]
		sim_dict['num_descs'] = descs.shape[1]
		sim_dict['descs'] = descs
		sim_dict['objs'] = objs
		sim_dict['grid_descs'] = self.config.feature_descriptors[feature_index]
		# unique suffix so concurrent generations do not clash on scratch files
		identifier = str(uuid.uuid4())[:8]
		config_name = '%s/descriptor_generation_%d_%s.pkl' % (self.config.get('scratch_dir'), feature_index, identifier)
		with open(config_name, 'wb') as content:
			pickle.dump(sim_dict, content)
		# FNULL = open(os.devnull, 'w')
		# subprocess.call('%s %s' % (self.exec_name, config_name), shell = True, stdout = FNULL, stderr = subprocess.STDOUT)
		subprocess.call('python %s %s' % (self.exec_name, config_name), shell = True)
		print('SUBMITTED DESC GENERATION')
		results_name = '%s/completed_descriptor_generation_%d_%s.pkl' % (self.config.get('scratch_dir'), feature_index, identifier)
		# wait for results to be written
		while not os.path.isfile(results_name):
			time.sleep(0.05)
		# then wait until the file size stops changing (writer has finished)
		current_size = 0
		while current_size != os.path.getsize(results_name):
			current_size = os.path.getsize(results_name)
			time.sleep(0.05)
		time.sleep(0.2)
		try:
			with open(results_name, 'rb') as content:
				results = pickle.load(content)
		except EOFError:
			# file existed but was not fully flushed yet; retry once after a pause
			time.sleep(2)
			with open(results_name, 'rb') as content:
				results = pickle.load(content)
		self.min_corrs[feature_index] = results['min_corrs']
		self.auto_gen_descs[feature_index] = results['auto_gen_descs']
		self.comp_corr_coeffs[feature_index] = results['comp_corr_coeffs']
		self.gen_descs_cov[feature_index] = results['gen_descs_cov']
		self.reduced_gen_descs[feature_index] = results['reduced_gen_descs']
		self.weights[feature_index] = results['weights']
		self.sufficient_indices[feature_index] = results['sufficient_indices']
		# print("WEIGHTS", feature_index, self.weights[feature_index])
		# print("REDUCED_DESCS", feature_index, results['reduced_gen_descs'].shape)
		result_dict[feature_index] = results['reduced_gen_descs']
		# clean up scratch files for this run
		os.remove(config_name)
		os.remove(results_name)
	@thread
	def generate(self, obs_params, obs_objs):
		"""Launch descriptor generation for every categorical feature and block
		(in this worker thread) until all results have arrived."""
		import time
		start = time.time()
		self.is_generating = True
		result_dict = {}
		feature_types = self.config.feature_types
		feature_descriptors = self.config.feature_descriptors
		# print('FEATURE DESCRIPTORS', feature_descriptors, np.array(feature_descriptors[0]).shape)
		# print('*'*10)
		# for element in feature_descriptors:
		# 	print(element)
		# print('*'*10)
		for feature_index, feature_options in enumerate(self.config.feature_options):
			# continuous features carry no descriptors to transform
			if feature_types[feature_index] == 'continuous':
				self.weights[feature_index] = None
				self.reduced_gen_descs[feature_index] = None
				result_dict[feature_index] = None
				continue
			if feature_descriptors[feature_index] is None:
				self.weights[feature_index] = None
				self.reduced_gen_descs[feature_index] = None
				result_dict[feature_index] = None
				continue
			# a single descriptor cannot be reduced further; pass it through
			if feature_descriptors[feature_index].shape[1] == 1:
				self.weights[feature_index] = np.array([[1.]])
				self.reduced_gen_descs[feature_index] = feature_descriptors[feature_index]
				result_dict[feature_index] = feature_descriptors[feature_index]
				continue
			sampled_params = obs_params[:, feature_index].astype(np.int32)
			sampled_descriptors = feature_descriptors[feature_index][sampled_params]
			sampled_objs = np.reshape(obs_objs, (len(obs_objs), 1))
			self.single_generate(sampled_descriptors, sampled_objs, feature_index, result_dict)
			# avoid parallel execution if not desired
			if not self.config.get('parallel'):
				if feature_types[feature_index] == 'continuous': continue
				while not feature_index in result_dict:
					time.sleep(0.1)
		# wait for every outstanding worker before exposing the results
		for feature_index in range(len(self.config.feature_options)):
			if feature_types[feature_index] == 'continuous': continue
			while not feature_index in result_dict:
				time.sleep(0.1)
		gen_feature_descriptors = [result_dict[feature_index] for feature_index in range(len(result_dict.keys()))]
		self.gen_feature_descriptors = gen_feature_descriptors
		self.is_generating = False
		end = time.time()
		self.desc_gen_time = end - start
	def get_descriptors(self):
		"""Block until generation finishes, then return the generated descriptors
		(or the raw configured descriptors if nothing was generated)."""
		while self.is_generating:
			time.sleep(0.1)
		if hasattr(self, 'gen_feature_descriptors'):
			print('[TIME: ', self.desc_gen_time, ' (descriptor generation)')
			return self.gen_feature_descriptors
		else:
			return self.config.feature_descriptors
	def get_summary(self):
		"""Summarize which given descriptors contribute to each generated one."""
		summary = {}
		feature_types = self.config.feature_types
		if not hasattr(self, 'gen_feature_descriptors'):
			# no generation has run yet: report every given descriptor as
			# contributing to itself with unit weight
			for feature_index in range(len(self.config.feature_options)):
				contribs = {}
				if feature_types[feature_index] == 'continuous': continue
				feature_descriptors = self.config.feature_descriptors[feature_index]
				if feature_descriptors is None: continue
				for desc_index in range(feature_descriptors.shape[1]):
					desc_summary_dict = {}
					desc_summary_dict['relevant_given_descriptors'] = np.arange(len(feature_descriptors[:, desc_index]))
					desc_summary_dict['given_descriptor_contributions'] = np.ones(len(feature_descriptors[:, desc_index]))
					contribs['descriptor_%d' % desc_index] = copy.deepcopy(desc_summary_dict)
				summary['feature_%d' % feature_index] = copy.deepcopy(contribs)
			return summary
		for feature_index in range(len(self.config.feature_options)):
			if feature_types[feature_index] == 'continuous': continue
			weights = self.weights[feature_index]
			reduced_gen_descs = self.reduced_gen_descs[feature_index]
			sufficient_indices = self.sufficient_indices[feature_index]
			if weights is None: continue
			if len(sufficient_indices) == 0: continue
			# normalize weights (L1-normalize each generated descriptor's row)
			normed_weights = np.empty(weights.shape)
			for index, weight_elements in enumerate(weights):
				normed_weights[index] = weight_elements / np.sum(np.abs(weight_elements))
			# identify contributing indices
			contribs = {}
			# for new_desc_index in range(reduced_gen_descs.shape[1]):
			for new_desc_index in sufficient_indices:
				desc_summary_dict = {}
				relevant_weights = normed_weights[new_desc_index]
				sorting_indices = np.argsort(np.abs(relevant_weights))
				cumulative_sum = np.cumsum(np.abs(relevant_weights[sorting_indices]))
				# keep descriptors past the 10% cumulative-contribution threshold
				include_indices = np.where(cumulative_sum > 0.1)[0]
				relevant_given_descriptors = sorting_indices[include_indices]
				desc_summary_dict['relevant_given_descriptors'] = relevant_given_descriptors
				desc_summary_dict['given_descriptor_contributions'] = weights[new_desc_index]
				contribs['descriptor_%d' % new_desc_index] = copy.deepcopy(desc_summary_dict)
			summary['feature_%d' % feature_index] = copy.deepcopy(contribs)
		return summary
#========================================================================
| [
"numpy.abs",
"os.path.getsize",
"pickle.dump",
"numpy.where",
"pickle.load",
"time.sleep",
"uuid.uuid4",
"os.path.isfile",
"numpy.array",
"numpy.empty",
"subprocess.call",
"copy.deepcopy",
"time.time",
"os.remove"
] | [((1834, 1909), 'subprocess.call', 'subprocess.call', (["('python %s %s' % (self.exec_name, config_name))"], {'shell': '(True)'}), "('python %s %s' % (self.exec_name, config_name), shell=True)\n", (1849, 1909), False, 'import subprocess\n'), ((2320, 2335), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (2330, 2335), False, 'import time\n'), ((3230, 3252), 'os.remove', 'os.remove', (['config_name'], {}), '(config_name)\n', (3239, 3252), False, 'import os, sys\n'), ((3255, 3278), 'os.remove', 'os.remove', (['results_name'], {}), '(results_name)\n', (3264, 3278), False, 'import os, sys\n'), ((3360, 3371), 'time.time', 'time.time', ([], {}), '()\n', (3369, 3371), False, 'import time\n'), ((5425, 5436), 'time.time', 'time.time', ([], {}), '()\n', (5434, 5436), False, 'import time\n'), ((1647, 1677), 'pickle.dump', 'pickle.dump', (['sim_dict', 'content'], {}), '(sim_dict, content)\n', (1658, 1677), False, 'import pickle\n'), ((2125, 2153), 'os.path.isfile', 'os.path.isfile', (['results_name'], {}), '(results_name)\n', (2139, 2153), False, 'import os, sys\n'), ((2158, 2174), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (2168, 2174), False, 'import time\n'), ((2218, 2247), 'os.path.getsize', 'os.path.getsize', (['results_name'], {}), '(results_name)\n', (2233, 2247), False, 'import os, sys\n'), ((2267, 2296), 'os.path.getsize', 'os.path.getsize', (['results_name'], {}), '(results_name)\n', (2282, 2296), False, 'import os, sys\n'), ((2300, 2316), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (2310, 2316), False, 'import time\n'), ((5533, 5548), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5543, 5548), False, 'import time\n'), ((7058, 7081), 'numpy.empty', 'np.empty', (['weights.shape'], {}), '(weights.shape)\n', (7066, 7081), True, 'import numpy as np\n'), ((8004, 8027), 'copy.deepcopy', 'copy.deepcopy', (['contribs'], {}), '(contribs)\n', (8017, 8027), False, 'import copy\n'), ((1468, 1480), 'uuid.uuid4', 'uuid.uuid4', ([], 
{}), '()\n', (1478, 1480), False, 'import uuid\n'), ((2404, 2424), 'pickle.load', 'pickle.load', (['content'], {}), '(content)\n', (2415, 2424), False, 'import pickle\n'), ((2447, 2460), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2457, 2460), False, 'import time\n'), ((4328, 4345), 'numpy.array', 'np.array', (['[[1.0]]'], {}), '([[1.0]])\n', (4336, 4345), True, 'import numpy as np\n'), ((5204, 5219), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5214, 5219), False, 'import time\n'), ((6586, 6609), 'copy.deepcopy', 'copy.deepcopy', (['contribs'], {}), '(contribs)\n', (6599, 6609), False, 'import copy\n'), ((7928, 7960), 'copy.deepcopy', 'copy.deepcopy', (['desc_summary_dict'], {}), '(desc_summary_dict)\n', (7941, 7960), False, 'import copy\n'), ((2520, 2540), 'pickle.load', 'pickle.load', (['content'], {}), '(content)\n', (2531, 2540), False, 'import pickle\n'), ((5015, 5030), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5025, 5030), False, 'import time\n'), ((6509, 6541), 'copy.deepcopy', 'copy.deepcopy', (['desc_summary_dict'], {}), '(desc_summary_dict)\n', (6522, 6541), False, 'import copy\n'), ((7488, 7512), 'numpy.abs', 'np.abs', (['relevant_weights'], {}), '(relevant_weights)\n', (7494, 7512), True, 'import numpy as np\n'), ((7546, 7587), 'numpy.abs', 'np.abs', (['relevant_weights[sorting_indices]'], {}), '(relevant_weights[sorting_indices])\n', (7552, 7587), True, 'import numpy as np\n'), ((7611, 7641), 'numpy.where', 'np.where', (['(cumulative_sum > 0.1)'], {}), '(cumulative_sum > 0.1)\n', (7619, 7641), True, 'import numpy as np\n'), ((7188, 7211), 'numpy.abs', 'np.abs', (['weight_elements'], {}), '(weight_elements)\n', (7194, 7211), True, 'import numpy as np\n')] |
import sys
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
sys.path.append('..')
from datasets import dataloaders
from tqdm import tqdm
def get_score(acc_list):
    """Return the mean accuracy and its 95% confidence half-width.

    The half-width is 1.96 * standard error of the mean, using the
    population variance (np.var default ddof=0), matching common
    few-shot evaluation reporting.
    """
    n_trials = len(acc_list)
    mean = np.mean(acc_list)
    sem = np.sqrt(np.var(acc_list) / n_trials)
    interval = 1.96 * sem
    return mean, interval
def meta_test(data_path,model,way,shot,pre,transform_type,query_shot=16,trial=10000,return_list=False):
    """Evaluate ``model`` over ``trial`` few-shot episodes and report accuracy.

    Each batch from the meta-test dataloader holds way*shot support images
    followed by way*query_shot query images, with query images grouped by
    class. Returns the per-episode accuracy array when ``return_list`` is
    True, otherwise the mean accuracy and its 95% confidence interval.
    """
    eval_loader = dataloaders.meta_test_dataloader(data_path=data_path,
                                                     way=way,
                                                     shot=shot,
                                                     pre=pre,
                                                     transform_type=transform_type,
                                                     query_shot=query_shot,
                                                     trial=trial)
    # ground-truth labels for the query images: query_shot consecutive images per class
    target = torch.LongTensor([i//query_shot for i in range(query_shot*way)]).cuda()
    acc_list = []
    for _episode, (inp, _) in tqdm(enumerate(eval_loader)):
        inp = inp.cuda()
        predicted = model.meta_test(inp, way=way, shot=shot, query_shot=query_shot)
        n_correct = torch.sum(torch.eq(predicted, target)).item()
        acc_list.append(100 * n_correct / query_shot / way)
    if return_list:
        return np.array(acc_list)
    mean, interval = get_score(acc_list)
    return mean, interval
"numpy.mean",
"torch.eq",
"numpy.array",
"datasets.dataloaders.meta_test_dataloader",
"sys.path.append",
"numpy.var"
] | [((97, 118), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (112, 118), False, 'import sys\n'), ((213, 230), 'numpy.mean', 'np.mean', (['acc_list'], {}), '(acc_list)\n', (220, 230), True, 'import numpy as np\n'), ((442, 595), 'datasets.dataloaders.meta_test_dataloader', 'dataloaders.meta_test_dataloader', ([], {'data_path': 'data_path', 'way': 'way', 'shot': 'shot', 'pre': 'pre', 'transform_type': 'transform_type', 'query_shot': 'query_shot', 'trial': 'trial'}), '(data_path=data_path, way=way, shot=shot,\n pre=pre, transform_type=transform_type, query_shot=query_shot, trial=trial)\n', (474, 595), False, 'from datasets import dataloaders\n'), ((1420, 1438), 'numpy.array', 'np.array', (['acc_list'], {}), '(acc_list)\n', (1428, 1438), True, 'import numpy as np\n'), ((259, 275), 'numpy.var', 'np.var', (['acc_list'], {}), '(acc_list)\n', (265, 275), True, 'import numpy as np\n'), ((1305, 1332), 'torch.eq', 'torch.eq', (['max_index', 'target'], {}), '(max_index, target)\n', (1313, 1332), False, 'import torch\n')] |
from RoboPy import *
import RoboPy as rp
import numpy as np
from numpy import pi
from john_radlab.Jacobian_Orthogonality.source import AIM, findY
# Previously-tried fixed DH parameter tables, kept for reference:
# dh = [[0, 0, 2, 0], [0, 0, 1, 0], [0, 0, 0.3, 0], [0, 0, 1, 0]]
# dh = [[0, 0, 2.41497930, 0], [0, 0, 1.71892394e+00, 0], [0, 0, 1.38712293e-03, 0], [0, 0, 3.89431195e-01, 0]]
# dh = [[0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1, 0]]
# Random 4x4 DH table for a 4-joint arm (row layout follows the SerialArm
# convention — TODO confirm column order against RoboPy docs).
dh = np.random.random_sample((4,4))
arm = SerialArm(dh)
viz = VizScene()
viz.add_arm(arm)
# NOTE(review): attribute access with no call — likely meant viz.add_frame(...); confirm.
viz.add_frame
q0 = np.array([0, pi/2, pi/2, pi/2])
# Jacobian at the initial configuration and its orthogonality scores.
J = arm.jacob(q0)
Y = findY(J)
w = AIM(J, 'w')
np.set_printoptions(precision=2)
print(f"q: {q0}\nJ: \n{clean_rotation_matrix(J, 0.01)}\n Y: \n{clean_rotation_matrix(Y)}\n---- Scores ----\nW: {w}, Fro: {AIM(J,'fro')}, Min: {AIM(J,'min')}")
viz.update(q0)
# Per-step joint increments with a fixed velocity ratio of 1:2:4:8.
dq = np.array([pi/1000, 2*pi/1000, 4*pi/1000, 8*pi/1000])
# NOTE(review): q aliases q0, so the in-place += below also mutates q0.
q = q0
# Animate the arm indefinitely.
while True:
    q += dq
    viz.update(q)
# NOTE(review): unreachable — the loop above never exits.
viz.hold()
| [
"john_radlab.Jacobian_Orthogonality.source.AIM",
"john_radlab.Jacobian_Orthogonality.source.findY",
"numpy.random.random_sample",
"numpy.array",
"numpy.set_printoptions"
] | [((394, 425), 'numpy.random.random_sample', 'np.random.random_sample', (['(4, 4)'], {}), '((4, 4))\n', (417, 425), True, 'import numpy as np\n'), ((500, 537), 'numpy.array', 'np.array', (['[0, pi / 2, pi / 2, pi / 2]'], {}), '([0, pi / 2, pi / 2, pi / 2])\n', (508, 537), True, 'import numpy as np\n'), ((554, 562), 'john_radlab.Jacobian_Orthogonality.source.findY', 'findY', (['J'], {}), '(J)\n', (559, 562), False, 'from john_radlab.Jacobian_Orthogonality.source import AIM, findY\n'), ((567, 578), 'john_radlab.Jacobian_Orthogonality.source.AIM', 'AIM', (['J', '"""w"""'], {}), "(J, 'w')\n", (570, 578), False, 'from john_radlab.Jacobian_Orthogonality.source import AIM, findY\n'), ((579, 611), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (598, 611), True, 'import numpy as np\n'), ((792, 858), 'numpy.array', 'np.array', (['[pi / 1000, 2 * pi / 1000, 4 * pi / 1000, 8 * pi / 1000]'], {}), '([pi / 1000, 2 * pi / 1000, 4 * pi / 1000, 8 * pi / 1000])\n', (800, 858), True, 'import numpy as np\n'), ((735, 748), 'john_radlab.Jacobian_Orthogonality.source.AIM', 'AIM', (['J', '"""fro"""'], {}), "(J, 'fro')\n", (738, 748), False, 'from john_radlab.Jacobian_Orthogonality.source import AIM, findY\n'), ((756, 769), 'john_radlab.Jacobian_Orthogonality.source.AIM', 'AIM', (['J', '"""min"""'], {}), "(J, 'min')\n", (759, 769), False, 'from john_radlab.Jacobian_Orthogonality.source import AIM, findY\n')] |
import numpy as np
from landlab import Component
_VALID_METHODS = set(["Grid"])
def _assert_method_is_valid(method):
if method not in _VALID_METHODS:
raise ValueError("%s: Invalid method name" % method)
class Radiation(Component):
    """Compute 1D and 2D total incident shortwave radiation.
    Landlab component that computes 1D and 2D total incident shortwave
    radiation. This code also computes relative incidence shortwave radiation
    compared to a flat surface. Ref: Bras, Rafael L. Hydrology: an
    introduction to hydrologic science. Addison Wesley Publishing Company,
    1990.
    .. codeauthor:: <NAME> & <NAME>
    Examples
    --------
    >>> from landlab import RasterModelGrid
    >>> from landlab.components import Radiation
    >>> import numpy as np
    >>> grid = RasterModelGrid((5, 4), xy_spacing=(0.2, 0.2))
    >>> z = grid.add_zeros("node", "topographic__elevation")
    >>> rad = Radiation(grid)
    >>> rad.name
    'Radiation'
    >>> rad.input_var_names
    ('topographic__elevation',)
    >>> sorted(rad.output_var_names) # doctest: +NORMALIZE_WHITESPACE
    ['radiation__incoming_shortwave_flux',
    'radiation__net_shortwave_flux',
    'radiation__ratio_to_flat_surface']
    >>> sorted(rad.units) # doctest: +NORMALIZE_WHITESPACE
    [('radiation__incoming_shortwave_flux', 'W/m^2'),
    ('radiation__net_shortwave_flux', 'W/m^2'),
    ('radiation__ratio_to_flat_surface', 'None'),
    ('topographic__elevation', 'm')]
    >>> rad.grid.number_of_node_rows
    5
    >>> rad.grid.number_of_node_columns
    4
    >>> rad.grid is grid
    True
    >>> np.all(grid.at_cell['radiation__ratio_to_flat_surface'] == 0.)
    True
    >>> np.all(grid.at_node['topographic__elevation'] == 0.)
    True
    >>> grid['node']['topographic__elevation'] = np.array([
    ... 0., 0., 0., 0.,
    ... 1., 1., 1., 1.,
    ... 2., 2., 2., 2.,
    ... 3., 4., 4., 3.,
    ... 4., 4., 4., 4.])
    >>> rad.current_time = 0.5
    >>> rad.update()
    >>> np.all(grid.at_cell['radiation__ratio_to_flat_surface'] == 0.)
    False
    """
    _name = "Radiation"
    _info = {
        "radiation__incoming_shortwave_flux": {
            "dtype": float,
            "intent": "out",
            "optional": False,
            "units": "W/m^2",
            "mapping": "cell",
            "doc": "total incident shortwave radiation over the time step",
        },
        "radiation__net_shortwave_flux": {
            "dtype": float,
            "intent": "out",
            "optional": False,
            "units": "W/m^2",
            "mapping": "cell",
            "doc": "net incident shortwave radiation over the time step",
        },
        "radiation__ratio_to_flat_surface": {
            "dtype": float,
            "intent": "out",
            "optional": False,
            "units": "None",
            "mapping": "cell",
            "doc": "ratio of total incident shortwave radiation on sloped surface to flat surface",
        },
        "topographic__elevation": {
            "dtype": float,
            "intent": "in",
            "optional": False,
            "units": "m",
            "mapping": "node",
            "doc": "Land surface topographic elevation",
        },
    }
    def __init__(
        self,
        grid,
        method="Grid",
        cloudiness=0.2,
        latitude=34.0,
        albedo=0.2,
        solar_constant=1366.67,
        clearsky_turbidity=2.0,
        opt_airmass=0.0,
        current_time=0.0,
        hour=12.0,
    ):
        """
        Parameters
        ----------
        grid: RasterModelGrid
            A grid.
        method: {'Grid'}, optional
            Currently, only default is available.
        cloudiness: float, optional
            Cloudiness.
        latitude: float, optional
            Latitude (radians).
        albedo: float, optional
            Albedo.
        solar_constant: float, optional
            Solar Constant (W/m^2).
        clearsky_turbidity: float, optional
            Clear sky turbidity.
        opt_airmass: float, optional
            Optical air mass.
        current_time: float
            Current time (years).
        hour: float, optional
            Hour of the day. Default is 12 (solar noon)
        """
        super(Radiation, self).__init__(grid)
        self.current_time = current_time
        self.hour = hour
        self._method = method
        self._N = cloudiness
        self._latitude = latitude
        self._A = albedo
        self._Io = solar_constant
        self._n = clearsky_turbidity
        self._m = opt_airmass
        _assert_method_is_valid(self._method)
        self.initialize_output_fields()
        # slope/aspect fields are derived here, not declared in _info
        if "Slope" not in self._grid.at_cell:
            self._grid.add_zeros("Slope", at="cell", units="radians")
        if "Aspect" not in self._grid.at_cell:
            self._grid.add_zeros("Aspect", at="cell", units="radians")
        self._nodal_values = self._grid["node"]
        self._cell_values = self._grid["cell"]
        self._slope, self._aspect = grid.calculate_slope_aspect_at_nodes_burrough(
            vals="topographic__elevation"
        )
        self._cell_values["Slope"] = self._slope
        self._cell_values["Aspect"] = self._aspect
    @property
    def hour(self):
        """Hour of the day.
        Default is 12 (solar noon).
        """
        return self._hour
    @hour.setter
    def hour(self, hour):
        # restrict to a valid hour of the day
        assert hour >= 0.0
        assert hour <= 24.0
        self._hour = hour
    def update(self):
        """Update fields with current loading conditions.
        This method looks to the properties ``current_time`` and
        ``hour`` and uses their values in updating fields.
        """
        self._t = self._hour
        self._radf = self._cell_values["radiation__ratio_to_flat_surface"]
        self._Rs = self._cell_values["radiation__incoming_shortwave_flux"]
        self._Rnet = self._cell_values["radiation__net_shortwave_flux"]
        self._julian = np.floor(
            (self._current_time - np.floor(self._current_time)) * 365.25
        ) # Julian day
        self._phi = np.radians(self._latitude) # Latitude in radians
        # NOTE: np.radians is a linear scaling, so 23.45 * radians(cos(...))
        # equals the usual radians(23.45 * cos(...)) declination formula
        self._delta = 23.45 * np.radians(
            np.cos(2 * np.pi / 365 * (172 - self._julian))
        ) # Declination angle
        self._tau = (self._t + 12.0) * np.pi / 12.0 # Hour angle
        self._alpha = np.arcsin(
            np.sin(self._delta) * np.sin(self._phi)
            + np.cos(self._delta) * np.cos(self._phi) * np.cos(self._tau)
        ) # Solar altitude
        if self._alpha <= 0.25 * np.pi / 180.0: # If altitude is -ve,
            self._alpha = 0.25 * np.pi / 180.0 # sun is beyond the horizon
        # Accounting for albedo, cloudiness and atmospheric turbidity
        self._Rgl = self._Io * np.exp(
            (-1)
            * self._n
            * (0.128 - 0.054 * np.log10(1.0 / np.sin(self._alpha)))
            * (1.0 / np.sin(self._alpha))
        )
        self._phisun = np.arctan(
            -np.sin(self._tau)
            / (
                np.tan(self._delta) * np.cos(self._phi)
                - np.sin(self._phi) * np.cos(self._tau)
            )
        ) # Sun's azimuth
        # shift the azimuth into the correct quadrant
        if self._phisun >= 0 and -np.sin(self._tau) <= 0:
            self._phisun = self._phisun + np.pi
        elif self._phisun <= 0 and -np.sin(self._tau) >= 0:
            self._phisun = self._phisun + np.pi
        self._flat = np.cos(np.arctan(0)) * np.sin(self._alpha) + np.sin(
            np.arctan(0)
        ) * np.cos(self._alpha) * np.cos(
            self._phisun - 0
        ) # flat surface reference
        # flat surface total incoming shortwave radiation
        self._Rsflat = self._Rgl * self._flat
        # flat surface net incoming shortwave radiation
        self._Rnetflat = (1 - self._A) * (1 - 0.65 * (self._N ** 2)) * self._Rsflat
        self._sloped = np.cos(self._slope) * np.sin(self._alpha) + np.sin(
            self._slope
        ) * np.cos(self._alpha) * np.cos(self._phisun - self._aspect)
        # Ratio of cosine of solar incidence angle of sloped surface to that
        # of a flat surface
        self._radf = self._sloped / self._flat
        # clamp the ratio to a physically plausible range
        self._radf[self._radf <= 0.0] = 0.0
        self._radf[self._radf > 6.0] = 6.0
        # Sloped surface total incoming shortwave radiation
        self._Rs = self._Rsflat * self._radf
        self._Rnet = self._Rnetflat * self._radf
        self._cell_values["radiation__ratio_to_flat_surface"] = self._radf
        self._cell_values["radiation__incoming_shortwave_flux"] = self._Rs
        self._cell_values["radiation__net_shortwave_flux"] = self._Rnet
| [
"numpy.radians",
"numpy.tan",
"numpy.floor",
"numpy.cos",
"numpy.sin",
"numpy.arctan"
] | [((6172, 6198), 'numpy.radians', 'np.radians', (['self._latitude'], {}), '(self._latitude)\n', (6182, 6198), True, 'import numpy as np\n'), ((6277, 6323), 'numpy.cos', 'np.cos', (['(2 * np.pi / 365 * (172 - self._julian))'], {}), '(2 * np.pi / 365 * (172 - self._julian))\n', (6283, 6323), True, 'import numpy as np\n'), ((7521, 7540), 'numpy.sin', 'np.sin', (['self._alpha'], {}), '(self._alpha)\n', (7527, 7540), True, 'import numpy as np\n'), ((7610, 7634), 'numpy.cos', 'np.cos', (['(self._phisun - 0)'], {}), '(self._phisun - 0)\n', (7616, 7634), True, 'import numpy as np\n'), ((7953, 7972), 'numpy.cos', 'np.cos', (['self._slope'], {}), '(self._slope)\n', (7959, 7972), True, 'import numpy as np\n'), ((7975, 7994), 'numpy.sin', 'np.sin', (['self._alpha'], {}), '(self._alpha)\n', (7981, 7994), True, 'import numpy as np\n'), ((8063, 8098), 'numpy.cos', 'np.cos', (['(self._phisun - self._aspect)'], {}), '(self._phisun - self._aspect)\n', (8069, 8098), True, 'import numpy as np\n'), ((6088, 6116), 'numpy.floor', 'np.floor', (['self._current_time'], {}), '(self._current_time)\n', (6096, 6116), True, 'import numpy as np\n'), ((6468, 6487), 'numpy.sin', 'np.sin', (['self._delta'], {}), '(self._delta)\n', (6474, 6487), True, 'import numpy as np\n'), ((6490, 6507), 'numpy.sin', 'np.sin', (['self._phi'], {}), '(self._phi)\n', (6496, 6507), True, 'import numpy as np\n'), ((6564, 6581), 'numpy.cos', 'np.cos', (['self._tau'], {}), '(self._tau)\n', (6570, 6581), True, 'import numpy as np\n'), ((7073, 7090), 'numpy.sin', 'np.sin', (['self._tau'], {}), '(self._tau)\n', (7079, 7090), True, 'import numpy as np\n'), ((7295, 7312), 'numpy.sin', 'np.sin', (['self._tau'], {}), '(self._tau)\n', (7301, 7312), True, 'import numpy as np\n'), ((7505, 7517), 'numpy.arctan', 'np.arctan', (['(0)'], {}), '(0)\n', (7514, 7517), True, 'import numpy as np\n'), ((7588, 7607), 'numpy.cos', 'np.cos', (['self._alpha'], {}), '(self._alpha)\n', (7594, 7607), True, 'import numpy as np\n'), ((7997, 
8016), 'numpy.sin', 'np.sin', (['self._slope'], {}), '(self._slope)\n', (8003, 8016), True, 'import numpy as np\n'), ((8041, 8060), 'numpy.cos', 'np.cos', (['self._alpha'], {}), '(self._alpha)\n', (8047, 8060), True, 'import numpy as np\n'), ((6522, 6541), 'numpy.cos', 'np.cos', (['self._delta'], {}), '(self._delta)\n', (6528, 6541), True, 'import numpy as np\n'), ((6544, 6561), 'numpy.cos', 'np.cos', (['self._phi'], {}), '(self._phi)\n', (6550, 6561), True, 'import numpy as np\n'), ((6994, 7013), 'numpy.sin', 'np.sin', (['self._alpha'], {}), '(self._alpha)\n', (7000, 7013), True, 'import numpy as np\n'), ((7123, 7142), 'numpy.tan', 'np.tan', (['self._delta'], {}), '(self._delta)\n', (7129, 7142), True, 'import numpy as np\n'), ((7145, 7162), 'numpy.cos', 'np.cos', (['self._phi'], {}), '(self._phi)\n', (7151, 7162), True, 'import numpy as np\n'), ((7181, 7198), 'numpy.sin', 'np.sin', (['self._phi'], {}), '(self._phi)\n', (7187, 7198), True, 'import numpy as np\n'), ((7201, 7218), 'numpy.cos', 'np.cos', (['self._tau'], {}), '(self._tau)\n', (7207, 7218), True, 'import numpy as np\n'), ((7404, 7421), 'numpy.sin', 'np.sin', (['self._tau'], {}), '(self._tau)\n', (7410, 7421), True, 'import numpy as np\n'), ((7563, 7575), 'numpy.arctan', 'np.arctan', (['(0)'], {}), '(0)\n', (7572, 7575), True, 'import numpy as np\n'), ((6951, 6970), 'numpy.sin', 'np.sin', (['self._alpha'], {}), '(self._alpha)\n', (6957, 6970), True, 'import numpy as np\n')] |
from mltoolkit.mldp.steps.transformers import BaseTransformer
import numpy as np
from logging import getLogger
import os
logger_name = os.path.basename(__file__)
logger = getLogger(logger_name)
class RatingProp(BaseTransformer):
"""Computes the rating deviation property for reviews. And
that each batch contains data related to one group only!
"""
def __init__(self, rating_fname, new_fname, **kwargs):
super(RatingProp, self).__init__(**kwargs)
self.rating_fname = rating_fname
self.new_fname = new_fname
def _transform(self, data_chunk):
rating = data_chunk[self.rating_fname]
data_chunk[self.new_fname] = []
for indx in range(len(rating)):
_hyp = rating[indx]
_ref = [rating[i] for i in range(len(rating)) if i != indx]
rating_dev = _comp_rating_dev(_hyp, _ref)
data_chunk[self.new_fname].append(rating_dev)
return data_chunk
def _comp_rating_dev(hyp_rating, refs_rating):
res = hyp_rating - np.mean(refs_rating)
return res
| [
"logging.getLogger",
"numpy.mean",
"os.path.basename"
] | [((136, 162), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (152, 162), False, 'import os\n'), ((172, 194), 'logging.getLogger', 'getLogger', (['logger_name'], {}), '(logger_name)\n', (181, 194), False, 'from logging import getLogger\n'), ((1031, 1051), 'numpy.mean', 'np.mean', (['refs_rating'], {}), '(refs_rating)\n', (1038, 1051), True, 'import numpy as np\n')] |
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from copy import deepcopy
import numpy as np
import inspect
from tensorforce import util, TensorforceError
import tensorforce.agents
from tensorforce.meta_parameter_recorder import MetaParameterRecorder
class Agent(object):
"""
Basic Reinforcement learning agent. An agent encapsulates execution logic
of a particular reinforcement learning algorithm and defines the external interface
to the environment.
The agent hence acts as intermediate layer between environment
and backend execution (value function or policy updates).
"""
def __init__(
self,
states_spec,
actions_spec,
batched_observe
):
"""
Initializes the reinforcement learning agent.
Args:
states_spec: Dict containing at least one state definition. In the case of a single state,
keys `shape` and `type` are necessary. For multiple states, pass a dict of dicts where each state
is a dict itself with a unique name as its key.
actions_spec: Dict containing at least one action definition. Actions have types and either `num_actions`
for discrete actions or a `shape` for continuous actions. Consult documentation and tests for more.
batched_observe: Optional int specifying how many observe calls are batched into one session run.
Without batching, throughput will be lower because every `observe` triggers a session invocation to
update rewards in the graph.
"""
self.unique_state = ('shape' in states_spec)
if self.unique_state:
states_spec = dict(state=states_spec)
self.states_spec = deepcopy(states_spec)
for name, state in self.states_spec.items():
# Convert int to unary tuple
if isinstance(state['shape'], int):
state['shape'] = (state['shape'],)
# Set default type to float
if 'type' not in state:
state['type'] = 'float'
# Actions config and exploration
self.exploration = dict()
self.unique_action = ('type' in actions_spec)
if self.unique_action:
actions_spec = dict(action=actions_spec)
self.actions_spec = deepcopy(actions_spec)
for name, action in self.actions_spec.items():
# Check required values
if action['type'] == 'int':
if 'num_actions' not in action:
raise TensorforceError("Action requires value 'num_actions' set!")
elif action['type'] == 'float':
if ('min_value' in action) != ('max_value' in action):
raise TensorforceError("Action requires both values 'min_value' and 'max_value' set!")
# Set default shape to empty tuple
if 'shape' not in action:
action['shape'] = ()
# Convert int to unary tuple
if isinstance(action['shape'], int):
action['shape'] = (action['shape'],)
# TensorFlow summaries & Configuration Meta Parameter Recorder options
if self.summary_spec is None:
self.summary_labels = set()
else:
self.summary_labels = set(self.summary_spec.get('labels', ()))
self.meta_param_recorder = None
#if 'configuration' in self.summary_labels or 'print_configuration' in self.summary_labels:
if any(k in self.summary_labels for k in ['configuration','print_configuration']):
self.meta_param_recorder = MetaParameterRecorder(inspect.currentframe())
if 'meta_dict' in self.summary_spec:
# Custom Meta Dictionary passed
self.meta_param_recorder.merge_custom(self.summary_spec['meta_dict'])
if 'configuration' in self.summary_labels:
# Setup for TensorBoard population
self.summary_spec['meta_param_recorder_class'] = self.meta_param_recorder
if 'print_configuration' in self.summary_labels:
# Print to STDOUT (TADO: optimize output)
self.meta_param_recorder.text_output(format_type=1)
# Init Model, this must follow the Summary Configuration section above to carry meta_param_recorder
self.model = self.initialize_model()
# Batched observe for better performance with Python.
self.batched_observe = batched_observe
if self.batched_observe is not None:
self.observe_terminal = list()
self.observe_reward = list()
self.reset()
def __str__(self):
return str(self.__class__.__name__)
def sync(self, sync_value):
self.model.sync(sync_value)
def close(self):
self.model.close()
#被子类重写
def initialize_model(self):
"""
Creates the model for the respective agent based on specifications given by user. This is a separate
call after constructing the agent because the agent constructor has to perform a number of checks
on the specs first, sometimes adjusting them e.g. by converting to a dict.
"""
raise NotImplementedError
def reset(self):
"""
Reset the agent to its initial state on episode start. Updates internal episode and
timestep counter, internal states, and resets preprocessors.
"""
self.episode, self.timestep, self.next_internals = self.model.reset()
self.current_internals = self.next_internals
#TODO have to call preprocessing reset in model
# for preprocessing in self.preprocessing.values():
# preprocessing.reset()
def act(self, states, deterministic=False):
"""
Return action(s) for given state(s). States preprocessing and exploration are applied if
configured accordingly.
Args:
states: One state (usually a value tuple) or dict of states if multiple states are expected.
deterministic: If true, no exploration and sampling is applied.
Returns:
Scalar value of the action or dict of multiple actions the agent wants to execute.
"""
self.current_internals = self.next_internals
if self.unique_state:
self.current_states = dict(state=np.asarray(states))
else:
self.current_states = {name: np.asarray(state) for name, state in states.items()}
# Retrieve action
self.current_actions, self.next_internals, self.timestep = self.model.act(
states=self.current_states,
internals=self.current_internals,
deterministic=deterministic
)
if self.unique_action:
return self.current_actions['action']
else:
return self.current_actions
def observe_batch(self, current_states, current_internals, current_actions, current_terminal, current_reward, next_states, next_internals):
"""
Observe one batch data at a time from the environment.
Usually used in non-interactive mode, and the data is prepared beforehand
"""
raise NotImplementedError
def observe(self, next_states, terminal, reward):
"""
Observe experience from the environment to learn from. Optionally preprocesses rewards
Child classes should call super to get the processed reward
EX: terminal, reward = super()...
Args:
next_states: One state (usually a value tuple) or dict of states if multiple states are expected.
terminal: boolean indicating if the episode terminated after the observation.
reward: scalar reward that resulted from executing the action.
"""
self.current_terminal = terminal
self.current_reward = reward
if self.batched_observe is not None and self.batched_observe > 0:
# Batched observe for better performance with Python.
self.observe_terminal.append(self.current_terminal)
self.observe_reward.append(self.current_reward)
if self.current_terminal or len(self.observe_terminal) >= self.batched_observe:
self.episode = self.model.observe(
terminal=self.observe_terminal,
reward=self.observe_reward
)
self.observe_terminal = list()
self.observe_reward = list()
else:
self.episode = self.model.observe(
terminal=self.current_terminal,
reward=self.current_reward
)
def should_stop(self):
return self.model.monitored_session.should_stop()
def last_observation(self):
return dict(
states=self.current_states,
internals=self.current_internals,
actions=self.current_actions,
terminal=self.current_terminal,
reward=self.current_reward
)
def save_model(self, directory=None, append_timestep=True):
"""
Save TensorFlow model. If no checkpoint directory is given, the model's default saver
directory is used. Optionally appends current timestep to prevent overwriting previous
checkpoint files. Turn off to be able to load model from the same given path argument as
given here.
Args:
directory: Optional checkpoint directory.
use_global_step: Appends the current timestep to the checkpoint file if true.
If this is set to True, the load path must include the checkpoint timestep suffix.
For example, if stored to models/ and set to true, the exported file will be of the
form models/model.ckpt-X where X is the last timestep saved. The load path must
precisely match this file name. If this option is turned off, the checkpoint will
always overwrite the file specified in path and the model can always be loaded under
this path.
Returns:
Checkpoint path were the model was saved.
"""
return self.model.save(directory=directory, append_timestep=append_timestep)
def restore_model(self, directory=None, file=None):
"""
Restore TensorFlow model. If no checkpoint file is given, the latest checkpoint is
restored. If no checkpoint directory is given, the model's default saver directory is
used (unless file specifies the entire path).
Args:
directory: Optional checkpoint directory.
file: Optional checkpoint file, or path if directory not given.
"""
self.model.restore(directory=directory, file=file)
@staticmethod
def from_spec(spec, kwargs):
"""
Creates an agent from a specification dict.
"""
agent = util.get_object(
obj=spec,
predefined_objects=tensorforce.agents.agents,
kwargs=kwargs
)
#isinstance会考虑继承关系
assert isinstance(agent, Agent)
return agent
| [
"inspect.currentframe",
"numpy.asarray",
"tensorforce.util.get_object",
"copy.deepcopy",
"tensorforce.TensorforceError"
] | [((2501, 2522), 'copy.deepcopy', 'deepcopy', (['states_spec'], {}), '(states_spec)\n', (2509, 2522), False, 'from copy import deepcopy\n'), ((3075, 3097), 'copy.deepcopy', 'deepcopy', (['actions_spec'], {}), '(actions_spec)\n', (3083, 3097), False, 'from copy import deepcopy\n'), ((11654, 11744), 'tensorforce.util.get_object', 'util.get_object', ([], {'obj': 'spec', 'predefined_objects': 'tensorforce.agents.agents', 'kwargs': 'kwargs'}), '(obj=spec, predefined_objects=tensorforce.agents.agents,\n kwargs=kwargs)\n', (11669, 11744), False, 'from tensorforce import util, TensorforceError\n'), ((4395, 4417), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (4415, 4417), False, 'import inspect\n'), ((7194, 7211), 'numpy.asarray', 'np.asarray', (['state'], {}), '(state)\n', (7204, 7211), True, 'import numpy as np\n'), ((3304, 3364), 'tensorforce.TensorforceError', 'TensorforceError', (['"""Action requires value \'num_actions\' set!"""'], {}), '("Action requires value \'num_actions\' set!")\n', (3320, 3364), False, 'from tensorforce import util, TensorforceError\n'), ((7119, 7137), 'numpy.asarray', 'np.asarray', (['states'], {}), '(states)\n', (7129, 7137), True, 'import numpy as np\n'), ((3506, 3591), 'tensorforce.TensorforceError', 'TensorforceError', (['"""Action requires both values \'min_value\' and \'max_value\' set!"""'], {}), '("Action requires both values \'min_value\' and \'max_value\' set!"\n )\n', (3522, 3591), False, 'from tensorforce import util, TensorforceError\n')] |
import os
from PIL import Image
import numpy as np
import json
import logging
import torch
import torchvision
#from .coco import coco
#from maskrcnn_benchmark.data.datasets.coco import COCODataset #as coco
from maskrcnn_benchmark.structures.bounding_box import BoxList
#from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
#from maskrcnn_benchmark.structures.segmentation_mask import Mask, MaskList
import torch.utils.data as data
import copy
class FOLDER_DATA(object):
"""
target (Bboxlist) has fields:
[
'instance_ids',: decode from original mask
'category_id', : object classes id
'labels', : object classes
'bin_mask_list',: a list of binary mask
'instance_mask': original mask loade from Annotations
]
__getitem__ will return sample:
img, target, idx, meta = sample
mask = target.get_field('instance_mask') # in shape [1, H, W]
"""
def __init__(self, ann_file,
db_root_dir, transforms=None, cache_data=True):
self.db_root_dir = db_root_dir
self.ann_file = ann_file
self.transforms_davis = transforms
logger = logging.getLogger(__name__)
logger.info('init datasets: %s'%db_root_dir)
self.palette = None
print(db_root_dir, self.ann_file)
if 'txt' in self.ann_file:
with open(os.path.join(self.ann_file)) as f:
seqs = f.readlines()
if 'track' not in db_root_dir:
seqs = [seq.rstrip("\n").strip() for seq in seqs]
else:
seqs = [seq.rstrip("\n").strip()[1:] for seq in seqs]
elif 'json' in self.ann_file:
seqs = json.load(open(self.ann_file, 'r'))
if 'videos' in seqs: # and 'meta' in self.ann_file:
# loading ytb/meta.json
seqs = list(seqs['videos'].keys())
else:
seqs = list(seqs.keys())
self.DATA = []
self.vid_fid_2_index = {}
self.seqs = seqs
logger.info('start loading data..')
count = 0
for _video in self.seqs:
self.vid_fid_2_index[_video] = {}
frames = np.sort(os.listdir(db_root_dir+ '/' + _video))
for f in frames:
self.DATA.append({'video': _video, 'frame': f})
self.vid_fid_2_index[_video][f.split('.')[0]] = count # also generate a DATA-INDEX
count += 1
if 'json' in self.ann_file:
indexfile = self.ann_file.replace('.json', '-CACHE_maskben_folderdata_index_%d.json'%len(self.seqs)).split('/')[-1]
indexfile = 'data/folder_data/%s'%indexfile
logger.info('save indexfile at %s'%indexfile)
json.dump({'index2vidfid':self.DATA, 'vidfid2index':self.vid_fid_2_index}, open(indexfile, 'w'))
logger.info('vid %s .. total: %d'%(_video[0], count))
logger.info('done')
def __len__(self):
return len(self.DATA)
def __getitem__(self, idx):
"""
return loaded image and target (BBoxList):
add field: "instance_ids", "category_id", "annotation_path", "labels",
"bin_mask_list", "instance_mask"
"""
sample = self.DATA[idx]
image_path = sample['video'] +'/' + sample['frame']
img = Image.open(self.db_root_dir + '/' +image_path).convert("RGB")
i = torch.from_numpy(np.array(img))
self.DATA[idx]['image_width'] = np.array(i.shape[1])
self.DATA[idx]['image_height'] = np.array(i.shape[0])
#meta = sample['meta']
#mask_path = sample['mask_path']
# mpath = os.path.join(self.db_root_dir, mask_path)
target = img
if self.transforms_davis is not None:
img, target = self.transforms_davis(img, target)
return img, target, idx #, meta
# ---------------------------------------
# add helper function for inference.py
# ---------------------------------------
def get_img_info(self, index):
meta = self.DATA[index] #['meta']
if 'image_width' not in meta:
self.__getitem__(index)
return {
"width": meta["image_width"],
"height": meta["image_height"],
"seq": meta["video"],
"frame": meta["frame"]
}
def get_groundtruth(self, idx):
img, target, idx = self.__getitem__(idx)
return target
| [
"logging.getLogger",
"os.listdir",
"PIL.Image.open",
"os.path.join",
"numpy.array"
] | [((1181, 1208), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1198, 1208), False, 'import logging\n'), ((3513, 3533), 'numpy.array', 'np.array', (['i.shape[1]'], {}), '(i.shape[1])\n', (3521, 3533), True, 'import numpy as np\n'), ((3575, 3595), 'numpy.array', 'np.array', (['i.shape[0]'], {}), '(i.shape[0])\n', (3583, 3595), True, 'import numpy as np\n'), ((3458, 3471), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3466, 3471), True, 'import numpy as np\n'), ((2222, 2260), 'os.listdir', 'os.listdir', (["(db_root_dir + '/' + _video)"], {}), "(db_root_dir + '/' + _video)\n", (2232, 2260), False, 'import os\n'), ((3366, 3413), 'PIL.Image.open', 'Image.open', (["(self.db_root_dir + '/' + image_path)"], {}), "(self.db_root_dir + '/' + image_path)\n", (3376, 3413), False, 'from PIL import Image\n'), ((1389, 1416), 'os.path.join', 'os.path.join', (['self.ann_file'], {}), '(self.ann_file)\n', (1401, 1416), False, 'import os\n')] |
"""
Hawkes process with exponential kernel.
"""
import numpy as np
def hawkes1(n_samples):
"""Hawkes1 model from Omi et al. 2019."""
mu = 0.2
alpha = [0.8, 0.0]
beta = [1.0, 20.0]
arrival_times, loglike = _sample_and_nll(n_samples, mu, alpha, beta)
nll = -loglike.mean()
return arrival_times, nll
def hawkes2(n_samples):
"""Hawkes2 model from Omi et al. 2019."""
mu = 0.2
alpha = [0.4, 0.4]
beta = [1.0, 20.0]
arrival_times, loglike = _sample_and_nll(n_samples, mu, alpha, beta)
nll = -loglike.mean()
return arrival_times, nll
def _sample_and_nll(n_samples, mu, alpha, beta):
"""Generate samples from Hawkes process & compute NLL.
Source: https://github.com/omitakahiro/NeuralNetworkPointProcess
"""
T = []
LL = []
x = 0
l_trg1 = 0
l_trg2 = 0
l_trg_Int1 = 0
l_trg_Int2 = 0
mu_Int = 0
count = 0
while 1:
l = mu + l_trg1 + l_trg2
step = np.random.exponential()/l
x = x + step
l_trg_Int1 += l_trg1 * ( 1 - np.exp(-beta[0]*step) ) / beta[0]
l_trg_Int2 += l_trg2 * ( 1 - np.exp(-beta[1]*step) ) / beta[1]
mu_Int += mu * step
l_trg1 *= np.exp(-beta[0]*step)
l_trg2 *= np.exp(-beta[1]*step)
l_next = mu + l_trg1 + l_trg2
if np.random.rand() < l_next/l: #accept
T.append(x)
LL.append( np.log(l_next) - l_trg_Int1 - l_trg_Int2 - mu_Int )
l_trg1 += alpha[0]*beta[0]
l_trg2 += alpha[1]*beta[1]
l_trg_Int1 = 0
l_trg_Int2 = 0
mu_Int = 0
count += 1
if count == n_samples:
break
return [np.array(T), np.array(LL)]
| [
"numpy.random.rand",
"numpy.log",
"numpy.random.exponential",
"numpy.exp",
"numpy.array"
] | [((1205, 1228), 'numpy.exp', 'np.exp', (['(-beta[0] * step)'], {}), '(-beta[0] * step)\n', (1211, 1228), True, 'import numpy as np\n'), ((1245, 1268), 'numpy.exp', 'np.exp', (['(-beta[1] * step)'], {}), '(-beta[1] * step)\n', (1251, 1268), True, 'import numpy as np\n'), ((1702, 1713), 'numpy.array', 'np.array', (['T'], {}), '(T)\n', (1710, 1713), True, 'import numpy as np\n'), ((1715, 1727), 'numpy.array', 'np.array', (['LL'], {}), '(LL)\n', (1723, 1727), True, 'import numpy as np\n'), ((969, 992), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (990, 992), True, 'import numpy as np\n'), ((1317, 1333), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1331, 1333), True, 'import numpy as np\n'), ((1054, 1077), 'numpy.exp', 'np.exp', (['(-beta[0] * step)'], {}), '(-beta[0] * step)\n', (1060, 1077), True, 'import numpy as np\n'), ((1125, 1148), 'numpy.exp', 'np.exp', (['(-beta[1] * step)'], {}), '(-beta[1] * step)\n', (1131, 1148), True, 'import numpy as np\n'), ((1401, 1415), 'numpy.log', 'np.log', (['l_next'], {}), '(l_next)\n', (1407, 1415), True, 'import numpy as np\n')] |
from typing import Dict
import numpy as np
class ZDataProcessor:
def __init__(self):
self.source_to_index = {
'acoustic': 0,
'electronic': 1,
'synthetic': 2
}
self.quality_to_index = {
'bright': 0,
'dark': 1,
'distortion': 2,
'fast_decay': 3,
'long_release': 4,
'multiphonic': 5,
'nonlinear_env': 6,
'percussive': 7,
'reverb': 8,
'tempo_sync': 9
}
self.pitch = 60
def process(self, inputs: Dict) -> Dict:
velocity = inputs.get('velocity') or 75
pitch = inputs.get('velocity') or 60
source = inputs.get('source') or 'acoustic'
qualities = inputs.get('qualities') or []
latent_sample = inputs.get('latent_sample') or [0.] * 16
self.pitch = pitch # storing the midi pitch value before normalizing
velocity = np.expand_dims([velocity / 127.], axis=0).astype('float32')
pitch = np.expand_dims([pitch / 127.], axis=0).astype('float32')
source = np.expand_dims([self.source_to_index[source] / 2.], axis=0).astype('float32')
latent_sample = np.expand_dims(latent_sample, axis=0).astype('float32')
qualities_one_hot = np.zeros((1, 10))
for _, q in enumerate(qualities):
qualities_one_hot[0, self.quality_to_index[q]] = 1.
return {
'velocity': velocity,
'pitch': pitch,
'instrument_source': source,
'qualities': qualities_one_hot.astype('float32'),
'z_input': latent_sample
}
| [
"numpy.zeros",
"numpy.expand_dims"
] | [((1307, 1324), 'numpy.zeros', 'np.zeros', (['(1, 10)'], {}), '((1, 10))\n', (1315, 1324), True, 'import numpy as np\n'), ((970, 1012), 'numpy.expand_dims', 'np.expand_dims', (['[velocity / 127.0]'], {'axis': '(0)'}), '([velocity / 127.0], axis=0)\n', (984, 1012), True, 'import numpy as np\n'), ((1046, 1085), 'numpy.expand_dims', 'np.expand_dims', (['[pitch / 127.0]'], {'axis': '(0)'}), '([pitch / 127.0], axis=0)\n', (1060, 1085), True, 'import numpy as np\n'), ((1120, 1180), 'numpy.expand_dims', 'np.expand_dims', (['[self.source_to_index[source] / 2.0]'], {'axis': '(0)'}), '([self.source_to_index[source] / 2.0], axis=0)\n', (1134, 1180), True, 'import numpy as np\n'), ((1222, 1259), 'numpy.expand_dims', 'np.expand_dims', (['latent_sample'], {'axis': '(0)'}), '(latent_sample, axis=0)\n', (1236, 1259), True, 'import numpy as np\n')] |
import time
import torch
import onnx
import os
import numpy as np
from mmcv.tensorrt import (TRTWrapper, onnx2trt, save_trt_engine,
is_tensorrt_plugin_loaded)
assert is_tensorrt_plugin_loaded(), 'Requires to complie TensorRT plugins in mmcv'
def gen_trt(onnx_file='sample.onnx', trt_file='sample.trt'):
onnx_model = onnx.load(onnx_file)
## Model input
inputs = torch.rand(1, 3, 608, 1088).cuda()
## Model input shape info
opt_shape_dict = {
'input.1': [list(inputs.shape),
list(inputs.shape),
list(inputs.shape)]
}
## Create TensorRT engine
max_workspace_size = 1 << 30
trt_engine = onnx2trt(
onnx_model,
opt_shape_dict,
max_workspace_size=max_workspace_size)
## Save TensorRT engine
save_trt_engine(trt_engine, trt_file)
def run_inference(trt_file, num=1):
inputs = torch.rand(1, 3, 608, 1088).cuda()
## Run inference with TensorRT
# trt_model = TRTWrapper(trt_file, ['input.1'], ['922', '925', '928', '931'])
trt_model = TRTWrapper(trt_file, ['input.1'], ['hm', 'reg', 'wh', 'id_feature'])
sum = []
for i in range(num):
torch.cuda.synchronize()
t1 = time.time()
trt_outputs = trt_model({'input.1': inputs})
torch.cuda.synchronize()
# outputs = [trt_outputs['922'], trt_outputs['925'], trt_outputs['928'], trt_outputs['931']]
outputs = [trt_outputs['hm'], trt_outputs['reg'], trt_outputs['wh'], trt_outputs['id_feature']]
time_cost = time.time() - t1
sum.append(time_cost)
print(f"{i} th inference cost: {time_cost}")
print(f"average time: {np.mean(sum)}")
def main():
# onnx_name = 'fairmot_dla34_mmcv.onnx'
# onnx_name = 'fairmot_dla34_mmcv_opt.onnx'
# onnx_name = 'fairmot_dla34_whole_mmcv.onnx'
onnx_name = 'fairmot_dla34_whole_mmcv_opt13.onnx'
save_name = onnx_name.replace('.onnx', '.trt')
# onnx_name = 'fairmot_dla34_whole_mmcv.onnx'
# save_name = 'fairmot_dla34_whole_mmcv.trt'
# onnx_name = 'yolo_lite_mmcv.onnx'
# save_name = 'yolo_lite_mmcv.trt'
# onnx_file = os.path.join('/home/zzzj/Projects/models/onnx_engine/', onnx_name)
# trt_file = os.path.join('/home/zzzj/Projects/models/onnx_engine/', save_name)
# gen_trt(onnx_file, trt_file)
trt_file = os.path.join('/home/zzzj/Projects/models', 'test.engine')
run_inference(trt_file, 100)
if __name__ == '__main__':
main() | [
"numpy.mean",
"mmcv.tensorrt.TRTWrapper",
"mmcv.tensorrt.save_trt_engine",
"mmcv.tensorrt.is_tensorrt_plugin_loaded",
"os.path.join",
"torch.cuda.synchronize",
"mmcv.tensorrt.onnx2trt",
"onnx.load",
"time.time",
"torch.rand"
] | [((204, 231), 'mmcv.tensorrt.is_tensorrt_plugin_loaded', 'is_tensorrt_plugin_loaded', ([], {}), '()\n', (229, 231), False, 'from mmcv.tensorrt import TRTWrapper, onnx2trt, save_trt_engine, is_tensorrt_plugin_loaded\n'), ((359, 379), 'onnx.load', 'onnx.load', (['onnx_file'], {}), '(onnx_file)\n', (368, 379), False, 'import onnx\n'), ((708, 783), 'mmcv.tensorrt.onnx2trt', 'onnx2trt', (['onnx_model', 'opt_shape_dict'], {'max_workspace_size': 'max_workspace_size'}), '(onnx_model, opt_shape_dict, max_workspace_size=max_workspace_size)\n', (716, 783), False, 'from mmcv.tensorrt import TRTWrapper, onnx2trt, save_trt_engine, is_tensorrt_plugin_loaded\n'), ((842, 879), 'mmcv.tensorrt.save_trt_engine', 'save_trt_engine', (['trt_engine', 'trt_file'], {}), '(trt_engine, trt_file)\n', (857, 879), False, 'from mmcv.tensorrt import TRTWrapper, onnx2trt, save_trt_engine, is_tensorrt_plugin_loaded\n'), ((1098, 1166), 'mmcv.tensorrt.TRTWrapper', 'TRTWrapper', (['trt_file', "['input.1']", "['hm', 'reg', 'wh', 'id_feature']"], {}), "(trt_file, ['input.1'], ['hm', 'reg', 'wh', 'id_feature'])\n", (1108, 1166), False, 'from mmcv.tensorrt import TRTWrapper, onnx2trt, save_trt_engine, is_tensorrt_plugin_loaded\n'), ((2380, 2437), 'os.path.join', 'os.path.join', (['"""/home/zzzj/Projects/models"""', '"""test.engine"""'], {}), "('/home/zzzj/Projects/models', 'test.engine')\n", (2392, 2437), False, 'import os\n'), ((1214, 1238), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1236, 1238), False, 'import torch\n'), ((1252, 1263), 'time.time', 'time.time', ([], {}), '()\n', (1261, 1263), False, 'import time\n'), ((1325, 1349), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1347, 1349), False, 'import torch\n'), ((413, 440), 'torch.rand', 'torch.rand', (['(1)', '(3)', '(608)', '(1088)'], {}), '(1, 3, 608, 1088)\n', (423, 440), False, 'import torch\n'), ((930, 957), 'torch.rand', 'torch.rand', (['(1)', '(3)', '(608)', '(1088)'], {}), '(1, 3, 608, 
1088)\n', (940, 957), False, 'import torch\n'), ((1575, 1586), 'time.time', 'time.time', ([], {}), '()\n', (1584, 1586), False, 'import time\n'), ((1702, 1714), 'numpy.mean', 'np.mean', (['sum'], {}), '(sum)\n', (1709, 1714), True, 'import numpy as np\n')] |
import copy
import matplotlib.pyplot as plt
import numpy
import pdb
import accelerated_functions as af
import constants as c
from Boundaries.inner_1D_rectangular import Inner_1D_Rectangular
from solver import location_indexes_inv
from Species.species import Species
from timing import Timing
#Inner_1D_HET (Inherits from Inner_1D_Rectangular):
#
#Definition = One-dimensional boundary that represents a HET in a 2D_rectangular mesh
#Attributes:
# +type (string) = "Inner - 1D_HET"
# +xmin (double) = Left limit of the boundary.
# +xmax (double) = Right limit of the boundary.
# +ymin (double) = Bottom limit of the boundary.
# +ymax (double) = Top limit of the boundary.
# +rmin (double) = Inner radius of the thruster exit
# +rmax (double) = Outer radius of the thruster exit
# +exit_area (double) = Area at the thruster exit
# +exi_nodes ([ind]) = Nodes where the information of Species is stored.
# +exit_pot (double) = Electric potential at the exit nodes
# +exit_pot_nodes ([ind]) = Nodes where the potential of the thruster exit is applied
# +Boundary attributes
#Methods:
# +Boundary methods.
class Inner_1D_HET(Inner_1D_Rectangular):
type = "Inner - 1D_HET"
def __init__(self, x_min, x_max , y_min, y_max, n_material):
super().__init__(x_min, x_max , y_min, y_max, n_material)
self.rmin = c.R_MIN
self.rmax = c.R_MAX
self.exit_area = c.EXIT_AREA
self.exit_nodes = None
self.exit_pot = c.EXIT_POT
self.exit_pot_nodes = None
# NOTE: This way of treating the boundary does not take into account the particles that cross the plane defined by the boundary that do not properly cross the boundary.
# Example: if the boundary is a right boundary, the cases where pos_x > xmax, but also ymax > pos_y or ymin < pos_y.
def checkPositionInBoundary(self, pos, surface = False, prec = 1e-3):
return numpy.ones_like(pos[:,0], dtype = numpy.bool_)
# +applyElectricBoundary(Electric_Field) = Applies the boundary condition to the electric field passed as argument.
# In this case, some nodes are considered Thruster exit and have a particular potential, the others lie in the
# metallic section of the thruster and are kept at potential 0.
def applyElectricBoundary(self, e_field):
values = numpy.where(numpy.isin(self.location, self.exit_pot_nodes), self.exit_pot, 0.0)
e_field.dirichlet(e_field.potential[self.location]+values, self, e_field.pic.mesh.nx, e_field.pic.mesh.ny, e_field.pic.mesh.dx, e_field.pic.mesh.dy)
# +createDistributionAtBorder(Motion_Solver part_solver, Species species, [double] delta_n): (([double,double] pos, [int] border), [int] repeats) =
# The function creates particle positions of 'species' along the region between rmin and rmax, under a uniform distribution with a surface density 'delta_n', where
# delta_n indicates the density per 'location' node [particle/m^2].
# Return: 'pos' is the numpy array indicating the positions of the new particles, 'border' indicates in which border they are created, and
# 'repeats' indicates for each position, how many particles are expected to be created.
# The tuple (pos, border) is reffered as flux in the program.
@Timing
def createDistributionHET(self, part_solver, species, delta_n, drift_vel, ind_offset, prec = 1e-5):
add_rand = numpy.random.rand(len(self.exit_nodes))
mpf_new = delta_n*(self.exit_area/2*drift_vel[:,1]*species.dt)
mpf_new = mpf_new/species.spwt+species.mesh_values.residuals[ind_offset+self.exit_nodes]+add_rand
mp_new = mpf_new.astype(int)
species.mesh_values.residuals[ind_offset+self.exit_nodes] = mpf_new-mp_new
#Bottom
if self.directions[0] == 0:
center = (self.xmax+self.xmin)/2
pos_1 = numpy.asarray([[center-self.rmax, self.ymin],[center+self.rmin, self.ymin]])
pos_1 = numpy.repeat(pos_1, mp_new, axis = 0)
random = numpy.random.rand(numpy.shape(pos_1)[0])
shifts = random*(self.rmax-self.rmin)
pos_1[:,0] += shifts
hit_1 = numpy.zeros_like(pos_1[:,1], dtype = numpy.uint8)[:,None]
#Left
elif self.directions[0] == 3:
center = (self.ymax+self.ymin)/2
pos_1 = numpy.asarray([[self.xmin, center-self.rmax],[self.xmin, center+self.rmin]])
pos_1 = numpy.repeat(pos_1, mp_new, axis = 0)
random = numpy.random.rand(numpy.shape(pos_1)[0])
shifts = random*(self.rmax-self.rmin)
pos_1[:,1] += shifts
hit_1 = 3*numpy.ones_like(pos_1[:,1], dtype = numpy.uint8)[:,None]
#Right
elif self.directions[0] == 1:
center = (self.ymax+self.ymin)/2
pos_1 = numpy.asarray([[self.xmax, center-self.rmax], [self.xmax, center+self.rmin]])
pos_1 = numpy.repeat(pos_1, mp_new, axis = 0)
random = numpy.random.rand(numpy.shape(pos_1)[0])
shifts = random*(self.rmax-self.rmin)
pos_1[:,1] += shifts
hit_1 = numpy.ones_like(pos_1[:,1], dtype = numpy.uint8)[:,None]
#Top
else:
center = (self.xmax+self.xmin)/2
pos_1 = numpy.asarray([[center-self.rmax, self.ymax],[center+self.rmin, self.ymax]])
pos_1 = numpy.repeat(pos_1, mp_new, axis = 0)
random = numpy.random.rand(numpy.shape(pos_1)[0])
shifts = random*(self.rmax-self.rmin)
pos_1[:,0] += shifts
hit_1 = 2*numpy.ones_like(pos_1[:,1], dtype = numpy.uint8)[:,None]
repeats = numpy.ones(numpy.shape(hit_1)[0], dtype = numpy.uint8)
return (numpy.append(numpy.append(pos_1, hit_1, axis = 1), species.spwt*numpy.ones_like(hit_1), axis = 1),), repeats
| [
"numpy.ones_like",
"numpy.repeat",
"numpy.asarray",
"numpy.isin",
"numpy.append",
"numpy.shape",
"numpy.zeros_like"
] | [((1933, 1978), 'numpy.ones_like', 'numpy.ones_like', (['pos[:, 0]'], {'dtype': 'numpy.bool_'}), '(pos[:, 0], dtype=numpy.bool_)\n', (1948, 1978), False, 'import numpy\n'), ((2373, 2419), 'numpy.isin', 'numpy.isin', (['self.location', 'self.exit_pot_nodes'], {}), '(self.location, self.exit_pot_nodes)\n', (2383, 2419), False, 'import numpy\n'), ((3922, 4008), 'numpy.asarray', 'numpy.asarray', (['[[center - self.rmax, self.ymin], [center + self.rmin, self.ymin]]'], {}), '([[center - self.rmax, self.ymin], [center + self.rmin, self.\n ymin]])\n', (3935, 4008), False, 'import numpy\n'), ((4019, 4054), 'numpy.repeat', 'numpy.repeat', (['pos_1', 'mp_new'], {'axis': '(0)'}), '(pos_1, mp_new, axis=0)\n', (4031, 4054), False, 'import numpy\n'), ((4222, 4270), 'numpy.zeros_like', 'numpy.zeros_like', (['pos_1[:, 1]'], {'dtype': 'numpy.uint8'}), '(pos_1[:, 1], dtype=numpy.uint8)\n', (4238, 4270), False, 'import numpy\n'), ((4397, 4483), 'numpy.asarray', 'numpy.asarray', (['[[self.xmin, center - self.rmax], [self.xmin, center + self.rmin]]'], {}), '([[self.xmin, center - self.rmax], [self.xmin, center + self.\n rmin]])\n', (4410, 4483), False, 'import numpy\n'), ((4494, 4529), 'numpy.repeat', 'numpy.repeat', (['pos_1', 'mp_new'], {'axis': '(0)'}), '(pos_1, mp_new, axis=0)\n', (4506, 4529), False, 'import numpy\n'), ((5713, 5731), 'numpy.shape', 'numpy.shape', (['hit_1'], {}), '(hit_1)\n', (5724, 5731), False, 'import numpy\n'), ((4096, 4114), 'numpy.shape', 'numpy.shape', (['pos_1'], {}), '(pos_1)\n', (4107, 4114), False, 'import numpy\n'), ((4874, 4960), 'numpy.asarray', 'numpy.asarray', (['[[self.xmax, center - self.rmax], [self.xmax, center + self.rmin]]'], {}), '([[self.xmax, center - self.rmax], [self.xmax, center + self.\n rmin]])\n', (4887, 4960), False, 'import numpy\n'), ((4972, 5007), 'numpy.repeat', 'numpy.repeat', (['pos_1', 'mp_new'], {'axis': '(0)'}), '(pos_1, mp_new, axis=0)\n', (4984, 5007), False, 'import numpy\n'), ((5324, 5410), 'numpy.asarray', 
'numpy.asarray', (['[[center - self.rmax, self.ymax], [center + self.rmin, self.ymax]]'], {}), '([[center - self.rmax, self.ymax], [center + self.rmin, self.\n ymax]])\n', (5337, 5410), False, 'import numpy\n'), ((5421, 5456), 'numpy.repeat', 'numpy.repeat', (['pos_1', 'mp_new'], {'axis': '(0)'}), '(pos_1, mp_new, axis=0)\n', (5433, 5456), False, 'import numpy\n'), ((5786, 5820), 'numpy.append', 'numpy.append', (['pos_1', 'hit_1'], {'axis': '(1)'}), '(pos_1, hit_1, axis=1)\n', (5798, 5820), False, 'import numpy\n'), ((4571, 4589), 'numpy.shape', 'numpy.shape', (['pos_1'], {}), '(pos_1)\n', (4582, 4589), False, 'import numpy\n'), ((4699, 4746), 'numpy.ones_like', 'numpy.ones_like', (['pos_1[:, 1]'], {'dtype': 'numpy.uint8'}), '(pos_1[:, 1], dtype=numpy.uint8)\n', (4714, 4746), False, 'import numpy\n'), ((5175, 5222), 'numpy.ones_like', 'numpy.ones_like', (['pos_1[:, 1]'], {'dtype': 'numpy.uint8'}), '(pos_1[:, 1], dtype=numpy.uint8)\n', (5190, 5222), False, 'import numpy\n'), ((5837, 5859), 'numpy.ones_like', 'numpy.ones_like', (['hit_1'], {}), '(hit_1)\n', (5852, 5859), False, 'import numpy\n'), ((5049, 5067), 'numpy.shape', 'numpy.shape', (['pos_1'], {}), '(pos_1)\n', (5060, 5067), False, 'import numpy\n'), ((5498, 5516), 'numpy.shape', 'numpy.shape', (['pos_1'], {}), '(pos_1)\n', (5509, 5516), False, 'import numpy\n'), ((5626, 5673), 'numpy.ones_like', 'numpy.ones_like', (['pos_1[:, 1]'], {'dtype': 'numpy.uint8'}), '(pos_1[:, 1], dtype=numpy.uint8)\n', (5641, 5673), False, 'import numpy\n')] |
# Copyright 2019 The Dreamer Authors. Copyright 2020 Plan2Explore Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
def count_dataset(directory):
  """Build a tf op that counts the .npz episode files under `directory`.

  Raises ValueError eagerly if the directory does not exist; the actual
  count is computed lazily inside the returned py_func op.
  """
  directory = os.path.expanduser(directory)
  if not tf.gfile.Exists(directory):
    raise ValueError(
        "Data set directory '{}' does not exist.".format(directory))
  pattern = os.path.join(directory, '*.npz')
  def func():
    # Runs at graph-execution time, so the count reflects the directory
    # contents when the op executes, not when it was built.
    return np.array(len(tf.gfile.Glob(pattern)), dtype=np.int32)
  return tf.py_func(func, [], tf.int32)
| [
"tensorflow.gfile.Exists",
"os.path.join",
"tensorflow.gfile.Glob",
"numpy.array",
"tensorflow.py_func",
"os.path.expanduser"
] | [((854, 883), 'os.path.expanduser', 'os.path.expanduser', (['directory'], {}), '(directory)\n', (872, 883), False, 'import os\n'), ((1037, 1069), 'os.path.join', 'os.path.join', (['directory', '"""*.npz"""'], {}), "(directory, '*.npz')\n", (1049, 1069), False, 'import os\n'), ((1232, 1262), 'tensorflow.py_func', 'tf.py_func', (['func', '[]', 'tf.int32'], {}), '(func, [], tf.int32)\n', (1242, 1262), True, 'import tensorflow as tf\n'), ((893, 919), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['directory'], {}), '(directory)\n', (908, 919), True, 'import tensorflow as tf\n'), ((1100, 1122), 'tensorflow.gfile.Glob', 'tf.gfile.Glob', (['pattern'], {}), '(pattern)\n', (1113, 1122), True, 'import tensorflow as tf\n'), ((1168, 1202), 'numpy.array', 'np.array', (['episodes'], {'dtype': 'np.int32'}), '(episodes, dtype=np.int32)\n', (1176, 1202), True, 'import numpy as np\n')] |
import numpy as np
import numpy.random as npr
from .base import Environment, Feedback, Spec
class ContextualEnv(Environment):
    """Contextual bandit environment: each round draws a Gaussian context,
    and rewards are a per-arm function of that context plus Gaussian noise."""

    def __init__(self, arms, sd):
        super().__init__()
        self.arms = arms          # (k, d) array of arm feature vectors
        self.sd = sd              # standard deviation of the reward noise
        self.max_rew = None
        self.mean_rews = None

    @property
    def k(self):
        """Number of arms."""
        return self.arms.shape[0]

    @property
    def d(self):
        """Context/feature dimension."""
        return self.arms.shape[1]

    @property
    def regrets(self):
        """Per-arm gap to the best mean reward for the current context."""
        return self.max_rew - self.mean_rews

    def create_spec(self):
        context = npr.randn(self.d)
        # Mean reward = L1 norm of the elementwise product of arm and context.
        # NOTE(review): get_feedback with an explicit spec uses arms.dot(ctx)
        # instead -- the two code paths disagree; confirm which is intended.
        self.mean_rews = np.linalg.norm(np.multiply(self.arms, context[np.newaxis, :]), ord=1, axis=1)
        self.max_rew = np.max(self.mean_rews)
        return ContextualSpec(self.t, context)

    def get_feedback(self, arm, spec=None):
        if spec is None:
            # Use the environment's cached spec and precomputed rewards.
            spec = self.spec
            mean_rews = self.mean_rews
            max_rew = self.max_rew
        else:
            mean_rews = self.arms.dot(spec.ctx)
            max_rew = np.max(mean_rews)
        noise = npr.randn() * self.sd
        reward = mean_rews[arm] + noise
        return ContextualFeedback(spec, arm, reward, noise, max_rew)
class ContextualSpec(Spec):
    """Per-round specification that additionally carries the sampled
    context vector ``ctx``."""

    def __init__(self, t, ctx):
        super().__init__(t)
        self.ctx = ctx  # context vector drawn for this round
class ContextualFeedback(Feedback):
    """Feedback for one round: chosen arm, realized reward, and the extra
    bookkeeping needed to recover the noise-free reward and the regret."""

    def __init__(self, spec, arm, rew, noise, max_rew):
        super().__init__(spec, arm, rew)
        self.max_rew = max_rew
        self.noise = noise

    @property
    def t(self):
        """Round index, delegated to the spec."""
        return self.spec.t

    @property
    def ctx(self):
        """Context vector of the round, delegated to the spec."""
        return self.spec.ctx

    @property
    def mean_rew(self):
        """Noise-free expected reward of the chosen arm."""
        return self.rew - self.noise

    @property
    def regret(self):
        """Gap between the best achievable mean reward and the chosen arm's."""
        return self.max_rew - self.mean_rew

    def __repr__(self):
        return f'CtxFb(arm={self.arm}, reg={self.regret}, noise={self.noise}, mean={self.mean_rew})'
| [
"numpy.multiply",
"numpy.random.randn",
"numpy.max"
] | [((554, 571), 'numpy.random.randn', 'npr.randn', (['self.d'], {}), '(self.d)\n', (563, 571), True, 'import numpy.random as npr\n'), ((819, 841), 'numpy.max', 'np.max', (['self.mean_rews'], {}), '(self.mean_rews)\n', (825, 841), True, 'import numpy as np\n'), ((737, 779), 'numpy.multiply', 'np.multiply', (['self.arms', 'ctx[np.newaxis, :]'], {}), '(self.arms, ctx[np.newaxis, :])\n', (748, 779), True, 'import numpy as np\n'), ((1144, 1161), 'numpy.max', 'np.max', (['mean_rews'], {}), '(mean_rews)\n', (1150, 1161), True, 'import numpy as np\n'), ((1209, 1220), 'numpy.random.randn', 'npr.randn', ([], {}), '()\n', (1218, 1220), True, 'import numpy.random as npr\n')] |
# ------------------------------------------------------------------------------
# Hippocampus segmentation published by Dryad
# (https://datadryad.org/stash/dataset/doi:10.5061/dryad.gc72v)
# ------------------------------------------------------------------------------
import os
import SimpleITK as sitk
import mp.data.datasets.dataset_utils as du
from mp.data.datasets.dataset_segmentation import SegmentationDataset, SegmentationInstance
from mp.paths import storage_data_path
from mp.utils.load_restore import join_path
import re
import nibabel as nib
import numpy as np
import re
class DryadHippocampus(SegmentationDataset):
    r"""Class for the segmentation of the Dryad hippocampus dataset,
    https://datadryad.org/stash/dataset/doi:10.5061/dryad.gc72v.
    """

    def __init__(self, subset=None, hold_out_ixs=None, merge_labels=True):
        """
        Args:
            subset (dict): may override "Modality" ("T1w" or "T2w") and
                "Resolution" ("Standard" or "Hires"); defaults to standard
                resolution T1w. Use a separate instance per combination.
            hold_out_ixs (list): instance indices held out from training.
            merge_labels (bool): if True, collapse the hippocampus subfield
                labels into a single foreground label.
        """
        # Fill in defaults for any keys the caller did not override.
        defaults = {"Modality": "T1w", "Resolution": "Standard"}
        defaults.update(subset or {})
        subset = defaults

        # The release ships T1w in both resolutions but T2w only in high
        # resolution (see the 't1w_standard' label path in _extract_images),
        # so standard-resolution T2w cannot be requested.
        assert not (subset["Resolution"] == "Standard" and subset["Modality"] == "T2w"), \
            "Standard resolution T2w not available for the Dryad Hippocampus dataset"

        if hold_out_ixs is None:
            hold_out_ixs = []

        global_name = 'DryadHippocampus'
        name = du.get_dataset_name(global_name, subset)
        dataset_path = os.path.join(
            storage_data_path,
            global_name,
            "Merged Labels" if merge_labels else "Original",
            "".join([f"{key}[{subset[key]}]" for key in ["Modality", "Resolution"]])
        )
        original_data_path = du.get_original_data_path(global_name)

        # Extract and preprocess the images on first use only.
        if not os.path.isdir(dataset_path):
            _extract_images(original_data_path, dataset_path, merge_labels, subset)

        # Fetch all patient/study names (strip extension and '_gt' suffix).
        study_names = set(file_name.split('.nii')[0].split('_gt')[0]
                          for file_name in os.listdir(dataset_path))

        # Build one instance per study from the paired image/label files.
        instances = []
        for study_name in study_names:
            instances.append(SegmentationInstance(
                x_path=os.path.join(dataset_path, study_name + '.nii.gz'),
                y_path=os.path.join(dataset_path, study_name + '_gt.nii.gz'),
                name=study_name,
                group_id=None
            ))

        if merge_labels:
            label_names = ['background', 'hippocampus']
        else:
            label_names = ['background', 'subiculum', 'CA1-3', 'CA4-DG']

        super().__init__(instances, name=name, label_names=label_names,
                         modality=subset["Modality"] + ' MRI', nr_channels=1, hold_out_ixs=hold_out_ixs)
def _extract_images(source_path, target_path, merge_labels, subset):
    r"""Extracts images, merges mask labels (if specified) and saves the
    modified images.

    For every patient folder and hippocampus side (L/R), the label volume is
    loaded, a padded crop around its bounding box is taken from both image
    and label, and the cropped pair is written to ``target_path``.
    """
    def bbox_3D(img):
        # Tight bounding box of the non-zero voxels: collapse over the other
        # two axes, then take the first/last True index along each axis.
        r = np.any(img, axis=(1, 2))
        c = np.any(img, axis=(0, 2))
        z = np.any(img, axis=(0, 1))
        rmin, rmax = np.where(r)[0][[0, -1]]
        cmin, cmax = np.where(c)[0][[0, -1]]
        zmin, zmax = np.where(z)[0][[0, -1]]
        return rmin, rmax, cmin, cmax, zmin, zmax
    # Create directories
    os.makedirs(os.path.join(target_path))
    # Patient folders s01, s02, ...
    for patient_folder in filter(lambda s: re.match(r"^s[0-9]+.*", s), os.listdir(source_path)):
        # Loading the image
        image_path = os.path.join(source_path, patient_folder,
                                  f"{patient_folder}_{subset['Modality'].lower()}_"
                                  f"{subset['Resolution'].lower()}_defaced_MNI.nii.gz")
        x = sitk.ReadImage(image_path)
        x = sitk.GetArrayFromImage(x)
        # For each MRI, there are 2 segmentation (left and right hippocampus)
        for side in ["L", "R"]:
            # Loading the label
            label_path = os.path.join(source_path, patient_folder,
                                      f"{patient_folder}_hippolabels_"
                                      f"{'hres' if subset['Resolution'] == 'Hires' else 't1w_standard'}"
                                      f"_{side}_MNI.nii.gz")
            y = sitk.ReadImage(label_path)
            y = sitk.GetArrayFromImage(y)
            # We need to recover the study name of the image name to construct the name of the segmentation files
            study_name = f"{patient_folder}_{side}"
            # Average label shape (T1w, standard): (37.0, 36.3, 26.7)
            # Average label shape (T1w, hires): (94.1, 92.1, 68.5)
            # Average label shape (T2w, hires): (94.1, 92.1, 68.5)
            assert x.shape == y.shape
            # Disclaimer: next part is ugly and not many checks are made
            # So we first compute the bounding box
            rmin, rmax, cmin, cmax, zmin, zmax = bbox_3D(y)
            # Compute the start idx for each dim
            # Padding: a quarter of the box extent is added on each side.
            dr = (rmax - rmin) // 4
            dc = (cmax - cmin) // 4
            dz = (zmax - zmin) // 4
            # Reshaping
            # NOTE(review): if the label touches a volume edge, e.g.
            # rmin < dr, the start index goes negative and Python slicing
            # wraps around -- confirm labels are always well inside the
            # volume for this dataset.
            y = y[rmin - dr: rmax + dr,
                cmin - dc: cmax + dc,
                zmin - dz: zmax + dz]
            if merge_labels:
                # Collapse all subfield labels into a single foreground label.
                y[y > 1] = 1
            x_cropped = x[rmin - dr: rmax + dr,
                cmin - dc: cmax + dc,
                zmin - dz: zmax + dz]
            # Save new images so they can be loaded directly
            sitk.WriteImage(sitk.GetImageFromArray(y),
                            join_path([target_path, study_name + "_gt.nii.gz"]))
            sitk.WriteImage(sitk.GetImageFromArray(x_cropped),
                            join_path([target_path, study_name + ".nii.gz"]))
| [
"os.listdir",
"SimpleITK.GetImageFromArray",
"mp.data.datasets.dataset_utils.get_original_data_path",
"numpy.where",
"os.path.join",
"SimpleITK.GetArrayFromImage",
"numpy.any",
"re.match",
"os.path.isdir",
"mp.utils.load_restore.join_path",
"SimpleITK.ReadImage",
"mp.data.datasets.dataset_util... | [((1584, 1624), 'mp.data.datasets.dataset_utils.get_dataset_name', 'du.get_dataset_name', (['global_name', 'subset'], {}), '(global_name, subset)\n', (1603, 1624), True, 'import mp.data.datasets.dataset_utils as du\n'), ((1990, 2028), 'mp.data.datasets.dataset_utils.get_original_data_path', 'du.get_original_data_path', (['global_name'], {}), '(global_name)\n', (2015, 2028), True, 'import mp.data.datasets.dataset_utils as du\n'), ((3283, 3307), 'numpy.any', 'np.any', (['img'], {'axis': '(1, 2)'}), '(img, axis=(1, 2))\n', (3289, 3307), True, 'import numpy as np\n'), ((3320, 3344), 'numpy.any', 'np.any', (['img'], {'axis': '(0, 2)'}), '(img, axis=(0, 2))\n', (3326, 3344), True, 'import numpy as np\n'), ((3357, 3381), 'numpy.any', 'np.any', (['img'], {'axis': '(0, 1)'}), '(img, axis=(0, 1))\n', (3363, 3381), True, 'import numpy as np\n'), ((3611, 3636), 'os.path.join', 'os.path.join', (['target_path'], {}), '(target_path)\n', (3623, 3636), False, 'import os\n'), ((3746, 3769), 'os.listdir', 'os.listdir', (['source_path'], {}), '(source_path)\n', (3756, 3769), False, 'import os\n'), ((4048, 4074), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['image_path'], {}), '(image_path)\n', (4062, 4074), True, 'import SimpleITK as sitk\n'), ((4087, 4112), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['x'], {}), '(x)\n', (4109, 4112), True, 'import SimpleITK as sitk\n'), ((2091, 2118), 'os.path.isdir', 'os.path.isdir', (['dataset_path'], {}), '(dataset_path)\n', (2104, 2118), False, 'import os\n'), ((3718, 3743), 're.match', 're.match', (['"""^s[0-9]+.*"""', 's'], {}), "('^s[0-9]+.*', s)\n", (3726, 3743), False, 'import re\n'), ((4281, 4446), 'os.path.join', 'os.path.join', (['source_path', 'patient_folder', 'f"""{patient_folder}_hippolabels_{\'hres\' if subset[\'Resolution\'] == \'Hires\' else \'t1w_standard\'}_{side}_MNI.nii.gz"""'], {}), '(source_path, patient_folder,\n f"{patient_folder}_hippolabels_{\'hres\' if 
subset[\'Resolution\'] == \'Hires\' else \'t1w_standard\'}_{side}_MNI.nii.gz"\n )\n', (4293, 4446), False, 'import os\n'), ((4577, 4603), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['label_path'], {}), '(label_path)\n', (4591, 4603), True, 'import SimpleITK as sitk\n'), ((4620, 4645), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['y'], {}), '(y)\n', (4642, 4645), True, 'import SimpleITK as sitk\n'), ((3404, 3415), 'numpy.where', 'np.where', (['r'], {}), '(r)\n', (3412, 3415), True, 'import numpy as np\n'), ((3449, 3460), 'numpy.where', 'np.where', (['c'], {}), '(c)\n', (3457, 3460), True, 'import numpy as np\n'), ((3494, 3505), 'numpy.where', 'np.where', (['z'], {}), '(z)\n', (3502, 3505), True, 'import numpy as np\n'), ((5831, 5856), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['y'], {}), '(y)\n', (5853, 5856), True, 'import SimpleITK as sitk\n'), ((5886, 5937), 'mp.utils.load_restore.join_path', 'join_path', (["[target_path, study_name + '_gt.nii.gz']"], {}), "([target_path, study_name + '_gt.nii.gz'])\n", (5895, 5937), False, 'from mp.utils.load_restore import join_path\n'), ((5967, 6000), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['x_cropped'], {}), '(x_cropped)\n', (5989, 6000), True, 'import SimpleITK as sitk\n'), ((6030, 6078), 'mp.utils.load_restore.join_path', 'join_path', (["[target_path, study_name + '.nii.gz']"], {}), "([target_path, study_name + '.nii.gz'])\n", (6039, 6078), False, 'from mp.utils.load_restore import join_path\n'), ((2331, 2355), 'os.listdir', 'os.listdir', (['dataset_path'], {}), '(dataset_path)\n', (2341, 2355), False, 'import os\n'), ((2520, 2570), 'os.path.join', 'os.path.join', (['dataset_path', "(study_name + '.nii.gz')"], {}), "(dataset_path, study_name + '.nii.gz')\n", (2532, 2570), False, 'import os\n'), ((2595, 2648), 'os.path.join', 'os.path.join', (['dataset_path', "(study_name + '_gt.nii.gz')"], {}), "(dataset_path, study_name + '_gt.nii.gz')\n", (2607, 2648), False, 'import os\n')] 
|
'''
Provider for duck dataset from <NAME>
'''
import os
import os.path
import json
import numpy as np
import sys
import pickle
import glob
class Dataset():
    """In-memory dataset of point-cloud sequences stored as .npz clips.

    File names are expected to look like 'a<action>_s<subject>_...npz';
    subjects 1-5 form the training split, the remaining subjects the test
    split. Each sample is `num_frames` consecutive clouds (spaced by
    `skip_frames`), resampled to `num_points` points each.
    """
    def __init__(self, root='/scr1/mengyuan/ICCV-data/MSR_processed',
                 num_points=8192, num_frames=2, skip_frames=1, train=True):
        self.num_points = num_points
        self.num_frames = num_frames
        # sample frames i, i+skip_frames, i+2*skip_frames, ...
        self.skip_frames = skip_frames
        self.train = train
        self.root = root

        def subject_id(fname):
            # 'a3_s04_...' -> 4 (the digits after the 's' in the 2nd token)
            return int(fname.split('_')[1].split('s')[1])

        names = os.listdir(self.root)
        if train:
            names = [f for f in names if subject_id(f) <= 5]
        else:
            names = [f for f in names if subject_id(f) > 5]
        self.datapath = [f.split('.')[0] for f in names]
        self.data = []
        self.label = []
        self.index_map = []
        self.load_data()
        self.shuffle()

    def load_data(self):
        """Read every clip into memory (roughly 5G for the full set)."""
        for clip_idx, name in enumerate(self.datapath):
            archive = np.load(os.path.join(self.root, name + '.npz'))
            clouds = archive['point_clouds']
            self.data.append(clouds)
            # 'a<k>_...' encodes the (1-based) action label.
            self.label.append(int(name.split('_')[0][1:]) - 1)
            last_start = clouds.shape[0] - self.skip_frames * (self.num_frames - 1)
            for start in range(0, last_start, self.skip_frames):
                self.index_map.append((clip_idx, start))

    def shuffle(self):
        """Reset sample order; only the training split is shuffled."""
        self.indices = np.arange(len(self.index_map))
        if self.train:
            np.random.shuffle(self.indices)

    def __getitem__(self, idx):
        clip_idx, start = self.index_map[self.indices[idx]]
        frames = [self.data[clip_idx][start + k * self.skip_frames]
                  for k in range(self.num_frames)]
        for k, cloud in enumerate(frames):
            n_avail = cloud.shape[0]
            if n_avail > self.num_points:
                # Downsample without replacement.
                sel = np.random.choice(n_avail, size=self.num_points, replace=False)
            else:
                # Upsample: tile the whole cloud, then fill the remainder
                # with a random subset.
                full, leftover = divmod(self.num_points, n_avail)
                sel = np.random.choice(n_avail, size=leftover, replace=False)
                sel = np.concatenate(
                    [np.arange(n_avail) for _ in range(full)] + [sel], axis=0)
            frames[k] = cloud[sel, :]
        points = np.array(frames)
        if self.train:
            # Random anisotropic scale augmentation.
            points = points * np.random.uniform(0.9, 1.1, size=3)
        points = points / 300
        return points, self.label[clip_idx], clip_idx

    def __len__(self):
        return len(self.index_map)
if __name__ == '__main__':
    d = Dataset(num_points=8192)
    print(len(d))
    import time
    tic = time.time()
    point_size = 0.2
    for i in range(100):
        # __getitem__ returns (points, label, clip_id); the original assigned
        # the whole tuple to `points` and crashed on `points.shape`.
        points, label, clip_id = d[i]
        print(points.shape)
    print(time.time() - tic)
| [
"os.listdir",
"numpy.random.choice",
"os.path.join",
"numpy.array",
"numpy.random.uniform",
"time.time",
"numpy.arange",
"numpy.random.shuffle"
] | [((2756, 2767), 'time.time', 'time.time', ([], {}), '()\n', (2765, 2767), False, 'import time\n'), ((593, 614), 'os.listdir', 'os.listdir', (['self.root'], {}), '(self.root)\n', (603, 614), False, 'import os\n'), ((2351, 2367), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (2359, 2367), True, 'import numpy as np\n'), ((1624, 1655), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indices'], {}), '(self.indices)\n', (1641, 1655), True, 'import numpy as np\n'), ((2444, 2479), 'numpy.random.uniform', 'np.random.uniform', (['(0.9)', '(1.1)'], {'size': '(3)'}), '(0.9, 1.1, size=3)\n', (2461, 2479), True, 'import numpy as np\n'), ((2874, 2885), 'time.time', 'time.time', ([], {}), '()\n', (2883, 2885), False, 'import time\n'), ((1165, 1203), 'os.path.join', 'os.path.join', (['self.root', "(file + '.npz')"], {}), "(self.root, file + '.npz')\n", (1177, 1203), False, 'import os\n'), ((1933, 1998), 'numpy.random.choice', 'np.random.choice', (['p.shape[0]'], {'size': 'self.num_points', 'replace': '(False)'}), '(p.shape[0], size=self.num_points, replace=False)\n', (1949, 1998), True, 'import numpy as np\n'), ((2135, 2192), 'numpy.random.choice', 'np.random.choice', (['p.shape[0]'], {'size': 'residue', 'replace': '(False)'}), '(p.shape[0], size=residue, replace=False)\n', (2151, 2192), True, 'import numpy as np\n'), ((2233, 2254), 'numpy.arange', 'np.arange', (['p.shape[0]'], {}), '(p.shape[0])\n', (2242, 2254), True, 'import numpy as np\n')] |
#! /usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import traceback
# import sys
import os.path
def get_skelet3d_lib():
    """Load and return the BinaryThinningCxxShared native library via ctypes.

    If the library is not found on the system search path, try to download
    it through the bundled ``libfixer`` helper and look again. On an
    unrecoverable load failure, print diagnostics and terminate the process.
    """
    import ctypes
    import ctypes.util

    # ThinningCxxShared is C++, ThinningShared is C
    libname = "BinaryThinningCxxShared"
    libpath = ctypes.util.find_library(libname)
    if libpath is None:
        from . import libfixer

        libfixer.libfix()
        print("Library download complete")
        libpath = ctypes.util.find_library(libname)
    try:
        hlibc = ctypes.CDLL(libpath)
    except Exception:
        # The traceback carries the details; no need to bind the exception.
        print("libname: ", libname)
        print("libpath: ", libpath)
        print(traceback.format_exc())
        if libpath is not None:
            print("CDLL cannot find library.")
            print("On Linux is the problem with LD_LIBRARY_PATH.")
            print(
                "On Windows could be problem with messing 32-bit and 64-bit DLL libraries."
            )
            print("Please read skelet3d/README.md")
            print("https://github.com/mjirik/skelet3d/blob/master/README.md")
        exit()
    return hlibc
def skelet3d(data):
    """Compute the 3D skeleton of a binary volume.

    data: 3D numpy array, treated as binary (cast to int8 before the call).
    Returns a uint8 array of the same shape containing the skeleton.
    """
    import ctypes
    import ctypes.util

    data = data.astype("int8")
    hlibc = get_skelet3d_lib()
    # unsigned char * thinningCxx (int sizeX, int sizeY, int sizeZ, unsigned char *)
    # function .tostring() give char stream
    thinning = hlibc.thinningCxx
    # thinning = hlibc.thinning
    thinning.restype = ctypes.c_char_p
    sdata = data.tostring()
    # NOTE(review): the returned 'outstr' is ignored and the result is
    # re-read from 'sdata' below, which implies the C routine writes its
    # output back into the input buffer in place. Mutating a Python bytes
    # object through ctypes relies on CPython internals -- confirm intended.
    # Note the axis order: shape[2]/shape[1]/shape[0] are passed as X/Y/Z.
    outstr = thinning(
        data.shape[2], data.shape[1], data.shape[0], ctypes.c_char_p(sdata)
    )
    # outa = np.fromstring(sdata, dtype="uint8")
    outa = np.frombuffer(sdata, dtype="uint8")
    # Copy so the result does not alias the (frozen) byte buffer.
    return outa.reshape(data.shape).copy()
def main():
    """Small demo: skeletonize a box of ones inside a zero volume."""
    volume = np.zeros([8, 9, 10], dtype="int8")
    volume[1:4, 3:7, 1:12] = 1
    print("Example")
    print("Input data")
    print(volume)
    skelet3d(volume)
    print("Output data")


if __name__ == "__main__":
    main()
| [
"ctypes.util.find_library",
"traceback.format_exc",
"numpy.zeros",
"ctypes.CDLL",
"numpy.frombuffer",
"ctypes.c_char_p"
] | [((575, 608), 'ctypes.util.find_library', 'ctypes.util.find_library', (['libname'], {}), '(libname)\n', (599, 608), False, 'import ctypes\n'), ((2108, 2143), 'numpy.frombuffer', 'np.frombuffer', (['sdata'], {'dtype': '"""uint8"""'}), "(sdata, dtype='uint8')\n", (2121, 2143), True, 'import numpy as np\n'), ((2212, 2246), 'numpy.zeros', 'np.zeros', (['[8, 9, 10]'], {'dtype': '"""int8"""'}), "([8, 9, 10], dtype='int8')\n", (2220, 2246), True, 'import numpy as np\n'), ((753, 786), 'ctypes.util.find_library', 'ctypes.util.find_library', (['libname'], {}), '(libname)\n', (777, 786), False, 'import ctypes\n'), ((872, 892), 'ctypes.CDLL', 'ctypes.CDLL', (['libpath'], {}), '(libpath)\n', (883, 892), False, 'import ctypes\n'), ((2018, 2040), 'ctypes.c_char_p', 'ctypes.c_char_p', (['sdata'], {}), '(sdata)\n', (2033, 2040), False, 'import ctypes\n'), ((1006, 1028), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1026, 1028), False, 'import traceback\n')] |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""T5 CBQA postprocessors."""
import numpy as np
import tensorflow.compat.v1 as tf
def natural_questions(output,
                      prefix="answer:",
                      example=None,
                      is_target=False):
  """Get answers from predictions and targets.

  The predictions will contain a single set of one or more answers. The example
  may contain multiple sets of answers from different annotators. We return
  an answer group for each annotation, even if its empty.

  Args:
    output: str, target or prediction in text format.
    prefix: str, prefix expected before each answer.
    example: dict, input example.
    is_target: bool, whether the input was ground truth (True) or
      prediction (False).
  Returns:
    a list of answer tuples.
  """
  if is_target:
    answer_groups = []
    # Split the flat answer values into one group per annotation using the
    # ragged row-start offsets.
    short_answers = np.split(
        example["short_answers/values"],
        example["short_answers/row_starts"][1:])
    yes_no_answers = example["yes_no_answers"]
    if len(short_answers) != len(yes_no_answers):
      raise ValueError(
          "Number of annotations not consistent: %d vs %d" %
          (len(short_answers), len(yes_no_answers)))
    for short_ans_grp, y_n_ans in zip(short_answers, yes_no_answers):
      # Annotators cannot provide both y/n and short answers.
      # Use an explicit length check: `bool(numpy_array)` raises ValueError
      # ("truth value ... is ambiguous") for arrays with more than one
      # element, which previously masked this validation error.
      if y_n_ans > -1 and len(short_ans_grp) > 0:
        raise ValueError(
            "Annotation cannot include both yes/no and short answers.")
      if y_n_ans == 0:
        answer_groups.append(("no",))
      elif y_n_ans == 1:
        answer_groups.append(("yes",))
      else:
        answer_groups.append(
            tuple(tf.compat.as_text(ans) for ans in short_ans_grp)
        )
  else:
    # Predictions: everything after each `prefix` occurrence is one answer.
    answer_groups = [
        tuple(s.strip() for s in output.split(prefix)[1:])
    ]
  return answer_groups
| [
"tensorflow.compat.v1.compat.as_text",
"numpy.split"
] | [((1468, 1555), 'numpy.split', 'np.split', (["example['short_answers/values']", "example['short_answers/row_starts'][1:]"], {}), "(example['short_answers/values'], example[\n 'short_answers/row_starts'][1:])\n", (1476, 1555), True, 'import numpy as np\n'), ((2259, 2281), 'tensorflow.compat.v1.compat.as_text', 'tf.compat.as_text', (['ans'], {}), '(ans)\n', (2276, 2281), True, 'import tensorflow.compat.v1 as tf\n')] |
from sklearn.cluster import KMeans
import numpy as np
from cv2 import cv2
from collections import Counter
from skimage.color import rgb2lab, deltaE_cie76
def RGB2HEX(color):
    """Convert an (R, G, B) triple to a '#rrggbb' hex string."""
    red, green, blue = (int(channel) for channel in color[:3])
    return "#{:02x}{:02x}{:02x}".format(red, green, blue)
def get_image(image):
    """Return the image converted from OpenCV BGR to RGB channel order."""
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
def get_colors(image, number_of_colors):
    """Cluster the image's pixels with KMeans and return the cluster centers.

    Args:
        image: RGB image array.
        number_of_colors: number of KMeans clusters to fit.

    Returns:
        List of RGB cluster-center arrays, in the key order produced by
        ``Counter`` over the pixel-to-cluster assignments.
    """
    # Downsample to a fixed size so the clustering cost is bounded, then
    # flatten to an (n_pixels, 3) array of RGB values.
    modified_image = cv2.resize(image, (600, 400), interpolation=cv2.INTER_AREA)
    modified_image = modified_image.reshape(modified_image.shape[0] * modified_image.shape[1], 3)

    clf = KMeans(n_clusters=number_of_colors)
    labels = clf.fit_predict(modified_image)

    counts = Counter(labels)
    center_colors = clf.cluster_centers_
    # Removed dead code: the intermediate 'ordered_colors' and the hex
    # conversions were computed but never used.
    rgb_colors = [center_colors[i] for i in counts.keys()]

    return rgb_colors
def match_image_by_color(image, color, threshold = 60, number_of_colors = 3):
    """Return True if any dominant image color lies within `threshold`
    (CIE76 delta-E) of the reference `color`."""
    dominant_colors = get_colors(get_image(image), number_of_colors)
    reference_lab = rgb2lab(np.uint8(np.asarray([[color]])))
    for idx in range(number_of_colors):
        candidate_lab = rgb2lab(np.uint8(np.asarray([[dominant_colors[idx]]])))
        if deltaE_cie76(reference_lab, candidate_lab) < threshold:
            return True
    return False
# Reference RGB values for the colors the detector can recognise.
# Dict insertion order is also the order in which `find_color` checks them.
COLORS = {
    'GREEN': [0, 128, 0],
    'BLUE': [0, 0, 128],
    'YELLOW': [255, 255, 0],
    'RED': [255, 0, 0],
    'ORANGE': [255, 69, 0],
    'BLACK': [0, 0, 0],
}
def find_color(img_loc):
    """Match the image against every reference color in COLORS and return
    the first hit as a single-element list (e.g. ['Green']), or [] if the
    image matches none of them."""
    # Evaluate all reference colors first, then report the first hit in
    # declaration order (GREEN, BLUE, YELLOW, RED, ORANGE, BLACK).
    results = [(name, match_image_by_color(img_loc, rgb))
               for name, rgb in COLORS.items()]
    for name, matched in results:
        if matched:
            return [name.capitalize()]
    return []
def detect_color(img):
    """Load `img` with PIL, convert it to OpenCV BGR order, and return a
    space-terminated string of the detected color name(s), printing it too."""
    from PIL import Image
    import numpy
    pil_image = Image.open(img)
    frame = cv2.cvtColor(numpy.array(pil_image), cv2.COLOR_RGB2BGR)
    detected = find_color(frame)
    term = ''.join(name + ' ' for name in detected)
    print(term)
    return term
| [
"sklearn.cluster.KMeans",
"PIL.Image.open",
"skimage.color.deltaE_cie76",
"numpy.asarray",
"collections.Counter",
"numpy.array",
"cv2.cv2.resize",
"cv2.cv2.cvtColor"
] | [((298, 336), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (310, 336), False, 'from cv2 import cv2\n'), ((418, 477), 'cv2.cv2.resize', 'cv2.resize', (['image', '(600, 400)'], {'interpolation': 'cv2.INTER_AREA'}), '(image, (600, 400), interpolation=cv2.INTER_AREA)\n', (428, 477), False, 'from cv2 import cv2\n'), ((586, 621), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'number_of_colors'}), '(n_clusters=number_of_colors)\n', (592, 621), False, 'from sklearn.cluster import KMeans\n'), ((682, 697), 'collections.Counter', 'Counter', (['labels'], {}), '(labels)\n', (689, 697), False, 'from collections import Counter\n'), ((2582, 2597), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (2592, 2597), False, 'from PIL import Image\n'), ((1377, 1417), 'skimage.color.deltaE_cie76', 'deltaE_cie76', (['selected_color', 'curr_color'], {}), '(selected_color, curr_color)\n', (1389, 1417), False, 'from skimage.color import rgb2lab, deltaE_cie76\n'), ((2621, 2643), 'numpy.array', 'numpy.array', (['pil_image'], {}), '(pil_image)\n', (2632, 2643), False, 'import numpy\n'), ((1202, 1223), 'numpy.asarray', 'np.asarray', (['[[color]]'], {}), '([[color]])\n', (1212, 1223), True, 'import numpy as np\n'), ((1328, 1359), 'numpy.asarray', 'np.asarray', (['[[image_colors[i]]]'], {}), '([[image_colors[i]]])\n', (1338, 1359), True, 'import numpy as np\n')] |
import torch
import torch.utils.data as data
import torch.nn as nn
import torch.optim as optim
import os
import numpy as np
import pandas as pd
import glob
import cv2
from tqdm import tqdm, trange
import matplotlib.pyplot as plt
# Side length (in pixels) that every image is resized to before training.
img_size = 256

# raw data directories
# X holds the inputs and Y the paired targets, matched by listing order.
# (Presumably street-network inputs and footprint targets, per the
# STRT2FTPRNT project name -- confirm.)
X_img_path = "D:/AI in Urban Design/DL UD dir/STRT2FTPRNT/Filtered_X"
Y_img_path = "D:/AI in Urban Design/DL UD dir/STRT2FTPRNT/Filtered_Y"
def make_data(X_img_path, Y_img_path, img_size):
X_data = []
Y_data = []
X_img_count = 0
Y_img_count = 0
for i in tqdm(os.listdir(X_img_path), ncols=100, disable=False):
path = os.path.join(X_img_path, i)
X = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
X = cv2.resize(X, (img_size, img_size))
X = np.array(X)
X = X.reshape((1, img_size, img_size))
X = X / 255
X = 1 - X
X_data.append(X)
X_img_count += 1
for i in tqdm(os.listdir(Y_img_path), ncols=100, disable=False):
path = os.path.join(Y_img_path, i)
Y = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
Y = cv2.resize(Y, (img_size, img_size))
Y = np.array(Y)
Y = Y.reshape((1, img_size, img_size))
Y = Y / 255
Y = 1 - Y
Y_data.append(Y)
Y_img_count += 1
print('X Image_count:' + str(X_img_count))
print('Y Image_count:' + str(Y_img_count))
return X_data, Y_data
class segmentationDataSet(data.Dataset):
def __init__(self, X_img_path, Y_img_path, img_size):
self.inputs_path = X_img_path
self.targets_path = Y_img_path
self.img_size = img_size
self.inputs_dtype = torch.float32
self.targets_dtype = torch.float32
self.inputs, self.targets = make_data(self.inputs_path, self.targets_path, self.img_size)
def __len__(self):
return len(self.inputs)
def __getitem__(self, index: int):
# Select the sample
input_ID = self.inputs[index]
target_ID = self.targets[index]
# Load input and target
x, y = input_ID, target_ID
# Typecasting
x, y = torch.from_numpy(x).type(self.inputs_dtype), torch.from_numpy(y).type(self.targets_dtype)
return x, y
batch_S = 4
test_split = 0.1
ts = 0.1 / 0.9
dataset = segmentationDataSet(X_img_path, Y_img_path, img_size)
dataset_size = len(dataset)
test_size = int(test_split * dataset_size)
train_size = dataset_size - test_size
train_dataset, test_dataset = data.random_split(dataset, [train_size, test_size])
trdataset_size = len(train_dataset)
val_size = int(ts * trdataset_size)
training_size = trdataset_size - val_size
training_dataset, val_dataset = data.random_split(train_dataset, [training_size, val_size])
training_dataloader = data.DataLoader(dataset=training_dataset, batch_size = batch_S, shuffle=True)
x, y = next(iter(training_dataloader))
print(f'x = shape: {x.shape}; type: {x.dtype}')
print(f'x = min: {x.min()}; max: {x.max()}')
print(f'y = shape: {y.shape}; type: {y.dtype}')
print(f'y = min: {y.min()}; max: {y.max()}')
val_dataloader = data.DataLoader(dataset=val_dataset, batch_size = batch_S, shuffle=True)
x, y = next(iter(val_dataloader))
print(f'x = shape: {x.shape}; type: {x.dtype}')
print(f'x = min: {x.min()}; max: {x.max()}')
print(f'y = shape: {y.shape}; type: {y.dtype}')
print(f'y = min: {y.min()}; max: {y.max()}')
# Unet architecture
def double_conv(in_c, out_c, seperable=True):
if seperable:
conv = nn.Sequential(
nn.Conv2d(in_c, out_c, kernel_size=1, bias=True),
nn.Conv2d(out_c, out_c, kernel_size=3, padding=1, groups=out_c, bias=True),
nn.BatchNorm2d(out_c),
nn.ReLU(inplace=True),
nn.Conv2d(out_c, out_c, kernel_size=1, bias=True),
nn.Conv2d(out_c, out_c, kernel_size=3, padding=1, groups=out_c, bias=True),
nn.BatchNorm2d(out_c),
nn.ReLU(inplace=True)
)
return conv
else:
conv = nn.Sequential(
nn.Conv2d(in_c, out_c, kernel_size=3, padding=1, bias=True),
nn.BatchNorm2d(out_c),
nn.ReLU(inplace=True),
nn.Conv2d(out_c, out_c, kernel_size=3, padding=1, bias=True),
nn.BatchNorm2d(out_c),
nn.ReLU(inplace=True)
)
return conv
def crop_img(tensor, target_tensor):
target_size = target_tensor.size()[2]
tensor_size = tensor.size()[2]
delta = tensor_size - target_size
delta = delta // 2
return tensor[:, :, delta:tensor_size - delta, delta:tensor_size - delta]
class Unet(nn.Module):
def __init__(self):
super(Unet, self).__init__()
self.max_pool_2x2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.down_conv1 = double_conv(1, 8, seperable=False)
self.down_conv2 = double_conv(8, 16, seperable=True)
self.down_conv3 = double_conv(16, 32, seperable=True)
self.down_conv4 = double_conv(32, 64, seperable=True)
self.down_conv5 = double_conv(64, 128, seperable=True)
self.down_conv6 = double_conv(128, 256, seperable=True)
self.down_conv7 = double_conv(256, 512, seperable=True)
self.down_conv8 = double_conv(512, 1024, seperable=True)
self.down_conv9 = double_conv(1024, 2048, seperable=True)
self.up_trans1 = nn.ConvTranspose2d(2048, 1024, kernel_size=2, stride=2, bias=False)
self.up_conv1 = double_conv(2048, 1024, seperable=True)
self.up_trans2 = nn.ConvTranspose2d(1024, 512, kernel_size=2, stride=2, bias=False)
self.up_conv2 = double_conv(1024, 512, seperable=True)
self.up_trans3 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2, bias=False)
self.up_conv3 = double_conv(512, 256, seperable=True)
self.up_trans4 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2, bias=False)
self.up_conv4 = double_conv(256, 128, seperable=False)
self.up_trans5 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2, bias=False)
self.up_conv5 = double_conv(128, 64, seperable=False)
self.up_trans6 = nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2, bias=False)
self.up_conv6 = double_conv(64, 32, seperable=False)
self.up_trans7 = nn.ConvTranspose2d(32, 16, kernel_size=2, stride=2, bias=False)
self.up_conv7 = double_conv(32, 16, seperable=False)
self.up_trans8 = nn.ConvTranspose2d(16, 8, kernel_size=2, stride=2, bias=False)
self.up_conv8 = double_conv(16, 8, seperable=False)
self.num_classes = 1
self.out = nn.Conv2d(8, self.num_classes, kernel_size=1)
self.sigmoid = nn.Sigmoid()
def forward(self, image):
# Down
x1 = self.down_conv1(image)
x2 = self.max_pool_2x2(x1)
x3 = self.down_conv2(x2)
x4 = self.max_pool_2x2(x3)
x5 = self.down_conv3(x4)
x6 = self.max_pool_2x2(x5)
x7 = self.down_conv4(x6)
x8 = self.max_pool_2x2(x7)
x9 = self.down_conv5(x8)
x10 = self.max_pool_2x2(x9)
x11 = self.down_conv6(x10)
x12 = self.max_pool_2x2(x11)
x13 = self.down_conv7(x12)
x14 = self.max_pool_2x2(x13)
x15 = self.down_conv8(x14)
x16 = self.max_pool_2x2(x15)
x17 = self.down_conv9(x16)
# Up
x_U = self.up_trans1(x17)
y = crop_img(x15, x_U)
x_U = self.up_conv1(torch.cat([x_U, y], 1))
x_U = self.up_trans2(x_U)
y = crop_img(x13, x_U)
x_U = self.up_conv2(torch.cat([x_U, y], 1))
x_U = self.up_trans3(x_U)
y = crop_img(x11, x_U)
x_U = self.up_conv3(torch.cat([x_U, y], 1))
x_U = self.up_trans4(x_U)
y = crop_img(x9, x_U)
x_U = self.up_conv4(torch.cat([x_U, y], 1))
x_U = self.up_trans5(x_U)
y = crop_img(x7, x_U)
x_U = self.up_conv5(torch.cat([x_U, y], 1))
x_U = self.up_trans6(x_U)
y = crop_img(x5, x_U)
x_U = self.up_conv6(torch.cat([x_U, y], 1))
x_U = self.up_trans7(x_U)
y = crop_img(x3, x_U)
x_U = self.up_conv7(torch.cat([x_U, y], 1))
x_U = self.up_trans8(x_U)
y = crop_img(x1, x_U)
x_U = self.up_conv8(torch.cat([x_U, y], 1))
# U-Net Output
x_U = self.out(x_U)
# Sigmoid layer
x_sig = self.sigmoid(x_U)
return x_sig
# hyperparameters
num_epochs = 80
learning_rate = 0.001
lr_decay = 0.1
# Set device
def get_device():
if torch.cuda.is_available():
return torch.device("cuda")
else:
return torch.device("cpu")
device = get_device()
print(device)
def dice_metric(inputs, target):
intersection = 2.0 * (target * inputs).sum()
union = target.sum() + inputs.sum()
if target.sum() == 0 and inputs.sum() == 0:
return 1.0
return intersection / union
def diceCoeff(pred, gt, smooth=1e-5):
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
intersection = (pred_flat * gt_flat).sum(1)
unionset = pred_flat.sum(1) + gt_flat.sum(1)
loss = (2 * intersection + smooth) / (unionset + smooth)
return loss.sum() / N
def diceCoeffv2(pred, gt, eps=1e-5):
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum(gt_flat * pred_flat, dim=1)
fp = torch.sum(pred_flat, dim=1) - tp
fn = torch.sum(gt_flat, dim=1) - tp
loss = (2 * tp + eps) / (2 * tp + fp + fn + eps)
return loss.sum() / N
class DiceLoss(nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
def forward(self, y_pr, y_gt):
return 1 - diceCoeffv2(y_pr, y_gt)
# model
model = Unet()
model.to(device)
# criterion
criterion = DiceLoss()
# optimizer
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay = 1e-5)
# Scheduler
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10,
threshold=0.0001, threshold_mode='rel', cooldown=0,
min_lr=0, eps=1e-08, verbose=False)
# Training
class Trainer:
def __init__(self,
model: torch.nn.Module,
device: torch.device,
criterion: torch.nn.Module,
optimizer: torch.optim.Optimizer,
training_DataLoader: torch.utils.data.DataLoader,
validation_DataLoader: torch.utils.data.DataLoader,
lr_scheduler: torch.optim.lr_scheduler,
epochs: int,
epoch: int = 0,
batch: int = 0
):
self.model = model
self.criterion = criterion
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
self.training_DataLoader = training_DataLoader
self.validation_DataLoader = validation_DataLoader
self.device = device
self.epochs = epochs
self.epoch = epoch
self.batch = batch
self.training_loss = []
self.validation_loss = []
self.training_acc = []
self.validation_acc = []
self.learning_rate = []
def run_trainer(self):
progressbar = trange(self.epochs, desc='Progress')
for i in progressbar:
"""Epoch counter"""
self.epoch += 1 # epoch counter
"""Training block"""
self._train()
"""Validation block"""
if self.validation_DataLoader is not None:
self._validate()
"""Learning rate scheduler block"""
if self.lr_scheduler is not None:
if self.validation_DataLoader is not None and self.lr_scheduler.__class__.__name__ == 'ReduceLROnPlateau':
self.lr_scheduler.step(self.validation_loss[i]) # learning rate scheduler step with validation loss
else:
self.lr_scheduler.step() # learning rate scheduler step
return self.training_loss, self.validation_loss, self.training_acc, self.validation_acc, self.learning_rate
def _train(self):
self.model.train() # train mode
train_losses = []
train_accuracy = []
batch_iter = tqdm(enumerate(self.training_DataLoader), 'Training', total=len(self.training_DataLoader),
leave=False)
for i, (x, y) in batch_iter:
if i == 0:
self.batch += 1 # batch counter
input, target = x.to(self.device), y.to(self.device) # send to device (GPU or CPU)
self.optimizer.zero_grad() # zerograd the parameters
out = self.model(input) # one forward pass
# Saving training output images
if i%25 == 0:
img1 = torch.unsqueeze(input[0, 0, :, :], 2)
img1 = img1 * 255
img1 = img1.cpu()
np_img1 = img1.detach().numpy()
img2 = torch.unsqueeze(target[0, 0, :, :], 2)
img2 = img2 * 255
img2 = img2.cpu()
np_img2 = img2.detach().numpy()
img3 = torch.unsqueeze(out[0, 0, :, :], 2)
img3 = img3 * 255
img3 = img3.cpu()
np_img3 = img3.detach().numpy()
img = cv2.hconcat([np_img1, np_img2, np_img3])
cv2.imwrite('D:/AI in Urban Design/DL UD dir/STRT2FTPRNT/strt2ftprnt_trainCONCAT/'
+ 'train' + str(self.epoch) + '-' + str(self.batch) + '-' + str(i+1) + '.jpeg', img)
acc = dice_metric(out, target) # calculate accuracy
acc_value = acc.item()
train_accuracy.append(acc_value)
loss = self.criterion(out, target) # calculate loss
loss_value = loss.item()
train_losses.append(loss_value)
loss.backward() # one backward pass
self.optimizer.step() # update the parameters
batch_iter.set_description(f'Training: (loss {loss_value:.4f}) (acc {acc_value:.4f})') # update progressbar
self.training_acc.append(np.mean(train_accuracy))
self.training_loss.append(np.mean(train_losses))
self.learning_rate.append(self.optimizer.param_groups[0]['lr'])
batch_iter.close()
def _validate(self):
self.model.eval() # evaluation mode
val_losses = []
val_accuracy = []
batch_iter = tqdm(enumerate(self.validation_DataLoader), 'Validation', total=len(self.validation_DataLoader),
leave=False)
for i, (x, y) in batch_iter:
input, target = x.to(self.device), y.to(self.device) # send to device (GPU or CPU)
with torch.no_grad():
out = self.model(input)
# Saving validation output images
if i % 25 == 0:
img1 = torch.unsqueeze(input[0, 0, :, :], 2)
img1 = img1 * 255
img1 = img1.cpu()
np_img1 = img1.detach().numpy()
img2 = torch.unsqueeze(target[0, 0, :, :], 2)
img2 = img2 * 255
img2 = img2.cpu()
np_img2 = img2.detach().numpy()
img3 = torch.unsqueeze(out[0, 0, :, :], 2)
img3 = img3 * 255
img3 = img3.cpu()
np_img3 = img3.detach().numpy()
img = cv2.hconcat([np_img1, np_img2, np_img3])
cv2.imwrite('D:/AI in Urban Design/DL UD dir/STRT2FTPRNT/strt2ftprnt_valCONCAT/'
+ 'train' + str(self.epoch) + '-' + str(self.batch) + '-' + str(i + 1) + '.jpeg', img)
acc = dice_metric(out, target) # calculate accuracy
acc_value = acc.item()
val_accuracy.append(acc_value)
loss = self.criterion(out, target)
loss_value = loss.item()
val_losses.append(loss_value)
batch_iter.set_description(f'Validation: (loss {loss_value:.4f}) (acc {acc_value:.4f})')
self.validation_acc.append(np.mean(val_accuracy))
self.validation_loss.append(np.mean(val_losses))
batch_iter.close()
trainer = Trainer(model=model,
device=device,
criterion=criterion,
optimizer=optimizer,
training_DataLoader = training_dataloader,
validation_DataLoader = val_dataloader,
lr_scheduler=scheduler,
epochs=num_epochs,
epoch=0)
# start training
training_losses, validation_losses, training_acc, validation_acc, lr_rates = trainer.run_trainer()
# save trained model
PATH = 'D:/AI in Urban Design/DL UD dir/STRT2FTPRNT/DL model/strt2ftprnt_Unet_' \
+ str(int(validation_acc[-1])) + '.pt'
torch.save(model.state_dict(), PATH)
# Plot loss vs epochs
epoch_list = []
for i in range(len(training_losses)):
epoch_list.append(i + 1)
# Dice loss and Accuracy plot
plt.plot(epoch_list, training_losses, color='r', label="Training Loss")
plt.plot(epoch_list, validation_losses, color='b', label="Validation Loss")
plt.title("Dice Loss")
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Accuracy plot
plt.plot(epoch_list, training_acc, color='r', linestyle='--', label="Training Accuracy")
plt.plot(epoch_list, validation_acc, color='b', linestyle='--', label="Validation Accuracy")
plt.title("Dice Metric")
plt.xlabel('Epochs')
plt.ylabel('Acc')
plt.legend()
plt.show()
# lr plot
plt.plot(epoch_list, lr_rates, color='g', label="Learning rate")
plt.title("Learning rate during training")
plt.xlabel('Epochs')
plt.ylabel('Lr')
plt.legend()
plt.show() | [
"torch.nn.ReLU",
"matplotlib.pyplot.ylabel",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"torch.sum",
"torch.nn.Sigmoid",
"torch.nn.BatchNorm2d",
"os.listdir",
"numpy.mean",
"torch.unsqueeze",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"torch.optim.lr_scheduler... | [((2538, 2589), 'torch.utils.data.random_split', 'data.random_split', (['dataset', '[train_size, test_size]'], {}), '(dataset, [train_size, test_size])\n', (2555, 2589), True, 'import torch.utils.data as data\n'), ((2744, 2803), 'torch.utils.data.random_split', 'data.random_split', (['train_dataset', '[training_size, val_size]'], {}), '(train_dataset, [training_size, val_size])\n', (2761, 2803), True, 'import torch.utils.data as data\n'), ((2829, 2904), 'torch.utils.data.DataLoader', 'data.DataLoader', ([], {'dataset': 'training_dataset', 'batch_size': 'batch_S', 'shuffle': '(True)'}), '(dataset=training_dataset, batch_size=batch_S, shuffle=True)\n', (2844, 2904), True, 'import torch.utils.data as data\n'), ((3157, 3227), 'torch.utils.data.DataLoader', 'data.DataLoader', ([], {'dataset': 'val_dataset', 'batch_size': 'batch_S', 'shuffle': '(True)'}), '(dataset=val_dataset, batch_size=batch_S, shuffle=True)\n', (3172, 3227), True, 'import torch.utils.data as data\n'), ((10081, 10262), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'mode': '"""min"""', 'factor': '(0.1)', 'patience': '(10)', 'threshold': '(0.0001)', 'threshold_mode': '"""rel"""', 'cooldown': '(0)', 'min_lr': '(0)', 'eps': '(1e-08)', 'verbose': '(False)'}), "(optimizer, mode='min', factor=0.1,\n patience=10, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr\n =0, eps=1e-08, verbose=False)\n", (10117, 10262), True, 'import torch.optim as optim\n'), ((17532, 17603), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch_list', 'training_losses'], {'color': '"""r"""', 'label': '"""Training Loss"""'}), "(epoch_list, training_losses, color='r', label='Training Loss')\n", (17540, 17603), True, 'import matplotlib.pyplot as plt\n'), ((17605, 17680), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch_list', 'validation_losses'], {'color': '"""b"""', 'label': '"""Validation Loss"""'}), "(epoch_list, validation_losses, color='b', 
label='Validation Loss')\n", (17613, 17680), True, 'import matplotlib.pyplot as plt\n'), ((17682, 17704), 'matplotlib.pyplot.title', 'plt.title', (['"""Dice Loss"""'], {}), "('Dice Loss')\n", (17691, 17704), True, 'import matplotlib.pyplot as plt\n'), ((17706, 17726), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (17716, 17726), True, 'import matplotlib.pyplot as plt\n'), ((17728, 17746), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (17738, 17746), True, 'import matplotlib.pyplot as plt\n'), ((17748, 17760), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (17758, 17760), True, 'import matplotlib.pyplot as plt\n'), ((17762, 17772), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17770, 17772), True, 'import matplotlib.pyplot as plt\n'), ((17793, 17886), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch_list', 'training_acc'], {'color': '"""r"""', 'linestyle': '"""--"""', 'label': '"""Training Accuracy"""'}), "(epoch_list, training_acc, color='r', linestyle='--', label=\n 'Training Accuracy')\n", (17801, 17886), True, 'import matplotlib.pyplot as plt\n'), ((17883, 17980), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch_list', 'validation_acc'], {'color': '"""b"""', 'linestyle': '"""--"""', 'label': '"""Validation Accuracy"""'}), "(epoch_list, validation_acc, color='b', linestyle='--', label=\n 'Validation Accuracy')\n", (17891, 17980), True, 'import matplotlib.pyplot as plt\n'), ((17977, 18001), 'matplotlib.pyplot.title', 'plt.title', (['"""Dice Metric"""'], {}), "('Dice Metric')\n", (17986, 18001), True, 'import matplotlib.pyplot as plt\n'), ((18003, 18023), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (18013, 18023), True, 'import matplotlib.pyplot as plt\n'), ((18025, 18042), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Acc"""'], {}), "('Acc')\n", (18035, 18042), True, 'import matplotlib.pyplot as plt\n'), ((18044, 18056), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18054, 18056), True, 'import matplotlib.pyplot as plt\n'), ((18058, 18068), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18066, 18068), True, 'import matplotlib.pyplot as plt\n'), ((18083, 18147), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch_list', 'lr_rates'], {'color': '"""g"""', 'label': '"""Learning rate"""'}), "(epoch_list, lr_rates, color='g', label='Learning rate')\n", (18091, 18147), True, 'import matplotlib.pyplot as plt\n'), ((18149, 18191), 'matplotlib.pyplot.title', 'plt.title', (['"""Learning rate during training"""'], {}), "('Learning rate during training')\n", (18158, 18191), True, 'import matplotlib.pyplot as plt\n'), ((18193, 18213), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (18203, 18213), True, 'import matplotlib.pyplot as plt\n'), ((18215, 18231), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Lr"""'], {}), "('Lr')\n", (18225, 18231), True, 'import matplotlib.pyplot as plt\n'), ((18233, 18245), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18243, 18245), True, 'import matplotlib.pyplot as plt\n'), ((18247, 18257), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18255, 18257), True, 'import matplotlib.pyplot as plt\n'), ((8674, 8699), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8697, 8699), False, 'import torch\n'), ((9499, 9536), 'torch.sum', 'torch.sum', (['(gt_flat * pred_flat)'], {'dim': '(1)'}), '(gt_flat * pred_flat, dim=1)\n', (9508, 9536), False, 'import torch\n'), ((574, 596), 'os.listdir', 'os.listdir', (['X_img_path'], {}), '(X_img_path)\n', (584, 596), False, 'import os\n'), ((641, 668), 'os.path.join', 'os.path.join', (['X_img_path', 'i'], {}), '(X_img_path, i)\n', (653, 668), False, 'import os\n'), ((682, 720), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_GRAYSCALE'], {}), '(path, cv2.IMREAD_GRAYSCALE)\n', (692, 720), False, 'import cv2\n'), ((734, 769), 
'cv2.resize', 'cv2.resize', (['X', '(img_size, img_size)'], {}), '(X, (img_size, img_size))\n', (744, 769), False, 'import cv2\n'), ((783, 794), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (791, 794), True, 'import numpy as np\n'), ((956, 978), 'os.listdir', 'os.listdir', (['Y_img_path'], {}), '(Y_img_path)\n', (966, 978), False, 'import os\n'), ((1023, 1050), 'os.path.join', 'os.path.join', (['Y_img_path', 'i'], {}), '(Y_img_path, i)\n', (1035, 1050), False, 'import os\n'), ((1064, 1102), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_GRAYSCALE'], {}), '(path, cv2.IMREAD_GRAYSCALE)\n', (1074, 1102), False, 'import cv2\n'), ((1116, 1151), 'cv2.resize', 'cv2.resize', (['Y', '(img_size, img_size)'], {}), '(Y, (img_size, img_size))\n', (1126, 1151), False, 'import cv2\n'), ((1165, 1176), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (1173, 1176), True, 'import numpy as np\n'), ((4796, 4833), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (4808, 4833), True, 'import torch.nn as nn\n'), ((5439, 5506), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(2048)', '(1024)'], {'kernel_size': '(2)', 'stride': '(2)', 'bias': '(False)'}), '(2048, 1024, kernel_size=2, stride=2, bias=False)\n', (5457, 5506), True, 'import torch.nn as nn\n'), ((5598, 5664), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(1024)', '(512)'], {'kernel_size': '(2)', 'stride': '(2)', 'bias': '(False)'}), '(1024, 512, kernel_size=2, stride=2, bias=False)\n', (5616, 5664), True, 'import torch.nn as nn\n'), ((5755, 5820), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(512)', '(256)'], {'kernel_size': '(2)', 'stride': '(2)', 'bias': '(False)'}), '(512, 256, kernel_size=2, stride=2, bias=False)\n', (5773, 5820), True, 'import torch.nn as nn\n'), ((5910, 5975), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(256)', '(128)'], {'kernel_size': '(2)', 'stride': '(2)', 'bias': '(False)'}), '(256, 128, 
kernel_size=2, stride=2, bias=False)\n', (5928, 5975), True, 'import torch.nn as nn\n'), ((6066, 6130), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(128)', '(64)'], {'kernel_size': '(2)', 'stride': '(2)', 'bias': '(False)'}), '(128, 64, kernel_size=2, stride=2, bias=False)\n', (6084, 6130), True, 'import torch.nn as nn\n'), ((6220, 6283), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(64)', '(32)'], {'kernel_size': '(2)', 'stride': '(2)', 'bias': '(False)'}), '(64, 32, kernel_size=2, stride=2, bias=False)\n', (6238, 6283), True, 'import torch.nn as nn\n'), ((6372, 6435), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(32)', '(16)'], {'kernel_size': '(2)', 'stride': '(2)', 'bias': '(False)'}), '(32, 16, kernel_size=2, stride=2, bias=False)\n', (6390, 6435), True, 'import torch.nn as nn\n'), ((6524, 6586), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(16)', '(8)'], {'kernel_size': '(2)', 'stride': '(2)', 'bias': '(False)'}), '(16, 8, kernel_size=2, stride=2, bias=False)\n', (6542, 6586), True, 'import torch.nn as nn\n'), ((6700, 6745), 'torch.nn.Conv2d', 'nn.Conv2d', (['(8)', 'self.num_classes'], {'kernel_size': '(1)'}), '(8, self.num_classes, kernel_size=1)\n', (6709, 6745), True, 'import torch.nn as nn\n'), ((6770, 6782), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (6780, 6782), True, 'import torch.nn as nn\n'), ((8717, 8737), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (8729, 8737), False, 'import torch\n'), ((8765, 8784), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (8777, 8784), False, 'import torch\n'), ((9547, 9574), 'torch.sum', 'torch.sum', (['pred_flat'], {'dim': '(1)'}), '(pred_flat, dim=1)\n', (9556, 9574), False, 'import torch\n'), ((9590, 9615), 'torch.sum', 'torch.sum', (['gt_flat'], {'dim': '(1)'}), '(gt_flat, dim=1)\n', (9599, 9615), False, 'import torch\n'), ((11503, 11539), 'tqdm.trange', 'trange', (['self.epochs'], {'desc': '"""Progress"""'}), "(self.epochs, 
desc='Progress')\n", (11509, 11539), False, 'from tqdm import tqdm, trange\n'), ((3588, 3636), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_c', 'out_c'], {'kernel_size': '(1)', 'bias': '(True)'}), '(in_c, out_c, kernel_size=1, bias=True)\n', (3597, 3636), True, 'import torch.nn as nn\n'), ((3651, 3725), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_c', 'out_c'], {'kernel_size': '(3)', 'padding': '(1)', 'groups': 'out_c', 'bias': '(True)'}), '(out_c, out_c, kernel_size=3, padding=1, groups=out_c, bias=True)\n', (3660, 3725), True, 'import torch.nn as nn\n'), ((3740, 3761), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_c'], {}), '(out_c)\n', (3754, 3761), True, 'import torch.nn as nn\n'), ((3776, 3797), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3783, 3797), True, 'import torch.nn as nn\n'), ((3812, 3861), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_c', 'out_c'], {'kernel_size': '(1)', 'bias': '(True)'}), '(out_c, out_c, kernel_size=1, bias=True)\n', (3821, 3861), True, 'import torch.nn as nn\n'), ((3876, 3950), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_c', 'out_c'], {'kernel_size': '(3)', 'padding': '(1)', 'groups': 'out_c', 'bias': '(True)'}), '(out_c, out_c, kernel_size=3, padding=1, groups=out_c, bias=True)\n', (3885, 3950), True, 'import torch.nn as nn\n'), ((3965, 3986), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_c'], {}), '(out_c)\n', (3979, 3986), True, 'import torch.nn as nn\n'), ((4001, 4022), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4008, 4022), True, 'import torch.nn as nn\n'), ((4110, 4169), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_c', 'out_c'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(in_c, out_c, kernel_size=3, padding=1, bias=True)\n', (4119, 4169), True, 'import torch.nn as nn\n'), ((4184, 4205), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_c'], {}), '(out_c)\n', (4198, 4205), True, 'import torch.nn as nn\n'), ((4220, 4241), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': 
'(True)'}), '(inplace=True)\n', (4227, 4241), True, 'import torch.nn as nn\n'), ((4256, 4316), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_c', 'out_c'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(out_c, out_c, kernel_size=3, padding=1, bias=True)\n', (4265, 4316), True, 'import torch.nn as nn\n'), ((4331, 4352), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_c'], {}), '(out_c)\n', (4345, 4352), True, 'import torch.nn as nn\n'), ((4367, 4388), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4374, 4388), True, 'import torch.nn as nn\n'), ((7556, 7578), 'torch.cat', 'torch.cat', (['[x_U, y]', '(1)'], {}), '([x_U, y], 1)\n', (7565, 7578), False, 'import torch\n'), ((7676, 7698), 'torch.cat', 'torch.cat', (['[x_U, y]', '(1)'], {}), '([x_U, y], 1)\n', (7685, 7698), False, 'import torch\n'), ((7796, 7818), 'torch.cat', 'torch.cat', (['[x_U, y]', '(1)'], {}), '([x_U, y], 1)\n', (7805, 7818), False, 'import torch\n'), ((7915, 7937), 'torch.cat', 'torch.cat', (['[x_U, y]', '(1)'], {}), '([x_U, y], 1)\n', (7924, 7937), False, 'import torch\n'), ((8034, 8056), 'torch.cat', 'torch.cat', (['[x_U, y]', '(1)'], {}), '([x_U, y], 1)\n', (8043, 8056), False, 'import torch\n'), ((8153, 8175), 'torch.cat', 'torch.cat', (['[x_U, y]', '(1)'], {}), '([x_U, y], 1)\n', (8162, 8175), False, 'import torch\n'), ((8272, 8294), 'torch.cat', 'torch.cat', (['[x_U, y]', '(1)'], {}), '([x_U, y], 1)\n', (8281, 8294), False, 'import torch\n'), ((8391, 8413), 'torch.cat', 'torch.cat', (['[x_U, y]', '(1)'], {}), '([x_U, y], 1)\n', (8400, 8413), False, 'import torch\n'), ((14483, 14506), 'numpy.mean', 'np.mean', (['train_accuracy'], {}), '(train_accuracy)\n', (14490, 14506), True, 'import numpy as np\n'), ((14543, 14564), 'numpy.mean', 'np.mean', (['train_losses'], {}), '(train_losses)\n', (14550, 14564), True, 'import numpy as np\n'), ((16586, 16607), 'numpy.mean', 'np.mean', (['val_accuracy'], {}), '(val_accuracy)\n', (16593, 16607), True, 'import numpy as 
np\n'), ((16646, 16665), 'numpy.mean', 'np.mean', (['val_losses'], {}), '(val_losses)\n', (16653, 16665), True, 'import numpy as np\n'), ((13116, 13153), 'torch.unsqueeze', 'torch.unsqueeze', (['input[0, 0, :, :]', '(2)'], {}), '(input[0, 0, :, :], 2)\n', (13131, 13153), False, 'import torch\n'), ((13297, 13335), 'torch.unsqueeze', 'torch.unsqueeze', (['target[0, 0, :, :]', '(2)'], {}), '(target[0, 0, :, :], 2)\n', (13312, 13335), False, 'import torch\n'), ((13479, 13514), 'torch.unsqueeze', 'torch.unsqueeze', (['out[0, 0, :, :]', '(2)'], {}), '(out[0, 0, :, :], 2)\n', (13494, 13514), False, 'import torch\n'), ((13657, 13697), 'cv2.hconcat', 'cv2.hconcat', (['[np_img1, np_img2, np_img3]'], {}), '([np_img1, np_img2, np_img3])\n', (13668, 13697), False, 'import cv2\n'), ((15113, 15128), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15126, 15128), False, 'import torch\n'), ((2164, 2183), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (2180, 2183), False, 'import torch\n'), ((2209, 2228), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (2225, 2228), False, 'import torch\n'), ((15285, 15322), 'torch.unsqueeze', 'torch.unsqueeze', (['input[0, 0, :, :]', '(2)'], {}), '(input[0, 0, :, :], 2)\n', (15300, 15322), False, 'import torch\n'), ((15482, 15520), 'torch.unsqueeze', 'torch.unsqueeze', (['target[0, 0, :, :]', '(2)'], {}), '(target[0, 0, :, :], 2)\n', (15497, 15520), False, 'import torch\n'), ((15680, 15715), 'torch.unsqueeze', 'torch.unsqueeze', (['out[0, 0, :, :]', '(2)'], {}), '(out[0, 0, :, :], 2)\n', (15695, 15715), False, 'import torch\n'), ((15874, 15914), 'cv2.hconcat', 'cv2.hconcat', (['[np_img1, np_img2, np_img3]'], {}), '([np_img1, np_img2, np_img3])\n', (15885, 15914), False, 'import cv2\n')] |
import socket
import sys
import requests
#import requests_oauthlib
import prepro
import json
import time
import pandas as pd
from copulae import NormalCopula
import numpy as np
#sudo apt-get install -y python3-oauth python3-oauth2client python3-oauthlib python3-requests-oauthlib
def send_data_to_spark(data, tcp_connection):
first = True
for line in data.splitlines():
try:
if first:
first = False
continue
print("Data: " + line)
print ("------------------------------------------")
tcp_connection.send(str(line + '\n').encode())
except:
e = sys.exc_info()[0]
print("Error: %s" % e)
#Devuelve un array con la media y otro con la desviacion tipica de cada columna
def getNormalDist(df):
mean = df.mean()
avg = df.std()
return mean, avg
def generaDatosSimulados(df, mean, std, numValues):
df_simulado = pd.DataFrame();
headers = df.columns;
for i in range(8):
data = np.random.randn(numValues);
values = data * std[i] + mean[i]
values = np.where(values < 0, 0, values) #Los valores por debajo de 0 no tienen sentido
df_simulado[headers[i]] = values ;
return df_simulado
print("Creating the data generator...")
# Read the raw NASA wind-event summary table (whitespace-separated, no header).
df = pd.read_csv("../nasa/event/event_wind_summary/event_wind_summary.tab",
 sep='\s+',header=None)
# Assign a name to each column from the accompanying label file.
df.columns = prepro.read_headers("../nasa/event/event_wind_summary/event_wind_summary.lbl.txt")
# Keep only the variables the model was trained with.
trainVar = ['RMS_X_AXIS_X100', 'WINDSPEED', 'PRESSURE','AIR_TEMPERATURE', 'MEAN_X_AXIS_CROSSINGS',
 'MEAN_Y_AXIS_CROSSINGS', 'MEAN_Z_AXIS_CROSSINGS','WIND_DIRECTION']
df = df[trainVar]
mean, std = getNormalDist(df)
# TCP endpoint that the (Spark Streaming) consumer will connect to.
TCP_IP = "localhost"
TCP_PORT = 9012
conn = None
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
df_simulado = generaDatosSimulados(df, mean, std, 10)
print("Waiting for TCP connection...")
# Block until a single client connects.
conn, addr = s.accept()
print("Connected... Starting sending data.")
# Stream a fresh batch of 10 simulated rows every 10 seconds, forever.
while 1:
    df_simulado = generaDatosSimulados(df, mean, std, 10)
    data = df_simulado.to_string(index=False, header=False)
    send_data_to_spark(data,conn)
    time.sleep(10)
| [
"prepro.read_headers",
"socket.socket",
"pandas.read_csv",
"numpy.where",
"time.sleep",
"sys.exc_info",
"pandas.DataFrame",
"numpy.random.randn"
] | [((1351, 1451), 'pandas.read_csv', 'pd.read_csv', (['"""../nasa/event/event_wind_summary/event_wind_summary.tab"""'], {'sep': '"""\\\\s+"""', 'header': 'None'}), "('../nasa/event/event_wind_summary/event_wind_summary.tab', sep=\n '\\\\s+', header=None)\n", (1362, 1451), True, 'import pandas as pd\n'), ((1497, 1584), 'prepro.read_headers', 'prepro.read_headers', (['"""../nasa/event/event_wind_summary/event_wind_summary.lbl.txt"""'], {}), "(\n '../nasa/event/event_wind_summary/event_wind_summary.lbl.txt')\n", (1516, 1584), False, 'import prepro\n'), ((1921, 1970), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1934, 1970), False, 'import socket\n'), ((954, 968), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (966, 968), True, 'import pandas as pd\n'), ((2342, 2356), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (2352, 2356), False, 'import time\n'), ((1037, 1063), 'numpy.random.randn', 'np.random.randn', (['numValues'], {}), '(numValues)\n', (1052, 1063), True, 'import numpy as np\n'), ((1124, 1155), 'numpy.where', 'np.where', (['(values < 0)', '(0)', 'values'], {}), '(values < 0, 0, values)\n', (1132, 1155), True, 'import numpy as np\n'), ((662, 676), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (674, 676), False, 'import sys\n')] |
#############################################################
# MIT License, Copyright © 2020, ETH Zurich, <NAME>
#############################################################
import numpy as np
import tensorflow as tf
import os
from tqdm import trange
from config import get_config
from util import *
from styler_2p import Styler
import sys
sys.path.append('E:/partio/build/py/Release')
import partio
def run(config):
    """Style-transfer driver for 2-D particle data.

    Loads ``config.num_frames`` particle frames (partio .bgeo), normalizes
    positions to [0, 1], runs the Styler, then writes a loss plot, stylized
    density images, and colored .bgeo particle files to ``config.log_dir``.

    NOTE(review): relies on names star-imported from ``util``
    (prepare_dirs_and_logger, plt, Image, draw_pt) -- confirm.
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # so the IDs match nvidia-smi
    os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu_id # "0, 1" for multiple
    prepare_dirs_and_logger(config)
    tf.compat.v1.set_random_seed(config.seed)
    config.rng = np.random.RandomState(config.seed)
    styler = Styler(config)
    styler.load_img(config.resolution)
    params = {}
    # load particles: per frame, positions indexed by particle id and density
    p = []
    r = []
    for i in trange(config.num_frames, desc='load particle'):
        pt_path = os.path.join(config.data_dir, config.dataset, config.d_path % (config.target_frame+i))
        pt = partio.read(pt_path)
        p_id = pt.attributeInfo('id')
        p_pos = pt.attributeInfo('position')
        p_den = pt.attributeInfo('density')
        p_num = pt.numParticles()
        p_ = np.zeros([p_num,2], dtype=np.float32)
        r_ = np.zeros([p_num,1], dtype=np.float32)
        for j in range(p_num):
            p_id_ = pt.get(p_id, j)[0]
            p_[p_id_] = pt.get(p_pos, p_id_)[:-1] # 2d
            r_[p_id_] = pt.get(p_den, p_id_)
        r.append(r_)
        # normalize particle position [0-1]; domain is [H,W]
        px, py = p_[...,0], p_[...,1]
        px /= config.domain[1]
        py /= config.domain[0]
        p_ = np.stack([py,px], axis=-1)
        p.append(p_)
    print('resolution:', config.resolution)
    print('domain:', config.domain)
    print('radius:', config.radius)
    print('normalized px range', px.min(), px.max())
    print('normalized py range', py.min(), py.max())
    print('num particles:', p[0].shape) # the number of particles is fixed
    params['p'] = p
    params['r'] = r
    # styler.render_test(params)
    result = styler.run(params)
    # save loss plot (one curve per octave)
    l = result['l']
    lb = []
    for o, l_ in enumerate(l):
        lb_, = plt.plot(range(len(l_)), l_, label='oct %d' % o)
        lb.append(lb_)
    plt.legend(handles=lb)
    # plt.show()
    plot_path = os.path.join(config.log_dir, 'loss_plot.png')
    plt.savefig(plot_path)
    # save density fields
    d_sty = result['d'] # [0-255], uint8
    # d_path = os.path.join(config.log_dir, 'd%03d_%03d.png' % (config.target_frame,config.target_frame+config.num_frames-1))
    # save_image(d_sty, d_path, nrow=5, gray=not 'c' in config.target_field)
    for i, d_sty_ in enumerate(d_sty):
        im = Image.fromarray(d_sty_)
        d_path = os.path.join(config.log_dir, '%03d.png' % (config.target_frame+i))
        im.save(d_path)
    # intermediate (per-octave) density images
    d_intm = result['d_intm']
    for o, d_intm_o in enumerate(d_intm):
        for i, d_intm_ in enumerate(d_intm_o):
            im = Image.fromarray(d_intm_)
            d_path = os.path.join(config.log_dir, 'o%02d_%03d.png' % (o, config.target_frame))
            im.save(d_path)
    # save particles (load using Houdini GPlay)
    c_sty = result['c']
    p_org = []
    for p_ in p:
        # denormalize particle positions back to domain coordinates
        px, py = p_[...,1], p_[...,0]
        px *= config.domain[1]
        py *= config.domain[0]
        p_org.append(np.stack([px,py], axis=-1))
    for i in range(config.num_frames):
        # create a particle set and attributes
        pt = partio.create()
        position = pt.addAttribute("position",partio.VECTOR,2)
        color = pt.addAttribute("Cd",partio.FLOAT,3)
        radius = pt.addAttribute("radius",partio.FLOAT,1)
        # normal = pt.addAttribute("normal",partio.VECTOR,3)
        for pi in range(p_org[i].shape[0]):
            p_ = pt.addParticle()
            pt.set(position, p_, tuple(p_org[i][pi].astype(np.float)))
            pt.set(color, p_, tuple(c_sty[i][pi].astype(np.float)))
            pt.set(radius, p_, (config.radius,))
        p_path = os.path.join(config.log_dir, '%03d.bgeo' % (config.target_frame+i))
        partio.write(p_path, pt)
    # visualization using open3d
    bbox = [
        [0,0,-1],
        [config.domain[1],config.domain[0],1], # [X,Y,Z]
    ]
    draw_pt(p_org, pc=c_sty, bbox=bbox, dt=0.1)
def main(config):
    """Populate *config* for the 2-D dam-break color style-transfer test and run it."""
    config.dataset = 'dambreak2d'
    config.d_path = 'partio/ParticleData_Fluid_%d.bgeo'
    # from scene
    config.radius = 0.025
    config.support = 4
    config.disc = 2
    config.rest_density = 1000
    config.resolution = [128, 256] # [H,W]
    cell_size = 2*config.radius*config.disc
    config.domain = [float(_*cell_size) for _ in config.resolution] # [H,W]
    config.nsize = max(3-config.disc,1) # 1 is enough if disc is 2, 2 if disc is 1
    # upscaling for rendering
    config.scale = 4
    config.nsize *= config.scale
    config.resolution = [config.resolution[0]*config.scale, config.resolution[1]*config.scale]
    #####################
    # frame range setting
    # NOTE(review): multi_frame is currently unused; the frame range comes
    # from get_config defaults -- confirm.
    multi_frame = True
    config.frames_per_opt = 200
    config.window_sigma = 3
    # if multi_frame:
    #     config.target_frame = 1
    #     config.num_frames = 200
    #     config.batch_size = 4
    # else:
    #     config.target_frame = 150
    #     config.num_frames = 1
    #     config.batch_size = 1
    ######
    # color test
    config.target_field = 'c'
    config.lr = 0.01
    config.iter = 100
    config.octave_n = 3
    config.octave_scale = 1.7
    config.clip = False
    # style_list = {
    #     'fire_new': 'fire_new.jpg',
    #     'ben_giles': 'ben_giles.png',
    #     'wave': 'wave.jpeg',
    # }
    # style = 'wave'
    # config.style_target = os.path.join(config.data_dir, 'image', style_list[style])
    config.network = 'vgg_19.ckpt'
    config.w_style = 1
    config.w_content = 0
    config.style_init = 'noise'
    config.style_layer = ['conv2_1','conv3_1']
    config.w_style_layer = [0.5,0.5]
    config.style_mask = True
    config.style_mask_on_ref = False
    config.style_tiling = 2
    config.w_tv = 0.01
    # Build a run tag from whichever loss (content vs. style) is active.
    if config.w_content == 1:
        config.tag = 'test_%s_%s_%d' % (
            config.target_field, config.content_layer, config.content_channel)
    else:
        style = os.path.splitext(os.path.basename(config.style_target))[0]
        config.tag = 'test_%s_%s' % (
            config.target_field, style)
    config.tag += '_%d' % config.num_frames
    run(config)
if __name__ == "__main__":
    # Parse command-line options into a config namespace (see config.get_config).
    config, unparsed = get_config()
main(config) | [
"os.path.join",
"config.get_config",
"numpy.zeros",
"numpy.stack",
"tensorflow.compat.v1.set_random_seed",
"partio.read",
"partio.write",
"os.path.basename",
"partio.create",
"styler_2p.Styler",
"sys.path.append",
"tqdm.trange",
"numpy.random.RandomState"
] | [((342, 387), 'sys.path.append', 'sys.path.append', (['"""E:/partio/build/py/Release"""'], {}), "('E:/partio/build/py/Release')\n", (357, 387), False, 'import sys\n'), ((619, 660), 'tensorflow.compat.v1.set_random_seed', 'tf.compat.v1.set_random_seed', (['config.seed'], {}), '(config.seed)\n', (647, 660), True, 'import tensorflow as tf\n'), ((678, 712), 'numpy.random.RandomState', 'np.random.RandomState', (['config.seed'], {}), '(config.seed)\n', (699, 712), True, 'import numpy as np\n'), ((727, 741), 'styler_2p.Styler', 'Styler', (['config'], {}), '(config)\n', (733, 741), False, 'from styler_2p import Styler\n'), ((859, 906), 'tqdm.trange', 'trange', (['config.num_frames'], {'desc': '"""load particle"""'}), "(config.num_frames, desc='load particle')\n", (865, 906), False, 'from tqdm import trange\n'), ((2348, 2393), 'os.path.join', 'os.path.join', (['config.log_dir', '"""loss_plot.png"""'], {}), "(config.log_dir, 'loss_plot.png')\n", (2360, 2393), False, 'import os\n'), ((6608, 6620), 'config.get_config', 'get_config', ([], {}), '()\n', (6618, 6620), False, 'from config import get_config\n'), ((926, 1019), 'os.path.join', 'os.path.join', (['config.data_dir', 'config.dataset', '(config.d_path % (config.target_frame + i))'], {}), '(config.data_dir, config.dataset, config.d_path % (config.\n target_frame + i))\n', (938, 1019), False, 'import os\n'), ((1026, 1046), 'partio.read', 'partio.read', (['pt_path'], {}), '(pt_path)\n', (1037, 1046), False, 'import partio\n'), ((1223, 1261), 'numpy.zeros', 'np.zeros', (['[p_num, 2]'], {'dtype': 'np.float32'}), '([p_num, 2], dtype=np.float32)\n', (1231, 1261), True, 'import numpy as np\n'), ((1274, 1312), 'numpy.zeros', 'np.zeros', (['[p_num, 1]'], {'dtype': 'np.float32'}), '([p_num, 1], dtype=np.float32)\n', (1282, 1312), True, 'import numpy as np\n'), ((1663, 1690), 'numpy.stack', 'np.stack', (['[py, px]'], {'axis': '(-1)'}), '([py, px], axis=-1)\n', (1671, 1690), True, 'import numpy as np\n'), ((2787, 2855), 
'os.path.join', 'os.path.join', (['config.log_dir', "('%03d.png' % (config.target_frame + i))"], {}), "(config.log_dir, '%03d.png' % (config.target_frame + i))\n", (2799, 2855), False, 'import os\n'), ((3558, 3573), 'partio.create', 'partio.create', ([], {}), '()\n', (3571, 3573), False, 'import partio\n'), ((4118, 4187), 'os.path.join', 'os.path.join', (['config.log_dir', "('%03d.bgeo' % (config.target_frame + i))"], {}), "(config.log_dir, '%03d.bgeo' % (config.target_frame + i))\n", (4130, 4187), False, 'import os\n'), ((4194, 4218), 'partio.write', 'partio.write', (['p_path', 'pt'], {}), '(p_path, pt)\n', (4206, 4218), False, 'import partio\n'), ((3061, 3134), 'os.path.join', 'os.path.join', (['config.log_dir', "('o%02d_%03d.png' % (o, config.target_frame))"], {}), "(config.log_dir, 'o%02d_%03d.png' % (o, config.target_frame))\n", (3073, 3134), False, 'import os\n'), ((3430, 3457), 'numpy.stack', 'np.stack', (['[px, py]'], {'axis': '(-1)'}), '([px, py], axis=-1)\n', (3438, 3457), True, 'import numpy as np\n'), ((6363, 6400), 'os.path.basename', 'os.path.basename', (['config.style_target'], {}), '(config.style_target)\n', (6379, 6400), False, 'import os\n')] |
"""
This module focuses on scraping and saving Game of Thrones scripts. These scripts were found through the
``genius.com/albums/Game-of-thrones`` website. All scripts are saved to the ``./scripts/`` directory (which is created
if it does not exist already).
Author: <NAME>
"""
import os
import re
from typing import List
import numpy as np
import requests
import html2text
def scrape_html_and_save(url: str, fname_out: str, remove_brackets: bool = False) -> None:
    """
    Scrapes a specified URL and saves its text content to file.

    Parameters
    ----------
    url
        The URL that is being scraped from.
    fname_out
        Base name of the output file; the text is written to ``<fname_out>.txt``.
    remove_brackets : optional
        Removes instances in the text of the form ``(WORD)``, replacing them
        with empty strings.

    Returns
    -------
    None. The text is saved to ``<fname_out>.txt``.

    Raises
    ------
    RuntimeError
        If the HTTP request does not return status 200.
    """
    # Set some options for parsing the HTML to text.
    parser = html2text.HTML2Text()
    parser.unicode_snob = True
    parser.body_width = 0
    parser.skip_internal_links = True
    parser.ignore_links = True
    # Now get the HTML page.
    r = requests.get(url)
    if r.status_code != 200:
        print(f"Encountered error while fetching webpage {url}")
        # Include the status code so failures are diagnosable from the traceback.
        raise RuntimeError(f"HTTP status {r.status_code} for {url}")
    # Snip out all that HTML nonsense and leave just the text (the script itself).
    text = parser.handle(r.text)
    # If brackets must be removed, the file is re-opened and saved again below,
    # so write to a temporary name first.
    fname = f"{fname_out}_tmp.txt" if remove_brackets else f"{fname_out}.txt"
    with open(fname, "w") as f:
        f.write(text)
    print(f"Saved to {fname}")
    # Some scripts contain bracketed stage directions (e.g. who a character is
    # talking to) that would skew statistics, so strip them out.
    if remove_brackets:
        # Match any "(<ANYTHING>)" span; raw string avoids invalid-escape warnings.
        pattern = r"\([^)]*\)"
        new_fname = f"{fname_out}.txt"
        with open(fname, "r") as f_in, open(new_fname, "w") as f_out:
            for line in f_in:
                f_out.write(re.sub(pattern, "", line))
        print(f"Removed brackets and resaved to {new_fname}")
        os.remove(f"{fname_out}_tmp.txt")
        print("Deleted old tmp file.")
print("Deleted old tmp file.")
def generate_episode_names(url: str, output_fname: str, debug: bool = False) -> List[str]:
    """
    Fetches and saves the names of the episodes.

    Parameters
    ----------
    url
        The URL of the page listing all episodes of a season.
    output_fname
        Base name of the file the scraped page is saved to; the file actually
        written by ``scrape_html_and_save`` is ``<output_fname>.txt``.
    debug : optional
        If specified, prints out some messages to help with debugging.

    Returns
    -------
    episode_names
        The names of all the episodes. These may have to be processed further
        to provide a proper URL.
    """
    # First scrape the URL and turn the ugly HTML to nicely formatted text.
    scrape_html_and_save(url, output_fname)
    # Episode names appear on lines of the form "### <Episode Name> Lyrics".
    # Compile the pattern once, outside the loop.
    reg_exp = re.compile(r"###. ([A-Z].*)Lyrics", re.IGNORECASE)
    episode_names = []
    # BUG FIX: scrape_html_and_save appends ".txt" to the name it is given, so
    # read back the file it actually wrote rather than ``output_fname`` itself.
    with open(f"{output_fname}.txt", "r") as f:
        for line in f:
            # Ignore empty / whitespace-only lines.
            if not line.split():
                continue
            if debug:
                print(f"Line {line}")
            reg_line = reg_exp.split(line)
            if debug:
                print(f"Reg_line {reg_line}")
            # A correctly matched line splits into ['', '<Name of episode>', '\n'];
            # anything shorter did not match.
            if len(reg_line) < 3:
                continue
            # Trim surrounding whitespace and append.
            episode_names.append(reg_line[1].strip())
    return episode_names
if __name__ == "__main__":
    output_dir = "./scripts"
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Only Season 8 for now; widen the range to scrape more seasons.
    seasons = np.arange(8, 9)
    for season_num in seasons:
        # First find the names of the episodes in this Season.
        url = f"https://genius.com/albums/Game-of-thrones/Season-{season_num}-scripts"
        fname_out = f"{output_dir}/season-{season_num}-episodes.txt"
        episode_names = generate_episode_names(url, fname_out)
        # Then go through each episode and grab its script.
        for episode_num, episode_name in enumerate(episode_names):
            # For the URL, the episode names use '-' instead of spaces and use lower case
            # letter.
            url_episode_name = episode_name.replace(" ", "-").lower()
            # Commas and apostrophes are the devil.
            url_episode_name = url_episode_name.replace("'", "").lower()
            url_episode_name = url_episode_name.replace(",", "").lower()
            url = f"https://genius.com/Game-of-thrones-{url_episode_name}-annotated"
            fname_out = f"{output_dir}/s{season_num:02}e{episode_num+1:02}"
            scrape_html_and_save(url, fname_out, remove_brackets=True)
| [
"os.path.exists",
"os.makedirs",
"re.compile",
"html2text.HTML2Text",
"requests.get",
"re.sub",
"numpy.arange",
"os.remove"
] | [((1029, 1050), 'html2text.HTML2Text', 'html2text.HTML2Text', ([], {}), '()\n', (1048, 1050), False, 'import html2text\n'), ((1215, 1232), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1227, 1232), False, 'import requests\n'), ((4482, 4497), 'numpy.arange', 'np.arange', (['(8)', '(9)'], {}), '(8, 9)\n', (4491, 4497), True, 'import numpy as np\n'), ((2470, 2503), 'os.remove', 'os.remove', (['f"""{fname_out}_tmp.txt"""'], {}), "(f'{fname_out}_tmp.txt')\n", (2479, 2503), False, 'import os\n'), ((4407, 4433), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (4421, 4433), False, 'import os\n'), ((4443, 4466), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (4454, 4466), False, 'import os\n'), ((3742, 3791), 're.compile', 're.compile', (['"""###. ([A-Z].*)Lyrics"""', 're.IGNORECASE'], {}), "('###. ([A-Z].*)Lyrics', re.IGNORECASE)\n", (3752, 3791), False, 'import re\n'), ((2332, 2357), 're.sub', 're.sub', (['pattern', '""""""', 'line'], {}), "(pattern, '', line)\n", (2338, 2357), False, 'import re\n')] |
import numpy as np
from scipy.spatial.distance import cdist
# Toy 2D point set; after the transpose each COLUMN is one point.
dat=np.array([[1,1],[1.1,1],[1.3,1],[1.5,1],[1.6,1]])
dat=np.transpose(dat)
# Minimum Euclidean spacing between consecutive kept ("key") points.
dist_thres=0.15
num_points = dat.shape[1]
is_key_point=[False for i in range(num_points)]
print("Before filter, total {} points".format(num_points))
dat_t = np.transpose(dat)
# Pairwise distance matrix between all points.
dist = cdist(dat_t, dat_t, 'euclidean')
i=0
# Greedy thinning: starting from point 0, mark the current point as a key
# point, then jump to the first later point farther than dist_thres away.
# The loop index 'it' only bounds the number of iterations.
for it in range(dat_t.shape[0]):
    # np.where returns a tuple; element [0] is the array of offsets (relative
    # to i) of points beyond the threshold.
    len_next = list(np.where(dist[i, i:] > dist_thres))[0]
    is_key_point[i] = True
    if len_next.shape[0]>0:
        i = len_next[0] + i
    else:
        break
dat_filt = dat[:, is_key_point]
print("After filter, total {} points".format(dat_filt.shape[1]))
print(dat_filt) | [
"scipy.spatial.distance.cdist",
"numpy.array",
"numpy.transpose",
"numpy.where"
] | [((65, 123), 'numpy.array', 'np.array', (['[[1, 1], [1.1, 1], [1.3, 1], [1.5, 1], [1.6, 1]]'], {}), '([[1, 1], [1.1, 1], [1.3, 1], [1.5, 1], [1.6, 1]])\n', (73, 123), True, 'import numpy as np\n'), ((119, 136), 'numpy.transpose', 'np.transpose', (['dat'], {}), '(dat)\n', (131, 136), True, 'import numpy as np\n'), ((294, 311), 'numpy.transpose', 'np.transpose', (['dat'], {}), '(dat)\n', (306, 311), True, 'import numpy as np\n'), ((319, 351), 'scipy.spatial.distance.cdist', 'cdist', (['dat_t', 'dat_t', '"""euclidean"""'], {}), "(dat_t, dat_t, 'euclidean')\n", (324, 351), False, 'from scipy.spatial.distance import cdist\n'), ((411, 445), 'numpy.where', 'np.where', (['(dist[i, i:] > dist_thres)'], {}), '(dist[i, i:] > dist_thres)\n', (419, 445), True, 'import numpy as np\n')] |
import os
import mmcv
import numpy as np
import matplotlib.pyplot as plt
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from mmcv import Config
from mmdet.datasets import build_dataset
# Model / dataset identifiers used to locate the config and result files.
MODEL = "smoking"
MODEL_NAME = "mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_smoking"
CONFIG_FILE = f"configs/{MODEL}/{MODEL_NAME}.py"
# Pickle produced by mmdetection's tools/test.py.
RESULT_FILE = f"../../tools/results.pkl"
def plot_pr_curve(config_file, result_file, metric="bbox"):
    """Plot precision-recall curves based on testing results of a pkl file.

    Args:
        config_file (str): mmdetection config file path.
        result_file (str): pkl file of testing results path.
        metric (str): Metrics to be evaluated. Options are
            'bbox', 'segm'.
    """
    cfg = Config.fromfile(config_file)
    # turn on test mode of dataset
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True
    # build dataset
    dataset = build_dataset(cfg.data.test)
    # load result file in pkl format
    pkl_results = mmcv.load(result_file)
    # convert pkl file (list[list | tuple | ndarray]) to json
    json_results, _ = dataset.format_results(pkl_results)
    # initialize COCO instance with the ground-truth annotations
    coco = COCO(annotation_file=cfg.data.test.ann_file)
    coco_gt = coco
    coco_dt = coco_gt.loadRes(json_results[metric])
    # initialize COCOeval instance
    coco_eval = COCOeval(coco_gt, coco_dt, metric)
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    # extract eval data
    precisions = coco_eval.eval["precision"]
    '''
    precisions[T, R, K, A, M]
    T: iou thresholds [0.5 : 0.05 : 0.95], idx from 0 to 9
    R: recall thresholds [0 : 0.01 : 1], idx from 0 to 100
    K: category, idx from 0 to ...
    A: area range, (all, small, medium, large), idx from 0 to 3
    M: max dets, (1, 10, 100), idx from 0 to 2
    '''
    # Each slice: one IoU threshold, category 0, area 'all', maxDets=100.
    pr_array1 = precisions[0, :, 0, 0, 2]
    pr_array2 = precisions[1, :, 0, 0, 2]
    pr_array3 = precisions[2, :, 0, 0, 2]
    pr_array4 = precisions[3, :, 0, 0, 2]
    pr_array5 = precisions[4, :, 0, 0, 2]
    pr_array6 = precisions[5, :, 0, 0, 2]
    pr_array7 = precisions[6, :, 0, 0, 2]
    pr_array8 = precisions[7, :, 0, 0, 2]
    pr_array9 = precisions[8, :, 0, 0, 2]
    pr_array10 = precisions[9, :, 0, 0, 2]
    # Recall axis matching the 101 recall thresholds above.
    x = np.arange(0.0, 1.01, 0.01)
    # plot PR curve (every other IoU threshold is commented out for clarity)
    plt.plot(x, pr_array1, label="IoU=0.5")
    # plt.plot(x, pr_array2, label="iou=0.55")
    plt.plot(x, pr_array3, label="IoU=0.6")
    #plt.plot(x, pr_array4, label="iou=0.65")
    plt.plot(x, pr_array5, label="IoU=0.7")
    #plt.plot(x, pr_array6, label="iou=0.75")
    plt.plot(x, pr_array7, label="IoU=0.8")
    #plt.plot(x, pr_array8, label="iou=0.85")
    plt.plot(x, pr_array9, label="IoU=0.9")
    #plt.plot(x, pr_array10, label="iou=0.95")
    plt.xlabel("Recall")
    plt.ylabel("Precison")
    plt.xlim(0, 1.0)
    plt.ylim(0, 1.01)
    plt.grid(True)
    plt.legend()
    plt.show()
if __name__ == "__main__":
plot_pr_curve(config_file=CONFIG_FILE, result_file=RESULT_FILE, metric="bbox") | [
"mmdet.datasets.build_dataset",
"matplotlib.pyplot.grid",
"pycocotools.cocoeval.COCOeval",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"pycocotools.coco.COCO",
"mmcv.Config.fromfile",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.x... | [((781, 809), 'mmcv.Config.fromfile', 'Config.fromfile', (['config_file'], {}), '(config_file)\n', (796, 809), False, 'from mmcv import Config\n'), ((1074, 1102), 'mmdet.datasets.build_dataset', 'build_dataset', (['cfg.data.test'], {}), '(cfg.data.test)\n', (1087, 1102), False, 'from mmdet.datasets import build_dataset\n'), ((1158, 1180), 'mmcv.load', 'mmcv.load', (['result_file'], {}), '(result_file)\n', (1167, 1180), False, 'import mmcv\n'), ((1343, 1387), 'pycocotools.coco.COCO', 'COCO', ([], {'annotation_file': 'cfg.data.test.ann_file'}), '(annotation_file=cfg.data.test.ann_file)\n', (1347, 1387), False, 'from pycocotools.coco import COCO\n'), ((1510, 1544), 'pycocotools.cocoeval.COCOeval', 'COCOeval', (['coco_gt', 'coco_dt', 'metric'], {}), '(coco_gt, coco_dt, metric)\n', (1518, 1544), False, 'from pycocotools.cocoeval import COCOeval\n'), ((2433, 2459), 'numpy.arange', 'np.arange', (['(0.0)', '(1.01)', '(0.01)'], {}), '(0.0, 1.01, 0.01)\n', (2442, 2459), True, 'import numpy as np\n'), ((2484, 2523), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'pr_array1'], {'label': '"""IoU=0.5"""'}), "(x, pr_array1, label='IoU=0.5')\n", (2492, 2523), True, 'import matplotlib.pyplot as plt\n'), ((2575, 2614), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'pr_array3'], {'label': '"""IoU=0.6"""'}), "(x, pr_array3, label='IoU=0.6')\n", (2583, 2614), True, 'import matplotlib.pyplot as plt\n'), ((2665, 2704), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'pr_array5'], {'label': '"""IoU=0.7"""'}), "(x, pr_array5, label='IoU=0.7')\n", (2673, 2704), True, 'import matplotlib.pyplot as plt\n'), ((2755, 2794), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'pr_array7'], {'label': '"""IoU=0.8"""'}), "(x, pr_array7, label='IoU=0.8')\n", (2763, 2794), True, 'import matplotlib.pyplot as plt\n'), ((2845, 2884), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'pr_array9'], {'label': '"""IoU=0.9"""'}), "(x, pr_array9, label='IoU=0.9')\n", (2853, 2884), True, 'import 
matplotlib.pyplot as plt\n'), ((2937, 2957), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (2947, 2957), True, 'import matplotlib.pyplot as plt\n'), ((2962, 2984), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precison"""'], {}), "('Precison')\n", (2972, 2984), True, 'import matplotlib.pyplot as plt\n'), ((2989, 3005), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (2997, 3005), True, 'import matplotlib.pyplot as plt\n'), ((3010, 3027), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1.01)'], {}), '(0, 1.01)\n', (3018, 3027), True, 'import matplotlib.pyplot as plt\n'), ((3032, 3046), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3040, 3046), True, 'import matplotlib.pyplot as plt\n'), ((3051, 3063), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3061, 3063), True, 'import matplotlib.pyplot as plt\n'), ((3068, 3078), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3076, 3078), True, 'import matplotlib.pyplot as plt\n')] |
import json
import os
from enum import Enum
from typing import List
import numpy as np
import pandas
from keras.models import Sequential
from keras.layers import LSTM, Dense, RepeatVector, TimeDistributed, Activation
from matplotlib import gridspec as grid
from matplotlib import pylab as plt
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
class ColumnType(Enum):
    """Role a CSV column plays in the learning problem."""
    TIME = 0     # timestamp column
    FEATURE = 1  # model input
    TARGET = 2   # prediction target
class CsvColumn:
    """Metadata describing a single column of the raw CSV file."""

    def __init__(self, col_index, name, type, normalize):
        # 'type' shadows the builtin, but the name is fixed by the JSON schema.
        self.col_index = col_index  # type: int
        self.name = name  # type: str
        self.type = ColumnType[type.upper()]  # type: ColumnType
        self.normalize = normalize  # type: bool

    def __repr__(self):
        flag = "do" if self.normalize else "do not"
        return "{:>2}) {:<25} {:>7} {:^6} normalize".format(
            self.col_index, self.name, self.type.name, flag)
CsvColumnList = List[CsvColumn]
def load_meta_data(json_path: str) -> CsvColumnList:
    """Read column metadata from *json_path*, sorted by column index."""
    with open(json_path, 'r') as json_file:
        meta = json.load(json_file)
    columns = [CsvColumn(**entry) for entry in meta["columns"]]
    columns.sort(key=lambda c: c.col_index)
    return columns
class SimpleLSTM:
    def __init__(self):
        """Initialize data holders, preprocessing state and hyper-parameters."""
        # Data
        self.use_csv_file = True  # load real CSV data instead of synthetic data
        self.dataframe = None  # type: np.ndarray
        self.timestamp = None  # type: np.ndarray
        self.feature_indices = None  # type: list
        self.feature_names = None  # type: list
        self.target_indices = None  # type: list
        self.target_names = None  # type: list
        # Preprocessing
        self.feature_transformer = None  # type: MinMaxScaler
        self.target_transformer = None  # type: MinMaxScaler
        self.smoothing_window = 1  # >1 enables Gaussian smoothing
        # Model
        self.model = None  # type: Sequential
        self.units = [16, 8]  # LSTM units for encoder / decoder
        self.look_back = 200  # input window length (timesteps)
        self.look_front = 10  # prediction window length (timesteps)
        # Training
        self.num_epochs = 50
        self.batch_size = 32
        self.train_x = None  # type: np.ndarray
        self.train_y = None  # type: np.ndarray
        self.test_x = None  # type: np.ndarray
        self.test_y = None  # type: np.ndarray
    def run(self):
        """End-to-end pipeline: load data, preprocess, train the
        encoder-decoder LSTM, and plot losses and predictions.
        """
        if self.use_csv_file:
            self.load_csv_data()
        else:
            self.create_data()
        print("Raw data shapes:"
              "\nFeatures: {} (observations, num features)"
              "\nTargets:  {} (observations, num targets".format(
            self.features.shape, self.targets.shape))
        # Preprocess the data by scaling, smoothing and shifting.
        self.preprocess_data()
        self.plot_dataframe(features=self.features, targets=self.targets,
                            feature_names=self.feature_names,
                            target_names=self.target_names)
        # Build sliding-window (supervised) tensors from the sequences.
        X, Y = self.create_supervised_data(features=self.features, targets=self.targets,
                                           look_back=self.look_back,
                                           look_front=self.look_front)
        print("Supervised data shapes:"
              "\nX: {} (batch, window size, num features),"
              "\nY: {} (batch, prediction window size, num features)".format(
            X.shape, Y.shape))
        # Split the data into train test.
        self.train_x, self.train_y, self.test_x, self.test_y = self.train_test_split(
            features=X, targets=Y, train_fraction=0.7)
        print("Train data:"
              "\n\tFeatures: {}"
              "\n\tTargets:  {}".format(self.train_x.shape, self.train_y.shape))
        print("Test data:"
              "\n\tFeatures: {}"
              "\n\tTargets:  {}".format(self.test_x.shape, self.test_y.shape))
        # Create a learning model and train in on the train data.
        # Encoder-decoder: LSTM encoder -> repeat -> LSTM decoder -> dense head.
        self.model = Sequential()
        self.model.add(LSTM(units=self.units[0],
                            input_shape=(self.look_back, self.features.shape[1]),
                            return_sequences=False))
        self.model.add(RepeatVector(self.look_front))
        self.model.add(LSTM(units=self.units[1], return_sequences=True))
        self.model.add(TimeDistributed(Dense(self.targets.shape[1])))
        self.model.add(Activation('linear'))
        self.model.compile(loss='mae', optimizer='adam')
        print(self.model.summary())
        history = self.model.fit(self.train_x, self.train_y, epochs=self.num_epochs,
                                 batch_size=self.batch_size,
                                 validation_data=(self.test_x, self.test_y), verbose=1,
                                 shuffle=False)
        plt.plot(history.history['loss'], label='train')
        plt.plot(history.history['val_loss'], label='test')
        plt.legend()
        plt.show()
        # invert scaling for prediction
        yhat = self.model.predict(self.test_x)
        print("Prediction shape: {}".format(yhat.shape))
        yhat_sequential = \
            self.supervised_target_to_sequential(yhat, look_front=self.look_front)
        print("Sequential shape: {}".format(yhat_sequential.shape))
        inv_yhat = self.transform_target_back(transformed_targets=yhat_sequential)
        print("Untransformed shape: {}".format(inv_yhat.shape))
        inv_yhat = inv_yhat[:, 0]
        # invert scaling for test targets
        test_y_sequential = \
            self.supervised_target_to_sequential(self.test_y, look_front=self.look_front)
        print("Y_test sequential shape: {}".format(test_y_sequential.shape))
        inv_y = self.transform_target_back(transformed_targets=test_y_sequential)
        print("Untransformed shape: {}".format(inv_y.shape))
        inv_y = inv_y[:, 0]
        # calculate RMSE
        rmse = np.sqrt(mean_squared_error(inv_y, inv_yhat))
        print('Test RMSE: %.3f' % rmse)
        plt.plot(inv_yhat, label="Prediction", linewidth=2)
        plt.plot(inv_y, label="ground truth", linewidth=2)
        # Overlay every 20th raw prediction window on top of the curves.
        start = 0
        for y in yhat:
            if start % 20 == 0:
                y = self.transform_target_back(y)
                plt.plot(start + np.arange(self.look_front), y)
            start += 1
        plt.legend()
        plt.show()
    def load_csv_data(self):
        """Load ./dataset/data_clean.csv guided by its JSON metadata, drop
        NaN-heavy columns and NaN rows, and store the result as float32.
        """
        cur_dir = os.path.dirname(os.path.realpath(__file__))
        dataset_dir = os.path.join(cur_dir, "dataset")
        meta_data_file_name = os.path.join(dataset_dir, "data_clean.json")
        meta_data = load_meta_data(json_path=meta_data_file_name)
        dataset_file_name = os.path.join(dataset_dir, "data_clean.csv")
        self.dataframe = pandas.read_csv(
            dataset_file_name, delimiter=",", index_col=False,
            usecols=[meta.col_index for meta in meta_data])
        # Drop all columns which have too many nans.
        nan_fraction_accepted = 0.1
        num_nans_accepted = int(nan_fraction_accepted * self.dataframe.shape[0])
        drop_col_indices = []
        for col_index, col in enumerate(self.dataframe.columns):
            num_nans = np.count_nonzero(self.dataframe[col].isnull())
            if num_nans > num_nans_accepted:
                drop_col_indices.append(col_index)
                print("Ignoring feature {} as there are too many 'nan's".format(col))
        # NOTE(review): drop_col_indices are positions in the loaded frame,
        # while meta.col_index refers to the original CSV -- these only agree
        # if the usecols indices are contiguous from 0; verify.
        meta_data = [meta for meta in meta_data if meta.col_index not in drop_col_indices]
        self.dataframe.drop(self.dataframe.columns[drop_col_indices],
                            axis=1, inplace=True)
        # Drop all rows which still contain a nan.
        self.dataframe.dropna(inplace=True)
        # Reorder columns inside the dataframe.
        time_name = [meta.name for meta in meta_data
                     if meta.type == ColumnType.TIME][0]
        # Rename and reorder dataframe columns: time first, then features, then targets.
        self.feature_names = [meta.name for meta in meta_data
                              if meta.type == ColumnType.FEATURE]
        self.target_names = [meta.name for meta in meta_data
                             if meta.type == ColumnType.TARGET]
        self.dataframe = self.dataframe[
            [time_name, *self.feature_names, *self.target_names]]
        # Keep the timestamps separately and drop the time column from the data.
        self.timestamp = self.dataframe.values[:, 0].copy()
        self.dataframe.drop(labels=self.dataframe.columns[0], inplace=True, axis=1)
        self.feature_indices = [i for i in range(len(self.feature_names))]
        self.target_indices = [i + self.feature_indices[-1] + 1
                               for i in range(len(self.target_names))]
        # Preprocess features: from here on dataframe is a float32 ndarray.
        self.dataframe = self.dataframe.values.astype(np.float32)
def create_data(self):
x_linspace = np.linspace(0, 150 * np.pi, 2500)
num_features = 3
num_targets = 1
features = []
functions = [np.sin, np.cos]
for i in range(num_features):
feature = np.random.rand() + np.random.rand() * np.random.choice(functions)(
np.random.rand() * x_linspace + np.random.rand()
)
features.append(feature)
features = np.array(features).T
targets = []
for i in range(num_targets):
target = np.zeros(features.shape[0])
for feature in features.T:
target += np.random.rand() * feature
targets.append(target)
targets = np.array(targets).T
self.dataframe = np.concatenate((features, targets), axis=1)
self.feature_names = ["feature {}".format(i + 1) for i in range(
self.features.shape[1])]
self.target_names = ["target {}".format(i + 1) for i in range(
self.targets.shape[1])]
self.timestamp = np.arange(self.dataframe.shape[0])
def preprocess_data(self):
def gaussian_kernel(size: int, width: tuple = (-0.5, 0.5)) -> np.ndarray:
k_lin = np.linspace(width[0], width[1], size)
k = np.exp(-k_lin ** 2)
k /= np.sum(k)
return k
if self.smoothing_window > 1:
kernel = gaussian_kernel(self.smoothing_window)
for feature_id in range(self.features.shape[1]):
self.features[:, feature_id] = np.convolve(self.features[:, feature_id],
kernel, "same")
for target_id in range(self.targets.shape[1]):
self.targets[:, target_id] = np.convolve(self.targets[:, target_id],
kernel, "same")
self.prepare_feature_transformer()
self.prepare_target_transformer()
self.features = self.transform_features(self.features)
self.targets = self.transform_targets(self.targets)
@staticmethod
def create_supervised_data(features: np.ndarray, targets: np.ndarray,
look_back: int, look_front: int) -> tuple:
"""
Creates a supervised representation of the data, i.e. a three dimensional array with the following dimensions:
X.shape = (num_supervised_samples, look_back, num_features)
Y.shape = (num_supervised_samples, look_front, num_targets)
:param features: Numpy array (2D) of raw features (each row corresponds to one
single time measurement)
:param targets: Numpy array (2D) of raw targets (each row corresponds to one
single time measurement)
:param look_back: Number of steps to look back (memory) in the features set.
:param look_front: Number of steps to look front (predict) in the target set.
:return: A redundant supervised representation of the input data.
"""
X = [] # type: list
Y = [] # type: list
# Need 2-dimensional data as input.
assert (len(features.shape) == len(targets.shape) == 2)
num_samples = features.shape[0]
assert (num_samples == targets.shape[0])
# Move a window of size look_back over the features predicting a successive
# window of size look_front in the targets.
for i in range(num_samples - look_back - look_front):
X.append(features[i:i + look_back, :])
Y.append(targets[i + look_back:i + look_back + look_front, :])
# Vectorize the data.
X = np.array(X)
Y = np.array(Y)
# Handle the case where either the features or the targets are one dimensional
# (add a new dimension as the slices created in the sliding window are only one
# dimensional if there is a single dimension in the feature/target).
if len(X.shape) == 2:
X = X[:, :, np.newaxis]
if len(Y.shape) == 2:
Y = Y[:, :, np.newaxis]
return X, Y
@staticmethod
def supervised_target_to_sequential(supervised_data: np.ndarray,
look_front: int) -> np.ndarray:
sequential_target = np.zeros(shape=(supervised_data.shape[0] + look_front - 1))
for data_index, data in enumerate(supervised_data):
sequential_target[data_index:data_index + look_front] += np.squeeze(data)
# Adjust the weights in the head and tail of the predicted data (since there
# are a lower number of predictions there due to the overlapping windows).
for i in range(look_front):
sequential_target[i] = sequential_target[i] * (look_front / (i + 1.0))
sequential_target[-i - 1] = sequential_target[-i - 1] * (
look_front / (i + 1.0))
sequential_target = sequential_target / look_front
return sequential_target.reshape(-1, 1)
@staticmethod
def plot_dataframe(features: np.ndarray, targets: np.ndarray,
feature_names: list, target_names: list):
num_features = features.shape[1]
num_targets = targets.shape[1]
num_subplots = num_features + num_targets
num_cols = max(num_subplots // 5, 2)
num_rows = int(num_subplots // num_cols) + 1
plot_height = 2.5
plot_width = 4
fig_size = (plot_width * num_cols, plot_height * num_rows)
fig = plt.figure(figsize=fig_size)
gs = grid.GridSpec(num_rows, num_cols)
# Remove the ticks to allow more space in the plot.
ticks_params = {'labelbottom': 'off', 'labelleft': 'off'}
# Plot features
for ax_index, (feature, feature_name) in enumerate(
zip(features.T, feature_names)):
print("plotting - {}/{} - {}({})".format(ax_index + 1, len(feature_names),
feature_name, feature.shape))
ax = fig.add_subplot(gs[ax_index]) # type: plt.Axes
ax.tick_params(**ticks_params)
# Feature
ax.plot(feature)
ax.set_title(feature_name)
print("Plotting targets")
# Plot targets
for ax_index, (target, target_name) in enumerate(zip(targets.T, target_names)):
print("plotting - {}/{} - {}({})".format(ax_index + 1, len(feature_names),
target_name, target.shape))
ax = fig.add_subplot(gs[ax_index + num_features]) # type: plt.Axes
ax.tick_params(**ticks_params)
ax.plot(target, color="red")
ax.set_title(target_name)
fig.suptitle("Input dataset (blue: feature, red: target)")
gs.tight_layout(fig, rect=[0.01, 0, 0.99, 0.95])
plt.show()
    def prepare_feature_transformer(self):
        """Fit a fresh MinMaxScaler on the current feature columns."""
        self.feature_transformer = MinMaxScaler()
        self.feature_transformer.fit(self.features)
    def transform_features(self, features: np.ndarray) -> np.ndarray:
        """Scale features with the fitted feature transformer."""
        return self.feature_transformer.transform(features)
    def transform_features_back(self, transformed_features: np.ndarray) -> np.ndarray:
        """Invert the feature scaling back to raw units."""
        return self.feature_transformer.inverse_transform(transformed_features)
    def prepare_target_transformer(self):
        """Fit a fresh MinMaxScaler on the current target columns."""
        self.target_transformer = MinMaxScaler()
        self.target_transformer.fit(self.targets)
    def transform_targets(self, targets: np.ndarray) -> np.ndarray:
        """Scale targets with the fitted target transformer."""
        return self.target_transformer.transform(targets)
    def transform_target_back(self, transformed_targets: np.ndarray) -> np.ndarray:
        """Invert the target scaling back to raw units."""
        return self.target_transformer.inverse_transform(transformed_targets)
    @property
    def features(self) -> np.ndarray:
        # Advanced indexing with a list of column indices returns a *copy*:
        # mutating the returned array does not affect self.dataframe.
        # Write back via the setter instead.
        return np.atleast_2d(self.dataframe[:, self.feature_indices])
    @features.setter
    def features(self, f: np.ndarray):
        # Shape must match exactly; writes into the underlying dataframe.
        assert (f.shape == self.features.shape)
        self.dataframe[:, self.feature_indices] = f
    @property
    def targets(self) -> np.ndarray:
        # Copy semantics as for ``features``. A single-row result is
        # transposed so targets are always returned as columns.
        t = np.atleast_2d(self.dataframe[:, self.target_indices])
        if t.shape[0] == 1:
            t = t.T
        return t
    @targets.setter
    def targets(self, t: np.ndarray):
        t = np.atleast_2d(t)
        assert (t.shape == self.targets.shape)
        self.dataframe[:, self.target_indices] = t
@staticmethod
def train_test_split(features: np.ndarray, targets: np.ndarray,
train_fraction: float) -> tuple:
assert (features.shape[0] == targets.shape[0])
num_samples = features.shape[0]
num_train_samples = int(np.round(train_fraction * num_samples))
# Keep the first num_train_samples as training samples.
train_x = features[:num_train_samples]
train_y = targets[:num_train_samples]
# The remaining samples for testing.
test_x = features[num_train_samples:]
test_y = targets[num_train_samples:]
return train_x, train_y, test_x, test_y
| [
"numpy.convolve",
"numpy.random.rand",
"pandas.read_csv",
"numpy.array",
"matplotlib.pylab.show",
"keras.layers.Activation",
"keras.layers.Dense",
"numpy.arange",
"numpy.atleast_2d",
"matplotlib.pylab.figure",
"matplotlib.pylab.legend",
"numpy.exp",
"keras.layers.LSTM",
"numpy.linspace",
... | [((1093, 1113), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1102, 1113), False, 'import json\n'), ((3962, 3974), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3972, 3974), False, 'from keras.models import Sequential\n'), ((4789, 4837), 'matplotlib.pylab.plot', 'plt.plot', (["history.history['loss']"], {'label': '"""train"""'}), "(history.history['loss'], label='train')\n", (4797, 4837), True, 'from matplotlib import pylab as plt\n'), ((4846, 4897), 'matplotlib.pylab.plot', 'plt.plot', (["history.history['val_loss']"], {'label': '"""test"""'}), "(history.history['val_loss'], label='test')\n", (4854, 4897), True, 'from matplotlib import pylab as plt\n'), ((4906, 4918), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (4916, 4918), True, 'from matplotlib import pylab as plt\n'), ((4927, 4937), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (4935, 4937), True, 'from matplotlib import pylab as plt\n'), ((5989, 6040), 'matplotlib.pylab.plot', 'plt.plot', (['inv_yhat'], {'label': '"""Prediction"""', 'linewidth': '(2)'}), "(inv_yhat, label='Prediction', linewidth=2)\n", (5997, 6040), True, 'from matplotlib import pylab as plt\n'), ((6049, 6099), 'matplotlib.pylab.plot', 'plt.plot', (['inv_y'], {'label': '"""ground truth"""', 'linewidth': '(2)'}), "(inv_y, label='ground truth', linewidth=2)\n", (6057, 6099), True, 'from matplotlib import pylab as plt\n'), ((6319, 6331), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (6329, 6331), True, 'from matplotlib import pylab as plt\n'), ((6340, 6350), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (6348, 6350), True, 'from matplotlib import pylab as plt\n'), ((6466, 6498), 'os.path.join', 'os.path.join', (['cur_dir', '"""dataset"""'], {}), "(cur_dir, 'dataset')\n", (6478, 6498), False, 'import os\n'), ((6529, 6573), 'os.path.join', 'os.path.join', (['dataset_dir', '"""data_clean.json"""'], {}), "(dataset_dir, 'data_clean.json')\n", (6541, 6573), False, 
'import os\n'), ((6669, 6712), 'os.path.join', 'os.path.join', (['dataset_dir', '"""data_clean.csv"""'], {}), "(dataset_dir, 'data_clean.csv')\n", (6681, 6712), False, 'import os\n'), ((6738, 6857), 'pandas.read_csv', 'pandas.read_csv', (['dataset_file_name'], {'delimiter': '""","""', 'index_col': '(False)', 'usecols': '[meta.col_index for meta in meta_data]'}), "(dataset_file_name, delimiter=',', index_col=False, usecols=\n [meta.col_index for meta in meta_data])\n", (6753, 6857), False, 'import pandas\n'), ((8775, 8808), 'numpy.linspace', 'np.linspace', (['(0)', '(150 * np.pi)', '(2500)'], {}), '(0, 150 * np.pi, 2500)\n', (8786, 8808), True, 'import numpy as np\n'), ((9501, 9544), 'numpy.concatenate', 'np.concatenate', (['(features, targets)'], {'axis': '(1)'}), '((features, targets), axis=1)\n', (9515, 9544), True, 'import numpy as np\n'), ((9788, 9822), 'numpy.arange', 'np.arange', (['self.dataframe.shape[0]'], {}), '(self.dataframe.shape[0])\n', (9797, 9822), True, 'import numpy as np\n'), ((12385, 12396), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (12393, 12396), True, 'import numpy as np\n'), ((12409, 12420), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (12417, 12420), True, 'import numpy as np\n'), ((13016, 13073), 'numpy.zeros', 'np.zeros', ([], {'shape': '(supervised_data.shape[0] + look_front - 1)'}), '(shape=supervised_data.shape[0] + look_front - 1)\n', (13024, 13073), True, 'import numpy as np\n'), ((14239, 14267), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': 'fig_size'}), '(figsize=fig_size)\n', (14249, 14267), True, 'from matplotlib import pylab as plt\n'), ((14281, 14314), 'matplotlib.gridspec.GridSpec', 'grid.GridSpec', (['num_rows', 'num_cols'], {}), '(num_rows, num_cols)\n', (14294, 14314), True, 'from matplotlib import gridspec as grid\n'), ((15594, 15604), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (15602, 15604), True, 'from matplotlib import pylab as plt\n'), ((15684, 15698), 
'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (15696, 15698), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((16127, 16141), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (16139, 16141), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((16550, 16604), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dataframe[:, self.feature_indices]'], {}), '(self.dataframe[:, self.feature_indices])\n', (16563, 16604), True, 'import numpy as np\n'), ((16830, 16883), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dataframe[:, self.target_indices]'], {}), '(self.dataframe[:, self.target_indices])\n', (16843, 16883), True, 'import numpy as np\n'), ((17020, 17036), 'numpy.atleast_2d', 'np.atleast_2d', (['t'], {}), '(t)\n', (17033, 17036), True, 'import numpy as np\n'), ((3998, 4106), 'keras.layers.LSTM', 'LSTM', ([], {'units': 'self.units[0]', 'input_shape': '(self.look_back, self.features.shape[1])', 'return_sequences': '(False)'}), '(units=self.units[0], input_shape=(self.look_back, self.features.shape[\n 1]), return_sequences=False)\n', (4002, 4106), False, 'from keras.layers import LSTM, Dense, RepeatVector, TimeDistributed, Activation\n'), ((4183, 4212), 'keras.layers.RepeatVector', 'RepeatVector', (['self.look_front'], {}), '(self.look_front)\n', (4195, 4212), False, 'from keras.layers import LSTM, Dense, RepeatVector, TimeDistributed, Activation\n'), ((4237, 4285), 'keras.layers.LSTM', 'LSTM', ([], {'units': 'self.units[1]', 'return_sequences': '(True)'}), '(units=self.units[1], return_sequences=True)\n', (4241, 4285), False, 'from keras.layers import LSTM, Dense, RepeatVector, TimeDistributed, Activation\n'), ((4380, 4400), 'keras.layers.Activation', 'Activation', (['"""linear"""'], {}), "('linear')\n", (4390, 4400), False, 'from keras.layers import LSTM, Dense, RepeatVector, TimeDistributed, Activation\n'), ((5903, 5938), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['inv_y', 'inv_yhat'], 
{}), '(inv_y, inv_yhat)\n', (5921, 5938), False, 'from sklearn.metrics import mean_squared_error\n'), ((6416, 6442), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (6432, 6442), False, 'import os\n'), ((9181, 9199), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (9189, 9199), True, 'import numpy as np\n'), ((9282, 9309), 'numpy.zeros', 'np.zeros', (['features.shape[0]'], {}), '(features.shape[0])\n', (9290, 9309), True, 'import numpy as np\n'), ((9455, 9472), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (9463, 9472), True, 'import numpy as np\n'), ((9957, 9994), 'numpy.linspace', 'np.linspace', (['width[0]', 'width[1]', 'size'], {}), '(width[0], width[1], size)\n', (9968, 9994), True, 'import numpy as np\n'), ((10011, 10030), 'numpy.exp', 'np.exp', (['(-k_lin ** 2)'], {}), '(-k_lin ** 2)\n', (10017, 10030), True, 'import numpy as np\n'), ((10048, 10057), 'numpy.sum', 'np.sum', (['k'], {}), '(k)\n', (10054, 10057), True, 'import numpy as np\n'), ((13205, 13221), 'numpy.squeeze', 'np.squeeze', (['data'], {}), '(data)\n', (13215, 13221), True, 'import numpy as np\n'), ((17409, 17447), 'numpy.round', 'np.round', (['(train_fraction * num_samples)'], {}), '(train_fraction * num_samples)\n', (17417, 17447), True, 'import numpy as np\n'), ((4326, 4354), 'keras.layers.Dense', 'Dense', (['self.targets.shape[1]'], {}), '(self.targets.shape[1])\n', (4331, 4354), False, 'from keras.layers import LSTM, Dense, RepeatVector, TimeDistributed, Activation\n'), ((8979, 8995), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8993, 8995), True, 'import numpy as np\n'), ((10286, 10343), 'numpy.convolve', 'np.convolve', (['self.features[:, feature_id]', 'kernel', '"""same"""'], {}), "(self.features[:, feature_id], kernel, 'same')\n", (10297, 10343), True, 'import numpy as np\n'), ((10507, 10562), 'numpy.convolve', 'np.convolve', (['self.targets[:, target_id]', 'kernel', '"""same"""'], {}), "(self.targets[:, target_id], 
kernel, 'same')\n", (10518, 10562), True, 'import numpy as np\n'), ((8998, 9014), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9012, 9014), True, 'import numpy as np\n'), ((9375, 9391), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9389, 9391), True, 'import numpy as np\n'), ((6256, 6282), 'numpy.arange', 'np.arange', (['self.look_front'], {}), '(self.look_front)\n', (6265, 6282), True, 'import numpy as np\n'), ((9017, 9044), 'numpy.random.choice', 'np.random.choice', (['functions'], {}), '(functions)\n', (9033, 9044), True, 'import numpy as np\n'), ((9094, 9110), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9108, 9110), True, 'import numpy as np\n'), ((9062, 9078), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9076, 9078), True, 'import numpy as np\n')] |
import pylab as plt
import matplotlib
import numpy as np
import pylab as plt
import matplotlib
from itertools import product
import numpy as np
import pandas as pd
import os
from sklearn.metrics.pairwise import pairwise_distances
from matplotlib.ticker import ScalarFormatter, FuncFormatter
# Use the 'bmh' style sheet as the plotting baseline.
matplotlib.style.use('bmh')
# (linestyle, marker) pairs, one per curve; curve i uses markers[i].
markers = [("-", "o"), ("-", "p"), ("-", "D"), ("-", "^"), ("-", "s"),
           ("-", "8"), ("-", "o"), ("-", "o"), ("-", "o"), ("-", "o"),
           ("-", "o"), ("-", "o")]
# One color per curve, aligned with ``markers`` by index.
colors = ['#741111', "#000000", '#3a49ba','#7634c9',
          "#4C9950", "#CC29A3", '#ba3a3a', "#0f7265",
          "#7A7841", "#00C5CD", "#6e26d9"]
# Highlight color used for curves detected as overlapping earlier ones.
bright_colors = ["#00C5CD"]
def myticks(x,pos):
    """Matplotlib tick formatter: render ``x`` in LaTeX scientific notation
    (mantissa with one decimal, exponent width two). Zero renders as "$0$".
    """
    if x == 0:
        return "$0$"
    exponent = int(np.log10(x))
    mantissa = x / 10 ** exponent
    return r"${:0.1f} \times 10^{{ {:2d} }}$".format(mantissa, exponent)
def myticks_new(x,pos, exponent=1e5):
    """Tick formatter variant returning the mantissa/exponent ratio.

    Zero renders as "$0$". The ``exponent`` keyword is kept for interface
    compatibility but is immediately overwritten (as in the original).
    """
    if x == 0: return "$0$"
    exponent = int(np.log10(x))
    coeff = x/10**exponent
    # BUG FIX: the original used "{:0s}".format(coeff/exponent), which raises
    # ValueError because the 's' format code is invalid for floats. Use a
    # general-purpose numeric format instead.
    return r"${:g}$".format(coeff / exponent)
class FixedOrderFormatter(ScalarFormatter):
    """Formats axis ticks using scientific notation with a constant order of
    magnitude, instead of letting matplotlib pick one per view."""
    def __init__(self, order_of_mag=0, useOffset=True, useMathText=False):
        # order_of_mag: the fixed power of ten applied to every tick label.
        self._order_of_mag = order_of_mag
        ScalarFormatter.__init__(self, useOffset=useOffset,
                                 useMathText=useMathText)
    def _set_orderOfMagnitude(self, range):
        """Over-riding this to avoid having orderOfMagnitude reset elsewhere"""
        self.orderOfMagnitude = self._order_of_mag
class PrettyPlot:
    """Multi-curve line plotter with labels drawn directly on the curves.

    Curves are queued with ``add_yxList`` and rendered with ``plot``; each
    ``plot`` call consumes the queued state, draws on the next subplot in the
    grid, and advances ``pIndex``. ``plot_old`` is an older variant kept in
    the file.
    """
    def __init__(self, title=None, ylabel=None, xlabel=None,
                 fontsize=14, line_width=2.5, markersize=12,
                 ratio=1.0,axFontSize=18,
                 figsize=(13, 10), legend_type="line",
                 yscale="log", subplots=(1,1),
                 shareRowLabel=True, axTickSize=14):
        # ``ratio`` uniformly scales every font/line/marker size.
        self.axTickSize = int(axTickSize * ratio)
        self.fontsize = int(fontsize * ratio)
        self.shareRowLabel = shareRowLabel
        self.lim_set = False
        self.ylim = None
        self.legend_type = legend_type
        self.yscale = yscale
        self.line_width = int(line_width * ratio)
        self.markersize = int(markersize * ratio)
        self.axFontSize = int(axFontSize * ratio)
        self.ratio = ratio
        if self.yscale=="log":
            plt.yscale("log")
            #ax.set_yscale('logit')
        # Per-curve accumulators consumed (and reset) by ``plot``.
        self.labels = []
        self.y_list = []
        self.x_list = []
        self.converged = []
        fig = plt.figure(figsize=figsize)
        if title is not None:
            fig.suptitle(title, fontsize=self.axFontSize)
        self.fig = fig
        subplots = list(subplots)
        self.nrows = subplots[0]
        self.ncols = subplots[1]
        # 1-based index of the next subplot to draw into.
        self.pIndex = 1
        self.axList = []
    def subplot():
        # NOTE(review): placeholder — missing ``self`` and any body.
        pass
    def add_yxList(self, y_vals, x_vals, label, converged = False):
        """Queue one curve (y vs. x), its label, and an optional convergence
        point dict for the next ``plot`` call."""
        if isinstance(y_vals, list):
            y_vals = np.array(y_vals)
        if isinstance(x_vals, list):
            x_vals = np.array(x_vals)
        self.y_list += [y_vals]
        self.x_list += [x_vals]
        self.labels += [label]
        self.converged += [converged]
    def show(self):
        """Display the figure window."""
        plt.show()
    def save(self, path, iformat="png"):
        """Save the figure as ``path``.``iformat``, creating parent dirs."""
        create_dirs(path)
        fname = path + ".%s" % iformat
        self.fig.savefig(fname, bbox_inches='tight')
        print(("Figure saved in %s" % (fname)))
    def plot_DataFrame(self, results):
        """Plot every DataFrame column as a curve (column names as labels)."""
        n_points, n_labels = results.shape
        x_vals = np.arange(n_points)
        labels = results.columns
        y_array = np.array(results)
        y_list = []
        x_list = []
        for j in range(n_labels):
            x_list += [x_vals]
            y_list += [y_array[:, j]]
        self.plot(y_list, x_list, labels)
    def set_lim(self, ylim, xlim):
        """Fix axis limits explicitly (disables auto-ranging in ``plot``)."""
        # NOTE(review): ``self.ax`` is never assigned by this class (only
        # ``axList`` is) — confirm against callers.
        self.lim_set = True
        self.ylim = ylim
        self.ax.set_ylim(ylim)
        self.ax.set_xlim(xlim)
    def set_tickSize(self, labelsize=8):
        """Set the tick-label font size on both axes of ``self.ax``."""
        [tick.label.set_fontsize(labelsize) for tick in self.ax.yaxis.get_major_ticks()]
        [tick.label.set_fontsize(labelsize) for tick in self.ax.xaxis.get_major_ticks()]
    def set_title(self, title):
        """Set the figure-level title."""
        self.fig.suptitle(title, fontsize=self.axFontSize, y=1.08)
    def plot(self, y_list=None, x_list=None, labels=None, ax=None,
             ylabel="", xlabel="", yscale=False):
        """Render the given (or previously queued) curves on the next subplot.

        With ``legend_type == "line"`` each label is drawn in a box on its
        curve, rotated to follow the local slope; otherwise a standard legend
        is used. Queued state is cleared and ``pIndex`` advanced afterwards.
        Returns (fig, ax).
        """
        fig = self.fig
        if y_list == None and x_list == None:
            y_list = self.y_list
            x_list = self.x_list
        if yscale == "log":
            # Make sure everything is non-negative
            # for yi in y_list:
            #     assert np.all(yi >= 0)
            # Set zeros to eps
            for i in range(len(y_list)):
                y_list[i] = np.maximum(y_list[i], np.finfo(float).eps)
        # Set zeros to eps
        for i in range(len(y_list)):
            opt_ind = np.where(y_list[i] == np.finfo(float).eps)[0]
            if opt_ind.size > 0:
                # Truncate each curve right after it first hits eps.
                opt_ind = opt_ind[0]
                y_list[i] = y_list[i][:opt_ind+1]
                x_list[i] = x_list[i][:opt_ind+1]
        n_labels = len(y_list)
        if ax is None:
            ax = self.fig.add_subplot(self.nrows,
                                      self.ncols, self.pIndex)
            ax.set_facecolor('white')
        ax.set_yscale("log", nonposy='clip')
        if labels is None and self.labels is None:
            labels = list(map(str, np.arange(n_labels)))
        elif labels is None:
            labels = self.labels
        # Convergence markers also act as repulsion points for label layout.
        ref_points = []
        for i in range(len(self.converged)):
            if self.converged[i] is not None:
                ref_points += [[self.converged[i]["X"],
                                self.converged[i]["Y"]]]
        label_positions, label_indices = get_labelPositions(y_list,
                                                           x_list,
                                                           self.ylim,
                                                           labels=labels,
                                                           ref_points=np.array(ref_points))
        ls_markers = markers
        if not self.lim_set:
            y_min, y_max = get_min_max(y_list)
            x_min, x_max = get_min_max(x_list)
            #y_min = max(y_min, 1e-8)
            ax.set_ylim([y_min, y_max])
            ax.set_xlim([x_min, x_max])
        for i in range(n_labels):
            color = colors[i]
            ls, marker = ls_markers[i]
            y_vals = y_list[i]
            x_vals = x_list[i]
            n_points = len(y_vals)
            label = labels[i]
            # Show roughly log(n) markers along each curve.
            markerFreq = n_points / (int(np.log(n_points)) + 1)
            ## SCATTER PLOT OPTIMAL
            # ind_opt = np.where(y_vals == np.finfo(float).eps)[0]
            # if ind_opt.size > 0:
            #     x_opt = x_vals[np.where(y_vals == np.finfo(float).eps)[0][0]]
            #     y_opt = np.finfo(float).eps
            if self.converged[i] is not None:
                # Star marker at the recorded convergence point.
                ax.scatter(self.converged[i]["X"],
                           self.converged[i]["Y"], s=300, marker="*", color=color, clip_on=False, zorder=100)
            ##
            line, = ax.plot(x_vals, y_vals, markevery=int(markerFreq),
                            markersize=int(self.markersize), color=color,
                            lw=self.line_width, alpha=1.0,
                            label=label, ls=ls, marker=marker)
            if self.legend_type == "line":
                x_point, y_point = label_positions[i]
                angle = get_label_angle(x_vals, y_vals, label_indices[i], ax, color='0.5', size=12)
                box = dict(facecolor="white",
                           edgecolor=color, linestyle=ls,
                           #hatch=marker,
                           linewidth=int(2*self.ratio), boxstyle="round")
                ax.text(x_point , y_point, label, va='center',ha='center',
                        rotation=angle,
                        color=color,
                        bbox=box,
                        fontsize=self.fontsize)
            else:
                plt.legend(loc="best")
        if self.shareRowLabel and (((self.pIndex-1) % (self.ncols)) == 0):
            ax.set_ylabel(ylabel, fontsize=self.axFontSize)
        if not self.shareRowLabel:
            ax.set_ylabel(ylabel, fontsize=self.axFontSize)
        ax.set_xlabel(xlabel, fontsize=self.axFontSize)
        ax.tick_params(labelsize=self.axTickSize)
        ax.tick_params(axis='y', labelsize=int(self.axTickSize*1.5))
        self.y_list = []
        self.x_list = []
        self.labels = []
        self.converged = []
        self.pIndex += 1
        self.axList += [ax]
        ax.minorticks_off()
        # NOTE(review): y_min/y_max are only bound in the ``not self.lim_set``
        # branch above; with set_lim() applied this line raises NameError.
        vals = np.logspace(np.log10(y_min),np.log10(y_max), 5)
        ax.set_yticks(vals)
        ax.yaxis.set_major_formatter(FuncFormatter(myticks))
        return fig, ax
    def plot_old(self, y_list=None, x_list=None, labels=None, ax=None,
                 ylabel="", xlabel="", yscale=False):
        """Older variant of ``plot`` (no convergence markers / eps handling);
        kept for reference. Returns (fig, ax)."""
        if y_list == None and x_list == None:
            y_list = self.y_list
            x_list = self.x_list
        n_labels = len(y_list)
        if ax is None:
            ax = self.fig.add_subplot(self.nrows,
                                      self.ncols, self.pIndex)
            ax.set_facecolor('white')
        if yscale == "log":
            #pFunc = ax.semilogy
            pFunc = ax.plot
            #plt.yscale('log')
        else:
            pFunc = ax.plot
        ax.set_yscale("log", nonposy='clip')
        if labels is None and self.labels is None:
            labels = list(map(str, np.arange(n_labels)))
        elif labels is None:
            labels = self.labels
        fig = self.fig
        label_positions, label_indices = get_labelPositions(y_list,
                                                           x_list,
                                                           self.ylim,
                                                           scale=yscale)
        ls_markers = markers
        if not self.lim_set:
            y_min, y_max = get_min_max(y_list)
            x_min, x_max = get_min_max(x_list)
            ax.set_ylim([y_min, y_max])
            ax.set_xlim([x_min, x_max])
        for i in range(n_labels):
            color = colors[i]
            ls, marker = ls_markers[i]
            y_vals = y_list[i]
            x_vals = x_list[i]
            n_points = len(y_vals)
            label = labels[i]
            # if i > 0:
            #     percentage = get_overlapPercentage(i, y_list)
            #     if percentage > 0.6:
            #         ls = "--"
            #         color = bright_colors[0]
            markerFreq = n_points / (int(np.log(n_points)) + 1)
            #
            #ax.spines['left']._adjust_location()
            # if self.yscale == "log":
            #     line, = ax.semilogy(x_vals, y_vals, markevery=markerFreq,
            #                         markersize=12, color=color, lw=lw, alpha=0.9,
            #                         label=label, ls=ls, marker=marker)
            # else:
            line, = pFunc(x_vals, y_vals, markevery=markerFreq,
                          markersize=self.markersize, color=color,
                          lw=self.line_width, alpha=0.9,
                          label=label, ls=ls, marker=marker)
            if self.legend_type == "line":
                x_point, y_point = label_positions[i]
                angle = get_label_angle(x_vals, y_vals, label_indices[i], ax, color='0.5', size=12)
                box = dict(facecolor="white",
                           edgecolor=color, linestyle=ls,
                           #hatch=marker,
                           linewidth=int(2*self.ratio), boxstyle="round")
                ax.text(x_point , y_point, label, va='center',ha='center',
                        rotation=angle,
                        color=color,
                        bbox=box,
                        fontsize=self.fontsize)
            else:
                plt.legend(loc="best")
        if self.shareRowLabel and (((self.pIndex-1) % (self.ncols)) == 0):
            ax.set_ylabel(ylabel, fontsize=self.axFontSize)
        if not self.shareRowLabel:
            ax.set_ylabel(ylabel, fontsize=self.axFontSize)
        fmt= matplotlib.ticker.ScalarFormatter(useOffset=True)
        fmt.set_scientific(True)
        #ax.yaxis.set_major_formatter(fmt)
        ax.set_xlabel(xlabel, fontsize=self.axFontSize)
        ax.tick_params(labelsize=self.axTickSize)
        ax.tick_params(axis='y', labelsize=int(self.axTickSize*1.5))
        self.y_list = []
        self.x_list = []
        self.labels = []
        self.pIndex += 1
        self.axList += [ax]
        #ax.tick_params(axis='y', which='minor')
        #ax.locator_params(axis='y', numticks=5)
        # y_minor_ticks = ax.yaxis.get_minor_ticks()
        # y_minor_ticks[0].label.set_visible(False)
        # y_minor_ticks = ax.yaxis.get_major_ticks()
        # y_minor_ticks[0].label.set_visible(False)
        #
        # y_formatter = ticker.ScalarFormatter(useOffset=True)
        # ax.yaxis.set_major_formatter(y_formatter)
        #
        ax.minorticks_off()
        # NOTE(review): as in ``plot``, y_min/y_max are unbound if lim_set.
        vals = np.logspace(np.log10(y_min),np.log10(y_max), 5)
        #vals = np.linspace(y_min,y_max, 5)
        ax.set_yticks(vals)
        ax.yaxis.set_major_formatter(FuncFormatter(myticks))
        #ax.yaxis.set_major_formatter(FixedOrderFormatter(5))
        # __vals = np.unique(10**(np.floor(np.log10(np.logspace(np.log10(y_min),
        #                    np.log10(y_max), num=10)))))
        # v = __vals[0]
        # powers10 = [v]
        # while v < __vals[-1]:
        #     v = v * 10
        #     powers10 += [v]
        # powers10 += [__vals[-1]]
        # powers10 = np.unique(powers10)
        # if len(powers10) <= 3:
        #     ax.set_yticks(np.linspace(y_min, y_max, num=5))
        #     #ax.set_yticks([3.1*10**3], minor=False)
        #     ax.yaxis.set_major_formatter(ticker.FuncFormatter(myticks))
        # else:
        #     ax.set_yticks(powers10)
        #     #ax.yaxis.set_major_locator(MaxNLocator(nbins=9))
        #     #ax.get_yaxis().get_major_formatter().labelOnlyBase = False
        #     #ax.get_xaxis().get_major_formatter().set_useOffset(False)
        #     #ax.get_xaxis().get_major_formatter().set_scientific(False)
        return fig, ax
def plot_csv(results, fig, ax, line_width=2.5):
    """Plot every column of a DataFrame as a styled curve on ``ax`` and
    attach a legend above the axes.

    BUG FIX: the original referenced ``self.line_width`` inside this
    module-level function (NameError on every call); the width is now the
    backward-compatible keyword parameter ``line_width``. ``markevery`` is
    also cast to int, since the true division yields a float.

    :param results: pandas DataFrame, one curve per column.
    :param fig: the matplotlib figure (returned unchanged).
    :param ax: the axes to draw into.
    :param line_width: line width for all curves.
    :return: tuple (fig, ax).
    """
    for rank, column in enumerate(results.columns):
        color = colors[2*rank]
        ls, marker = markers[rank]
        n_points = results.shape[0]
        # Show roughly log(n) markers along each curve.
        freq = n_points / (int(np.log(n_points)) + 1)
        ax.plot(results[column], markevery=int(freq),
                markersize=8,
                color=color, lw=line_width, label=column, ls=ls, marker=marker)
    ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, prop={'size':10},
              ncol=2, mode="expand", borderaxespad=0.,fancybox=True, shadow=True)
    #plt.tight_layout(pad=7)
    plt.tight_layout(pad=0)
    return fig, ax
#########
# HELPERS
#########
def get_overlapPercentage(index, y_list):
    """Return the fraction of points where curve ``index`` coincides (after
    per-curve L2 normalization) with any of the earlier curves in ``y_list``.

    Curves are truncated to the shortest length among curves 0..index.
    """
    n_points = min(y_list[i].size for i in range(index + 1))
    current = y_list[index][:n_points, np.newaxis]
    earlier = np.zeros((n_points, index))
    for col in range(index):
        earlier[:, col] = y_list[col][:n_points]
    # Normalize each column; the epsilon guards against zero-norm curves.
    earlier /= (np.linalg.norm(earlier, axis=0) + 1e-10)
    current_normed = current / np.linalg.norm(current, axis=0)
    # For each point, the distance to the closest earlier curve.
    closest = np.abs((earlier - current_normed)).min(axis=1)
    overlapping = np.sum(closest < 1e-6)
    return overlapping / float(n_points)
def create_dirs(fname):
    """Create the directory portion of ``fname`` if it does not exist yet.

    Paths without a directory component are left untouched. Uses
    ``exist_ok=True`` instead of the original check-then-create + bare
    ``except OSError`` pattern, which both raced and silently swallowed
    genuine errors (e.g. permission failures).
    """
    dirname = os.path.dirname(fname)
    if not dirname:
        return
    os.makedirs(dirname, exist_ok=True)
def normalize(xy_points, ref_points, y_min, y_max, x_min, x_max):
    """Map candidate and reference points into the unit square, taking
    log10 of the y coordinates (clamped at 1e-15).

    NOTE: the y columns of ``xy_points`` and ``ref_points`` are modified in
    place; callers that need the originals should pass copies.
    """
    ln10 = np.log(10)
    xy_points[:, 1] = np.log(np.maximum(1e-15, xy_points[:, 1])) / ln10
    ref_points[:, 1] = np.log(np.maximum(ref_points[:, 1], 1e-15)) / ln10
    y_lo = np.log(y_min) / ln10
    y_hi = np.log(y_max) / ln10
    origin = np.array([x_min, y_lo])
    extent = np.array([x_max - x_min, y_hi - y_lo])
    xy_normed = (xy_points - origin) / extent
    ref_normed = (ref_points - origin) / extent
    return xy_normed, ref_normed
# LABEL POSITIONS
def get_labelPositions(y_list, x_list, ylim=None, labels=None, ref_points=None):
    """Greedily choose, per curve, the on-curve point farthest (L1, in the
    normalized log-y unit square) from plot borders, curve endpoints,
    reference points, and previously placed labels.

    Returns (label_positions, label_indices): the (x, y) position and the
    index into the curve selected for each label.
    """
    if ref_points is None:
        ref_points = []
    """Get label positions greedily"""
    n_labels = len(y_list)
    # GET BORDER POINTS
    x_min, x_max = get_min_max(x_list)
    if ylim is not None:
        y_min, y_max = ylim
    else:
        y_min, y_max = get_min_max(y_list)
    xdiff = (x_max - x_min)
    ydiff = (y_max - y_min)
    # Border points
    # NOTE(review): bp1 is assigned three times in a row; only the last
    # value — the single point (8, 0) — actually takes effect.
    bp1 = np.array(list(product([x_min, x_max, xdiff * 0.5],
                                [y_min, y_max, ydiff * 0.5])))[:-1]
    bp1 = np.array(list(product([x_max],
                                [y_max])))[:-1]
    bp1 = np.array(list(product([8],
                                [0])))
    # Repulsion anchors along the left and right plot edges.
    addedPoints = []
    for yPoint in np.linspace(y_min, y_max, 6):
        addedPoints += [(x_min,yPoint)]
        addedPoints += [(x_max,yPoint)]
    # for xx, yy in zip(x_list, y_list):
    #     for x, y in zip(xx, yy):
    #         if (abs(x - x_min) / xdiff) < 0.0005:
    #             addedPoints += [(x, y)]
    #         elif (abs(x - x_max) / xdiff) < 0.0005:
    #             addedPoints += [(x, y)]
    #         elif (abs(y - y_max) / ydiff) < 0.0005:
    #             addedPoints += [(x, y)]
    #         elif (abs(y - y_min) / ydiff) < 0.0005:
    #             addedPoints += [(x, y)]
    # Start and end point of every curve also repel labels.
    sPoints = [(xx[0], yy[0]) for xx, yy in zip(x_list, y_list)]
    ePoints = [(xx[-1], yy[-1]) for xx, yy in zip(x_list, y_list)]
    bp2 = np.array(addedPoints + sPoints + ePoints)
    if len(ref_points) == 0:
        border_points = np.vstack([bp1, bp2])
    else:
        border_points = np.vstack([bp1, bp2, ref_points])
    n_border = border_points.shape[0]
    # Initialize placeholders
    # ref_points is rebound: rows [0, n_border) are the fixed anchors, and
    # each placed label is appended so later labels avoid earlier ones.
    ref_points = np.zeros((n_border + n_labels, 2))
    label_positions = np.zeros((n_labels, 2))
    label_indices = np.zeros(n_labels, int)
    ref_points[:n_border] = border_points
    for i in range(n_labels):
        # GET POSITIONS
        if ylim is not None:
            # Only consider curve points inside the visible y-range.
            ind = (y_list[i]<y_max+1e-4) & (y_list[i]>y_min-1e-4)
            n_points = x_list[i][ind].size
            xy_points = np.zeros((n_points, 2))
            xy_points[:, 0] = x_list[i][ind]
            xy_points[:, 1] = y_list[i][ind]
        else:
            n_points = x_list[i].size
            xy_points = np.zeros((n_points, 2))
            xy_points[:, 0] = x_list[i]
            xy_points[:, 1] = y_list[i]
        # NORMALIZE
        # Copies are passed because normalize() mutates its inputs.
        xy_normed, ref_normed = normalize(xy_points.copy(),
            ref_points[:n_border+i].copy(), y_min, y_max, x_min, x_max)
        # GET REF POINTS
        dist = pairwise_distances(xy_normed, ref_normed,
                                  metric="l1")
        # elif scale == "log":
        #     xy_copy = xy_normed.copy()
        #     ref_copy = ref_normed.copy()
        #     xy_copy[:, 1] = 10**(xy_normed[:, 1])
        #     ref_copy[:, 1] = 10**(ref_normed[:, 1])
        #     dist = pairwise_distances(xy_copy, ref_copy,
        #                               metric="l1")
        # GET MINIMUM DISTANCES
        min_dist = dist.min(axis=1)
        # GET MAXIMUM MINIMUM DISTANCE
        label_index = np.argmax(min_dist)
        label_pos = xy_points[label_index]
        ref_points[n_border + i] = label_pos
        label_positions[i] = label_pos
        label_indices[i] = label_index
    return label_positions, label_indices
def get_min_max(v_list):
    """Return the global (min, max) over every array in ``v_list``."""
    v_min, v_max = np.min(v_list[0]), np.max(v_list[0])
    for vec in v_list[1:]:
        v_min = min(v_min, np.min(vec))
        v_max = max(v_max, np.max(vec))
    return v_min, v_max
def get_label_angle(xdata, ydata, index, ax, color='0.5', size=12, window=3):
    """Average slope (in degrees, display coordinates) of the curve around
    ``index``, used to rotate the on-curve label box.

    ``color`` and ``size`` are accepted but unused here.
    NOTE(review): if both neighbor windows are empty (e.g. a single-point
    curve), ``count`` stays 0 and the final division raises ZeroDivisionError.
    """
    n_points = xdata.size
    x1 = xdata[index]
    y1 = ydata[index]
    #ax = line.get_axes()
    # Work in display (pixel) coordinates so the angle matches the screen.
    sp1 = ax.transData.transform_point((x1, y1))
    slope_degrees = 0.
    count= 0.
    # Slopes towards the following points...
    for i in range(index+1, min(index+window, n_points)):
        y2 = ydata[i]
        x2 = xdata[i]
        sp2 = ax.transData.transform_point((x2, y2))
        rise = (sp2[1] - sp1[1])
        run = (sp2[0] - sp1[0])
        slope_degrees += np.degrees(np.arctan2(rise, run))
        count += 1.
    # ...and towards the preceding points (signs flipped so the direction
    # matches the forward slopes).
    for i in range(index-1, max(index-window, 0), -1):
        y2 = ydata[i]
        x2 = xdata[i]
        sp2 = ax.transData.transform_point((x2, y2))
        rise = - (sp2[1] - sp1[1])
        run = -(sp2[0] - sp1[0])
        slope_degrees += np.degrees(np.arctan2(rise, run))
        count += 1.
    slope_degrees /= count
    return slope_degrees
def box_color(edgecolor, linestyle, marker):
    """Build a matplotlib bbox style dict for annotation boxes.

    ``marker`` is accepted for call-site compatibility but currently
    unused (a hatch option was disabled upstream).
    """
    style = {
        "facecolor": "white",
        "edgecolor": edgecolor,
        "linestyle": linestyle,
        "linewidth": 2,
        "boxstyle": "round",
    }
    return style
# def get_pairwise_distances(A, B):
# # GET EUCLIDEAN DISTANCES
# n_A = A.shape[0]
# n_B = B.shape[0]
# A_square = np.dot(A ** 2, np.ones((2, n_B)))
# B_square = np.dot(np.ones((n_A, 2)), B.T) ** 2
# dist = A_square + B_square - 2 * np.dot(A, B.T)
# return np.sqrt(dist)
| [
"numpy.log10",
"numpy.log",
"sklearn.metrics.pairwise.pairwise_distances",
"numpy.array",
"matplotlib.style.use",
"matplotlib.ticker.ScalarFormatter",
"numpy.arctan2",
"numpy.linalg.norm",
"numpy.arange",
"matplotlib.ticker.FuncFormatter",
"itertools.product",
"numpy.max",
"numpy.linspace",
... | [((293, 320), 'matplotlib.style.use', 'matplotlib.style.use', (['"""bmh"""'], {}), "('bmh')\n", (313, 320), False, 'import matplotlib\n'), ((15378, 15401), 'pylab.tight_layout', 'plt.tight_layout', ([], {'pad': '(0)'}), '(pad=0)\n', (15394, 15401), True, 'import pylab as plt\n'), ((15679, 15706), 'numpy.zeros', 'np.zeros', (['(n_points, index)'], {}), '((n_points, index))\n', (15687, 15706), True, 'import numpy as np\n'), ((15981, 16001), 'numpy.sum', 'np.sum', (['(diff < 1e-06)'], {}), '(diff < 1e-06)\n', (15987, 16001), True, 'import numpy as np\n'), ((16694, 16734), 'numpy.array', 'np.array', (['[x_max - x_min, y_max - y_min]'], {}), '([x_max - x_min, y_max - y_min])\n', (16702, 16734), True, 'import numpy as np\n'), ((16818, 16858), 'numpy.array', 'np.array', (['[x_max - x_min, y_max - y_min]'], {}), '([x_max - x_min, y_max - y_min])\n', (16826, 16858), True, 'import numpy as np\n'), ((17716, 17744), 'numpy.linspace', 'np.linspace', (['y_min', 'y_max', '(6)'], {}), '(y_min, y_max, 6)\n', (17727, 17744), True, 'import numpy as np\n'), ((18437, 18478), 'numpy.array', 'np.array', (['(addedPoints + sPoints + ePoints)'], {}), '(addedPoints + sPoints + ePoints)\n', (18445, 18478), True, 'import numpy as np\n'), ((18715, 18749), 'numpy.zeros', 'np.zeros', (['(n_border + n_labels, 2)'], {}), '((n_border + n_labels, 2))\n', (18723, 18749), True, 'import numpy as np\n'), ((18773, 18796), 'numpy.zeros', 'np.zeros', (['(n_labels, 2)'], {}), '((n_labels, 2))\n', (18781, 18796), True, 'import numpy as np\n'), ((18817, 18840), 'numpy.zeros', 'np.zeros', (['n_labels', 'int'], {}), '(n_labels, int)\n', (18825, 18840), True, 'import numpy as np\n'), ((20428, 20442), 'numpy.min', 'np.min', (['vector'], {}), '(vector)\n', (20434, 20442), True, 'import numpy as np\n'), ((20455, 20469), 'numpy.max', 'np.max', (['vector'], {}), '(vector)\n', (20461, 20469), True, 'import numpy as np\n'), ((758, 769), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (766, 769), True, 'import 
numpy as np\n'), ((1009, 1020), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (1017, 1020), True, 'import numpy as np\n'), ((1430, 1506), 'matplotlib.ticker.ScalarFormatter.__init__', 'ScalarFormatter.__init__', (['self'], {'useOffset': 'useOffset', 'useMathText': 'useMathText'}), '(self, useOffset=useOffset, useMathText=useMathText)\n', (1454, 1506), False, 'from matplotlib.ticker import ScalarFormatter, FuncFormatter\n'), ((2701, 2728), 'pylab.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (2711, 2728), True, 'import pylab as plt\n'), ((3427, 3437), 'pylab.show', 'plt.show', ([], {}), '()\n', (3435, 3437), True, 'import pylab as plt\n'), ((3763, 3782), 'numpy.arange', 'np.arange', (['n_points'], {}), '(n_points)\n', (3772, 3782), True, 'import numpy as np\n'), ((3834, 3851), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (3842, 3851), True, 'import numpy as np\n'), ((12668, 12717), 'matplotlib.ticker.ScalarFormatter', 'matplotlib.ticker.ScalarFormatter', ([], {'useOffset': '(True)'}), '(useOffset=True)\n', (12701, 12717), False, 'import matplotlib\n'), ((15877, 15909), 'numpy.linalg.norm', 'np.linalg.norm', (['y_vector'], {'axis': '(0)'}), '(y_vector, axis=0)\n', (15891, 15909), True, 'import numpy as np\n'), ((16438, 16448), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (16444, 16448), True, 'import numpy as np\n'), ((16521, 16531), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (16527, 16531), True, 'import numpy as np\n'), ((16549, 16562), 'numpy.log', 'np.log', (['y_min'], {}), '(y_min)\n', (16555, 16562), True, 'import numpy as np\n'), ((16563, 16573), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (16569, 16573), True, 'import numpy as np\n'), ((16590, 16603), 'numpy.log', 'np.log', (['y_max'], {}), '(y_max)\n', (16596, 16603), True, 'import numpy as np\n'), ((16604, 16614), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (16610, 16614), True, 'import numpy as np\n'), ((16648, 16672), 'numpy.array', 
'np.array', (['[x_min, y_min]'], {}), '([x_min, y_min])\n', (16656, 16672), True, 'import numpy as np\n'), ((16770, 16794), 'numpy.array', 'np.array', (['[x_min, y_min]'], {}), '([x_min, y_min])\n', (16778, 16794), True, 'import numpy as np\n'), ((18537, 18558), 'numpy.vstack', 'np.vstack', (['[bp1, bp2]'], {}), '([bp1, bp2])\n', (18546, 18558), True, 'import numpy as np\n'), ((18593, 18626), 'numpy.vstack', 'np.vstack', (['[bp1, bp2, ref_points]'], {}), '([bp1, bp2, ref_points])\n', (18602, 18626), True, 'import numpy as np\n'), ((19631, 19685), 'sklearn.metrics.pairwise.pairwise_distances', 'pairwise_distances', (['xy_normed', 'ref_normed'], {'metric': '"""l1"""'}), "(xy_normed, ref_normed, metric='l1')\n", (19649, 19685), False, 'from sklearn.metrics.pairwise import pairwise_distances\n'), ((20137, 20156), 'numpy.argmax', 'np.argmax', (['min_dist'], {}), '(min_dist)\n', (20146, 20156), True, 'import numpy as np\n'), ((2534, 2551), 'pylab.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (2544, 2551), True, 'import pylab as plt\n'), ((3158, 3174), 'numpy.array', 'np.array', (['y_vals'], {}), '(y_vals)\n', (3166, 3174), True, 'import numpy as np\n'), ((3233, 3249), 'numpy.array', 'np.array', (['x_vals'], {}), '(x_vals)\n', (3241, 3249), True, 'import numpy as np\n'), ((9092, 9107), 'numpy.log10', 'np.log10', (['y_min'], {}), '(y_min)\n', (9100, 9107), True, 'import numpy as np\n'), ((9108, 9123), 'numpy.log10', 'np.log10', (['y_max'], {}), '(y_max)\n', (9116, 9123), True, 'import numpy as np\n'), ((9202, 9224), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['myticks'], {}), '(myticks)\n', (9215, 9224), False, 'from matplotlib.ticker import ScalarFormatter, FuncFormatter\n'), ((13611, 13626), 'numpy.log10', 'np.log10', (['y_min'], {}), '(y_min)\n', (13619, 13626), True, 'import numpy as np\n'), ((13627, 13642), 'numpy.log10', 'np.log10', (['y_max'], {}), '(y_max)\n', (13635, 13642), True, 'import numpy as np\n'), ((13765, 13787), 
'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['myticks'], {}), '(myticks)\n', (13778, 13787), False, 'from matplotlib.ticker import ScalarFormatter, FuncFormatter\n'), ((15808, 15842), 'numpy.linalg.norm', 'np.linalg.norm', (['prev_lines'], {'axis': '(0)'}), '(prev_lines, axis=0)\n', (15822, 15842), True, 'import numpy as np\n'), ((15922, 15949), 'numpy.abs', 'np.abs', (['(prev_lines - y_norm)'], {}), '(prev_lines - y_norm)\n', (15928, 15949), True, 'import numpy as np\n'), ((16170, 16192), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (16185, 16192), False, 'import os\n'), ((16400, 16434), 'numpy.maximum', 'np.maximum', (['(1e-15)', 'xy_points[:, 1]'], {}), '(1e-15, xy_points[:, 1])\n', (16410, 16434), True, 'import numpy as np\n'), ((16483, 16518), 'numpy.maximum', 'np.maximum', (['ref_points[:, 1]', '(1e-15)'], {}), '(ref_points[:, 1], 1e-15)\n', (16493, 16518), True, 'import numpy as np\n'), ((17617, 17634), 'itertools.product', 'product', (['[8]', '[0]'], {}), '([8], [0])\n', (17624, 17634), False, 'from itertools import product\n'), ((19111, 19134), 'numpy.zeros', 'np.zeros', (['(n_points, 2)'], {}), '((n_points, 2))\n', (19119, 19134), True, 'import numpy as np\n'), ((19301, 19324), 'numpy.zeros', 'np.zeros', (['(n_points, 2)'], {}), '((n_points, 2))\n', (19309, 19324), True, 'import numpy as np\n'), ((20554, 20568), 'numpy.min', 'np.min', (['vector'], {}), '(vector)\n', (20560, 20568), True, 'import numpy as np\n'), ((20597, 20611), 'numpy.max', 'np.max', (['vector'], {}), '(vector)\n', (20603, 20611), True, 'import numpy as np\n'), ((21177, 21198), 'numpy.arctan2', 'np.arctan2', (['rise', 'run'], {}), '(rise, run)\n', (21187, 21198), True, 'import numpy as np\n'), ((21481, 21502), 'numpy.arctan2', 'np.arctan2', (['rise', 'run'], {}), '(rise, run)\n', (21491, 21502), True, 'import numpy as np\n'), ((6425, 6445), 'numpy.array', 'np.array', (['ref_points'], {}), '(ref_points)\n', (6433, 6445), True, 'import numpy as np\n'), 
((8446, 8468), 'pylab.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (8456, 8468), True, 'import pylab as plt\n'), ((12398, 12420), 'pylab.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (12408, 12420), True, 'import pylab as plt\n'), ((16232, 16254), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (16247, 16254), False, 'import os\n'), ((17396, 17461), 'itertools.product', 'product', (['[x_min, x_max, xdiff * 0.5]', '[y_min, y_max, ydiff * 0.5]'], {}), '([x_min, x_max, xdiff * 0.5], [y_min, y_max, ydiff * 0.5])\n', (17403, 17461), False, 'from itertools import product\n'), ((17526, 17551), 'itertools.product', 'product', (['[x_max]', '[y_max]'], {}), '([x_max], [y_max])\n', (17533, 17551), False, 'from itertools import product\n'), ((5743, 5762), 'numpy.arange', 'np.arange', (['n_labels'], {}), '(n_labels)\n', (5752, 5762), True, 'import numpy as np\n'), ((9999, 10018), 'numpy.arange', 'np.arange', (['n_labels'], {}), '(n_labels)\n', (10008, 10018), True, 'import numpy as np\n'), ((14996, 15012), 'numpy.log', 'np.log', (['n_points'], {}), '(n_points)\n', (15002, 15012), True, 'import numpy as np\n'), ((5061, 5076), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (5069, 5076), True, 'import numpy as np\n'), ((7011, 7027), 'numpy.log', 'np.log', (['n_points'], {}), '(n_points)\n', (7017, 7027), True, 'import numpy as np\n'), ((11128, 11144), 'numpy.log', 'np.log', (['n_points'], {}), '(n_points)\n', (11134, 11144), True, 'import numpy as np\n'), ((5204, 5219), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (5212, 5219), True, 'import numpy as np\n')] |
import time
import copy
import numpy as np
from functools import reduce
from operator import indexOf, iconcat
from sysidentpy.utils.display_results import results
from sysidentpy.model_structure_selection import FROLS
def narmax_state_space(nx_model:FROLS, X_train, X_test, states_names):
    """Fit one NARMAX model per state and assemble a state-space-like model.

    For each state, a deep copy of ``nx_model`` is trained to predict that
    state one step ahead from the remaining states, the fitted equation is
    printed via ``display_nx_model``, and a prediction over ``X_test`` is
    collected.

    Parameters:
        nx_model: template sysidentpy FROLS model (deep-copied per state)
        X_train: 2D array; rows are time samples, columns are states
        X_test: 2D array with the same column layout as ``X_train``
        states_names: one name per column of ``X_train``

    Returns:
        Tuple ``(narmax_model, narmax_sim, narmax_time)``:
        ``narmax_model`` is a dict with keys 'features', 'coeffs', 'regs';
        ``narmax_sim`` stacks the per-state predictions column-wise;
        ``narmax_time`` is the cumulative fit wall-clock time in seconds.
    """
    # xlag may be an int or a nested list of lags; flatten and take the max
    xlag = nx_model.xlag if type(nx_model.xlag) == int else max(reduce(iconcat, nx_model.xlag, []))
    # NOTE(review): assumes nx_model.ylag is an int here — sysidentpy also
    # accepts list-valued ylag; confirm against actual usage.
    max_lag = max(xlag, nx_model.ylag)
    narmax_model = {}
    narmax_time = 0
    coeffs = []
    regs = []
    sim = []
    for s_id, state in enumerate(states_names):
        model = copy.deepcopy(nx_model)
        # Inputs are all other states; target is this state shifted one step
        x_train = np.delete(X_train, s_id, axis=1)[:-1]
        y_train = X_train[1:, s_id].reshape(-1, 1)
        x_test = np.delete(X_test, s_id, axis=1)
        # Only the first ylag samples of the target seed the prediction
        y_test = X_test[0:nx_model.ylag, s_id].reshape(-1, 1)
        # Train model for one state, accumulating wall-clock fit time
        tic = time.time()
        model.fit(X=x_train, y=y_train)
        toc = time.time()
        narmax_time += toc - tic
        # Pretty-print the resulting one-step-ahead equation
        param = results(model.final_model, model.theta, model.err, model.n_terms, dtype='sci')
        param_names = np.delete(states_names, s_id, axis=0).tolist()
        display_nx_model(param, model.theta, state, param_names, max_lag)
        # NOTE(review): __sizeof__() is the object's memory footprint in
        # bytes, not a term count — this pad width looks wrong; confirm the
        # intended target length (e.g. the number of candidate regressors).
        coeffs += [np.pad(model.theta.flatten(), (0, model.basis_function.__sizeof__() - len(model.theta)))]
        regs += [[list(eq) for eq in model.final_model]]
        sim += [model.predict(X=x_test, y=y_test)]
    # Stack results for models and predictions
    narmax_model['features'] = states_names
    narmax_model['coeffs'] = np.vstack(coeffs)
    narmax_model['regs'] = regs
    narmax_sim = np.hstack(sim)
    return narmax_model, narmax_sim, narmax_time
def display_nx_model(results, theta, output: str, input_vars, max_lag=1):
    """Pretty-print a fitted NARMAX equation as a one-step-ahead predictor.

    Builds a line of the form ``output[k+1] = c0 term0 + c1 term1 ...``,
    rewriting lag markers ``(k-L)`` into ``[k-(L-1)]`` so the equation reads
    as a prediction of the next sample, substituting ``output`` for the
    generic ``y`` and each entry of ``input_vars`` for ``x1``, ``x2``, ...

    Note: mutates the inner lists of ``results`` in place (the lag markers
    are rewritten on ``regressor[0]``), matching the original behavior.
    """
    pieces = []
    for term_idx, term in enumerate(results):
        # Shift lag notation one step: '(k-L)' -> '[k-(L-1)]'
        for lag in range(max_lag):
            term[0] = term[0].replace(f'(k-{lag + 1})', f'[k-{lag}]')
        coeff = f'{theta[term_idx][0]:.3E}'
        # '-0' is dropped so 'y[k-0]' renders as 'y[k]'
        entry = ' ' + coeff + ' ' + term[0].replace('-0', '')
        if term_idx > 0:
            entry = ' +' + entry
        pieces.append(entry)
    equation = ''.join(pieces)
    equation = equation.replace('y', output).replace('+ -', '- ')
    for var_idx, var_name in enumerate(input_vars):
        equation = equation.replace(f'x{var_idx + 1}', var_name)
    print(output + '[k+1] =' + equation)
def solution_to_regressors(sol_terms, feature_names, order):
    """Convert a list of symbolic equation terms into NARMAX regressor codes.

    Each equation's feature list is reordered so the equation's own output
    comes first, and every term is encoded as a fixed-length, descending
    list of integer codes of the form ``1000 * feature_index + 1000 + lag``
    (so a bare feature reads as lag 1, matching sysidentpy's regressor
    coding where e.g. 1001 = y(k-1), 2001 = x1(k-1)).

    NOTE(review): the parsing assumes single-character operands around '^'
    and '/', and a single-digit lag after '_k-' — confirm input format.
    """
    # TODO: Division with powers in conversion
    regressors = []
    for idx_eq, eq in enumerate(sol_terms):
        eq_regs = []
        # Feature order per equation: own output first, then the others
        eq_features = [feature_names[idx_eq]] + np.delete(feature_names, idx_eq).tolist()
        for term in eq:
            n = 0
            reg = np.zeros(order, dtype='int')
            split_terms = term.split()
            # Replace powers by repetitions ('a^3' -> ['a','a','a']) and
            # expand divisions into their two operands
            new_split_terms = []
            for factor in split_terms:
                new_factor = [factor]
                if '^' in factor:
                    pos_power = indexOf(factor, '^')
                    new_factor = [factor[pos_power - 1]] * int(factor[pos_power + 1])
                elif '/' in factor:
                    pos_div = indexOf(factor, '/')
                    new_factor = [factor[pos_div - 1], factor[pos_div + 1]]
                new_split_terms += new_factor
            # Convert terms into regressor codes
            for factor in new_split_terms:
                if factor.isalpha():
                    # Bare feature name: implicit lag of 1
                    reg[n] = 1000*indexOf(eq_features, factor) + 1001
                    n += 1
                elif '_k-' in factor:
                    # Explicit lag, e.g. 'a_k-2' -> base 'a', delay 2
                    pos_k = indexOf(factor, 'k')
                    base = factor[:pos_k - 1]
                    delay =int(factor[pos_k + 2])
                    reg[n] = 1000*indexOf(eq_features, base) + 1000 + delay
                    n += 1
            # Codes are stored sorted in descending order, padded with zeros
            eq_regs += [list(np.sort(reg))[::-1]]
        regressors += [eq_regs]
    return regressors
return regressors | [
"operator.indexOf",
"sysidentpy.utils.display_results.results",
"numpy.hstack",
"functools.reduce",
"numpy.delete",
"numpy.sort",
"numpy.zeros",
"numpy.vstack",
"copy.deepcopy",
"time.time"
] | [((1651, 1668), 'numpy.vstack', 'np.vstack', (['coeffs'], {}), '(coeffs)\n', (1660, 1668), True, 'import numpy as np\n'), ((1718, 1732), 'numpy.hstack', 'np.hstack', (['sim'], {}), '(sim)\n', (1727, 1732), True, 'import numpy as np\n'), ((581, 604), 'copy.deepcopy', 'copy.deepcopy', (['nx_model'], {}), '(nx_model)\n', (594, 604), False, 'import copy\n'), ((731, 762), 'numpy.delete', 'np.delete', (['X_test', 's_id'], {'axis': '(1)'}), '(X_test, s_id, axis=1)\n', (740, 762), True, 'import numpy as np\n'), ((889, 900), 'time.time', 'time.time', ([], {}), '()\n', (898, 900), False, 'import time\n'), ((955, 966), 'time.time', 'time.time', ([], {}), '()\n', (964, 966), False, 'import time\n'), ((1049, 1127), 'sysidentpy.utils.display_results.results', 'results', (['model.final_model', 'model.theta', 'model.err', 'model.n_terms'], {'dtype': '"""sci"""'}), "(model.final_model, model.theta, model.err, model.n_terms, dtype='sci')\n", (1056, 1127), False, 'from sysidentpy.utils.display_results import results\n'), ((356, 390), 'functools.reduce', 'reduce', (['iconcat', 'nx_model.xlag', '[]'], {}), '(iconcat, nx_model.xlag, [])\n', (362, 390), False, 'from functools import reduce\n'), ((624, 656), 'numpy.delete', 'np.delete', (['X_train', 's_id'], {'axis': '(1)'}), '(X_train, s_id, axis=1)\n', (633, 656), True, 'import numpy as np\n'), ((2872, 2900), 'numpy.zeros', 'np.zeros', (['order'], {'dtype': '"""int"""'}), "(order, dtype='int')\n", (2880, 2900), True, 'import numpy as np\n'), ((1150, 1187), 'numpy.delete', 'np.delete', (['states_names', 's_id'], {'axis': '(0)'}), '(states_names, s_id, axis=0)\n', (1159, 1187), True, 'import numpy as np\n'), ((2770, 2802), 'numpy.delete', 'np.delete', (['feature_names', 'idx_eq'], {}), '(feature_names, idx_eq)\n', (2779, 2802), True, 'import numpy as np\n'), ((3161, 3181), 'operator.indexOf', 'indexOf', (['factor', '"""^"""'], {}), "(factor, '^')\n", (3168, 3181), False, 'from operator import indexOf, iconcat\n'), ((3334, 3354), 
'operator.indexOf', 'indexOf', (['factor', '"""/"""'], {}), "(factor, '/')\n", (3341, 3354), False, 'from operator import indexOf, iconcat\n'), ((3785, 3805), 'operator.indexOf', 'indexOf', (['factor', '"""k"""'], {}), "(factor, 'k')\n", (3792, 3805), False, 'from operator import indexOf, iconcat\n'), ((4035, 4047), 'numpy.sort', 'np.sort', (['reg'], {}), '(reg)\n', (4042, 4047), True, 'import numpy as np\n'), ((3656, 3684), 'operator.indexOf', 'indexOf', (['eq_features', 'factor'], {}), '(eq_features, factor)\n', (3663, 3684), False, 'from operator import indexOf, iconcat\n'), ((3936, 3962), 'operator.indexOf', 'indexOf', (['eq_features', 'base'], {}), '(eq_features, base)\n', (3943, 3962), False, 'from operator import indexOf, iconcat\n')] |
# -*- coding: utf-8 -*-
"""AG module containing portfolio class
Todo:
* Improve portfolio analysis tools
"""
# Standard imports
from __future__ import annotations
from datetime import datetime
from numbers import Number
from copy import copy, deepcopy
from datetime import timedelta
import math
# Third party imports
import pandas as pd
import numpy as np
# Local imports
from ._asset import types, Asset
from ._standard import Currency
from .. import utils
# Typing
from typing import (
TYPE_CHECKING,
Any,
TypeVar,
Union,
Optional,
Literal,
Generic,
Iterable,
)
from ._asset import Asset
from ._standard import Call
Asset_T = TypeVar("Asset_T", bound=Asset, covariant=True)
if TYPE_CHECKING:
from ._collections import Environment
class PositionView:
    """
    A lightweight, detached snapshot of a Position.

    Stores only the plain attributes needed for statistical performance
    reporting, so portfolio histories can keep these records without
    holding references that would otherwise prevent garbage collection of
    the live Position and Asset objects.

    Parameters:
        position:
            The position to snapshot

    Attributes:
        asset (str): key of the underlying asset
        quantity (float): units held at snapshot time
        short (bool): whether the position was short
        cost (float): total cost at snapshot time
        value (float): market value at snapshot time
        symbol (str): symbol of the position's base currency
        cash (bool): True when the snapshotted position was a Cash position
        unit (str): singular unit label of the underlying asset
        units (str): plural unit label of the underlying asset
    """

    def __init__(self, position: Position) -> None:
        self.asset = position.asset.key
        self.unit = position.asset.unit
        self.units = position.asset.units
        self.quantity = position.quantity
        self.short = position.short
        self.cost = position.cost
        self.value = position.value
        self.symbol = position.symbol
        self.cash = isinstance(position, Cash)

    def __str__(self) -> str:
        if self.cash:
            return f"CASH {self.symbol}{self.value}"
        direction = "SHORT" if self.short else "LONG"
        label = self.units if self.quantity != 1 else self.unit
        per_unit = round(self.value / self.quantity, 2)
        return (
            f"{self.asset}_{direction}: {self.quantity} {label} @ "
            f"{self.symbol}{per_unit} /{self.unit}"
        )

    def __repr__(self) -> str:
        return str(self)

    def __eq__(self, other: object) -> bool:
        if isinstance(other, PositionView):
            return self.key == other.key
        return NotImplemented

    def __getstate__(self) -> dict:
        # The attribute dict is the entirety of this object's state
        return self.__dict__

    def __setstate__(self, state: dict) -> None:
        self.__dict__ = state

    @property
    def key(self) -> str:
        """Identifier used to index this view in a portfolio's .positions"""
        direction = "SHORT" if self.short else "LONG"
        return f"{self.asset}_{direction}"

    def empty(self) -> PositionView:
        """
        Return a copy of this view with quantity, cost, and value zeroed.

        Used when creating position difference reports.
        """
        blank = deepcopy(self)
        blank.quantity = 0
        blank.cost = 0
        blank.value = 0
        return blank
class Position(Generic[Asset_T]):
    """
    Object representing a position in a financial asset

    An object representing a financial stake in some underlying asset,
    to be used in portfolios to track holdings. Automatically updates
    value using the value of the underlying asset, and keeps track of
    average cost to enter the position.

    Parameters:
        asset:
            The asset underlying this position

        quantity:
            The quantity of the underlying asset owned or owed by this position

        short:
            Whether or not the quantity of the position is owned or owed

    Attributes:
        asset (Asset):
            The underlying asset

        quantity (float):
            The quantity of the asset represented by the position

        short (bool):
            True if short, False if long

        cost (float):
            The total cost to enter this position

        value (float):
            The current market value of this position

        average_cost (float):
            The cost of this position per unit

        key (str):
            A key used for indexing this position in dicts

        symbol (str):
            A symbol corresponding to the currency of the asset underlying this
            position

        price (str):
            A rounded version of value with the symbol attached, used for
            printing purposes

        expired (bool):
            Whether or not this position is expired

        total_return (float):
            The total return of this position relative to its cost, represented
            in the base currency of the underlying asset

        percent_return (float):
            The total return as a percentage of the cost
    """

    def __init__(self, asset: Asset_T, quantity: float, short: bool = False) -> None:
        self.asset: Asset_T = asset
        # Quantities are tracked at 2-decimal precision (see quantity setter)
        self._quantity = round(quantity, 2)
        self.short = short
        # Cost basis starts at the current market value of the position
        self.cost = self.value
        self.history = self._history()

    def __add__(self, other: Position) -> Position:
        # Positions can only be combined with positions in the same asset
        # and direction (see __eq__); anything else is NotImplemented
        if not self == other:
            return NotImplemented
        new = copy(self)
        # Updating quantity
        new.quantity += other.quantity
        # Extend cost basis at the CURRENT asset value; sign flipped so a
        # short position's cost moves in the opposite direction
        short = -1 if new.short else 1
        new.cost += new.asset.value * other.quantity * short
        # Record the change in the position's value history
        new.update_history()
        return new

    def __sub__(self, other: Position) -> Position:
        # Positions can only be subtracted from matching positions
        if not self == other:
            return NotImplemented
        new = copy(self)
        # Updating quantity
        new.quantity -= other.quantity
        # Reduce cost basis at the CURRENT asset value (see __add__)
        short = -1 if new.short else 1
        new.cost -= new.asset.value * other.quantity * short
        # Record the change in the position's value history
        new.update_history()
        return new

    def __eq__(self, other: object) -> bool:
        # Equality here means "combinable": same asset object, same direction
        if not isinstance(other, Position):
            return NotImplemented
        return self.asset is other.asset and self.short is other.short

    def __str__(self) -> str:
        unit = self.asset.unit if self.quantity == 1 else self.asset.units
        return (
            f"<{self.quantity} {self.asset.key} {unit} @ {self.symbol}{self.average_cost}"
            f" /{self.asset.unit} | VALUE: {self.price} "
            f"| RETURN: {round(self.percent_return, 2)}%>"
        )

    def __repr__(self) -> str:
        unit = self.asset.unit if self.quantity == 1 else self.asset.units
        return (
            f"<{self.quantity} {unit} @ {self.symbol}{self.average_cost} "
            f"| RETURN: {round(self.percent_return, 2)}%>"
        )

    def __setstate__(self, state: dict) -> None:
        self.__dict__ = state

    def __getstate__(self) -> dict:
        return self.__dict__

    def __copy__(self) -> Position:
        # NOTE: the copy SHARES the history DataFrame with the original
        # (deliberately shallow); __add__/__sub__ rely on this behavior
        new = Position(self.asset, self.quantity, self.short)
        new.history = self.history
        return new

    @property
    def quantity(self) -> float:
        """The quantity of the underlying asset"""
        return self._quantity

    @quantity.setter
    def quantity(self, value: float) -> None:
        # Keep quantities at a uniform 2-decimal precision
        self._quantity = round(value, 2)

    @property
    def key(self) -> str:
        """The key used to encode this position within a Portfolio"""
        short = "SHORT" if self.short else "LONG"
        return f"{self.asset.key}_{short}"

    @property
    def symbol(self) -> str:
        """The symbol corresponding to the base currency of the position"""
        return types.currency.instances[self.asset.base].symbol

    @property
    def value(self) -> float:
        """The position's total market value as of the current date
        (negative for short positions)"""
        return self.asset.value * self.quantity * (-1 if self.short else 1)

    @property
    def price(self) -> str:
        """The position's price represented in the base currency, as a string"""
        price = abs(self.value)
        if price != 0:
            # Widen rounding precision for sub-unit values so small prices
            # do not round away to 0
            r = 2
            while price < 1:
                r += 1
                price *= 10
            # NOTE(review): rounds the SIGNED value, so short positions
            # render with a minus sign after the symbol — confirm intended
            price = round(self.value, r)
        return f"{self.symbol}{price}"

    @property
    def average_cost(self) -> float:
        """The average cost per unit for this position
        (raises ZeroDivisionError when quantity is 0)"""
        return round(self.cost / self.quantity, 2)

    @property
    def expired(self) -> bool:
        """Whether or not the position is expired"""
        return self.quantity <= 0 or self.asset.expired

    @property
    def total_return(self) -> float:
        """The total return of the position represented in its base currency"""
        return self.value - self.cost

    @property
    def percent_return(self) -> float:
        """The total return as a proportion of the cost; sign is flipped
        for shorts so a profitable short reads as a positive return"""
        return (self.total_return / self.cost) * 100 * (-1 if self.short else 1)

    def expire(self, portfolio: Portfolio) -> None:
        """Expires this position within the input portfolio

        Delegates to the underlying asset's expiry behavior.

        Parameters:
            portfolio:
                The portfolio owning this position
        """
        self.asset.expire(portfolio, self)

    def view(self) -> PositionView:
        """
        A memory efficient copy of the position

        Returns a 'view' of the position in its current state for use
        in portfolio histories.

        Returns:
            Memory efficient copy of the position
        """
        return PositionView(self)

    def _history(self) -> pd.DataFrame:
        """A pandas DataFrame of this position's value history

        Returns a datetime indexed dataframe of this position's market
        value history and cost, seeded with a single row at the asset's
        current date.

        Returns:
            A position history
        """
        history = pd.DataFrame(
            [[self.value, self.cost]],
            columns=["VALUE", "COST"],
            index=pd.DatetimeIndex([self.asset.date], name="DATE"),
        )
        return history

    def update_history(self) -> None:
        """
        Updates the position's history to reflect changes

        Writes (or overwrites) the row at the asset's current date with the
        position's current value and cost.
        """
        self.history.loc[self.asset.date] = [self.value, self.cost]
class Cash(Position[Currency]):
    """
    A special type of position representing some quantity of a currency

    A special type of position whose underlying asset is some currency, which
    defaults to the environment's base currency when not specified otherwise.

    Parameters:
        quantity:
            The quantity of the currency the position holds

        code:
            The currency code of the currency this Cash position represents

    Attributes:
        code (str):
            The currency code of the currency underlying this position
    """

    def __init__(self, quantity: float, code: Optional[str] = None) -> None:
        # Default to the global base currency when no code is given
        code = Currency.base if code is None else code
        super().__init__(Currency(code), quantity)

    def __str__(self) -> str:
        return f"<{self.asset.code} {self.asset.symbol}{self.quantity}>"

    def __repr__(self) -> str:
        return self.__str__()

    def __add__(self, other: object) -> Cash:
        # Numbers and positions are coerced into Cash before adding
        if not isinstance(other, (Cash, float, int)):
            return NotImplemented
        other = self.to_cash(other)
        new = copy(self)
        if new.asset is other.asset:
            new.quantity += other.quantity
        else:
            # NOTE(review): arithmetic uses asset.convert while from_cash
            # uses asset.rate — confirm both express the same exchange rate
            value = new.asset.convert(other.asset.code) * other.quantity
            new.quantity += value
        return new

    def __sub__(self, other: object) -> Cash:
        if not isinstance(other, (Cash, int, float)):
            return NotImplemented
        other = self.to_cash(other)
        new = copy(self)
        if new.asset is other.asset:
            new.quantity -= other.quantity
        else:
            value = new.asset.convert(other.asset.code) * other.quantity
            new.quantity -= value
        return new

    def __lt__(self, other: object) -> bool:
        # Comparisons are by converted value, so cross-currency
        # comparisons are meaningful
        if not isinstance(other, (Cash, int, float)):
            return NotImplemented
        other = self.to_cash(other)
        return self.value < other.value

    def __gt__(self, other: object) -> bool:
        if not isinstance(other, (Cash, int, float)):
            return NotImplemented
        other = self.to_cash(other)
        return self.value > other.value

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, (Cash, int, float)):
            return NotImplemented
        other = self.to_cash(other)
        return self.value == other.value

    def __copy__(self) -> Cash:
        # Like Position.__copy__, the copy shares the history DataFrame
        new = Cash(self.quantity, self.asset.code)
        new.history = self.history
        return new

    @property
    def expired(self) -> Literal[False]:
        """Always returns false, Cash positions never expire"""
        return False

    @property
    def code(self) -> str:
        """The currency code of this Cash Position's currency"""
        return self.asset.code

    @staticmethod
    def from_position(position: Position) -> Cash:
        """
        Returns a cash object representing the value of the position passed

        Parameters:
            position:
                The position to transform into a cash object

        Returns:
            The transformed cash object, denominated in the position's base
            currency
        """
        return Cash(position.value, position.asset.base)

    def from_number(self, other: float) -> Cash:
        """
        Given a number value, returns a cash object that shares the same
        underlying currency as this cash object, whose quantity is the given
        number

        Parameters:
            other:
                The quantity of the cash object to instantiate

        Returns:
            The cash object
        """
        return Cash(other, self.asset.code)

    def from_cash(self, other: Cash) -> Cash:
        """
        Given a cash object, returns a cash object of equivalent value (based
        on current exchange rates) represented in the currency of this cash
        object

        Parameters:
            other:
                The cash object to convert to units of this cash object's currency

        Returns:
            The converted cash object
        """
        quantity = self.asset.rate(other.asset.code) * other.quantity
        return Cash(quantity, self.asset.code)

    def convert(self, code: str, inplace: bool = False) -> Cash:
        """
        Converts this cash asset to another currency

        Parameters:
            code:
                The currency code of the currency to convert to

            inplace:
                Whether or not to convert this cash position inplace

        Returns:
            The converted cash object; when ``inplace`` is True, this
            object's currency and quantity are updated as well
        """
        quantity = self.asset.rate(code) * self.quantity
        new = Cash(quantity, code)
        if inplace:
            # BUGFIX: the previous implementation rebound the local name
            # ``self`` (``self = new``), which had no effect outside this
            # method. Mutate the object's state instead so inplace=True
            # actually converts the calling position.
            self.asset = new.asset
            self.quantity = new.quantity
        return new

    def to_cash(self, obj: Union[Cash, Position, float]) -> Cash:
        """
        Given an ambiguous object which could be interpreted as a Cash position,
        converts that object to a Cash position in the context of the calling
        cash position and returns it.

        Parameters:
            obj:
                The object to be converted to a cash position

        Returns:
            The converted cash position

        Raises:
            TypeError: when obj cannot be interpreted as Cash
        """
        if isinstance(obj, (int, float)):
            return self.from_number(obj)
        elif isinstance(obj, Cash):
            return self.from_cash(obj)
        elif isinstance(obj, Position):
            return self.from_position(obj)
        raise TypeError(
            f"obj type {obj.__class__.__name__} can not be converted to Cash"
        )
class Portfolio:
"""
An object representing a portfolio of financial assets
AlphaGradient portfolios are designed to interact natively with
AlphaGradient assets, and provide all of the functionality one
would expect with a normal financial portfolio.
Parameters:
initial:
The initial quantity of the base currency held by the Portfolio
name:
The name of this portfolio, used for indexing within environments
base:
The base currency of this portfolio
Attributes:
cash (float):
How much of the base currency the Portfolio holds
date (datetime):
The last time this position was altered or valuated
positions (dict):
A dictionary of all of this Portfolio's current positions
longs (dict):
A dictionary of all long positions
shorts (dict):
A dictionary of all short positions
base (str):
A currency code representing this portfolio's base currency
value (float):
The total value of this portfolio, represented in the portfolio's
base currency
liquid (float):
The sum of all of the portfolio's liquid assets
"""
def __init__(
self,
initial: float,
name: Optional[Union[str, Environment]] = None,
base: Optional[str] = None,
) -> None:
if isinstance(name, types.environment.c):
self.name = self._generate_name(environment=name)
else:
assert type(name) is str # Mypy doesnt recognize the type narrowing above
self.name = self._generate_name() if name is None else name
self._base = Currency.base if base is None else base
self._cash = Cash(initial, self.base)
self._positions: dict[str, Position] = {"CASH": self.cash}
self.history = self._history()
self.type.instances[self.name] = self # type: ignore[attr-defined]
def __getattr__(self, attr: str) -> dict[str, Position]:
if attr in [c.__name__.lower() for c in types.instantiable()]:
return {
k: v
for k, v in self.positions.items()
if v.asset.__class__.__name__.lower() == attr
}
raise AttributeError(f"Portfolio has no attribute {attr}")
def __str__(self) -> str:
return f"<AlphaGradient Portfolio at {hex(id(self))}>"
def __repr__(self) -> str:
return self.__str__()
def _generate_name(
self, last: list[int] = [0], environment: Environment = None
) -> str:
"""
Generates a name for this portfolio. Only used when an environment
object is passed in for the 'name' parameter during Portfolio
initialization
Parameters:
environment:
The environment object to generate a portfolio name for
Returns:
A portfolio name appropriate for that environment
"""
if environment is None:
if last[0] == 0 and not self.type.instances: # type: ignore[attr-defined]
return "MAIN"
else:
name = f"P{last[0]}"
last[0] += 1
return name
else:
if environment._portfolios:
return f"P{len(environment._portfolios) - 1}"
else:
return "MAIN"
@property
def base(self) -> str:
"""This portfolio's base currency"""
return self._base
@base.setter
def base(self, new_base: str) -> None:
if Currency.validate_code(new_base, error=True):
self._base = new_base
@property
def base_symbol(self) -> str:
"""The symbol corresponding to this portfolio's base currency"""
return self._cash.symbol
@property
def cash(self) -> Cash:
"""This portfolio's currently held quantity of the base currency"""
return self._cash
@cash.setter
def cash(self, n: float) -> None:
if isinstance(n, Number):
self._cash.quantity = n
elif isinstance(n, Cash):
self._cash = Cash.from_position(n)
else:
raise TypeError(
"Cash can only be assigned to a numerical "
"value or another cash instance."
)
@property
def date(self) -> datetime:
"""This Portfolio's current date"""
return self._date # type: ignore[return-value]
@property
def liquid(self) -> float:
"""The total liquid value of the portfolio"""
return self.cash.quantity
@property
def longs(self) -> dict[str, Position]:
"""A dictionary which associates all long positions with their asset keys"""
return {v.asset.key: v for k, v in self.positions.items() if not v.short}
@property
def positions(self) -> dict[str, Position]:
"""The financial positions currency held by this portfolio"""
self._positions["CASH"] = self._cash
return self._positions
@property
def shorts(self) -> dict[str, Position]:
"""A dictionary which assocaites all short positions with their asset keys"""
return {v.asset.key: v for k, v in self.positions.items() if v.short}
@property
def value(self) -> float:
"""This portfolio's net market value"""
return sum([position.value for position in self.positions.values()])
def update_positions(self) -> None:
"""
Updates the Portfolios positions to removed those that have expired,
after calling their expire methods.
TODO: This should be a private method
"""
for expired in [pos for pos in self._positions.values() if pos.asset.expired]:
expired.expire(self)
self._positions = {
k: pos for k, pos in self._positions.items() if not pos.expired
}
# THIS SHOULD BE DONE IN THE CASH SETTER
self._positions["CASH"] = self.cash
def validate_transaction(
self, asset: Asset, quantity: float, short: bool = False
) -> bool:
"""
Validates a transaction by determining the this portfolio is
capable of performing it.
All transactions are validated before being performed. When the
arguments passed to a transaction are invalid but not cause for alarm,
simply returns False, which means that the algorithm runtime will
continue without performing the transaction
In some cases, the validation will trigger a runtime error for
transaction inputs that should not be possible or are uninterpretable.
Parameters:
asset:
The asset involved in the transaction
quantity:
The transaction quantity of the involved asset
short:
Whether the transaction is short or long (True is short, False
is long)
Returns:
A boolean value indicating the validity of the transaction.
Raises:
ValueError:
When the transaction quantity is below 0. A transaction quantity
is bounded at 0.
ValueError:
When the date of the asset's most recent valuation does not
match the current date of the portfolio.
ValueError:
When trying to perform a transaction with an asset whose market
is currently closed
"""
if not short:
if isinstance(asset, Currency) and asset.is_base:
return False
elif quantity == 0:
return False
if quantity < 0:
raise ValueError(
"Purchase quantity must be or exceed 0, " f"received {quantity}"
)
elif asset.date != self.date:
raise ValueError(
"Unable to complete transaction, "
f"{asset.date=} does not match "
f"{self.date=}. Please ensure portfolio "
"and asset dates are synced"
)
elif (asset.market_open != asset.market_close) and (
asset.date.time() > asset.market_close
or asset.date.time() < asset.market_open
):
raise ValueError(
f"Unable to complete transaction for {asset=} because the "
"market is closed. Market close is "
f"{utils.timestring(asset.market_close)}, time of last "
f"valuation is {utils.timestring(asset.date)}"
)
return True
def buy(self, asset: Asset, quantity: float) -> None:
"""
Buys an asset using this portfolio
Creates a long position in the given asset with a purchase volume
given by 'quantity'.
Parameters:
asset:
The asset in which to create a long position
quantity:
The purchase quantity
Returns:
TODO: Return the created/altered position
"""
# Ensure that the transaction can occur
if not self.validate_transaction(asset, quantity):
return
# Creating the position to be entered into
position = Position(asset, quantity)
if position.value > self.cash:
raise ValueError(
f"Purchasing {quantity} units of " # type: ignore[attr-defined]
f"{asset.name} {asset.type} requires "
f"{Cash(position.value, position.asset.base)}, "
f"but this portfolio only has {self.cash} "
"in reserve"
)
# Updating the position if one already exists
try:
self.positions[position.key] += position
except KeyError:
self.positions[position.key] = position
# Updating the potfolio's cash reserves
self._cash -= Cash.from_position(position)
# Updating the portfolio's history to reflect purchase
self.update_positions()
self.update_history()
def sell(self, asset: Asset, quantity: float) -> None:
"""Sells a long position in this portfolio
Decrements a long position in the given asset by 'quantity'.
Maximum sale quantity is the amount owned by the portfolio.
Parameters:
asset:
The asset of the corresponding decremented position
quantity:
The sale quantity
Returns:
TODO: Return the modified position
"""
# Ensure that the transaction can occur
if not self.validate_transaction(asset, quantity):
return
# Creating the position to be sold
position = Position(asset, quantity)
# Selling the position if the current position is large enough
# to satisfy the sale quantity
try:
current = self.positions[position.key]
if current.quantity >= quantity:
self._cash += Cash.from_position(position)
self.positions[position.key] -= position
else:
raise ValueError(
f"Portfolio has insufficient long " # type: ignore[attr-defined]
f"position in asset {asset.name} "
f"{asset.type} to sell {quantity} "
f"units. Only {current} units "
"available"
)
# We can only sell positions that we own
except KeyError as e:
raise ValueError(
"Portfolio has no long position in asset " # type: ignore[attr-defined]
f"{asset.name} {asset.type}"
) from e
# Updating the portfolio's history to reflect sale
self.update_positions()
self.update_history()
def short(self, asset: Asset, quantity: float) -> None:
"""
Shorts an asset using this portfolio
Creates a short position in the given asset with a short volume given
by 'quantity'.
Parameters:
asset:
The asset in which to create a short position
quantity:
The short sale quantity
Returns:
TODO: Return the created/altered position
"""
# Ensure that the transaction can occur
if not self.validate_transaction(asset, quantity, short=True):
return
# Creating the position to be shorted
position = Position(asset, quantity, short=True)
# Updating the position if one already exists
try:
self.positions[position.key] += position
except KeyError:
self.positions[position.key] = position
# Updating the potfolio's cash reserves
self._cash -= Cash.from_position(position)
# Updating the portfolio's history to reflect short sale
self.update_positions()
self.update_history()
def cover(self, asset: Asset, quantity: float) -> None:
"""Covers a short position in this portfolio
Covers a short position in the given asset with a cover
(purchase) volume given by 'quantity'.
Parameters:
asset:
The asset in which to cover a short position
quantity:
The cover (purchase) quantity
Returns:
TODO: Return the modified position
"""
# Ensure that the transaction can occur
if not self.validate_transaction(asset, quantity, short=True):
return
# Creating the short position to be covered
position = Position(asset, quantity, short=True)
required = -1 * position.value
if required > self._cash:
raise ValueError(
f"Covering {quantity} short sold units of " # type: ignore[attr-defined]
f"{asset.name} {asset.type} requires "
f"${required}, but this portfolio only "
f"has ${self.cash} in reserve"
)
# Covering the position if the current short position is large
# enough to satisfy the quantity to cover
try:
current = self.positions[position.key]
if current.quantity >= quantity:
self.cash += Cash.from_position(position)
self.positions[position.key] -= position
else:
raise ValueError(
"Portfolio has insufficient short " # type: ignore[attr-defined]
f"position in asset {asset.name} "
f"{asset.type} to cover {quantity} "
f"units. Only {current.quantity} units"
" have been sold short"
)
# We can only cover positions that we have shorts in
except KeyError as e:
raise ValueError(
"Portfolio has no short position in " # type: ignore[attr-defined]
f"asset {asset.name} {asset.type}"
)
# Updating the portfolio's history to reflect short cover
self.update_positions()
self.update_history()
def _history(self) -> pd.DataFrame:
"""
A history of this portfolio's positions and value
Returns a datetime indexed pandas dataframe of this portfolio's
positions and total market value. This function is only used
to initialize it
Returns:
Portfolio position/value history
"""
positions = [position.view() for position in self.positions.values()]
return pd.DataFrame(
[[positions, self.value]],
columns=["POSITIONS", "VALUE"],
index=pd.DatetimeIndex([self.date], name="DATE"),
)
def update_history(self) -> None:
"""
updates this portfolio's history
TODO: Should be a private method
"""
positions = [position.view() for position in self.positions.values()]
data = np.array([positions, self.value], dtype="object")
self.history.at[self.date] = data
def reset(self) -> None:
"""Restarts the portfolio's value history"""
self.history = self._history()
def invest(self, n: float) -> None:
"""
Adds cash to this portfolio
Parameters:
n:
The quantity of cash (in the portfolio's base currency) to add
Returns:
TODO: Return the modified cash position
"""
self.cash += n
def liquidate(self, force: bool = False) -> None:
"""Sells all positions that are not the base currency/cash position"""
if not force:
for pos in self.longs.values():
self.sell(pos.asset, pos.quantity)
for pos in self.shorts.values():
self.cover(pos.asset, pos.quantity)
else:
for pos in self.longs.values():
if isinstance(pos.asset, Currency) and pos.asset.is_base:
pass
else:
self.cash += Cash.from_position(pos)
pos.quantity = 0
for pos in self.shorts.values():
self.cash += Cash.from_position(pos)
pos.quantity = 0
self.update_positions()
def get_position(
self,
asset: Asset,
short: bool = False,
positions: Optional[Iterable[Position]] = None,
) -> Optional[Position]:
"""
Gets this portfolio's current position in the given asset
Parameters:
asset:
The asset being searched
short:
Is the position short or long
positions:
The positions to be searched
Returns:
A position in the asset if found, otherwise None
"""
if positions is None:
positions = self.shorts.values() if short else self.longs.values()
result = [pos for pos in positions if pos.asset is asset]
if result:
return result[0]
return None
def get_related_positions(
self,
asset: Asset,
short: bool = False,
positions: Optional[Iterable[Position]] = None,
) -> list[Position]:
"""
Gets this portfolio's current positions in the given asset or related
to the asset
Parameters:
asset:
The asset being searched
short:
Is the position short or long
positions:
The positions to be searched
Returns:
A list of positions that are related to the asset
"""
if positions is None:
positions = self.shorts.values() if short else self.longs.values()
result = [
pos
for pos in positions
if pos.asset is asset or asset in pos.asset.__dict__.values()
]
return result
def covered_call(
self,
call: Call,
quantity: Optional[float] = None,
limit: Optional[float] = None,
) -> int:
"""
Sells calls in the given quantity, but ensures that they are 'covered'
by owning stock equal to the number of shares accounted for by the
contracts in the case of assignment
Parameters:
call:
The call to be sold short
quantity:
The quantity to sell
limit:
Specifies a maximum amount to sell when quantity is not
specified
Returns:
The number of calls sold short
"""
# Getting the current number of owned shares
current: Any = self.get_position(call.underlying)
current = current.quantity if current is not None else 0
# Getting the quantity of shares currently accounted for by calls that
# have already been sold short (Number of outstanding short sold calls
# * 100 shares per contract)
sold_short: Any = (
sum(
pos.quantity
for pos in self.get_related_positions(
call.underlying, short=True, positions=self.call.values()
)
)
* 100
)
# The number of shares that are currently available to stand as
# collateral for a new covered call (shares owned that are not
# currently acting as collateral)
available = current - sold_short
# Determining the maximum quantity of calls that we can sell
if quantity is None:
# If no sale quantity is given, we find the highest possible
# multiple of 100 based on current liquidity, bounded to 0 (In case
# of negative liquidity)
quantity = max(
math.floor((available + (self.liquid / call.underlying.value)) / 100)
* 100,
0,
)
else:
# Turn call quantity into share quantity
quantity *= 100
# Shares need to be bought
if available < quantity:
can_purchase = math.floor(self.liquid / call.underlying.value)
purchase_quantity = quantity - available
# We lack the necessary liquidity to cover the sale
if can_purchase < purchase_quantity:
raise ValueError(
f"Not enough liquidity to purchase {purchase_quantity} "
"shares of the underyling to cover the short sale of "
f"{int(quantity / 100)} {call.name} contracts (requires "
f"{self.base_symbol}{round(purchase_quantity * call.underlying.value, 2)}"
f", only have {self.base_symbol}{self.liquid})"
)
# Making the purchase
self.buy(call.underlying, purchase_quantity)
# Returning quantity to number of contracts rather than shares
quantity = int(quantity / 100)
# Selling the calls
self.short(call, quantity)
return quantity
# Uses a protected keyword, so must be used set outside of the class
setattr(Portfolio, "type", types.portfolio)
setattr(types.portfolio, "c", Portfolio)
| [
"math.floor",
"pandas.DatetimeIndex",
"numpy.array",
"copy.deepcopy",
"copy.copy",
"typing.TypeVar"
] | [((672, 719), 'typing.TypeVar', 'TypeVar', (['"""Asset_T"""'], {'bound': 'Asset', 'covariant': '(True)'}), "('Asset_T', bound=Asset, covariant=True)\n", (679, 719), False, 'from typing import TYPE_CHECKING, Any, TypeVar, Union, Optional, Literal, Generic, Iterable\n'), ((3652, 3666), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (3660, 3666), False, 'from copy import copy, deepcopy\n'), ((5949, 5959), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (5953, 5959), False, 'from copy import copy, deepcopy\n'), ((6448, 6458), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (6452, 6458), False, 'from copy import copy, deepcopy\n'), ((12330, 12340), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (12334, 12340), False, 'from copy import copy, deepcopy\n'), ((12749, 12759), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (12753, 12759), False, 'from copy import copy, deepcopy\n'), ((32931, 32980), 'numpy.array', 'np.array', (['[positions, self.value]'], {'dtype': '"""object"""'}), "([positions, self.value], dtype='object')\n", (32939, 32980), True, 'import numpy as np\n'), ((38138, 38185), 'math.floor', 'math.floor', (['(self.liquid / call.underlying.value)'], {}), '(self.liquid / call.underlying.value)\n', (38148, 38185), False, 'import math\n'), ((10774, 10822), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['[self.asset.date]'], {'name': '"""DATE"""'}), "([self.asset.date], name='DATE')\n", (10790, 10822), True, 'import pandas as pd\n'), ((32638, 32680), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['[self.date]'], {'name': '"""DATE"""'}), "([self.date], name='DATE')\n", (32654, 32680), True, 'import pandas as pd\n'), ((37820, 37887), 'math.floor', 'math.floor', (['((available + self.liquid / call.underlying.value) / 100)'], {}), '((available + self.liquid / call.underlying.value) / 100)\n', (37830, 37887), False, 'import math\n')] |
import pytest
import gym
from gym import spaces
import numpy as np
from stable_baselines.common.env_checker import check_env
from stable_baselines.common.bit_flipping_env import BitFlippingEnv
from stable_baselines.common.identity_env import IdentityEnv, IdentityEnvBox
@pytest.mark.parametrize("env_id", ['CartPole-v0', 'Pendulum-v0', 'BreakoutNoFrameskip-v4'])
def test_env(env_id):
"""
Check that environmnent integrated in Gym pass the test.
:param env_id: (str)
"""
env = gym.make(env_id)
with pytest.warns(None) as record:
check_env(env)
# Pendulum-v0 will produce a warning because the action space is
# in [-2, 2] and not [-1, 1]
if env_id == 'Pendulum-v0':
assert len(record) == 1
else:
# The other environments must pass without warning
assert len(record) == 0
@pytest.mark.parametrize("env_class", [IdentityEnv, IdentityEnvBox, BitFlippingEnv])
def test_custom_envs(env_class):
env = env_class()
check_env(env)
@pytest.mark.parametrize("new_obs_space", [
# Small image
spaces.Box(low=0, high=255, shape=(32, 32, 3), dtype=np.uint8),
# Range not in [0, 255]
spaces.Box(low=0, high=1, shape=(64, 64, 3), dtype=np.uint8),
# Wrong dtype
spaces.Box(low=0, high=255, shape=(64, 64, 3), dtype=np.float32),
# Not an image, it should be a 1D vector
spaces.Box(low=-1, high=1, shape=(64, 3), dtype=np.float32),
# Tuple space is not supported by SB
spaces.Tuple([spaces.Discrete(5), spaces.Discrete(10)]),
# Dict space is not supported by SB when env is not a GoalEnv
spaces.Dict({"position": spaces.Discrete(5)}),
])
def test_non_default_spaces(new_obs_space):
env = gym.make('BreakoutNoFrameskip-v4')
env.observation_space = new_obs_space
# Patch methods to avoid errors
env.reset = new_obs_space.sample
def patched_step(_action):
return new_obs_space.sample(), 0.0, False, {}
env.step = patched_step
with pytest.warns(UserWarning):
check_env(env)
def check_reset_assert_error(env, new_reset_return):
"""
Helper to check that the error is caught.
:param env: (gym.Env)
:param new_reset_return: (Any)
"""
def wrong_reset():
return new_reset_return
# Patch the reset method with a wrong one
env.reset = wrong_reset
with pytest.raises(AssertionError):
check_env(env)
def test_common_failures_reset():
"""
Test that common failure cases of the `reset_method` are caught
"""
env = IdentityEnvBox()
# Return an observation that does not match the observation_space
check_reset_assert_error(env, np.ones((3,)))
# The observation is not a numpy array
check_reset_assert_error(env, 1)
# Return not only the observation
check_reset_assert_error(env, (env.observation_space.sample(), False))
def check_step_assert_error(env, new_step_return=()):
"""
Helper to check that the error is caught.
:param env: (gym.Env)
:param new_step_return: (tuple)
"""
def wrong_step(_action):
return new_step_return
# Patch the step method with a wrong one
env.step = wrong_step
with pytest.raises(AssertionError):
check_env(env)
def test_common_failures_step():
"""
Test that common failure cases of the `step` method are caught
"""
env = IdentityEnvBox()
# Wrong shape for the observation
check_step_assert_error(env, (np.ones((4,)), 1.0, False, {}))
# Obs is not a numpy array
check_step_assert_error(env, (1, 1.0, False, {}))
# Return a wrong reward
check_step_assert_error(env, (env.observation_space.sample(), np.ones(1), False, {}))
# Info dict is not returned
check_step_assert_error(env, (env.observation_space.sample(), 0.0, False))
# Done is not a boolean
check_step_assert_error(env, (env.observation_space.sample(), 0.0, 3.0, {}))
check_step_assert_error(env, (env.observation_space.sample(), 0.0, 1, {}))
| [
"numpy.ones",
"stable_baselines.common.env_checker.check_env",
"gym.spaces.Discrete",
"gym.spaces.Box",
"pytest.mark.parametrize",
"pytest.raises",
"stable_baselines.common.identity_env.IdentityEnvBox",
"gym.make",
"pytest.warns"
] | [((274, 369), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""env_id"""', "['CartPole-v0', 'Pendulum-v0', 'BreakoutNoFrameskip-v4']"], {}), "('env_id', ['CartPole-v0', 'Pendulum-v0',\n 'BreakoutNoFrameskip-v4'])\n", (297, 369), False, 'import pytest\n'), ((851, 938), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""env_class"""', '[IdentityEnv, IdentityEnvBox, BitFlippingEnv]'], {}), "('env_class', [IdentityEnv, IdentityEnvBox,\n BitFlippingEnv])\n", (874, 938), False, 'import pytest\n'), ((501, 517), 'gym.make', 'gym.make', (['env_id'], {}), '(env_id)\n', (509, 517), False, 'import gym\n'), ((994, 1008), 'stable_baselines.common.env_checker.check_env', 'check_env', (['env'], {}), '(env)\n', (1003, 1008), False, 'from stable_baselines.common.env_checker import check_env\n'), ((1709, 1743), 'gym.make', 'gym.make', (['"""BreakoutNoFrameskip-v4"""'], {}), "('BreakoutNoFrameskip-v4')\n", (1717, 1743), False, 'import gym\n'), ((2535, 2551), 'stable_baselines.common.identity_env.IdentityEnvBox', 'IdentityEnvBox', ([], {}), '()\n', (2549, 2551), False, 'from stable_baselines.common.identity_env import IdentityEnv, IdentityEnvBox\n'), ((3369, 3385), 'stable_baselines.common.identity_env.IdentityEnvBox', 'IdentityEnvBox', ([], {}), '()\n', (3383, 3385), False, 'from stable_baselines.common.identity_env import IdentityEnv, IdentityEnvBox\n'), ((527, 545), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (539, 545), False, 'import pytest\n'), ((565, 579), 'stable_baselines.common.env_checker.check_env', 'check_env', (['env'], {}), '(env)\n', (574, 579), False, 'from stable_baselines.common.env_checker import check_env\n'), ((1983, 2008), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (1995, 2008), False, 'import pytest\n'), ((2018, 2032), 'stable_baselines.common.env_checker.check_env', 'check_env', (['env'], {}), '(env)\n', (2027, 2032), False, 'from stable_baselines.common.env_checker import check_env\n'), 
((1077, 1139), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(32, 32, 3)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(32, 32, 3), dtype=np.uint8)\n', (1087, 1139), False, 'from gym import spaces\n'), ((1173, 1233), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(64, 64, 3)', 'dtype': 'np.uint8'}), '(low=0, high=1, shape=(64, 64, 3), dtype=np.uint8)\n', (1183, 1233), False, 'from gym import spaces\n'), ((1257, 1321), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(64, 64, 3)', 'dtype': 'np.float32'}), '(low=0, high=255, shape=(64, 64, 3), dtype=np.float32)\n', (1267, 1321), False, 'from gym import spaces\n'), ((1372, 1431), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-1)', 'high': '(1)', 'shape': '(64, 3)', 'dtype': 'np.float32'}), '(low=-1, high=1, shape=(64, 3), dtype=np.float32)\n', (1382, 1431), False, 'from gym import spaces\n'), ((2351, 2380), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2364, 2380), False, 'import pytest\n'), ((2390, 2404), 'stable_baselines.common.env_checker.check_env', 'check_env', (['env'], {}), '(env)\n', (2399, 2404), False, 'from stable_baselines.common.env_checker import check_env\n'), ((2656, 2669), 'numpy.ones', 'np.ones', (['(3,)'], {}), '((3,))\n', (2663, 2669), True, 'import numpy as np\n'), ((3187, 3216), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3200, 3216), False, 'import pytest\n'), ((3226, 3240), 'stable_baselines.common.env_checker.check_env', 'check_env', (['env'], {}), '(env)\n', (3235, 3240), False, 'from stable_baselines.common.env_checker import check_env\n'), ((3459, 3472), 'numpy.ones', 'np.ones', (['(4,)'], {}), '((4,))\n', (3466, 3472), True, 'import numpy as np\n'), ((3671, 3681), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (3678, 3681), True, 'import numpy as np\n'), ((1492, 1510), 'gym.spaces.Discrete', 'spaces.Discrete', 
(['(5)'], {}), '(5)\n', (1507, 1510), False, 'from gym import spaces\n'), ((1512, 1531), 'gym.spaces.Discrete', 'spaces.Discrete', (['(10)'], {}), '(10)\n', (1527, 1531), False, 'from gym import spaces\n'), ((1630, 1648), 'gym.spaces.Discrete', 'spaces.Discrete', (['(5)'], {}), '(5)\n', (1645, 1648), False, 'from gym import spaces\n')] |
import numpy as np
import tvm
from tvm.contrib import graph_runtime
import topi
import nnvm.symbol as sym
import nnvm.compiler
from nnvm.testing.config import ctx_list
def helper(symbol, inputs, dtype,
np_forward, np_backward=None, need_input=True, need_head_grads=True):
ishapes = {}
input_syms = []
np_inputs = {}
for (name, shape, s) in inputs:
ishapes.update({name: shape})
np_inputs.update({name: np.random.uniform(size=shape).astype(dtype)})
input_syms.append(s)
for target, ctx in ctx_list():
graph, lib, _ = nnvm.compiler.build(symbol, target, ishapes)
m = graph_runtime.create(graph, lib, ctx)
m.run(**np_inputs)
y_np = np_forward(**np_inputs)
out = m.get_output(0, tvm.nd.empty(y_np.shape, dtype))
np.testing.assert_allclose(out.asnumpy(), y_np, atol=1e-5, rtol=1e-5)
# backward
if np_backward:
graph._set_symbol_list_attr("grad_ys", symbol)
graph._set_symbol_list_attr("grad_xs", input_syms)
graph._set_symbol_list_attr("grad_ys_out_grad", sym.Variable("head_grads", shape=y_np.shape))
graph = graph.apply("Gradient")
ishapes.update({"head_grads": y_np.shape})
graph, lib, _ = nnvm.compiler.build(graph, target, ishapes)
m = graph_runtime.create(graph, lib, ctx)
head_grads = np.random.uniform(size=y_np.shape).astype(dtype)
y_np = np_backward(head_grads=head_grads, **np_inputs)
b_inputs = {}
if need_input:
b_inputs.update(np_inputs)
if need_head_grads:
b_inputs.update({"head_grads":head_grads})
m.run(**b_inputs)
for i in range(len(y_np)):
out = m.get_output(i, tvm.nd.empty(y_np[i].shape, dtype))
np.testing.assert_allclose(out.asnumpy(), y_np[i], atol=1e-5, rtol=1e-5)
def verify_transpose(dshape, axes):
x = sym.Variable("x")
if axes:
y = sym.transpose(x, axes=axes)
else:
y = sym.transpose(x)
y = y + 1
dtype = "float32"
for target, ctx in ctx_list():
graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
m = graph_runtime.create(graph, lib, ctx)
# set input
data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
m.run(x=data)
out_np = np.transpose(data.asnumpy(), axes=axes) + 1
out = m.get_output(0, tvm.nd.empty(out_np.shape))
np.testing.assert_allclose(out.asnumpy(), out_np, atol=1e-5, rtol=1e-5)
def verify_reduce(dshape, fnp, fsym, **kwargs):
x = sym.Variable("x")
y = fsym(x + 1, **kwargs)
dtype = "float32"
for target, ctx in ctx_list():
graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
m = graph_runtime.create(graph, lib, ctx)
# set input
data = np.random.uniform(size=dshape).astype(dtype)
out_np = fnp(data + 1, **kwargs)
m.run(x=data)
out = m.get_output(0, tvm.nd.empty(out_np.shape))
np.testing.assert_allclose(out.asnumpy(), out_np, atol=1e-5, rtol=1e-5)
def test_tranpose():
verify_transpose((2, 3, 4), (0, 2, 1))
verify_transpose((2, 3, 4), None)
def test_reduce():
verify_reduce((2, 3, 4), np.max, sym.max, axis=1, keepdims=True)
verify_reduce((4, 4, 3), np.min, sym.min, keepdims=True)
verify_reduce((4, 4, 3), np.sum, sym.sum, axis=(0, 2))
def verify_flip(ishape, axis):
x = sym.Variable("x")
y = sym.flip(x, axis=axis) + 1
dtype = "float32"
x_np = np.random.uniform(size=ishape).astype(dtype)
res = np.flip(x_np, axis) + 1
for target, ctx in ctx_list():
# set input
graph, lib, _ = nnvm.compiler.build(y, target, {"x": ishape})
m = graph_runtime.create(graph, lib, ctx)
m.run(x=x_np)
out = m.get_output(0, tvm.nd.empty(res.shape))
np.testing.assert_allclose(out.asnumpy(), res, atol=1e-5, rtol=1e-5)
def test_flip():
verify_flip((3, 4, 3), 1)
verify_flip((3, 4, 3), 0)
verify_flip((3, 4, 3), 2)
verify_flip((3, 4, 3), -1)
verify_flip((3, 4, 3), -3)
verify_flip((3, 4, 3), -2)
def verify_reshape(dshape, oshape):
x = sym.Variable("x")
y = sym.reshape(x, shape=oshape)
y = y + 1
dtype = "float32"
for target, ctx in ctx_list():
graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
m = graph_runtime.create(graph, lib, ctx)
# set input
data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
m.run(x=data)
out_np = data.asnumpy().reshape(oshape) + 1
out = m.get_output(0, tvm.nd.empty(out_np.shape))
np.testing.assert_allclose(out.asnumpy(), out_np, atol=1e-5, rtol=1e-5)
def test_reshape():
verify_reshape((2, 3, 4), (-1, 2, 1))
verify_reshape((2, 3, 4), (8, 3))
verify_reshape((4, 7), (2, 7, 2))
def test_clip():
x = sym.Variable("x")
a_min=0.2
a_max=0.75
y = sym.clip(x, a_min=a_min, a_max=a_max)
def forward(x):
return np.clip(x, a_min=a_min, a_max=a_max)
def backward(head_grads, x):
mask1 = np.greater_equal(x, a_min).astype("float")
mask2 = np.less_equal(x, a_max).astype("float")
return [head_grads * mask1 * mask2]
dtype = "float32"
inputs = [('x', (3, 4, 5), x)]
helper(y, inputs, dtype, forward, backward)
def test_greater():
l = sym.Variable("l")
r = sym.Variable("r")
y = sym.greater(l, r)
def forward(l, r):
return np.greater(l, r).astype("float32")
def backward(head_grads, l, r):
return [np.zeros_like(l)]
dtype = "float32"
inputs = [('l', (3, 4, 5), l),
('r', (3, 4, 5), r)]
helper(y, inputs, dtype, forward, backward, need_head_grads=False)
def test_less():
l = sym.Variable("l")
r = sym.Variable("r")
y = sym.less(l, r)
def forward(l, r):
return np.less(l, r).astype("float32")
def backward(head_grads, l, r):
return [np.zeros_like(l)]
dtype = "float32"
inputs = [('l', (3, 4, 5), l),
('r', (3, 4, 5), r)]
helper(y, inputs, dtype, forward, backward, need_head_grads=False)
def test_reshape_like():
x = sym.Variable("x")
y = sym.Variable("y")
z = sym.reshape_like(x, y)
def forward(x, y):
return np.reshape(x, y.shape)
def backward(head_grads, x, y):
return [np.reshape(head_grads, x.shape),
np.zeros_like(y)]
dtype = "float32"
inputs = [('x', (3, 4, 5), x),
('y', (5, 4, 3), y)]
helper(z, inputs, dtype, forward, backward)
def verify_expand_like(in_shape, out_shape, axis, exclude):
x = sym.Variable("x")
y = sym.Variable("y")
z = sym.expand_like(x, y, axis=axis, exclude=exclude)
def forward(x, y):
odim = len(out_shape)
real_axis = [i if i >= 0 else i + odim for i in axis]
real_axis = sorted(real_axis)
if exclude:
real_axis = list(set(range(odim)) - set(real_axis))
for i in real_axis:
x = np.expand_dims(x, i).astype(x.dtype)
for i in real_axis:
x = np.concatenate([x]*out_shape[i], axis=i).astype(x.dtype)
return x
def backward(head_grads, x, y):
odim = len(out_shape)
real_axis = [i if i >= 0 else i + odim for i in axis]
real_axis = sorted(real_axis)
if exclude:
real_axis = list(set(range(odim)) - set(real_axis))
return [np.sum(head_grads, axis=tuple(real_axis)),
np.zeros_like(y)]
dtype = "float32"
inputs = [('x', in_shape, x),
('y', out_shape, y)]
helper(z, inputs, dtype, forward, backward, need_input=False)
def test_expand_like():
verify_expand_like((3,), (3, 2), [1], False)
verify_expand_like((2,), (2, 3), [1], False)
verify_expand_like((3, 4), (3, 5, 4), [1], False)
verify_expand_like((5, 7), (5, 6, 7, 8), [0, 2], True)
def verify_elemwise_sum(num_args):
    """Check elemwise_sum of num_args inputs against np.sum over axis 0."""
    variables = [sym.Variable("input" + str(i)) for i in range(num_args)]
    out = sym.elemwise_sum(*variables, num_args=num_args)

    def forward(**inputs):
        return np.sum(np.array(list(inputs.values())), axis=0)

    def backward(head_grads, **inputs):
        # d(sum)/d(input_i) == 1, so every input receives the head gradient.
        return [head_grads] * num_args

    inputs = [("input" + str(i), (3, 4, 5), variables[i])
              for i in range(num_args)]
    helper(out, inputs, "float32", forward, backward, need_input=False)
def test_elemwise_sum():
    """Run the elemwise_sum check for a few argument counts."""
    for count in (1, 5, 7):
        verify_elemwise_sum(count)
def test_block_grad():
    """block_grad is identity on the forward pass and blocks gradients."""
    x = sym.Variable("x")
    y = sym.block_grad(x)

    def forward(x):
        return x

    def backward(head_grads, x):
        # Gradient flow is blocked: zeros regardless of the incoming grad.
        return [np.zeros_like(head_grads)]

    helper(y, [('x', (3, 4, 5), x)], "float32", forward, backward,
           need_head_grads=False)
def test_full():
    """Check full/ones/zeros and their *_like variants fill correctly.

    The original body repeated the same build/run/compare sequence six
    times; it is factored into a single local checker so each operator is
    one line and the comparison logic exists once.
    """
    shape = (3, 4, 5)
    value = 7
    dtype = "float32"

    def check(s, expected, has_input):
        # Build the symbol, run it, and compare against a np.full reference.
        if has_input:
            graph, lib, _ = nnvm.compiler.build(s, target, {"data": shape})
        else:
            graph, lib, _ = nnvm.compiler.build(s, target)
        m = graph_runtime.create(graph, lib, ctx)
        if has_input:
            m.run(data=np.random.uniform(size=shape).astype(dtype))
        else:
            m.run()
        out = m.get_output(0, tvm.nd.empty(shape, dtype=dtype))
        np.testing.assert_allclose(
            out.asnumpy(),
            np.full(shape, fill_value=expected, dtype=dtype),
            atol=1e-5, rtol=1e-5)

    for target, ctx in ctx_list():
        data = sym.Variable("data", dtype=dtype)
        # *_like symbols take an input whose shape determines the output.
        check(sym.full_like(data=data, fill_value=value, name="s"),
              value, True)
        check(sym.ones_like(data=data, fill_value=value, name="s"),
              1, True)
        check(sym.zeros_like(data=data, fill_value=value, name="s"),
              0, True)
        # Constant-creation symbols need no input.
        check(sym.full(shape=shape, dtype=dtype, fill_value=value, name="s"),
              value, False)
        check(sym.ones(shape=shape, dtype=dtype, name="s"), 1, False)
        check(sym.zeros(shape=shape, dtype=dtype, name="s"), 0, False)
# Running this module as a script executes the whole operator test suite
# (the test_* functions are defined earlier in the file) and then dumps the
# compiler engine cache for inspection.
if __name__ == "__main__":
    test_reshape()
    test_reduce()
    # NOTE(review): "tranpose" spelling matches the definition's name.
    test_tranpose()
    test_clip()
    test_greater()
    test_less()
    test_reshape_like()
    test_expand_like()
    test_elemwise_sum()
    test_block_grad()
    test_full()
    test_flip()
    print(nnvm.compiler.engine.dump())
| [
"numpy.clip",
"nnvm.symbol.full",
"numpy.less_equal",
"nnvm.symbol.reshape",
"numpy.greater_equal",
"numpy.flip",
"numpy.greater",
"nnvm.symbol.ones",
"numpy.reshape",
"numpy.less",
"tvm.contrib.graph_runtime.create",
"nnvm.symbol.zeros",
"nnvm.testing.config.ctx_list",
"nnvm.symbol.greate... | [((546, 556), 'nnvm.testing.config.ctx_list', 'ctx_list', ([], {}), '()\n', (554, 556), False, 'from nnvm.testing.config import ctx_list\n'), ((1986, 2003), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (1998, 2003), True, 'import nnvm.symbol as sym\n'), ((2155, 2165), 'nnvm.testing.config.ctx_list', 'ctx_list', ([], {}), '()\n', (2163, 2165), False, 'from nnvm.testing.config import ctx_list\n'), ((2660, 2677), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (2672, 2677), True, 'import nnvm.symbol as sym\n'), ((2753, 2763), 'nnvm.testing.config.ctx_list', 'ctx_list', ([], {}), '()\n', (2761, 2763), False, 'from nnvm.testing.config import ctx_list\n'), ((3520, 3537), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (3532, 3537), True, 'import nnvm.symbol as sym\n'), ((3709, 3719), 'nnvm.testing.config.ctx_list', 'ctx_list', ([], {}), '()\n', (3717, 3719), False, 'from nnvm.testing.config import ctx_list\n'), ((4261, 4278), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (4273, 4278), True, 'import nnvm.symbol as sym\n'), ((4287, 4315), 'nnvm.symbol.reshape', 'sym.reshape', (['x'], {'shape': 'oshape'}), '(x, shape=oshape)\n', (4298, 4315), True, 'import nnvm.symbol as sym\n'), ((4375, 4385), 'nnvm.testing.config.ctx_list', 'ctx_list', ([], {}), '()\n', (4383, 4385), False, 'from nnvm.testing.config import ctx_list\n'), ((4980, 4997), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (4992, 4997), True, 'import nnvm.symbol as sym\n'), ((5035, 5072), 'nnvm.symbol.clip', 'sym.clip', (['x'], {'a_min': 'a_min', 'a_max': 'a_max'}), '(x, a_min=a_min, a_max=a_max)\n', (5043, 5072), True, 'import nnvm.symbol as sym\n'), ((5476, 5493), 'nnvm.symbol.Variable', 'sym.Variable', (['"""l"""'], {}), "('l')\n", (5488, 5493), True, 'import nnvm.symbol as sym\n'), ((5502, 5519), 'nnvm.symbol.Variable', 'sym.Variable', (['"""r"""'], {}), "('r')\n", (5514, 
5519), True, 'import nnvm.symbol as sym\n'), ((5528, 5545), 'nnvm.symbol.greater', 'sym.greater', (['l', 'r'], {}), '(l, r)\n', (5539, 5545), True, 'import nnvm.symbol as sym\n'), ((5883, 5900), 'nnvm.symbol.Variable', 'sym.Variable', (['"""l"""'], {}), "('l')\n", (5895, 5900), True, 'import nnvm.symbol as sym\n'), ((5909, 5926), 'nnvm.symbol.Variable', 'sym.Variable', (['"""r"""'], {}), "('r')\n", (5921, 5926), True, 'import nnvm.symbol as sym\n'), ((5935, 5949), 'nnvm.symbol.less', 'sym.less', (['l', 'r'], {}), '(l, r)\n', (5943, 5949), True, 'import nnvm.symbol as sym\n'), ((6292, 6309), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (6304, 6309), True, 'import nnvm.symbol as sym\n'), ((6318, 6335), 'nnvm.symbol.Variable', 'sym.Variable', (['"""y"""'], {}), "('y')\n", (6330, 6335), True, 'import nnvm.symbol as sym\n'), ((6344, 6366), 'nnvm.symbol.reshape_like', 'sym.reshape_like', (['x', 'y'], {}), '(x, y)\n', (6360, 6366), True, 'import nnvm.symbol as sym\n'), ((6761, 6778), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (6773, 6778), True, 'import nnvm.symbol as sym\n'), ((6787, 6804), 'nnvm.symbol.Variable', 'sym.Variable', (['"""y"""'], {}), "('y')\n", (6799, 6804), True, 'import nnvm.symbol as sym\n'), ((6813, 6862), 'nnvm.symbol.expand_like', 'sym.expand_like', (['x', 'y'], {'axis': 'axis', 'exclude': 'exclude'}), '(x, y, axis=axis, exclude=exclude)\n', (6828, 6862), True, 'import nnvm.symbol as sym\n'), ((8152, 8191), 'nnvm.symbol.elemwise_sum', 'sym.elemwise_sum', (['*s'], {'num_args': 'num_args'}), '(*s, num_args=num_args)\n', (8168, 8191), True, 'import nnvm.symbol as sym\n'), ((8683, 8700), 'nnvm.symbol.Variable', 'sym.Variable', (['"""x"""'], {}), "('x')\n", (8695, 8700), True, 'import nnvm.symbol as sym\n'), ((8709, 8726), 'nnvm.symbol.block_grad', 'sym.block_grad', (['x'], {}), '(x)\n', (8723, 8726), True, 'import nnvm.symbol as sym\n'), ((9072, 9082), 'nnvm.testing.config.ctx_list', 'ctx_list', ([], 
{}), '()\n', (9080, 9082), False, 'from nnvm.testing.config import ctx_list\n'), ((639, 676), 'tvm.contrib.graph_runtime.create', 'graph_runtime.create', (['graph', 'lib', 'ctx'], {}), '(graph, lib, ctx)\n', (659, 676), False, 'from tvm.contrib import graph_runtime\n'), ((2029, 2056), 'nnvm.symbol.transpose', 'sym.transpose', (['x'], {'axes': 'axes'}), '(x, axes=axes)\n', (2042, 2056), True, 'import nnvm.symbol as sym\n'), ((2079, 2095), 'nnvm.symbol.transpose', 'sym.transpose', (['x'], {}), '(x)\n', (2092, 2095), True, 'import nnvm.symbol as sym\n'), ((2249, 2286), 'tvm.contrib.graph_runtime.create', 'graph_runtime.create', (['graph', 'lib', 'ctx'], {}), '(graph, lib, ctx)\n', (2269, 2286), False, 'from tvm.contrib import graph_runtime\n'), ((2847, 2884), 'tvm.contrib.graph_runtime.create', 'graph_runtime.create', (['graph', 'lib', 'ctx'], {}), '(graph, lib, ctx)\n', (2867, 2884), False, 'from tvm.contrib import graph_runtime\n'), ((3546, 3568), 'nnvm.symbol.flip', 'sym.flip', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (3554, 3568), True, 'import nnvm.symbol as sym\n'), ((3661, 3680), 'numpy.flip', 'np.flip', (['x_np', 'axis'], {}), '(x_np, axis)\n', (3668, 3680), True, 'import numpy as np\n'), ((3823, 3860), 'tvm.contrib.graph_runtime.create', 'graph_runtime.create', (['graph', 'lib', 'ctx'], {}), '(graph, lib, ctx)\n', (3843, 3860), False, 'from tvm.contrib import graph_runtime\n'), ((4469, 4506), 'tvm.contrib.graph_runtime.create', 'graph_runtime.create', (['graph', 'lib', 'ctx'], {}), '(graph, lib, ctx)\n', (4489, 4506), False, 'from tvm.contrib import graph_runtime\n'), ((5109, 5145), 'numpy.clip', 'np.clip', (['x'], {'a_min': 'a_min', 'a_max': 'a_max'}), '(x, a_min=a_min, a_max=a_max)\n', (5116, 5145), True, 'import numpy as np\n'), ((6406, 6428), 'numpy.reshape', 'np.reshape', (['x', 'y.shape'], {}), '(x, y.shape)\n', (6416, 6428), True, 'import numpy as np\n'), ((9099, 9132), 'nnvm.symbol.Variable', 'sym.Variable', (['"""data"""'], {'dtype': 'dtype'}), 
"('data', dtype=dtype)\n", (9111, 9132), True, 'import nnvm.symbol as sym\n'), ((9165, 9217), 'nnvm.symbol.full_like', 'sym.full_like', ([], {'data': 'data', 'fill_value': 'value', 'name': '"""s"""'}), "(data=data, fill_value=value, name='s')\n", (9178, 9217), True, 'import nnvm.symbol as sym\n'), ((9302, 9339), 'tvm.contrib.graph_runtime.create', 'graph_runtime.create', (['graph', 'lib', 'ctx'], {}), '(graph, lib, ctx)\n', (9322, 9339), False, 'from tvm.contrib import graph_runtime\n'), ((9656, 9708), 'nnvm.symbol.ones_like', 'sym.ones_like', ([], {'data': 'data', 'fill_value': 'value', 'name': '"""s"""'}), "(data=data, fill_value=value, name='s')\n", (9669, 9708), True, 'import nnvm.symbol as sym\n'), ((9793, 9830), 'tvm.contrib.graph_runtime.create', 'graph_runtime.create', (['graph', 'lib', 'ctx'], {}), '(graph, lib, ctx)\n', (9813, 9830), False, 'from tvm.contrib import graph_runtime\n'), ((10144, 10197), 'nnvm.symbol.zeros_like', 'sym.zeros_like', ([], {'data': 'data', 'fill_value': 'value', 'name': '"""s"""'}), "(data=data, fill_value=value, name='s')\n", (10158, 10197), True, 'import nnvm.symbol as sym\n'), ((10282, 10319), 'tvm.contrib.graph_runtime.create', 'graph_runtime.create', (['graph', 'lib', 'ctx'], {}), '(graph, lib, ctx)\n', (10302, 10319), False, 'from tvm.contrib import graph_runtime\n'), ((10627, 10689), 'nnvm.symbol.full', 'sym.full', ([], {'shape': 'shape', 'dtype': 'dtype', 'fill_value': 'value', 'name': '"""s"""'}), "(shape=shape, dtype=dtype, fill_value=value, name='s')\n", (10635, 10689), True, 'import nnvm.symbol as sym\n'), ((10757, 10794), 'tvm.contrib.graph_runtime.create', 'graph_runtime.create', (['graph', 'lib', 'ctx'], {}), '(graph, lib, ctx)\n', (10777, 10794), False, 'from tvm.contrib import graph_runtime\n'), ((11058, 11102), 'nnvm.symbol.ones', 'sym.ones', ([], {'shape': 'shape', 'dtype': 'dtype', 'name': '"""s"""'}), "(shape=shape, dtype=dtype, name='s')\n", (11066, 11102), True, 'import nnvm.symbol as sym\n'), ((11170, 
11207), 'tvm.contrib.graph_runtime.create', 'graph_runtime.create', (['graph', 'lib', 'ctx'], {}), '(graph, lib, ctx)\n', (11190, 11207), False, 'from tvm.contrib import graph_runtime\n'), ((11468, 11513), 'nnvm.symbol.zeros', 'sym.zeros', ([], {'shape': 'shape', 'dtype': 'dtype', 'name': '"""s"""'}), "(shape=shape, dtype=dtype, name='s')\n", (11477, 11513), True, 'import nnvm.symbol as sym\n'), ((11581, 11618), 'tvm.contrib.graph_runtime.create', 'graph_runtime.create', (['graph', 'lib', 'ctx'], {}), '(graph, lib, ctx)\n', (11601, 11618), False, 'from tvm.contrib import graph_runtime\n'), ((773, 804), 'tvm.nd.empty', 'tvm.nd.empty', (['y_np.shape', 'dtype'], {}), '(y_np.shape, dtype)\n', (785, 804), False, 'import tvm\n'), ((1342, 1379), 'tvm.contrib.graph_runtime.create', 'graph_runtime.create', (['graph', 'lib', 'ctx'], {}), '(graph, lib, ctx)\n', (1362, 1379), False, 'from tvm.contrib import graph_runtime\n'), ((2494, 2520), 'tvm.nd.empty', 'tvm.nd.empty', (['out_np.shape'], {}), '(out_np.shape)\n', (2506, 2520), False, 'import tvm\n'), ((3058, 3084), 'tvm.nd.empty', 'tvm.nd.empty', (['out_np.shape'], {}), '(out_np.shape)\n', (3070, 3084), False, 'import tvm\n'), ((3606, 3636), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'ishape'}), '(size=ishape)\n', (3623, 3636), True, 'import numpy as np\n'), ((3913, 3936), 'tvm.nd.empty', 'tvm.nd.empty', (['res.shape'], {}), '(res.shape)\n', (3925, 3936), False, 'import tvm\n'), ((4705, 4731), 'tvm.nd.empty', 'tvm.nd.empty', (['out_np.shape'], {}), '(out_np.shape)\n', (4717, 4731), False, 'import tvm\n'), ((5673, 5689), 'numpy.zeros_like', 'np.zeros_like', (['l'], {}), '(l)\n', (5686, 5689), True, 'import numpy as np\n'), ((6074, 6090), 'numpy.zeros_like', 'np.zeros_like', (['l'], {}), '(l)\n', (6087, 6090), True, 'import numpy as np\n'), ((6482, 6513), 'numpy.reshape', 'np.reshape', (['head_grads', 'x.shape'], {}), '(head_grads, x.shape)\n', (6492, 6513), True, 'import numpy as np\n'), ((6531, 6547), 
'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (6544, 6547), True, 'import numpy as np\n'), ((7627, 7643), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (7640, 7643), True, 'import numpy as np\n'), ((8815, 8840), 'numpy.zeros_like', 'np.zeros_like', (['head_grads'], {}), '(head_grads)\n', (8828, 8840), True, 'import numpy as np\n'), ((9434, 9466), 'tvm.nd.empty', 'tvm.nd.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (9446, 9466), False, 'import tvm\n'), ((9543, 9588), 'numpy.full', 'np.full', (['shape'], {'fill_value': 'value', 'dtype': 'dtype'}), '(shape, fill_value=value, dtype=dtype)\n', (9550, 9588), True, 'import numpy as np\n'), ((9925, 9957), 'tvm.nd.empty', 'tvm.nd.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (9937, 9957), False, 'import tvm\n'), ((10034, 10075), 'numpy.full', 'np.full', (['shape'], {'fill_value': '(1)', 'dtype': 'dtype'}), '(shape, fill_value=1, dtype=dtype)\n', (10041, 10075), True, 'import numpy as np\n'), ((10414, 10446), 'tvm.nd.empty', 'tvm.nd.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (10426, 10446), False, 'import tvm\n'), ((10523, 10564), 'numpy.full', 'np.full', (['shape'], {'fill_value': '(0)', 'dtype': 'dtype'}), '(shape, fill_value=0, dtype=dtype)\n', (10530, 10564), True, 'import numpy as np\n'), ((10841, 10873), 'tvm.nd.empty', 'tvm.nd.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (10853, 10873), False, 'import tvm\n'), ((10950, 10995), 'numpy.full', 'np.full', (['shape'], {'fill_value': 'value', 'dtype': 'dtype'}), '(shape, fill_value=value, dtype=dtype)\n', (10957, 10995), True, 'import numpy as np\n'), ((11254, 11286), 'tvm.nd.empty', 'tvm.nd.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (11266, 11286), False, 'import tvm\n'), ((11363, 11404), 'numpy.full', 'np.full', (['shape'], {'fill_value': '(1)', 'dtype': 'dtype'}), '(shape, fill_value=1, dtype=dtype)\n', (11370, 11404), True, 'import 
numpy as np\n'), ((11665, 11697), 'tvm.nd.empty', 'tvm.nd.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (11677, 11697), False, 'import tvm\n'), ((11774, 11815), 'numpy.full', 'np.full', (['shape'], {'fill_value': '(0)', 'dtype': 'dtype'}), '(shape, fill_value=0, dtype=dtype)\n', (11781, 11815), True, 'import numpy as np\n'), ((1109, 1153), 'nnvm.symbol.Variable', 'sym.Variable', (['"""head_grads"""'], {'shape': 'y_np.shape'}), "('head_grads', shape=y_np.shape)\n", (1121, 1153), True, 'import nnvm.symbol as sym\n'), ((2920, 2950), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'dshape'}), '(size=dshape)\n', (2937, 2950), True, 'import numpy as np\n'), ((5196, 5222), 'numpy.greater_equal', 'np.greater_equal', (['x', 'a_min'], {}), '(x, a_min)\n', (5212, 5222), True, 'import numpy as np\n'), ((5255, 5278), 'numpy.less_equal', 'np.less_equal', (['x', 'a_max'], {}), '(x, a_max)\n', (5268, 5278), True, 'import numpy as np\n'), ((5585, 5601), 'numpy.greater', 'np.greater', (['l', 'r'], {}), '(l, r)\n', (5595, 5601), True, 'import numpy as np\n'), ((5989, 6002), 'numpy.less', 'np.less', (['l', 'r'], {}), '(l, r)\n', (5996, 6002), True, 'import numpy as np\n'), ((1405, 1439), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'y_np.shape'}), '(size=y_np.shape)\n', (1422, 1439), True, 'import numpy as np\n'), ((1815, 1849), 'tvm.nd.empty', 'tvm.nd.empty', (['y_np[i].shape', 'dtype'], {}), '(y_np[i].shape, dtype)\n', (1827, 1849), False, 'import tvm\n'), ((2335, 2365), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'dshape'}), '(size=dshape)\n', (2352, 2365), True, 'import numpy as np\n'), ((4555, 4585), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'dshape'}), '(size=dshape)\n', (4572, 4585), True, 'import numpy as np\n'), ((7145, 7165), 'numpy.expand_dims', 'np.expand_dims', (['x', 'i'], {}), '(x, i)\n', (7159, 7165), True, 'import numpy as np\n'), ((7226, 7268), 'numpy.concatenate', 'np.concatenate', (['([x] 
* out_shape[i])'], {'axis': 'i'}), '([x] * out_shape[i], axis=i)\n', (7240, 7268), True, 'import numpy as np\n'), ((447, 476), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'shape'}), '(size=shape)\n', (464, 476), True, 'import numpy as np\n'), ((9359, 9388), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'shape'}), '(size=shape)\n', (9376, 9388), True, 'import numpy as np\n'), ((9850, 9879), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'shape'}), '(size=shape)\n', (9867, 9879), True, 'import numpy as np\n'), ((10339, 10368), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'shape'}), '(size=shape)\n', (10356, 10368), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Miscellaneous utility functions
"""
from __future__ import print_function, unicode_literals, division, absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import next, str
from future.utils import raise_from
import sys
import re
from collections import Iterator
import inspect
from distutils.version import LooseVersion
from textwrap import dedent
import numpy as np
def human_order_sorted(l):
    """Sorts string in human order (i.e. 'stat10' will go after 'stat2')"""

    def natural_keys(text):
        # Tuples are keyed by their first element.
        if isinstance(text, tuple):
            text = text[0]
        # Split into digit / non-digit runs; digit runs compare numerically.
        return [int(chunk) if chunk.isdigit() else chunk
                for chunk in re.split(r'(\d+)', text)]

    return sorted(l, key=natural_keys)
def trim(docstring, marker=None):
    """Dedent a docstring (PEP 257 style).

    When *marker* is given, lines that consist of a single repeated
    character (REST section underlines, except ':') have that character
    replaced by *marker*.
    """
    if isinstance(docstring, bytes):
        docstring = str(docstring, 'utf-8')
    if not docstring:
        return ''
    # Tabs become spaces; work line by line.
    lines = docstring.expandtabs().splitlines()
    # Minimum indentation over all lines but the first.
    margin = sys.maxsize
    for line in lines[1:]:
        body = line.lstrip()
        if body:
            margin = min(margin, len(line) - len(body))
    out = [lines[0].strip()]
    if margin < sys.maxsize:
        for line in lines[1:]:
            stripped = line.strip()
            # A run of one repeated character is treated as a REST
            # underline and rewritten with the document-level marker.
            if (marker is not None and stripped
                    and all(ch == stripped[0] for ch in stripped)
                    and stripped[0] != ':'):
                line = line.replace(stripped[0], marker)
            out.append(line[margin:].rstrip())
    # Drop blank lines at both ends.
    while out and not out[-1]:
        out.pop()
    while out and not out[0]:
        out.pop(0)
    return '\n'.join(out)
def find_indices(condition):
    """Return the indices where ravel(condition) is true"""
    # Flatten first so the result is a single index array.
    (res,) = np.ravel(condition).nonzero()
    return res
def is_container(item):
    """Checks if item is a container (list, tuple, dict, set)

    Parameters
    ----------
    item : object
        object to check for .__iter__

    Returns
    -------
    output : Boolean
        True if container
        False if not (eg string)
    """
    # Strings iterate over characters but are not treated as containers.
    if isinstance(item, str):
        return False
    return hasattr(item, '__iter__')
def container_to_string(cont):
    """Convert a container to a command line string.

    Elements of the container are joined with a space between them,
    suitable for a command line parameter. Non-container inputs (strings,
    numbers) are returned unmodified, coerced to ``str``.

    Parameters
    ----------
    cont : container
        A container object like a list, tuple, dict, or a set.

    Returns
    -------
    cont_str : string
        Container elements joined into a string.
    """
    is_iterable = hasattr(cont, '__iter__') and not isinstance(cont, str)
    if is_iterable:
        cont = ' '.join(cont)
    return str(cont)
# Dependency checks. Copied this from Nipy, with some modificiations
# (added app as a parameter).
def package_check(pkg_name, version=None, app=None, checker=LooseVersion,
                  exc_failed_import=ImportError,
                  exc_failed_check=RuntimeError):
    """Check that the minimal version of the required package is installed.

    Parameters
    ----------
    pkg_name : string
        Name of the required package.
    version : string, optional
        Minimal version number for required package.
    app : string, optional
        Application that is performing the check (e.g. a tutorial name).
        Default is *Nipype*.
    checker : object, optional
        Class performing the version comparison. Default is
        distutils.version.LooseVersion.
    exc_failed_import : Exception, optional
        Exception class raised if the import fails.
    exc_failed_check : Exception, optional
        Exception class raised if the version check fails.

    Examples
    --------
    package_check('numpy', '1.3')
    package_check('scipy', '0.7', 'tutorial1')
    """
    # Empty/None app falls back to the default requirer name.
    msg = '%s requires %s' % (app or 'Nipype', pkg_name)
    if version:
        msg += ' with version >= %s' % (version,)
    try:
        mod = __import__(pkg_name)
    except ImportError as e:
        raise_from(exc_failed_import(msg), e)
    if not version:
        return
    try:
        have_version = mod.__version__
    except AttributeError as e:
        raise_from(exc_failed_check('Cannot find version for %s' % pkg_name),
                   e)
    if checker(have_version) < checker(version):
        raise exc_failed_check(msg)
def str2bool(v):
    """Map a bool or a yes/no-style string to a bool.

    Raises
    ------
    ValueError
        If the string is not a recognized truthy/falsy spelling.
    """
    if isinstance(v, bool):
        return v
    normalized = v.lower()
    if normalized in ("yes", "true", "t", "1"):
        return True
    if normalized in ("no", "false", "n", "f", "0"):
        return False
    raise ValueError("%s cannot be converted to bool" % v)
def flatten(S):
    """Recursively flatten a nested list into a single flat list."""
    if S == []:
        return S
    head, rest = S[0], S[1:]
    # Nested lists are flattened in place; scalars are kept as-is.
    prefix = flatten(head) if isinstance(head, list) else [head]
    return prefix + flatten(rest)
def unflatten(in_list, prev_structure):
    """Rebuild the nesting of *prev_structure* using values from *in_list*.

    Parameters
    ----------
    in_list : iterable
        Flat sequence of values, consumed in order.
    prev_structure : object or (nested) list
        Template whose nesting is reproduced.

    Returns
    -------
    The values of ``in_list`` arranged with the nesting of
    ``prev_structure``.
    """
    # iter() is a no-op on an object that is already an iterator, so this
    # replaces the old `isinstance(in_list, collections.Iterator)` check,
    # which breaks on Python 3.10+ (Iterator moved to collections.abc).
    in_list = iter(in_list)
    if not isinstance(prev_structure, list):
        return next(in_list)
    return [unflatten(in_list, item) for item in prev_structure]
def normalize_mc_params(params, source):
    """
    Normalize a single row of motion parameters to the SPM format.

    SPM saves motion parameters as:
        x   Right-Left          (mm)
        y   Anterior-Posterior  (mm)
        z   Superior-Inferior   (mm)
        rx  Pitch               (rad)
        ry  Yaw                 (rad)
        rz  Roll                (rad)
    """
    fmt = source.upper()
    if fmt == 'FSL':
        # Reorder: move elements 3-5 ahead of 0-2.
        params = params[[3, 4, 5, 0, 1, 2]]
    elif fmt in ('AFNI', 'FSFAST'):
        # Reorder (shifting indices by one when an extra leading column is
        # present) and convert the rotation entries from degrees to radians.
        offset = int(len(params) > 6)
        params = params[np.asarray([4, 5, 3, 1, 2, 0]) + offset]
        params[3:] = params[3:] * np.pi / 180.0
    elif fmt == 'NIPY':
        from nipy.algorithms.registration import to_matrix44, aff2euler
        matrix = to_matrix44(params)
        params = np.zeros(6)
        params[:3] = matrix[:3, 3]
        params[-1:2:-1] = aff2euler(matrix)
    return params
| [
"re.split",
"nipy.algorithms.registration.aff2euler",
"builtins.next",
"nipy.algorithms.registration.to_matrix44",
"numpy.asarray",
"builtins.str",
"future.standard_library.install_aliases",
"numpy.zeros",
"numpy.ravel"
] | [((296, 330), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (328, 330), False, 'from future import standard_library\n'), ((3432, 3441), 'builtins.str', 'str', (['cont'], {}), '(cont)\n', (3435, 3441), False, 'from builtins import next, str\n'), ((1019, 1042), 'builtins.str', 'str', (['docstring', '"""utf-8"""'], {}), "(docstring, 'utf-8')\n", (1022, 1042), False, 'from builtins import next, str\n'), ((2333, 2352), 'numpy.ravel', 'np.ravel', (['condition'], {}), '(condition)\n', (2341, 2352), True, 'import numpy as np\n'), ((5865, 5878), 'builtins.next', 'next', (['in_list'], {}), '(in_list)\n', (5869, 5878), False, 'from builtins import next, str\n'), ((861, 885), 're.split', 're.split', (['"""(\\\\d+)"""', 'text'], {}), "('(\\\\d+)', text)\n", (869, 885), False, 'import re\n'), ((6768, 6787), 'nipy.algorithms.registration.to_matrix44', 'to_matrix44', (['params'], {}), '(params)\n', (6779, 6787), False, 'from nipy.algorithms.registration import to_matrix44, aff2euler\n'), ((6805, 6816), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (6813, 6816), True, 'import numpy as np\n'), ((6878, 6895), 'nipy.algorithms.registration.aff2euler', 'aff2euler', (['matrix'], {}), '(matrix)\n', (6887, 6895), False, 'from nipy.algorithms.registration import to_matrix44, aff2euler\n'), ((6545, 6575), 'numpy.asarray', 'np.asarray', (['[4, 5, 3, 1, 2, 0]'], {}), '([4, 5, 3, 1, 2, 0])\n', (6555, 6575), True, 'import numpy as np\n')] |
''' one_line_description
DESCRIPTION:
Insert a paragraph-long description of the module here.
FUNCTIONS:
This module contains the following (main) functions:
* fun_name : one_line_description
(only add user-facing ones)
'''
import numpy as np
import matplotlib as mpl
import matplotlib.colors
import matplotlib.collections
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from mpl_toolkits.axes_grid1 import make_axes_locatable
# ------------------------------------------------------------------------------
# Main Functions
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Helper Functions
# ------------------------------------------------------------------------------
def plot_mesh(verts, verts_elems, ax, mesh_kwargs=None):
    ''' Draw a mesh as a PolyCollection on *ax*.

    Parameters
    ----------
    verts : list of list of tuples
        Element corner coordinates (see get_verts).
    verts_elems : list of df series
        Element rows matching *verts* (currently unused; kept for API
        symmetry with the other plot_* helpers).
    ax : matplotlib axis handle
        axis handle on which to plot the mesh
    mesh_kwargs : dict, optional
        Extra PolyCollection keyword arguments; 'edgecolor' and
        'linewidth' default to 'k' and 0.2 when not provided.

    Returns
    -------
    ax, pc : the axis and the created PolyCollection
    '''
    # Avoid a mutable default argument; treat None as "no overrides".
    mesh_kwargs = {} if mesh_kwargs is None else mesh_kwargs
    # Defaults apply only when the caller has not overridden them.
    kwargs = {}
    if 'edgecolor' not in mesh_kwargs:
        kwargs['edgecolor'] = 'k'
    if 'linewidth' not in mesh_kwargs:
        kwargs['linewidth'] = 0.2
    kwargs.update(mesh_kwargs)

    # Plot mesh
    pc = mpl.collections.PolyCollection(verts, **kwargs)
    ax.add_collection(pc)
    ax.axis('equal')

    return ax, pc
# NOTE(review): unfinished stub — only builds the vertex list and returns
# None; the actual node-property plotting is still TODO. The mutable
# default `sc_kwargs = {}` should become None when this is implemented.
def plot_mesh_node_prop(elems, nodes, prop, units, fig, ax,
                        sc_kwargs = {}):
    # Get vertices and elements in proper format
    verts, verts_elems = get_verts(elems, nodes)
    # TODO
def plot_mesh_elem_prop(elems, nodes, prop, fig, ax, units=None,
                        colors=False, mesh_kwargs=None, cb_kwargs=None):
    ''' Plots filled mesh with colors mapped to values of prop

    Purpose
    -------
    Builds the element vertices, colors each element by its value of *prop*
    (either via a continuous colormap or a cycled discrete color list), and
    optionally adds a horizontal colorbar below the axis.

    Parameters
    ----------
    elems : dataframe
        Element information; must contain ['N1', 'N2', 'N3', 'N4'] and the
        column named by *prop*.
    nodes : dataframe
        Node information; must contain ['x', 'y', 'node_n'].
    prop : string
        Property used for contour colors. Must exist in *elems*.
    fig, ax : matplotlib figure and axis handles
    units : string, optional
        Units appended to the colorbar label.
    colors : list, optional
        Discrete colors cycled over the sorted unique values of *prop*.
        When falsy, a continuous colormap is used instead.
    mesh_kwargs : dict, optional
        Keyword arguments forwarded to PolyCollection via plot_mesh.
    cb_kwargs : dict, optional
        Colorbar options; 'visible': False suppresses the colorbar.

    Returns
    -------
    fig, ax, cax : figure, axis, and colorbar axis (None if no colorbar).

    Raises
    ------
    Exception
        If *prop* is not a column of the elems dataframe.
    '''
    # BUG FIX: the old `mesh_kwargs = {}` default was mutated below via
    # mesh_kwargs.update(...), leaking 'facecolors'/'array' entries across
    # calls. Use None sentinels and copy the caller's dict before mutating.
    mesh_kwargs = {} if mesh_kwargs is None else dict(mesh_kwargs)
    cb_kwargs = {} if cb_kwargs is None else cb_kwargs

    # Get vertices and elements in proper format
    verts, verts_elems = get_verts(elems, nodes)

    # Make sure that the property exists in elems
    if prop not in verts_elems[0].index.to_list():
        msg = 'Error in plotting mesh of ' + prop + '\n'
        msg += 'the property does not exist in elems dataframe'
        raise Exception(msg)

    # Get values from "verts_elems"
    vals = np.array([float(elem[prop]) for elem in verts_elems])

    # Outline color schemes (either discrete or continuous color maps)
    if colors:
        # Make sure colors are in RGB
        colors = [matplotlib.colors.to_rgba(c) for c in colors]
        unique_vals = np.sort(np.unique(vals))
        facecolors = []
        for v in vals:
            # Cycle through the color list by rank of the element's value.
            i = np.where(v == unique_vals)[0][0]
            facecolors.append(colors[int(i % len(colors))])
        mesh_kwargs.update({'facecolors': facecolors})
    else:
        mesh_kwargs.update({'array': vals})
    ax, pc = plot_mesh(verts, verts_elems, ax, mesh_kwargs)

    # Add colorbar
    # TODO - fix this for discrete colors
    kwargs = {'visible': True, 'orientation': 'horizontal',
              'ticklocation': 'bottom'}
    kwargs.update(cb_kwargs)
    if kwargs['visible']:
        # 'visible' is our own flag, not a colorbar kwarg.
        del kwargs['visible']
        divider = make_axes_locatable(ax)
        cax = divider.append_axes('bottom', size='5%', pad='2%')
        cb = plt.colorbar(pc, cax=cax, **kwargs)
        # Colorbar plotting options
        if 'ticks' not in kwargs.keys():
            cax.xaxis.set_major_locator(ticker.MaxNLocator())
        cb.outline.set_visible(False)
        if 'label' not in kwargs.keys():
            if units:
                cb.set_label(prop + ' (' + units + ')', fontsize=7)
            else:
                cb.set_label(prop, fontsize=7)
    else:
        cax = None

    # Plotting options
    ax.set_xbound(lower=np.min(nodes['x']), upper=np.max(nodes['x']))
    ax.set_ybound(lower=np.min(nodes['y']), upper=np.max(nodes['y']))
    ax.axis('equal')
    return fig, ax, cax
def get_verts(elems, nodes):
    ''' Returns list with element vertices coordinates, and list of elems.

    Purpose
    -------
    Builds "verts", where each entry is a list of four (x, y) tuples giving
    the corners of one element, and "verts_elems", where each entry is the
    corresponding row of the elems dataframe, in the same order.

    Parameters
    ----------
    elems : dataframe
        Information on elements. At a minimum must include:
        ['N1', 'N2', 'N3', 'N4']
    nodes : dataframe
        Contains node information. At a minimum, must include:
        ['x', 'y', 'node_n']

    Returns
    -------
    verts : list of list of tuples
        Element corner coordinates (x, y), in CCW order.
    verts_elems : list of df series
        Rows of elems, in the same order as verts.
    '''
    # Index nodes by node number so corner lookups are direct.
    indexed_nodes = nodes.set_index('node_n')
    corner_cols = ['N1', 'N2', 'N3', 'N4']

    verts = []
    verts_elems = []
    for _, elem in elems.iterrows():
        corners = [(indexed_nodes.loc[int(elem[col]), 'x'],
                    indexed_nodes.loc[int(elem[col]), 'y'])
                   for col in corner_cols]
        verts.append(corners)
        verts_elems.append(elem)
    return (verts, verts_elems)
| [
"numpy.unique",
"matplotlib.collections.PolyCollection",
"numpy.where",
"matplotlib.pyplot.colorbar",
"numpy.max",
"matplotlib.ticker.MaxNLocator",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"numpy.min"
] | [((1189, 1236), 'matplotlib.collections.PolyCollection', 'mpl.collections.PolyCollection', (['verts'], {}), '(verts, **kwargs)\n', (1219, 1236), True, 'import matplotlib as mpl\n'), ((4019, 4042), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (4038, 4042), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((4126, 4161), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['pc'], {'cax': 'cax'}), '(pc, cax=cax, **kwargs)\n', (4138, 4161), True, 'import matplotlib.pyplot as plt\n'), ((3376, 3391), 'numpy.unique', 'np.unique', (['vals'], {}), '(vals)\n', (3385, 3391), True, 'import numpy as np\n'), ((4643, 4661), 'numpy.min', 'np.min', (["nodes['x']"], {}), "(nodes['x'])\n", (4649, 4661), True, 'import numpy as np\n'), ((4671, 4689), 'numpy.max', 'np.max', (["nodes['x']"], {}), "(nodes['x'])\n", (4677, 4689), True, 'import numpy as np\n'), ((4717, 4735), 'numpy.min', 'np.min', (["nodes['y']"], {}), "(nodes['y'])\n", (4723, 4735), True, 'import numpy as np\n'), ((4745, 4763), 'numpy.max', 'np.max', (["nodes['y']"], {}), "(nodes['y'])\n", (4751, 4763), True, 'import numpy as np\n'), ((4298, 4318), 'matplotlib.ticker.MaxNLocator', 'ticker.MaxNLocator', ([], {}), '()\n', (4316, 4318), True, 'import matplotlib.ticker as ticker\n'), ((3457, 3483), 'numpy.where', 'np.where', (['(v == unique_vals)'], {}), '(v == unique_vals)\n', (3465, 3483), True, 'import numpy as np\n')] |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division, unicode_literals
import argparse
import logging
import os
import os.path as osp
import queue
import signal
import subprocess
import sys
import threading
import numpy as np
import sentencepiece_ja.sp
# Set PATHs
import torch
# Paths used by SentEval (the senteval package and its task data).
PATH_TO_SENTEVAL = '..'
PATH_TO_DATA = osp.join('..', 'data')
PATH_TO_SKIPTHOUGHT = ''
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
# Module-level state shared between the SentEval callbacks below.
proc = None  # extractor subprocess; started lazily in prepare()
sp = None  # optional sentencepiece tokenizer; loaded lazily in prepare()
outq = queue.Queue()  # embedding lines read from the extractor's stdout
embedding_prefix = 'Sequence embedding: '
def prepare(params, samples):
    """SentEval `prepare` hook: lazily start the embedding extractor.

    Launches the external extractor as a long-lived subprocess (its stdout is
    drained by a background thread into `outq`) and, if requested, loads the
    sentencepiece BPE model. Both are created once and reused across tasks.
    """
    global proc, sp
    # keep open the extractor as a subprocess and send requests for each batch
    if proc is None:
        # bufsize=1 gives line-buffered pipes; preexec_fn=os.setsid puts the
        # (shell-spawned) extractor in its own process group so end_process()
        # can signal the whole group at once.
        proc = subprocess.Popen(args.extract_command, stdout=subprocess.PIPE, stdin=subprocess.PIPE, encoding='utf8',
                                shell=True, bufsize=1, preexec_fn=os.setsid)
        t = threading.Thread(target=output_reader)
        t.start()
    if args.bpe_model and sp is None:
        sp = sentencepiece_ja.sp.SentencePieceProcessorJa(args.bpe_model)
def end_process():
    """Send SIGINT to the extractor's whole process group, if one was started."""
    if proc is None:
        return
    os.killpg(os.getpgid(proc.pid), signal.SIGINT)
def output_reader():
    """Relay embedding lines from the extractor's stdout into ``outq``.

    Runs on a background thread; lines that do not start with the embedding
    prefix (e.g. extractor log output) are dropped.
    """
    for raw_line in proc.stdout:
        if raw_line.startswith(embedding_prefix):
            outq.put(raw_line)
def batcher(params, batch):
    """SentEval `batcher` hook: embed a batch of tokenized sentences.

    Each sentence is (optionally) re-tokenized with the BPE model, truncated
    to ``args.max_tokens``, and written to the extractor's stdin; one
    embedding line per sentence is then collected from ``outq``.

    Returns a (len(batch), emb_dim) numpy array of embeddings.
    """
    for sentence_tokens in batch:
        if sp:
            # Re-join the pre-tokenized sentence before BPE encoding
            # (Japanese text is joined without spaces).
            if args.language == 'en':
                sentence = ' '.join(sentence_tokens)
            elif args.language == 'ja':
                sentence = ''.join(sentence_tokens)
            sentence_tokens = sp.encode_line(sentence)
        if len(sentence_tokens) > args.max_tokens:
            sentence_tokens = sentence_tokens[:args.max_tokens]
        sentence = ' '.join(sentence_tokens)
        proc.stdin.write(sentence + '\n')
    embeddings = []
    # Collect exactly one embedding line per submitted sentence; the reader
    # thread preserves order because stdin/stdout are line-buffered pipes.
    for _ in range(len(batch)):
        line = outq.get(timeout=60)
        embeddings.append(list(map(float, line[len(embedding_prefix):].rstrip().split(' '))))
    return np.array(embeddings)
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--extract-command', required=True,
                        help='Command to start the extractor that reads sentences from stdin '
                             'and prints embeddings to stdout')
    parser.add_argument('--bpe-model', help='Sentencepiece BPE to encode sentences before piping them to the command')
    parser.add_argument('--max-tokens', default=1000, type=int, help='Truncates sentences longer than this many tokens')
    parser.add_argument('--tasks', default=['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
                                            'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                                            'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
                                            'Length', 'WordContent', 'Depth', 'TopConstituents',
                                            'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                                            'OddManOut', 'CoordinationInversion', 'MASC', 'BEAN'], nargs='+')
    parser.add_argument('--kfold', type=int, default=10)
    parser.add_argument('--language', choices=['en', 'ja'], default='en')
    parser.add_argument('--usepytorch', action='store_true', default=False)
    parser.add_argument('--noreg', action='store_true', default=False)
    args = parser.parse_args()
    # Set params for SentEval
    params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': args.usepytorch, 'kfold': args.kfold, 'batch_size': 512, 'noreg': args.noreg,
                       'classifier': {'nhid': 0, 'optim': 'adam', 'batch_size': 64,
                                      'tenacity': 5, 'epoch_size': 4}}
    try:
        # SentEval drives prepare()/batcher() defined above for every task.
        se = senteval.engine.SE(params_senteval, batcher, prepare)
        results = se.eval(args.tasks)
        print(results)
    except Exception as e:
        raise e
    finally:
        # Always tear down the extractor subprocess, even on failure.
        end_process()
| [
"logging.basicConfig",
"os.getpgid",
"sys.path.insert",
"senteval.engine.SE",
"argparse.ArgumentParser",
"subprocess.Popen",
"os.path.join",
"numpy.array",
"threading.Thread",
"queue.Queue"
] | [((510, 532), 'os.path.join', 'osp.join', (['""".."""', '"""data"""'], {}), "('..', 'data')\n", (518, 532), True, 'import os.path as osp\n'), ((559, 595), 'sys.path.insert', 'sys.path.insert', (['(0)', 'PATH_TO_SENTEVAL'], {}), '(0, PATH_TO_SENTEVAL)\n', (574, 595), False, 'import sys\n'), ((642, 655), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (653, 655), False, 'import queue\n'), ((2227, 2303), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(message)s"""', 'level': 'logging.DEBUG'}), "(format='%(asctime)s : %(message)s', level=logging.DEBUG)\n", (2246, 2303), False, 'import logging\n'), ((2188, 2208), 'numpy.array', 'np.array', (['embeddings'], {}), '(embeddings)\n', (2196, 2208), True, 'import numpy as np\n'), ((2345, 2370), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2368, 2370), False, 'import argparse\n'), ((867, 1024), 'subprocess.Popen', 'subprocess.Popen', (['args.extract_command'], {'stdout': 'subprocess.PIPE', 'stdin': 'subprocess.PIPE', 'encoding': '"""utf8"""', 'shell': '(True)', 'bufsize': '(1)', 'preexec_fn': 'os.setsid'}), "(args.extract_command, stdout=subprocess.PIPE, stdin=\n subprocess.PIPE, encoding='utf8', shell=True, bufsize=1, preexec_fn=os.\n setsid)\n", (883, 1024), False, 'import subprocess\n'), ((1060, 1098), 'threading.Thread', 'threading.Thread', ([], {'target': 'output_reader'}), '(target=output_reader)\n', (1076, 1098), False, 'import threading\n'), ((4082, 4135), 'senteval.engine.SE', 'senteval.engine.SE', (['params_senteval', 'batcher', 'prepare'], {}), '(params_senteval, batcher, prepare)\n', (4100, 4135), False, 'import senteval\n'), ((1294, 1314), 'os.getpgid', 'os.getpgid', (['proc.pid'], {}), '(proc.pid)\n', (1304, 1314), False, 'import os\n')] |
"""Data Preprocessors."""
import numpy as np
from typing import Optional
class Normalization(object):
    """Min-max scaler: map every feature into the range [0, 1]."""

    def __call__(self, X, axis: int = 0):
        """Return ``X`` rescaled so values along ``axis`` span [0, 1]."""
        lo = np.amin(X, axis=axis)
        hi = np.amax(X, axis=axis)
        return (X - lo) / (hi - lo)
class Standardization(object):
    """Rescale the data via a standardization (z-score).

    Produces, along ``axis``:
        - mean(X_standardized) = 0
        - std(X_standardized) = 1

    Statistics may be supplied up front; otherwise they are estimated from
    the first array passed to ``__call__`` and cached for later calls.
    """

    def __init__(self, mu: Optional[float] = None, sigma: Optional[float] = None):
        # mu/sigma may be scalars or per-feature arrays.
        self.mu = mu
        self.sigma = sigma

    def __call__(self, X, axis: int = 0):
        if self.mu is None or self.sigma is None:
            # Estimate the statistics from the data on first use.
            self.mu = np.mean(X, axis=axis)
            self.sigma = np.std(X, axis=axis)
        if np.any(self.sigma == 0):
            # Constant features have zero std; divide them by 1 instead of 0
            # so their standardized value becomes 0 rather than inf/nan.
            self.sigma = np.where(self.sigma == 0, np.ones_like(self.sigma), self.sigma)
        return np.divide(X - self.mu, self.sigma)
| [
"numpy.mean",
"numpy.ones_like",
"numpy.amin",
"numpy.any",
"numpy.std",
"numpy.amax",
"numpy.divide"
] | [((277, 298), 'numpy.amin', 'np.amin', (['X'], {'axis': 'axis'}), '(X, axis=axis)\n', (284, 298), True, 'import numpy as np\n'), ((315, 336), 'numpy.amax', 'np.amax', (['X'], {'axis': 'axis'}), '(X, axis=axis)\n', (322, 336), True, 'import numpy as np\n'), ((963, 997), 'numpy.divide', 'np.divide', (['(X - self.mu)', 'self.sigma'], {}), '(X - self.mu, self.sigma)\n', (972, 997), True, 'import numpy as np\n'), ((785, 806), 'numpy.mean', 'np.mean', (['X'], {'axis': 'axis'}), '(X, axis=axis)\n', (792, 806), True, 'import numpy as np\n'), ((832, 852), 'numpy.std', 'np.std', (['X'], {'axis': 'axis'}), '(X, axis=axis)\n', (838, 852), True, 'import numpy as np\n'), ((873, 891), 'numpy.any', 'np.any', (['self.sigma'], {}), '(self.sigma)\n', (879, 891), True, 'import numpy as np\n'), ((922, 946), 'numpy.ones_like', 'np.ones_like', (['self.sigma'], {}), '(self.sigma)\n', (934, 946), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from basic import imshow, rescale
def circular(img):
    """Clip an image to a centered circle by masking its alpha channel.

    If the image has fewer than four channels, a fully opaque alpha channel
    is appended first. The circle is centered in the image with radius
    min(width, height) // 2; alpha outside the circle is zeroed.
    """
    height, width = img.shape[0], img.shape[1]
    if img.shape[2] < 4:
        opaque = np.ones((height, width, 1), dtype=img.dtype) * 255
        img = np.concatenate([img, opaque], axis=2)
    mask = np.zeros_like(img[:, :, 3])
    center = (int(width / 2), int(height / 2))
    radius = min(int(width / 2), int(height / 2))
    cv2.circle(mask, center, radius, color=(1, 1, 1), thickness=-1)
    img[:, :, 3] = mask * img[:, :, 3]
    return img
| [
"numpy.zeros_like",
"numpy.ones"
] | [((340, 367), 'numpy.zeros_like', 'np.zeros_like', (['img[:, :, 3]'], {}), '(img[:, :, 3])\n', (353, 367), True, 'import numpy as np\n'), ((204, 261), 'numpy.ones', 'np.ones', (['(img.shape[0], img.shape[1], 1)'], {'dtype': 'img.dtype'}), '((img.shape[0], img.shape[1], 1), dtype=img.dtype)\n', (211, 261), True, 'import numpy as np\n')] |
import os

import matplotlib as mpl

# Select the non-interactive Agg backend *before* importing pyplot:
# calling mpl.use() after "import matplotlib.pyplot" is unreliable and may
# leave the default (possibly display-requiring) backend active.
mpl.use("Agg")

import matplotlib.pyplot as plt
import numpy as np
from getdist import plots
from scipy.stats import norm

roots = ["mcmc"]
params = ["beta", "alpha100", "alpha143", "alpha217", "alpha353"]

# Triangle plot of the chains; the first 30% of samples are dropped as burn-in.
g = plots.get_subplot_plotter(
    chain_dir=os.path.join(os.getcwd(), "chains"),
    analysis_settings={"ignore_rows": 0.3},
)
kwargs = dict(colors=["k"], lws=[1])
g.triangle_plot(roots, params, **kwargs, diag1d_kwargs=kwargs)

# Show Minami & Komatsu results https://arxiv.org/abs/2011.11254
eb_results = {
    "beta": {"mean": 0.35, "std": 0.14},
    "alpha100": {"mean": -0.28, "std": 0.13},
    "alpha143": {"mean": +0.07, "std": 0.12},
    "alpha217": {"mean": -0.07, "std": 0.11},
    "alpha353": {"mean": -0.09, "std": 0.11},
}

# Overlay the published Gaussian posteriors (peak-normalized) on the 1D panels.
for i, param in enumerate(params):
    ax = g.subplots[i, i]
    xmin, xmax, ymin, ymax = ax.axis()
    x = np.linspace(xmin, xmax, 100)
    posterior = norm.pdf(x, eb_results[param]["mean"], eb_results[param]["std"])
    ax.plot(x, posterior / np.max(posterior), color="tab:red")

# Fake legend
g.subplots[0, 0].plot([], [], color="tab:red", label="Minami & Komatsu")
g.subplots[0, 0].plot([], [], color="k", label="PSpipe")
g.subplots[0, 0].legend(loc="upper left", bbox_to_anchor=(1, 1))

# Add table on figure
table_results = r"""
\begin{tabular} { l c}
 Parameter &  68\% limits\\
\hline
{\boldmath$\alpha_{100} $} & $-0.28\pm0.13           $\\
{\boldmath$\alpha_{143} $} & $0.07 \pm0.12            $\\
{\boldmath$\alpha_{217} $} & $-0.07\pm0.11        $\\
{\boldmath$\alpha_{353} $} & $-0.09\pm0.11        $\\
{\boldmath$\beta        $} & $0.35 \pm0.14            $\\
\hline
\end{tabular}
"""
with mpl.rc_context(rc={"text.usetex": True}):
    table = g.sample_analyser.mcsamples[roots[0]].getTable(limit=1, paramList=params)
    kwargs = dict(size=15, ha="right")
    g.subplots[0, 0].text(5, +0.0, "<NAME>" + table_results.replace("\n", ""), **kwargs)
    g.subplots[0, 0].text(5, -1.0, "PSpipe" + table.tableTex().replace("\n", ""), **kwargs)

plt.savefig("EB_plot_chain_results.png")
| [
"matplotlib.pyplot.savefig",
"matplotlib.rc_context",
"matplotlib.use",
"os.getcwd",
"numpy.max",
"numpy.linspace",
"scipy.stats.norm.pdf"
] | [((114, 128), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (121, 128), True, 'import matplotlib as mpl\n'), ((2002, 2042), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""EB_plot_chain_results.png"""'], {}), "('EB_plot_chain_results.png')\n", (2013, 2042), True, 'import matplotlib.pyplot as plt\n'), ((891, 919), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(100)'], {}), '(xmin, xmax, 100)\n', (902, 919), True, 'import numpy as np\n'), ((936, 1000), 'scipy.stats.norm.pdf', 'norm.pdf', (['x', "eb_results[param]['mean']", "eb_results[param]['std']"], {}), "(x, eb_results[param]['mean'], eb_results[param]['std'])\n", (944, 1000), False, 'from scipy.stats import norm\n'), ((1653, 1693), 'matplotlib.rc_context', 'mpl.rc_context', ([], {'rc': "{'text.usetex': True}"}), "(rc={'text.usetex': True})\n", (1667, 1693), True, 'import matplotlib as mpl\n'), ((273, 284), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (282, 284), False, 'import os\n'), ((1028, 1045), 'numpy.max', 'np.max', (['posterior'], {}), '(posterior)\n', (1034, 1045), True, 'import numpy as np\n')] |
import numpy as np
# Read the node list: one "name<TAB>x<TAB>z" record per line.
# info: [{'node': 'x', 'x': pos_x, 'z': pos_z}]
nodes = []
with open('nodes_list.txt', 'r') as f_src:
    for raw_line in f_src:
        fields = raw_line.split('\t')
        if len(fields) != 3:
            # Stop at the first malformed (or blank) line.
            break
        nodes.append({'node': fields[0], 'x': int(fields[1]), 'z': int(fields[2])})
len_nodes = len(nodes)  # amount
print("Distance table:")
# n*n distance table: length=dist_table_full[index1][index2]
dist_table_full=np.full((len_nodes, len_nodes), -1, dtype=int)
# n*(n-1)/2 E dict: {'node1':index1, 'node2':index2, 'length':len}
dist_table_dict=[]
print("#\t",end ="")
for i in range(0,len_nodes):
if(i==len_nodes-1):
print(nodes[i]['node'],end ="\n")
else:
print(nodes[i]['node'],end ="\t")
for i in range(0,len_nodes):
print(nodes[i]['node'],end ="\t")
for j in range(0,len_nodes):
dist_table_full[i][j]=int(pow(pow(nodes[i]['x']-nodes[j]['x'],2)+pow(nodes[i]['z']-nodes[j]['z'],2),1/2))
if(i < j):
dist_table_dict.append({'node1':i, 'node2':j, 'length':dist_table_full[i][j]})
if(j==len_nodes-1):
print(dist_table_full[i][j],end ="\n")
else:
print(dist_table_full[i][j],end ="\t")
print()
## MST: Prim
# G = (V=nodes, E=dist_table_full)
#
# Proper Prim's algorithm: at every step add the minimum-weight edge that
# crosses the cut between the tree and the remaining nodes.  (The previous
# code attached an *arbitrary* remaining node to its nearest tree node,
# which produces a spanning tree but not necessarily a minimal one.)
prim_V_a = []    # nodes already in the tree
prim_V_b = []    # nodes not yet in the tree
prim_path = []   # generated path: {'node1':index, 'node2':index}
sum_length = 0
for i in range(0, len_nodes):
    prim_V_b.append({'index': i})
prim_V_a.append(prim_V_b.pop())  # init node
while prim_V_b:
    best_b_pos = -1    # position (in prim_V_b) of the best outside node
    best_a_index = -1  # tree node the best edge connects to
    min_length = -1    # weight of the best edge across the cut
    for b_pos, node_b in enumerate(prim_V_b):
        for node_a in prim_V_a:
            target_len = dist_table_full[node_a['index']][node_b['index']]
            if target_len == -1:
                continue  # skip missing edges
            if min_length == -1 or target_len < min_length:
                min_length = target_len
                best_a_index = node_a['index']
                best_b_pos = b_pos
    if best_b_pos == -1:
        # No edge crosses the cut: the remaining nodes are unreachable.
        print("MST failed: %d node(s) unreachable, no path valid" % len(prim_V_b))
        break
    node_b = prim_V_b.pop(best_b_pos)
    sum_length = sum_length + dist_table_full[best_a_index][node_b['index']]
    prim_V_a.append(node_b)
    prim_path.append({'node1': min(best_a_index, node_b['index']), 'node2': max(best_a_index, node_b['index'])})
# MST length table: adjacency matrix restricted to the edges Prim selected
# (0 everywhere else, symmetric).
conn_table_mst=np.full((len(prim_V_a), len(prim_V_a)), 0, dtype=int)
for gen_path in prim_path:
    conn_table_mst[gen_path['node1']][gen_path['node2']]=dist_table_full[gen_path['node1']][gen_path['node2']]
    conn_table_mst[gen_path['node2']][gen_path['node1']]=dist_table_full[gen_path['node2']][gen_path['node1']]
# print table
print("MST table:")
print("#\t",end ="")
for i in range(0,len(prim_V_a)):
    if(i==len(prim_V_a)-1):
        print(nodes[prim_V_a[i]['index']]['node'],end ="\n")
    else:
        print(nodes[prim_V_a[i]['index']]['node'],end ="\t")
for i in range(0,len(prim_V_a)):
    print(nodes[prim_V_a[i]['index']]['node'],end ="\t")
    for j in range(0,len(prim_V_a)):
        if(j==len(prim_V_a)-1):
            print(conn_table_mst[prim_V_a[i]['index']][prim_V_a[j]['index']],end ="\n")
        else:
            print(conn_table_mst[prim_V_a[i]['index']][prim_V_a[j]['index']],end ="\t")
print("Total length: %d"%sum_length)
## MST: Prim
print()
# List every selected edge in both directions with its length.
print("MST paths:")
print("From\tTo\tLength")
for path in prim_path:
    print("%s\t%s\t%d"%(nodes[path['node1']]['node'],nodes[path['node2']]['node'],conn_table_mst[path['node1']][path['node2']]))
    print("%s\t%s\t%d"%(nodes[path['node2']]['node'],nodes[path['node1']]['node'],conn_table_mst[path['node2']][path['node1']]))
| [
"numpy.full"
] | [((490, 536), 'numpy.full', 'np.full', (['(len_nodes, len_nodes)', '(-1)'], {'dtype': 'int'}), '((len_nodes, len_nodes), -1, dtype=int)\n', (497, 536), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.autograd import Variable
from rl_agent.film_utils import ResidualBlock, FiLMedResBlock
from .gpu_utils import FloatTensor
class DRQNText(nn.Module):
    """Recurrent DQN over visual states conditioned on a text objective.

    Per timestep: the objective tokens are embedded and encoded by an LSTM;
    the visual state goes through residual conv blocks plus an optional conv
    head; text and vision features are fused (concatenate / dot / attention /
    no_fuse, selected by config); the fused vector feeds a stack of LSTMCells
    whose hidden state produces Q-values via a final linear layer.
    FiLM modulation is present only as commented-out code and is asserted off.
    """
    def __init__(self, config, n_actions, state_dim, is_multi_objective):
        super(DRQNText, self).__init__()

        # General params
        self.lr = config["learning_rate"]
        self.discount_factor = config["discount_factor"]
        self.is_multi_objective = is_multi_objective
        self.state_dim = state_dim

        # Resblock params
        self.n_regular_block = config["n_regular_block"]
        #self.n_modulated_block = config["n_modulated_block"]
        self.resblock_dropout = config["resblock_dropout"]

        # Head params
        self.n_channel_head = config["head_channel"]
        self.kernel_size_head = config["head_kernel"]
        self.pool_kernel_size_head = config["head_pool_kernel"]

        # If use attention : no head/pooling
        self.use_attention = config['fusing_method_before_recurrent'] == 'attention'

        self.use_film = config["use_film"]

        # Text_encoding params
        self.word_embedding_size = config['word_emb_size']
        self.text_lstm_size = config['text_lstm_size']

        # dimensions are (len_seq, vocab_size) for one sentence
        self.vocab_size = self.state_dim['objective']

        # LSTM Params
        self.n_hidden_lstm = config['n_hidden_lstm_dyn']
        self.lstm_dynamic_num_layer = config['lstm_dyn_n_layer']

        self.input_shape = state_dim['env_state']
        self.objective_shape = state_dim['objective']
        # NOTE(review): assumes env_state shape is channel-first (C, H, W) —
        # confirm against the environment wrapper.
        self.n_channel_per_state = self.input_shape[0]

        # FiLM modulation is not wired up in this version (code below is
        # commented out), so it is explicitly rejected here.
        assert not self.use_film, "Not possible at the moment"

        # if is_multi_objective and self.use_film:
        #     self.film_gen = TextFilmGen(config=config['film_gen_param_text'],
        #                                 n_block_to_modulate=self.n_modulated_block,
        #                                 n_features_per_block=self.n_channel_per_state,
        #                                 input_size=self.lstm_size)

        self.n_actions = n_actions

        self.regular_blocks = nn.ModuleList()
        # self.modulated_blocks = nn.ModuleList()

        # Create resblock, not modulated by FiLM
        for regular_resblock_num in range(self.n_regular_block):
            current_regular_resblock = ResidualBlock(in_dim=self.n_channel_per_state,
                                                      with_residual=True)

            self.regular_blocks.append(current_regular_resblock)

        # # Create FiLM-ed resblock
        # for modulated_block_num in range(self.n_modulated_block):
        #     current_modulated_resblock = FiLMedResBlock(in_dim=self.n_channel_per_state,
        #                                                 with_residual=True,
        #                                                 with_batchnorm=False,
        #                                                 with_cond=[True],
        #                                                 dropout=self.resblock_dropout)
        #
        #     self.modulated_blocks.append(current_modulated_resblock)

        if self.word_embedding_size > 0:
            self.word_embedding = nn.Embedding(self.vocab_size, self.word_embedding_size)
        else:
            # todo : one hot encoding
            raise NotImplementedError("Word embedding is needed.")

        # Encodes the embedded objective tokens; only the last hidden state
        # is used (see forward_env_features).
        self.text_objective_lstm = nn.LSTM(input_size=self.word_embedding_size,
                                           hidden_size=self.text_lstm_size,
                                           num_layers=1,
                                           batch_first=True)

        # head
        if self.kernel_size_head != 0 and not self.use_attention:
            self.head_conv = nn.Conv2d(in_channels=self.n_channel_per_state,
                                       out_channels=self.n_channel_head,
                                       kernel_size=self.kernel_size_head)
        else:
            # Identity stand-in so compute_conv can call head_conv uniformly.
            self.head_conv = lambda x:x

        # Run a dummy forward pass to discover the conv output size.
        vizfeat_shape_before_fuse = self.compute_conv_size()
        vizfeat_n_feat_map = vizfeat_shape_before_fuse[1]
        vizfeat_height = vizfeat_shape_before_fuse[2]
        vizfeat_width = vizfeat_shape_before_fuse[3]

        vizfeat_size_flatten_before_fuse = vizfeat_n_feat_map*vizfeat_height*vizfeat_width

        # The mlp has to deal with the image features AND text features
        # How do you fuse them ? (concatenation, dot-product, attention, don't use)
        # todo : one function "choose_fuse" that return the size of fused and the true fusing fusion
        if config['fusing_method_before_recurrent'] == 'concatenate':
            lstm_input_size = self.text_lstm_size + vizfeat_size_flatten_before_fuse
            self.fuse_before_lstm = self.concatenate_text_vision
        elif config['fusing_method_before_recurrent'] == 'dot':
            # Project both modalities to a common size, fuse by elementwise product.
            self.embedding_size_before_dot = config['embedding_size_before_dot']
            self.visual_embedding_before_dot = nn.Linear(vizfeat_size_flatten_before_fuse, self.embedding_size_before_dot)
            self.text_embedding_before_dot = nn.Linear(self.text_lstm_size, self.embedding_size_before_dot)
            self.fuse_before_lstm = self.dot_product_text_vision
            lstm_input_size = self.embedding_size_before_dot
        elif config['fusing_method_before_recurrent'] == 'attention':
            # Per-pixel attention scored from (text, pixel-features) pairs.
            attention_input_size = self.text_lstm_size + vizfeat_n_feat_map
            hidden_mlp_size = config['hidden_mlp_attention']
            if hidden_mlp_size > 0:
                hidden_layer_att = nn.Linear(attention_input_size, hidden_mlp_size)
                relu = nn.ReLU()
                self.attention_hidden = nn.Sequential(hidden_layer_att, relu)
            else:
                self.attention_hidden = lambda x:x
                hidden_mlp_size = attention_input_size

            self.attention_last = nn.Linear(hidden_mlp_size, 1)
            self.fuse_before_lstm = self.compute_attention
            # text concatenated with vision after visual_attention, so size = lstm_size + width*heigth
            lstm_input_size = self.text_lstm_size + vizfeat_n_feat_map

        elif config['fusing_method_before_recurrent'] == "no_fuse": # Usual Film method, the text is not used
            # if Film no fuse, the size expected by the fc is the size of visual features, flattened
            lstm_input_size = vizfeat_size_flatten_before_fuse
            self.fuse_before_lstm = self.vectorize
        else:
            raise NotImplementedError("Wrong Fusion method : {}, can only be : concatenate, dot, attention or no_fuse, but need to be explicit".format(config['fusing_method_before_recurrent']))

        # Recurrent core: a stack of LSTMCells stepped once per forward().
        self.lstm_cells = nn.ModuleList()
        for cell in range(self.lstm_dynamic_num_layer):
            self.lstm_cells.append(nn.LSTMCell(input_size=lstm_input_size, hidden_size= self.n_hidden_lstm))

        # Todo : use layer norm ??
        # Todo : fuse after lstm
        self.final_fc = nn.Linear(in_features=self.n_hidden_lstm, out_features=self.n_actions)

        optimizer = config['optimizer'].lower()
        optim_config = [
            {'params': self.get_all_params_except_film(), 'weight_decay': config['default_w_decay']}, # Default config
        ]
        # if self.use_film:
        #     optim_config.append({'params': self.film_gen.parameters(), 'weight_decay': config['FiLM_decay']}) # Film gen parameters
        #     assert len([i for i in optim_config[1]['params']]) + len([i for i in optim_config[0]['params']]) == len([i for i in self.parameters()])

        if optimizer == 'rmsprop':
            self.optimizer = optim.RMSprop(optim_config, lr=self.lr)
        elif optimizer == 'adam':
            self.optimizer = optim.Adam(optim_config, lr=self.lr)
        elif optimizer == "sgd":
            self.optimizer = optim.SGD(optim_config, lr=self.lr)
        else:
            assert False, 'Optimizer not recognized'

        # Recurrent state for acting (batch size 1); a separate state pair
        # is used while optimizing (see optimize_mode()).
        self.currently_optimizing = False
        self.last_h = Variable(torch.ones(1, self.n_hidden_lstm).type(FloatTensor))
        self.last_c = Variable(torch.ones(1, self.n_hidden_lstm).type(FloatTensor))

    def forward(self, x):
        """
        One timestep forward in time

        2 mode are available :
        current_optimizing = True
        or
        current_optimizing = False

        It only changes WHERE you store h and c (since you do one step of optimizing at every step of an episode
        you want to remember the h used during the episode, and use another h during the optimization
        """
        if self.currently_optimizing:
            h, c = self.optimize_h, self.optimize_c
        else:
            h, c = self.last_h, self.last_c

        x = self.forward_env_features(x)
        for lstm_cell in self.lstm_cells:
            h, c = lstm_cell(x, (h,c))

        # Persist the recurrent state into the slot matching the current mode.
        if self.currently_optimizing:
            self.optimize_h, self.optimize_c = h, c
        else:
            self.last_h, self.last_c = h, c

        q_values = self.final_fc(h)
        return q_values

    # def forward_multiple_step(self, batch_seq):
    #     """
    #     :param batch_seq: should be of size [Length_seq, Batch, FeatureMaps, Width, Height]
    #     :return: q_values for the last step
    #     """
    #
    #     state_sequence_length = batch_seq.size(0)
    #
    #     h = Variable(torch.ones_like(batch_seq[0]))
    #     c = Variable(torch.ones_like(batch_seq[0]))
    #
    #     # todo : what if there are less step than 'state_sequence_length' ?
    #     # variable number of state_seq_length ?
    #     for step in range(state_sequence_length):
    #         x = batch_seq[step]
    #
    #         x = self.forward_env_features(x)
    #
    #         for lstm_cell in self.lstm_cells:
    #             h, c = lstm_cell(x, (h, c))
    #
    #     q_values = self.final_fc(h)
    #     return q_values

    def forward_env_features(self, x):
        """
        Apply convolution and fusing to raw input x
        :param x:
        :return:
        """
        x, text_objective = x['env_state'], x['objective']

        embedded_objective = self.word_embedding(text_objective)
        _, (text_state, _) = self.text_objective_lstm(embedded_objective)
        # delete the 'sequence' dimension of the lstm, since we take only the last hidden_state
        text_state = text_state.squeeze(0)

        # if self.use_film:
        #     gammas, betas = self.film_gen.forward(text_state)
        # else:
        gammas, betas = None, None

        x = self.compute_conv(x, gammas=gammas, betas=betas)
        # fusing text and images
        x = self.fuse_before_lstm(text=text_state, vision=x)
        return x

    def compute_conv_size(self):
        """Return the conv stack's output size for a dummy zero input."""
        # Don't convert it to cuda because the model is not yet on GPU (because you're still defining the model here ;)
        tmp = Variable(torch.zeros(1, *self.input_shape))
        return self.compute_conv(tmp).size()

    def compute_conv(self, x, gammas=None, betas=None):
        """Run the residual conv blocks (plus head conv + max-pool unless attention is used)."""
        batch_size = x.size(0)
        # if gammas is None:
        #     # Gammas = all ones
        #     # Betas = all zeros
        #     gammas = Variable(torch.ones(batch_size, self.n_channel_per_state * self.n_modulated_block).type_as(x.data))
        #     betas = Variable(torch.zeros_like(gammas.data).type_as(x.data))

        for i,regular_resblock in enumerate(self.regular_blocks):
            x = regular_resblock.forward(x)

        # for i,modulated_resblock in enumerate(self.modulated_blocks):
        #     gamma_beta_id = slice(self.n_channel_per_state * i, self.n_channel_per_state * (i + 1))
        #     x = modulated_resblock.forward(x, gammas=gammas[:, gamma_beta_id], betas=betas[:, gamma_beta_id])

        if not self.use_attention:
            x = self.head_conv(x)
            x = F.max_pool2d(x, kernel_size=self.pool_kernel_size_head)
        return x

    def optimize_mode(self, optimize, batch_size=None):
        """Switch between acting and optimizing recurrent-state slots.

        When entering optimize mode a fresh (h, c) pair of the given batch
        size is created so optimization does not clobber the episode state.
        """
        if optimize:
            assert batch_size, "If you want to switch to optimize mode, you have to specify the batch size."
            # todo : ones or zero ??
            self.optimize_h, self.optimize_c = Variable(torch.ones(batch_size, self.n_hidden_lstm).type(FloatTensor)), Variable(torch.ones(batch_size, self.n_hidden_lstm).type(FloatTensor))
        self.currently_optimizing = optimize

    def get_all_params_except_film(self):
        """Return all parameters whose name does not contain 'film_gen'."""
        params = []
        for name, param in self.named_parameters():
            if "film_gen" not in name:
                params.append(param)
        return params

    def concatenate_text_vision(self, text, vision):
        """Fuse by flattening the visual features and concatenating the text vector."""
        vision = vision.view(vision.size(0), -1)
        return torch.cat((vision, text), dim=1)

    def dot_product_text_vision(self, text, vision):
        """Fuse by projecting both modalities to a shared size and multiplying elementwise."""
        vision = vision.view(vision.size(0), -1)
        text = self.text_embedding_before_dot(text)
        vision = self.visual_embedding_before_dot(vision)
        return text * vision

    def compute_attention(self, text, vision):
        """
        :param text: lstm-encoded text. dim is (batch, hidden_lstm_size)
        :param vision: cnn-encoded image. dim is (batch, n_feature_map, width, height)
        :return: vision after visual attention is applied. dim is (batch, n_feature_map)
        """
        n_feature_map = vision.size(1)
        width = vision.size(2)
        height = vision.size(3)

        attention_weights_list = []
        # compute attention for every pixel, compute the sum
        for i in range(width):
            for j in range(height):
                current_pixel = vision[:, :, i, j]
                assert current_pixel.dim() == 2
                current_weight = self.attention_last(self.attention_hidden(torch.cat((text, current_pixel), dim=1)))
                attention_weights_list.append(current_weight)

        all_weigths = torch.cat(attention_weights_list, dim=1)
        all_weigths = F.softmax(all_weigths, dim=1).unsqueeze(2)

        # Attention-weighted sum over pixels, then concatenate with text.
        vision = vision.view(-1, n_feature_map, height * width)
        vision = torch.bmm(vision, all_weigths)
        vision = vision.squeeze(2)
        return self.concatenate_text_vision(text, vision)

    def vectorize(self, text, vision):
        """'no_fuse' fusion: flatten the visual features, ignore the text."""
        return vision.view(vision.size(0), -1)
if __name__ == "__main__":
import numpy as np
from torch.autograd import Variable
config = {
"num_layer" : 2,
"optimizer" : "RMSprop",
"learning_rate" : 1e-3
}
x = dict()
obj_img = np.random.random((1,3,28,28))
img = np.random.random((1,15,28,28))
x['state'] = Variable(torch.FloatTensor(img))
x['objective'] = Variable(torch.FloatTensor(obj_img))
filmed_net = FilmedNet(config)
filmed_net.forward(x)
| [
"torch.nn.ReLU",
"torch.nn.Sequential",
"torch.bmm",
"torch.nn.functional.softmax",
"numpy.random.random",
"torch.nn.ModuleList",
"torch.nn.LSTM",
"rl_agent.film_utils.ResidualBlock",
"torch.optim.RMSprop",
"torch.nn.Embedding",
"torch.optim.SGD",
"torch.nn.functional.max_pool2d",
"torch.cat... | [((14602, 14634), 'numpy.random.random', 'np.random.random', (['(1, 3, 28, 28)'], {}), '((1, 3, 28, 28))\n', (14618, 14634), True, 'import numpy as np\n'), ((14642, 14675), 'numpy.random.random', 'np.random.random', (['(1, 15, 28, 28)'], {}), '((1, 15, 28, 28))\n', (14658, 14675), True, 'import numpy as np\n'), ((2231, 2246), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2244, 2246), True, 'import torch.nn as nn\n'), ((3476, 3590), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'self.word_embedding_size', 'hidden_size': 'self.text_lstm_size', 'num_layers': '(1)', 'batch_first': '(True)'}), '(input_size=self.word_embedding_size, hidden_size=self.\n text_lstm_size, num_layers=1, batch_first=True)\n', (3483, 3590), True, 'import torch.nn as nn\n'), ((6799, 6814), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (6812, 6814), True, 'import torch.nn as nn\n'), ((7077, 7147), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'self.n_hidden_lstm', 'out_features': 'self.n_actions'}), '(in_features=self.n_hidden_lstm, out_features=self.n_actions)\n', (7086, 7147), True, 'import torch.nn as nn\n'), ((12804, 12836), 'torch.cat', 'torch.cat', (['(vision, text)'], {'dim': '(1)'}), '((vision, text), dim=1)\n', (12813, 12836), False, 'import torch\n'), ((13969, 14009), 'torch.cat', 'torch.cat', (['attention_weights_list'], {'dim': '(1)'}), '(attention_weights_list, dim=1)\n', (13978, 14009), False, 'import torch\n'), ((14157, 14187), 'torch.bmm', 'torch.bmm', (['vision', 'all_weigths'], {}), '(vision, all_weigths)\n', (14166, 14187), False, 'import torch\n'), ((14700, 14722), 'torch.FloatTensor', 'torch.FloatTensor', (['img'], {}), '(img)\n', (14717, 14722), False, 'import torch\n'), ((14754, 14780), 'torch.FloatTensor', 'torch.FloatTensor', (['obj_img'], {}), '(obj_img)\n', (14771, 14780), False, 'import torch\n'), ((2451, 2517), 'rl_agent.film_utils.ResidualBlock', 'ResidualBlock', ([], {'in_dim': 'self.n_channel_per_state', 
'with_residual': '(True)'}), '(in_dim=self.n_channel_per_state, with_residual=True)\n', (2464, 2517), False, 'from rl_agent.film_utils import ResidualBlock, FiLMedResBlock\n'), ((3265, 3320), 'torch.nn.Embedding', 'nn.Embedding', (['self.vocab_size', 'self.word_embedding_size'], {}), '(self.vocab_size, self.word_embedding_size)\n', (3277, 3320), True, 'import torch.nn as nn\n'), ((3827, 3948), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'self.n_channel_per_state', 'out_channels': 'self.n_channel_head', 'kernel_size': 'self.kernel_size_head'}), '(in_channels=self.n_channel_per_state, out_channels=self.\n n_channel_head, kernel_size=self.kernel_size_head)\n', (3836, 3948), True, 'import torch.nn as nn\n'), ((7732, 7771), 'torch.optim.RMSprop', 'optim.RMSprop', (['optim_config'], {'lr': 'self.lr'}), '(optim_config, lr=self.lr)\n', (7745, 7771), False, 'from torch import optim\n'), ((10986, 11019), 'torch.zeros', 'torch.zeros', (['(1)', '*self.input_shape'], {}), '(1, *self.input_shape)\n', (10997, 11019), False, 'import torch\n'), ((11938, 11993), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x'], {'kernel_size': 'self.pool_kernel_size_head'}), '(x, kernel_size=self.pool_kernel_size_head)\n', (11950, 11993), True, 'import torch.nn.functional as F\n'), ((5060, 5135), 'torch.nn.Linear', 'nn.Linear', (['vizfeat_size_flatten_before_fuse', 'self.embedding_size_before_dot'], {}), '(vizfeat_size_flatten_before_fuse, self.embedding_size_before_dot)\n', (5069, 5135), True, 'import torch.nn as nn\n'), ((5181, 5243), 'torch.nn.Linear', 'nn.Linear', (['self.text_lstm_size', 'self.embedding_size_before_dot'], {}), '(self.text_lstm_size, self.embedding_size_before_dot)\n', (5190, 5243), True, 'import torch.nn as nn\n'), ((6906, 6977), 'torch.nn.LSTMCell', 'nn.LSTMCell', ([], {'input_size': 'lstm_input_size', 'hidden_size': 'self.n_hidden_lstm'}), '(input_size=lstm_input_size, hidden_size=self.n_hidden_lstm)\n', (6917, 6977), True, 'import torch.nn as nn\n'), ((7835, 
7871), 'torch.optim.Adam', 'optim.Adam', (['optim_config'], {'lr': 'self.lr'}), '(optim_config, lr=self.lr)\n', (7845, 7871), False, 'from torch import optim\n'), ((14032, 14061), 'torch.nn.functional.softmax', 'F.softmax', (['all_weigths'], {'dim': '(1)'}), '(all_weigths, dim=1)\n', (14041, 14061), True, 'import torch.nn.functional as F\n'), ((5971, 6000), 'torch.nn.Linear', 'nn.Linear', (['hidden_mlp_size', '(1)'], {}), '(hidden_mlp_size, 1)\n', (5980, 6000), True, 'import torch.nn as nn\n'), ((7934, 7969), 'torch.optim.SGD', 'optim.SGD', (['optim_config'], {'lr': 'self.lr'}), '(optim_config, lr=self.lr)\n', (7943, 7969), False, 'from torch import optim\n'), ((8111, 8144), 'torch.ones', 'torch.ones', (['(1)', 'self.n_hidden_lstm'], {}), '(1, self.n_hidden_lstm)\n', (8121, 8144), False, 'import torch\n'), ((8195, 8228), 'torch.ones', 'torch.ones', (['(1)', 'self.n_hidden_lstm'], {}), '(1, self.n_hidden_lstm)\n', (8205, 8228), False, 'import torch\n'), ((5651, 5699), 'torch.nn.Linear', 'nn.Linear', (['attention_input_size', 'hidden_mlp_size'], {}), '(attention_input_size, hidden_mlp_size)\n', (5660, 5699), True, 'import torch.nn as nn\n'), ((5723, 5732), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5730, 5732), True, 'import torch.nn as nn\n'), ((5773, 5810), 'torch.nn.Sequential', 'nn.Sequential', (['hidden_layer_att', 'relu'], {}), '(hidden_layer_att, relu)\n', (5786, 5810), True, 'import torch.nn as nn\n'), ((13842, 13881), 'torch.cat', 'torch.cat', (['(text, current_pixel)'], {'dim': '(1)'}), '((text, current_pixel), dim=1)\n', (13851, 13881), False, 'import torch\n'), ((12292, 12334), 'torch.ones', 'torch.ones', (['batch_size', 'self.n_hidden_lstm'], {}), '(batch_size, self.n_hidden_lstm)\n', (12302, 12334), False, 'import torch\n'), ((12364, 12406), 'torch.ones', 'torch.ones', (['batch_size', 'self.n_hidden_lstm'], {}), '(batch_size, self.n_hidden_lstm)\n', (12374, 12406), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 12 13:33:45 2018
@author: tghosh

Transfer-learning script: fine-tunes a CNN document model (pretrained on
Amazon reviews) for IMDB movie-review sentiment classification, using only
5% of the IMDB training data. Conv filters are copied from the source model
and frozen; only the embeddings stay trainable.
"""
import config
from dataloader.loader import Loader
from preprocessing.utils import Preprocess, remove_empty_docs
from dataloader.embeddings import GloVe
from model.cnn_document_model import DocumentModel, TrainingParameters
from keras.callbacks import ModelCheckpoint, EarlyStopping
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import SVC
# Training hyper-parameters and on-disk locations for the model artifacts.
train_params = TrainingParameters('imdb_transfer_tanh_activation',
                                   model_file_path = config.MODEL_DIR+ '/imdb/transfer_model_10.hdf5',
                                   model_hyper_parameters = config.MODEL_DIR+ '/imdb/transfer_model_10.json',
                                   model_train_parameters = config.MODEL_DIR+ '/imdb/transfer_model_10_meta.json',
                                   num_epochs=30,
                                   batch_size=128)
#train_df = Loader.load_imdb_data(directory = 'train')
# Load the IMDB train split from CSV (expects 'review' and 'sentiment' columns).
train_df = pd.read_csv(config.IMDB_DATA_CSV + '/movie_reviews_train.csv', encoding='ISO-8859-1')
print(train_df.shape)
# build TFIDF features on train reviews
tv = TfidfVectorizer(use_idf=True, min_df=0.00005, max_df=1.0, ngram_range=(1, 1), stop_words = 'english',
                     sublinear_tf=True)
# NOTE(review): tv_features is computed but never used below (the SVM baseline
# at the bottom of the file is commented out).
tv_features = tv.fit_transform(train_df['review'].tolist())
#test_df = Loader.load_imdb_data(directory = 'test')
test_df = pd.read_csv(config.IMDB_DATA_CSV + '/movie_reviews_test.csv', encoding='ISO-8859-1')
print(test_df.shape)
# Drop empty documents from both the texts and the matching labels.
corpus = train_df['review'].tolist()
target = train_df['sentiment'].tolist()
corpus, target = remove_empty_docs(corpus, target)
print(len(corpus))
# Fit the preprocessor (word index, sequence encoding) on the FULL corpus,
# even though only a 5% sample is used for training below.
preprocessor = Preprocess(corpus=corpus)
corpus_to_seq = preprocessor.fit()
#Take only 5% of data for training
train_df = train_df.sample(frac=0.05, random_state = train_params.seed)
corpus = train_df['review'].tolist()
target = train_df['sentiment'].tolist()
corpus_to_seq = preprocessor.transform(corpus)
# Prepare the held-out test set with the same (already-fitted) preprocessor.
test_corpus = test_df['review'].tolist()
test_target = test_df['sentiment'].tolist()
test_corpus, test_target = remove_empty_docs(test_corpus, test_target)
print(len(test_corpus))
test_corpus_to_seq = preprocessor.transform(test_corpus)
x_train = np.array(corpus_to_seq)
x_test = np.array(test_corpus_to_seq)
y_train = np.array(target)
y_test = np.array(test_target)
print(x_train.shape, y_train.shape)
# Initialize the embedding layer from 50-dimensional GloVe vectors.
glove=GloVe(50)
initial_embeddings = glove.get_embedding(preprocessor.word_index)
# Load the source model pretrained on Amazon reviews.
# NOTE(review): absolute machine-specific paths; consider moving into config.
amazon_review_model = DocumentModel.load_model("C:/Users/tghosh/Work/Data Science/Transfer Learning/Chapter-7/models/amazonreviews/model_06.json")
amazon_review_model.load_model_weights("C:/Users/tghosh/Work/Data Science/Transfer Learning/Chapter-7/models/amazonreviews/model_06.hdf5")
# Overwrite the GloVe vectors with embeddings learned on Amazon reviews for
# words shared between the two vocabularies.
learned_embeddings = amazon_review_model.get_classification_model().get_layer('imdb_embedding').get_weights()[0]
glove.update_embeddings(preprocessor.word_index , np.array(learned_embeddings), amazon_review_model.word_index)
initial_embeddings = glove.get_embedding(preprocessor.word_index)
# Target model: embeddings remain trainable (train_embedding=True) while the
# word/sentence conv filters are frozen (learn_*_conv=False).
imdb_model = DocumentModel(vocab_size=preprocessor.get_vocab_size(),
                            word_index = preprocessor.word_index,
                            num_sentences=Preprocess.NUM_SENTENCES,
                            embedding_weights=initial_embeddings,
                            embedding_regularizer_l2 = 0.0,
                            conv_activation = 'tanh',
                            train_embedding = True,
                            learn_word_conv = False,
                            learn_sent_conv = False,
                            hidden_dims=64,
                            input_dropout=0.1,
                            hidden_layer_kernel_regularizer=0.01,
                            final_layer_kernel_regularizer=0.01)
#transfer word & sentence conv filters
for l_name in ['word_conv','sentence_conv','hidden_0', 'final']:
    imdb_model.get_classification_model()\
        .get_layer(l_name).set_weights(weights=amazon_review_model
                                       .get_classification_model()
                                       .get_layer(l_name).get_weights())
from keras.optimizers import Adam
# NOTE(review): 'adam' is created but never used -- compile() below passes
# optimizer='rmsprop'. Either pass `adam` to compile() or delete these lines.
adam = Adam(lr=0.002)
imdb_model.get_classification_model()\
    .compile(loss="binary_crossentropy", optimizer='rmsprop', metrics=["accuracy"])
# Keep only the best weights seen during training.
checkpointer = ModelCheckpoint(filepath=train_params.model_file_path,
                               verbose=1,
                               save_best_only=True,
                               save_weights_only=True)
# NOTE(review): early_stop is never passed to fit() below, so early stopping
# is effectively disabled.
early_stop = EarlyStopping(patience=2)
imdb_model.get_classification_model().fit(x_train, y_train, batch_size=train_params.batch_size,
                                          epochs=train_params.num_epochs,
                                          verbose=2,validation_split=0.01,
                                          callbacks=[checkpointer])
#imdb_model.load_model_weights(train_params.model_file_path)
imdb_model.get_classification_model().evaluate( x_test, y_test, batch_size=train_params.batch_size*10, verbose=2)
#imdb_model._save_model(train_params.model_hyper_parameters)
#train_params.save()
#learned_embeddings = imdb_model.get_classification_model().get_layer('imdb_embedding').get_weights()[0]
#embd_change = {}
#for word, i in preprocessor.word_index.items():
#    embd_change[word] = np.linalg.norm(initial_embeddings[i]-learned_embeddings[i])
#embd_change = sorted(embd_change.items(), key=lambda x: x[1], reverse=True)
#embd_change[0:100]
#print(len(tv.get_feature_names()))
#tv_train_features = tv.transform(corpus)
#tv_test_features = tv.transform(test_corpus)
#
#clf = SVC(C=1,kernel='linear', random_state=1, gamma=0.01)
#svm=clf.fit(tv_train_features, target)
#preds_test = svm.predict(tv_test_features)
#
#from sklearn.metrics import classification_report,accuracy_score,confusion_matrix
#print(classification_report(y_test, preds_test))
#print(confusion_matrix(y_test, preds_test))
"keras.optimizers.Adam",
"model.cnn_document_model.TrainingParameters",
"preprocessing.utils.remove_empty_docs",
"preprocessing.utils.Preprocess",
"model.cnn_document_model.DocumentModel.load_model",
"pandas.read_csv",
"keras.callbacks.ModelCheckpoint",
"numpy.array",
"sklearn.feature_extraction.tex... | [((515, 835), 'model.cnn_document_model.TrainingParameters', 'TrainingParameters', (['"""imdb_transfer_tanh_activation"""'], {'model_file_path': "(config.MODEL_DIR + '/imdb/transfer_model_10.hdf5')", 'model_hyper_parameters': "(config.MODEL_DIR + '/imdb/transfer_model_10.json')", 'model_train_parameters': "(config.MODEL_DIR + '/imdb/transfer_model_10_meta.json')", 'num_epochs': '(30)', 'batch_size': '(128)'}), "('imdb_transfer_tanh_activation', model_file_path=config.\n MODEL_DIR + '/imdb/transfer_model_10.hdf5', model_hyper_parameters=\n config.MODEL_DIR + '/imdb/transfer_model_10.json',\n model_train_parameters=config.MODEL_DIR +\n '/imdb/transfer_model_10_meta.json', num_epochs=30, batch_size=128)\n", (533, 835), False, 'from model.cnn_document_model import DocumentModel, TrainingParameters\n'), ((1060, 1150), 'pandas.read_csv', 'pd.read_csv', (["(config.IMDB_DATA_CSV + '/movie_reviews_train.csv')"], {'encoding': '"""ISO-8859-1"""'}), "(config.IMDB_DATA_CSV + '/movie_reviews_train.csv', encoding=\n 'ISO-8859-1')\n", (1071, 1150), True, 'import pandas as pd\n'), ((1213, 1333), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'use_idf': '(True)', 'min_df': '(5e-05)', 'max_df': '(1.0)', 'ngram_range': '(1, 1)', 'stop_words': '"""english"""', 'sublinear_tf': '(True)'}), "(use_idf=True, min_df=5e-05, max_df=1.0, ngram_range=(1, 1),\n stop_words='english', sublinear_tf=True)\n", (1228, 1333), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((1482, 1571), 'pandas.read_csv', 'pd.read_csv', (["(config.IMDB_DATA_CSV + '/movie_reviews_test.csv')"], {'encoding': '"""ISO-8859-1"""'}), "(config.IMDB_DATA_CSV + '/movie_reviews_test.csv', encoding=\n 'ISO-8859-1')\n", (1493, 1571), True, 'import pandas as pd\n'), ((1683, 1716), 'preprocessing.utils.remove_empty_docs', 'remove_empty_docs', (['corpus', 'target'], {}), '(corpus, target)\n', (1700, 1716), False, 'from preprocessing.utils import 
Preprocess, remove_empty_docs\n'), ((1752, 1777), 'preprocessing.utils.Preprocess', 'Preprocess', ([], {'corpus': 'corpus'}), '(corpus=corpus)\n', (1762, 1777), False, 'from preprocessing.utils import Preprocess, remove_empty_docs\n'), ((2158, 2201), 'preprocessing.utils.remove_empty_docs', 'remove_empty_docs', (['test_corpus', 'test_target'], {}), '(test_corpus, test_target)\n', (2175, 2201), False, 'from preprocessing.utils import Preprocess, remove_empty_docs\n'), ((2295, 2318), 'numpy.array', 'np.array', (['corpus_to_seq'], {}), '(corpus_to_seq)\n', (2303, 2318), True, 'import numpy as np\n'), ((2329, 2357), 'numpy.array', 'np.array', (['test_corpus_to_seq'], {}), '(test_corpus_to_seq)\n', (2337, 2357), True, 'import numpy as np\n'), ((2369, 2385), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (2377, 2385), True, 'import numpy as np\n'), ((2395, 2416), 'numpy.array', 'np.array', (['test_target'], {}), '(test_target)\n', (2403, 2416), True, 'import numpy as np\n'), ((2461, 2470), 'dataloader.embeddings.GloVe', 'GloVe', (['(50)'], {}), '(50)\n', (2466, 2470), False, 'from dataloader.embeddings import GloVe\n'), ((2560, 2694), 'model.cnn_document_model.DocumentModel.load_model', 'DocumentModel.load_model', (['"""C:/Users/tghosh/Work/Data Science/Transfer Learning/Chapter-7/models/amazonreviews/model_06.json"""'], {}), "(\n 'C:/Users/tghosh/Work/Data Science/Transfer Learning/Chapter-7/models/amazonreviews/model_06.json'\n )\n", (2584, 2694), False, 'from model.cnn_document_model import DocumentModel, TrainingParameters\n'), ((4444, 4458), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.002)'}), '(lr=0.002)\n', (4448, 4458), False, 'from keras.optimizers import Adam\n'), ((4613, 4727), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'train_params.model_file_path', 'verbose': '(1)', 'save_best_only': '(True)', 'save_weights_only': '(True)'}), '(filepath=train_params.model_file_path, verbose=1,\n save_best_only=True, 
save_weights_only=True)\n', (4628, 4727), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((4843, 4868), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(2)'}), '(patience=2)\n', (4856, 4868), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((2988, 3016), 'numpy.array', 'np.array', (['learned_embeddings'], {}), '(learned_embeddings)\n', (2996, 3016), True, 'import numpy as np\n')] |
"""
Multi-device matrix multiplication using parla with cupy as the kernel engine.
"""
import sys
import time
import numpy as np
import cupy as cp
from parla import Parla, get_all_devices
from parla.array import copy, clone_here
from parla.cpu import cpu
from parla.cuda import gpu
from parla.function_decorators import specialized
from parla.ldevice import LDeviceSequenceBlocked
from parla.tasks import spawn, TaskSpace, CompletedTaskSpace, reserve_persistent_memory
from parla.parray import asarray_batch
def main():
    """Launch the Parla program: a blocked multi-GPU matrix multiply benchmark.

    Usage: script.py <repetitions>. Times C[i][j] = A[i] @ B[j]^T over all
    row-block pairs, one repetition per timed iteration.
    """
    @spawn(placement=cpu)
    async def main_task():
        # One row block of A and B per available GPU.
        ngpus = cp.cuda.runtime.getDeviceCount()
        repetitions = int(sys.argv[1])
        # set up two n x n arrays to multiply together.
        # n is chosen so that all three can be
        # stored within the memory of a single GPU
        # so that strong scaling numbers make sense.
        n = 32000
        blocks = ngpus
        block_size = n // ngpus
        # Host arrays are C-ordered, device copies are F-ordered.
        h_ordr = 'C'
        d_ordr = 'F'
        print("BlockSize: ", block_size, "GPUS: ", ngpus)
        # Overdecomposing doesn't actually seem to help in this case
        # with the current parla runtime. This may be related to
        # some weirdness within the scheduler though, so
        # we can leave the code for blocks in-place for further
        # testing later.
        np.random.seed(0)
        a_cpu = np.random.rand(n, n).astype(dtype=np.float32, order=h_ordr)
        b_cpu = np.random.rand(n, n).astype(dtype=np.float32, order=h_ordr)
        print("Finished Data Allocation", flush=True)
        # Partition the two arrays and set up the
        # partitioned array where the result will be stored.
        # This could also be done using a parla mapper object.
        a_part = []
        b_part = []
        c_part = []
        # Experiment toggles (hard-coded):
        distribute=True        # pre-copy operand blocks onto the GPUs
        reset=True             # restore operand placement before each repetition
        fixed_placement=False  # pin task (i, j) to GPU i instead of auto-placement
        verbose=False          # per-task logging
        sync=False             # include a stream sync inside each task's timing
        time_list = list()
        # Start all operans from CPU memory.
        for i in range(blocks):
            if distribute:
                # Stage row block i of A and B on device i up front.
                with cp.cuda.Device(i):
                    a_part.append(cp.asarray(a_cpu[i * block_size : (i + 1) * block_size], order=d_ordr))
                    b_part.append(cp.asarray(b_cpu[i * block_size : (i + 1) * block_size], order=d_ordr))
                    cp.cuda.stream.get_current_stream().synchronize()
            else:
                a_part.append(a_cpu[i * block_size : (i + 1) * block_size])
                b_part.append(b_cpu[i * block_size : (i + 1) * block_size])
        # C starts as empty placeholders; each matmul task fills c_part[i][j].
        for i in range(blocks):
            c_part.append(list())
            for j in range(blocks):
                c_part[i].append(np.empty((0, 0), dtype=np.float32, order=h_ordr))
        #print(len(c_part), len(c_part[0]), c_part[0][0].shape)
        # 1. NEW: convert to parray in batch
        a_part, b_part = asarray_batch(a_part, b_part)
        c_part = asarray_batch(c_part)
        #print(len(c_part), len(c_part[0]))
        for repetition in range(repetitions):
            #reset cblocks to None
            for i in range(blocks):
                for j in range(blocks):
                    c_part[i][j].update(np.empty((0, 0), dtype=np.float32, order=h_ordr))
            if reset:
                #reset coherence to only be in starting locations
                rspace = TaskSpace("reset")
                for i in range(blocks):
                    # NOTE(review): reset_task closes over loop variable i;
                    # presumably @spawn captures the task's data dependencies at
                    # spawn time -- confirm against Parla's task semantics.
                    @spawn(rspace[i], placement=gpu(i%ngpus), memory=2*block_size*n, inout=[a_part[i], b_part[i]])
                    def reset_task():
                        a_part[i].update(a_part[i].array)
                        b_part[i].update(b_part[i].array)
                await rspace
            matmul = TaskSpace("matmul")
            start = time.perf_counter()
            for i in range(blocks):
                for j in range(blocks):
                    a_block = a_part[i]
                    b_block = b_part[j]
                    c_block = c_part[i][j]
                    # 4 bytes per float32 element of the output block.
                    memsize = (block_size**2)*4
                    if fixed_placement:
                        loc = gpu(i%ngpus)
                    else:
                        loc = gpu
                    @spawn(matmul[i, j], placement = loc, memory=memsize, input=[a_block, b_block], output=[c_block])
                    def matmul_task():
                        a = a_block.array
                        b = b_block.array
                        c = c_block.array
                        stream = cp.cuda.get_current_stream()
                        stream.synchronize()
                        assert(a.device.id == b.device.id)
                        if verbose:
                            print(f"+({i}, {j}): ", a.shape, b.shape, c.shape, " | On Device: ", cp.cuda.runtime.getDevice(), a.device.id, flush=True)
                        local_start = time.perf_counter()
                        # C[i][j] = A[i] @ B[j]^T (both operands are row blocks).
                        c = a @ b.T
                        if sync:
                            stream.synchronize()
                        local_end = time.perf_counter()
                        c_block.update(c)
                        c = c_block.array
                        if verbose:
                            print(f"-({i}, {j}): ", a.shape, b.shape, c.shape, " | Elapsed: ", local_end-local_start, flush=True)
            await matmul
            stop = time.perf_counter()
            print(f"Iteration {repetition} | Time elapsed: ", stop - start, flush=True)
            time_list.append(stop-start)
        mean = np.mean(np.array(time_list))
        median = np.median(np.array(time_list))
        print(f"Execution Time:: Average = {mean} | Median = {median}", flush=True)
if __name__ == "__main__":
    with Parla():
        main()
| [
"cupy.cuda.Device",
"parla.cuda.gpu",
"numpy.random.rand",
"parla.tasks.TaskSpace",
"time.perf_counter",
"cupy.cuda.runtime.getDevice",
"numpy.array",
"parla.Parla",
"numpy.random.seed",
"numpy.empty",
"parla.tasks.spawn",
"cupy.cuda.runtime.getDeviceCount",
"cupy.cuda.stream.get_current_str... | [((530, 550), 'parla.tasks.spawn', 'spawn', ([], {'placement': 'cpu'}), '(placement=cpu)\n', (535, 550), False, 'from parla.tasks import spawn, TaskSpace, CompletedTaskSpace, reserve_persistent_memory\n'), ((594, 626), 'cupy.cuda.runtime.getDeviceCount', 'cp.cuda.runtime.getDeviceCount', ([], {}), '()\n', (624, 626), True, 'import cupy as cp\n'), ((1338, 1355), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1352, 1355), True, 'import numpy as np\n'), ((2860, 2889), 'parla.parray.asarray_batch', 'asarray_batch', (['a_part', 'b_part'], {}), '(a_part, b_part)\n', (2873, 2889), False, 'from parla.parray import asarray_batch\n'), ((2907, 2928), 'parla.parray.asarray_batch', 'asarray_batch', (['c_part'], {}), '(c_part)\n', (2920, 2928), False, 'from parla.parray import asarray_batch\n'), ((5702, 5709), 'parla.Parla', 'Parla', ([], {}), '()\n', (5707, 5709), False, 'from parla import Parla, get_all_devices\n'), ((3717, 3736), 'parla.tasks.TaskSpace', 'TaskSpace', (['"""matmul"""'], {}), "('matmul')\n", (3726, 3736), False, 'from parla.tasks import spawn, TaskSpace, CompletedTaskSpace, reserve_persistent_memory\n'), ((3757, 3776), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3774, 3776), False, 'import time\n'), ((5338, 5357), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5355, 5357), False, 'import time\n'), ((5511, 5530), 'numpy.array', 'np.array', (['time_list'], {}), '(time_list)\n', (5519, 5530), True, 'import numpy as np\n'), ((5559, 5578), 'numpy.array', 'np.array', (['time_list'], {}), '(time_list)\n', (5567, 5578), True, 'import numpy as np\n'), ((1372, 1392), 'numpy.random.rand', 'np.random.rand', (['n', 'n'], {}), '(n, n)\n', (1386, 1392), True, 'import numpy as np\n'), ((1448, 1468), 'numpy.random.rand', 'np.random.rand', (['n', 'n'], {}), '(n, n)\n', (1462, 1468), True, 'import numpy as np\n'), ((3338, 3356), 'parla.tasks.TaskSpace', 'TaskSpace', (['"""reset"""'], {}), 
"('reset')\n", (3347, 3356), False, 'from parla.tasks import spawn, TaskSpace, CompletedTaskSpace, reserve_persistent_memory\n'), ((2067, 2084), 'cupy.cuda.Device', 'cp.cuda.Device', (['i'], {}), '(i)\n', (2081, 2084), True, 'import cupy as cp\n'), ((2674, 2722), 'numpy.empty', 'np.empty', (['(0, 0)'], {'dtype': 'np.float32', 'order': 'h_ordr'}), '((0, 0), dtype=np.float32, order=h_ordr)\n', (2682, 2722), True, 'import numpy as np\n'), ((4191, 4289), 'parla.tasks.spawn', 'spawn', (['matmul[i, j]'], {'placement': 'loc', 'memory': 'memsize', 'input': '[a_block, b_block]', 'output': '[c_block]'}), '(matmul[i, j], placement=loc, memory=memsize, input=[a_block, b_block],\n output=[c_block])\n', (4196, 4289), False, 'from parla.tasks import spawn, TaskSpace, CompletedTaskSpace, reserve_persistent_memory\n'), ((2120, 2188), 'cupy.asarray', 'cp.asarray', (['a_cpu[i * block_size:(i + 1) * block_size]'], {'order': 'd_ordr'}), '(a_cpu[i * block_size:(i + 1) * block_size], order=d_ordr)\n', (2130, 2188), True, 'import cupy as cp\n'), ((2226, 2294), 'cupy.asarray', 'cp.asarray', (['b_cpu[i * block_size:(i + 1) * block_size]'], {'order': 'd_ordr'}), '(b_cpu[i * block_size:(i + 1) * block_size], order=d_ordr)\n', (2236, 2294), True, 'import cupy as cp\n'), ((3174, 3222), 'numpy.empty', 'np.empty', (['(0, 0)'], {'dtype': 'np.float32', 'order': 'h_ordr'}), '((0, 0), dtype=np.float32, order=h_ordr)\n', (3182, 3222), True, 'import numpy as np\n'), ((4096, 4110), 'parla.cuda.gpu', 'gpu', (['(i % ngpus)'], {}), '(i % ngpus)\n', (4099, 4110), False, 'from parla.cuda import gpu\n'), ((4487, 4515), 'cupy.cuda.get_current_stream', 'cp.cuda.get_current_stream', ([], {}), '()\n', (4513, 4515), True, 'import cupy as cp\n'), ((4846, 4865), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4863, 4865), False, 'import time\n'), ((5021, 5040), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5038, 5040), False, 'import time\n'), ((2318, 2353), 
'cupy.cuda.stream.get_current_stream', 'cp.cuda.stream.get_current_stream', ([], {}), '()\n', (2351, 2353), True, 'import cupy as cp\n'), ((3445, 3459), 'parla.cuda.gpu', 'gpu', (['(i % ngpus)'], {}), '(i % ngpus)\n', (3448, 3459), False, 'from parla.cuda import gpu\n'), ((4754, 4781), 'cupy.cuda.runtime.getDevice', 'cp.cuda.runtime.getDevice', ([], {}), '()\n', (4779, 4781), True, 'import cupy as cp\n')] |
# core/operators/_affine.py
"""Classes for operators that depend affinely on external parameters, i.e.,
A(µ) = sum_{i=1}^{nterms} θ_{i}(µ) * A_{i}.
"""
# Public API of this module; the cross-quadratic variant is not implemented yet.
__all__ = [
    "AffineConstantOperator",
    "AffineLinearOperator",
    "AffineQuadraticOperator",
    # AffineCrossQuadraticOperator",
    "AffineCubicOperator",
]
import numpy as np
from ._base import _BaseParametricOperator
from ._nonparametric import (ConstantOperator,
LinearOperator,
QuadraticOperator,
# CrossQuadraticOperator,
CubicOperator)
class _AffineOperator(_BaseParametricOperator):
    """Base class for parametric operators with affine dependence on the
    parameter, i.e.,

        A(µ) = sum_{i=1}^{nterms} θ_{i}(µ) * A_{i}.

    Calling the object with a parameter µ assembles and returns A(µ).

    Attributes
    ----------
    coefficient_functions : list of `nterms` callables
        Scalar-valued coefficient functions θ_{i} of the affine expansion.
    matrices : list of `nterms` ndarrays, all of the same shape
        Constant operator matrices A_{i} of the affine expansion.
    """
    def __init__(self, coefficient_functions, matrices):
        """Store the terms of the affine expansion.

        Parameters
        ----------
        coefficient_functions : list of `nterms` callables
            Scalar-valued coefficient functions θ_{i}.
        matrices : list of `nterms` ndarrays, all of the same shape
            Constant operator matrices A_{i}.
        """
        _BaseParametricOperator.__init__(self)

        # Every coefficient of the expansion must be a function of µ.
        if not all(callable(theta) for theta in coefficient_functions):
            raise TypeError("coefficient functions of affine operator "
                            "must be callable")
        self.__coeffs = coefficient_functions

        # There must be exactly one matrix per coefficient function.
        n_coeffs = len(coefficient_functions)
        n_matrices = len(matrices)
        if n_coeffs != n_matrices:
            raise ValueError(f"{n_coeffs} = len(coefficient_functions) "
                             f"!= len(matrices) = {n_matrices}")

        # All matrices must share a single common shape.
        self._check_shape_consistency(matrices, "operator matrix")
        self.__mats = matrices

    @property
    def coefficient_functions(self):
        """Scalar-valued coefficient functions θ_{i} of the affine expansion."""
        return self.__coeffs

    @property
    def matrices(self):
        """Constant operator matrices A_{i} of the affine expansion."""
        return self.__mats

    @property
    def shape(self):
        """Shape shared by all of the operator matrices."""
        return self.__mats[0].shape

    @staticmethod
    def _validate_coefficient_functions(coefficient_functions, parameter):
        """Verify that every coefficient function is callable and maps the
        given test parameter to a scalar.

        Parameters
        ----------
        coefficient_functions : list of `nterms` callables
            Scalar-valued coefficient functions θ_{i}.
        parameter : (p,) ndarray or float (p = 1).
            Test input µ for the coefficient functions.
        """
        for theta in coefficient_functions:
            if not callable(theta):
                raise TypeError("coefficient functions of affine operator "
                                "must be callable")
            if not np.isscalar(theta(parameter)):
                raise ValueError("coefficient functions of affine operator "
                                 "must return a scalar")

    def __call__(self, parameter):
        """Assemble the operator at the given parameter value µ."""
        terms = [theta(parameter) * A
                 for theta, A in zip(self.__coeffs, self.__mats)]
        return self.OperatorClass(np.sum(terms, axis=0))

    def __len__(self):
        """Number of terms in the affine expansion."""
        return len(self.__mats)

    def __eq__(self, other):
        """Test whether the operator matrices of two AffineOperator objects
        are numerically equal. Coefficient functions are *NOT* compared.
        """
        if not isinstance(other, self.__class__):
            return False
        if len(self) != len(other) or self.shape != other.shape:
            return False
        return all(np.all(A1 == A2)
                   for A1, A2 in zip(self.matrices, other.matrices))
class AffineConstantOperator(_AffineOperator):
    """Constant operator with affine parametric structure, i.e.,
        c(µ) = sum_{i=1}^{nterms} θ_{i}(µ) * c_{i}.
    The vector c(µ) for a given µ is constructed by calling the object.
    Attributes
    ----------
    coefficient_functions : list of `nterms` callables
        Scalar-valued coefficient functions in each term of the affine
        expansion (θ_{i}'s).
    matrices : list of `nterms` ndarrays, all of the same shape
        Operator vectors in each term of the affine expansion (c_{i}'s).
    """
    # NOTE(review): presumably consumed by the base class's `OperatorClass`
    # so that evaluation wraps the assembled entries in a ConstantOperator --
    # confirm in _BaseParametricOperator.
    _OperatorClass = ConstantOperator
class AffineLinearOperator(_AffineOperator):
    """Linear operator with affine parametric structure, i.e.,
        A(µ) = sum_{i=1}^{nterms} θ_{i}(µ) * A_{i}.
    The matrix A(µ) for a given µ is constructed by calling the object.
    Attributes
    ----------
    coefficient_functions : list of `nterms` callables
        Scalar-valued coefficient functions in each term of the affine
        expansion (θ_{i}'s).
    matrices : list of `nterms` ndarrays, all of the same shape
        Operator matrices in each term of the affine expansion (A_{i}'s).
    """
    # Evaluation presumably yields a LinearOperator (see base class __call__).
    _OperatorClass = LinearOperator
class AffineQuadraticOperator(_AffineOperator):
    """Quadratic operator with affine parametric structure, i.e.,
        H(µ) = sum_{i=1}^{nterms} θ_{i}(µ) * H_{i}.
    The matrix H(µ) for a given µ is constructed by calling the object.
    Attributes
    ----------
    coefficient_functions : list of `nterms` callables
        Scalar-valued coefficient functions in each term of the affine
        expansion (θ_{i}'s).
    matrices : list of `nterms` ndarrays, all of the same shape
        Operator matrices in each term of the affine expansion (H_{i}'s).
    """
    # Evaluation presumably yields a QuadraticOperator (see base class __call__).
    _OperatorClass = QuadraticOperator
class AffineCubicOperator(_AffineOperator):
    """Cubic operator with affine parametric structure, i.e.,
        G(µ) = sum_{i=1}^{nterms} θ_{i}(µ) * G_{i}.
    The matrix G(µ) for a given µ is constructed by calling the object.
    Attributes
    ----------
    coefficient_functions : list of `nterms` callables
        Scalar-valued coefficient functions in each term of the affine
        expansion (θ_{i}'s).
    matrices : list of `nterms` ndarrays, all of the same shape
        Operator matrices in each term of the affine expansion (G_{i}'s).
    """
    # Evaluation presumably yields a CubicOperator (see base class __call__).
    _OperatorClass = CubicOperator
| [
"numpy.all"
] | [((4884, 4905), 'numpy.all', 'np.all', (['(left == right)'], {}), '(left == right)\n', (4890, 4905), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import tensorflow as tf
import numpy as np
import os
import time
import random
import sys
# Parse command-line arguments: argv[1] = training text file (required),
# argv[2] = model name (optional, defaults to "shakespeare").
# BUGFIX: the original indexed sys.argv unconditionally, which raised an
# IndexError before the `== None` checks could ever run; guard with len()
# instead, and compare against None with `is`.
model_name = sys.argv[2] if len(sys.argv) > 2 else None
textfile = sys.argv[1] if len(sys.argv) > 1 else None
if model_name is None:
    model_name = "shakespeare"
if textfile is None:
    sys.exit('No data text file specified in command line args')
# Load the training corpus and build the character-level vocabulary.
# BUGFIX: use a context manager so the file handle is closed deterministically
# (the original `open(...).read()` left closing to the garbage collector).
with open(textfile, 'rb') as corpus_file:
    text = corpus_file.read().decode(encoding='utf-8')
vocab = sorted(set(text))                            # unique chars, sorted
char_to_index = {u: i for i, u in enumerate(vocab)}  # char -> integer id
index_to_char = np.array(vocab)                      # integer id -> char
text_as_int = np.array([char_to_index[c] for c in text])  # encoded corpus
# Each training example is a window of seq_length characters.
seq_length = 100
# NOTE(review): despite the name, this is the number of (seq_length+1)-char
# chunks in the corpus, not an epoch count -- and it is never used below
# (training runs for EPOCHS=10 instead).
n_epochs = len(text) // (seq_length+1)
# One integer-encoded character per dataset element; batch into chunks of
# seq_length+1 so each chunk yields an input and a one-char-shifted target.
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
sequences = char_dataset.batch(seq_length+1, drop_remainder=True)
def split_input_target(chunk):
    """Split a (seq_length+1)-element chunk into an (input, target) pair,
    where the target is the input shifted forward by one position."""
    return chunk[:-1], chunk[1:]
# Turn each chunk into an (input, shifted-target) pair.
dataset = sequences.map(split_input_target)
# Batch size
BATCH_SIZE = 64
# Buffer size to shuffle the dataset
# (TF data is designed to work with possibly infinite sequences,
# so it doesn't attempt to shuffle the entire sequence in memory. Instead,
# it maintains a buffer in which it shuffles elements).
BUFFER_SIZE = 10000
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
# print(dataset)
# Model hyper-parameters.
vocab_size = len(vocab)
embedding_dim = 256
rnn_units = 1024
def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    """Build the char-level language model: Embedding -> GRU -> Dense logits.

    The GRU is stateful, so batch_size is baked into the input shape; the
    Dense layer emits one raw logit per vocabulary character (no softmax).
    """
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, None]),
        tf.keras.layers.GRU(rnn_units, return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform'),
        tf.keras.layers.Dense(vocab_size)
    ])
    return model
model = build_model(vocab_size=len(vocab), embedding_dim=embedding_dim, rnn_units=rnn_units, batch_size=BATCH_SIZE)
# Sanity-check the output shape on a single batch.
# NOTE(review): example_batch_predictions / target_example_batch deliberately
# leak out of the loop body and are reused below -- fragile but intentional.
for input_example_batch, target_example_batch in dataset.take(1):
    example_batch_predictions = model(input_example_batch)
    print(example_batch_predictions.shape, "# (batch_size, sequence_length, vocab_size)")
model.summary()
# Sample characters from the (still untrained) model's logits, demo only.
sampled_indices = tf.random.categorical(example_batch_predictions[0], num_samples=1)
sampled_indices = tf.squeeze(sampled_indices,axis=-1).numpy()
def loss(labels, logits):
    """Sparse categorical cross-entropy on raw logits (from_logits=True)."""
    return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)
example_batch_loss = loss(target_example_batch, example_batch_predictions)
print("Prediction shape: ", example_batch_predictions.shape)
print("Scalar loss: ", example_batch_loss.numpy().mean())
model.compile(optimizer='adam', loss=loss)
# Checkpoint weights after each epoch.
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}")
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_prefix, save_weights_only=True
)
EPOCHS = 10
history = model.fit(dataset, epochs=EPOCHS, callbacks=[checkpoint_callback])
# NOTE(review): this bare call discards its result -- effectively a no-op.
tf.train.latest_checkpoint(checkpoint_dir)
# Rebuild with batch_size=1 for generation and load the trained weights.
model = build_model(vocab_size, embedding_dim, rnn_units, batch_size=1)
model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
model.build(tf.TensorShape([1, None]))
model.summary()
model.save(f'models/{model_name}') # save the model locally
# NOTE(review): "Succesfully" is a typo in the runtime message; left unchanged
# here to preserve behavior.
print("Succesfully saved model")
| [
"sys.exit",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.random.categorical",
"os.path.join",
"tensorflow.keras.layers.Embedding",
"numpy.array",
"tensorflow.keras.losses.sparse_categorical_crossentropy",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.ModelCheckpoint",
"... | [((458, 473), 'numpy.array', 'np.array', (['vocab'], {}), '(vocab)\n', (466, 473), True, 'import numpy as np\n'), ((489, 531), 'numpy.array', 'np.array', (['[char_to_index[c] for c in text]'], {}), '([char_to_index[c] for c in text])\n', (497, 531), True, 'import numpy as np\n'), ((603, 650), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['text_as_int'], {}), '(text_as_int)\n', (637, 650), True, 'import tensorflow as tf\n'), ((2074, 2140), 'tensorflow.random.categorical', 'tf.random.categorical', (['example_batch_predictions[0]'], {'num_samples': '(1)'}), '(example_batch_predictions[0], num_samples=1)\n', (2095, 2140), True, 'import tensorflow as tf\n'), ((2629, 2673), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""ckpt_{epoch}"""'], {}), "(checkpoint_dir, 'ckpt_{epoch}')\n", (2641, 2673), False, 'import os\n'), ((2696, 2786), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {'filepath': 'checkpoint_prefix', 'save_weights_only': '(True)'}), '(filepath=checkpoint_prefix,\n save_weights_only=True)\n', (2730, 2786), True, 'import tensorflow as tf\n'), ((2890, 2932), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (2916, 2932), True, 'import tensorflow as tf\n'), ((242, 302), 'sys.exit', 'sys.exit', (['"""No data text file specified in command line args"""'], {}), "('No data text file specified in command line args')\n", (250, 302), False, 'import sys\n'), ((2241, 2327), 'tensorflow.keras.losses.sparse_categorical_crossentropy', 'tf.keras.losses.sparse_categorical_crossentropy', (['labels', 'logits'], {'from_logits': '(True)'}), '(labels, logits, from_logits\n =True)\n', (2288, 2327), True, 'import tensorflow as tf\n'), ((3024, 3066), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (3050, 3066), True, 'import tensorflow as tf\n'), ((3080, 
3105), 'tensorflow.TensorShape', 'tf.TensorShape', (['[1, None]'], {}), '([1, None])\n', (3094, 3105), True, 'import tensorflow as tf\n'), ((2159, 2195), 'tensorflow.squeeze', 'tf.squeeze', (['sampled_indices'], {'axis': '(-1)'}), '(sampled_indices, axis=-1)\n', (2169, 2195), True, 'import tensorflow as tf\n'), ((1427, 1522), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['vocab_size', 'embedding_dim'], {'batch_input_shape': '[batch_size, None]'}), '(vocab_size, embedding_dim, batch_input_shape=[\n batch_size, None])\n', (1452, 1522), True, 'import tensorflow as tf\n'), ((1527, 1639), 'tensorflow.keras.layers.GRU', 'tf.keras.layers.GRU', (['rnn_units'], {'return_sequences': '(True)', 'stateful': '(True)', 'recurrent_initializer': '"""glorot_uniform"""'}), "(rnn_units, return_sequences=True, stateful=True,\n recurrent_initializer='glorot_uniform')\n", (1546, 1639), True, 'import tensorflow as tf\n'), ((1645, 1678), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['vocab_size'], {}), '(vocab_size)\n', (1666, 1678), True, 'import tensorflow as tf\n')] |
import numpy as np
from typing import Tuple
def similarity_transform(
    X: np.ndarray, Y: np.ndarray, dim: int = 3
) -> Tuple[np.ndarray, float, np.ndarray]:
    """Calculate the similarity transform between two (matching) point sets.

    Estimates the rotation ``R``, scale ``c`` and translation ``t`` that
    best map X onto Y in the least-squares sense, i.e. ``Y ≈ c * R @ X + t``
    (Umeyama's closed-form method).

    Parameters
    ----------
    X: np.ndarray
        Points of first trajectory (dim x n matrix, where dim = 3 for 3D)
    Y: np.ndarray
        Points of second trajectory (dim x n matrix, where dim = 3 for 3D)
    dim: int
        Dimensionality of points

    Returns
    -------
    R: np.ndarray
        Rotation matrix from X to Y
    c: float
        Scale factor from X to Y
    t: np.ndarray
        Translation from X to Y

    Raises
    ------
    ValueError
        If X or Y is not (dim x n), or if their shapes differ.

    Reference
    ---------
    Umeyama, "Least-squares estimation of transformation parameters
    between two point patterns," in IEEE Transactions on Pattern Analysis
    and Machine Intelligence, vol. 13, no. 4, pp. 376-380, April 1991,
    doi: 10.1109/34.88573.
    """
    if X.shape[0] != dim:
        raise ValueError(
            f"You've set {dim=}, so X should have shape ({dim}xn) "
            + f"but is {X.shape}!"
        )
    if Y.shape[0] != dim:
        raise ValueError(
            f"You've set {dim=}, so Y should have shape ({dim}xn) but "
            + f"is {Y.shape}!"
        )
    if X.shape != Y.shape:
        raise ValueError(
            f"X and Y must have same shape! But {X.shape} != {Y.shape}"
        )
    m, n = X.shape
    mu_x = np.mean(X, axis=1)
    mu_y = np.mean(Y, axis=1)
    X_centered = (X.T - mu_x).T
    Y_centered = (Y.T - mu_y).T
    # mean squared distance of X from its centroid (sigma_x^2 in Umeyama)
    s_xx = np.mean(np.sum(X_centered**2, 0))
    # cross-covariance matrix of the two centered point sets
    Sigma_xy = 1 / n * X_centered @ Y_centered.T
    U, D, V = np.linalg.svd(Sigma_xy)
    V = V.T  # numpy returns V^T; Umeyama's derivation uses V
    D = np.diag(D)
    # S flips the last singular direction when needed so R is a proper
    # rotation (det(R) = +1) rather than a reflection
    S = np.eye(m)
    if np.linalg.matrix_rank(Sigma_xy) > (m - 1):
        if np.linalg.det(Sigma_xy) < 0:
            S[m - 1, m - 1] = -1
    elif np.linalg.matrix_rank(Sigma_xy) == m - 1:
        if np.linalg.det(U) * np.linalg.det(V) < 0:
            S[m - 1, m - 1] = -1
    else:
        # degenerate point configuration: fall back to identity transform
        print("Rank too small! Cannot estimate transformation.")
        return np.eye(m), 1, np.zeros(m)
    R = (U @ S @ V.T).T
    c = np.trace(D @ S) / s_xx
    t = mu_y - c * R @ mu_x
    return R, c, t
def construct_transformation_matrix(
    R: np.ndarray, c: np.ndarray, t: np.ndarray
) -> np.ndarray:
    """Assemble the homogeneous (n+1)x(n+1) matrix for rotation R, scale c, translation t."""
    dim = R.shape[0]
    transform = np.eye(dim + 1)
    transform[:dim, :dim] = c * R
    transform[:dim, dim] = t
    return transform
def apply_transformation(points: np.ndarray, T: np.ndarray) -> np.ndarray:
    """Apply homogeneous transformation matrix T to a set of 3D points.

    Parameters
    ----------
    points: np.ndarray
        3 x n matrix containing the points to transform
    T: np.ndarray
        4 x 4 transformation matrix in homogeneous coordinates

    Returns
    -------
    np.ndarray
        3 x n matrix containing the transformed points

    Raises
    ------
    ValueError
        If `points` does not have three rows.
    """
    num_rows = points.shape[0]
    if num_rows != 3:
        raise ValueError(f"points should be (3xn) but is {points.shape}!")
    # lift into homogeneous coordinates by appending a row of ones
    homogeneous = np.vstack((points, np.ones((1, points.shape[1]))))
    mapped = np.dot(T, homogeneous)
    # divide out the homogeneous coordinate before returning
    return mapped[:num_rows, :] / mapped[num_rows:, :]
| [
"numpy.identity",
"numpy.mean",
"numpy.eye",
"numpy.copy",
"numpy.linalg.matrix_rank",
"numpy.ones",
"numpy.trace",
"numpy.diag",
"numpy.linalg.det",
"numpy.sum",
"numpy.dot",
"numpy.zeros",
"numpy.linalg.svd"
] | [((1453, 1471), 'numpy.mean', 'np.mean', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (1460, 1471), True, 'import numpy as np\n'), ((1483, 1501), 'numpy.mean', 'np.mean', (['Y'], {'axis': '(1)'}), '(Y, axis=1)\n', (1490, 1501), True, 'import numpy as np\n'), ((1676, 1699), 'numpy.linalg.svd', 'np.linalg.svd', (['Sigma_xy'], {}), '(Sigma_xy)\n', (1689, 1699), True, 'import numpy as np\n'), ((1768, 1778), 'numpy.diag', 'np.diag', (['D'], {}), '(D)\n', (1775, 1778), True, 'import numpy as np\n'), ((1788, 1797), 'numpy.eye', 'np.eye', (['m'], {}), '(m)\n', (1794, 1797), True, 'import numpy as np\n'), ((2528, 2546), 'numpy.identity', 'np.identity', (['(n + 1)'], {}), '(n + 1)\n', (2539, 2546), True, 'import numpy as np\n'), ((3148, 3181), 'numpy.ones', 'np.ones', (['(m + 1, points.shape[1])'], {}), '((m + 1, points.shape[1]))\n', (3155, 3181), True, 'import numpy as np\n'), ((3199, 3214), 'numpy.copy', 'np.copy', (['points'], {}), '(points)\n', (3206, 3214), True, 'import numpy as np\n'), ((3225, 3239), 'numpy.dot', 'np.dot', (['T', 'src'], {}), '(T, src)\n', (3231, 3239), True, 'import numpy as np\n'), ((1587, 1613), 'numpy.sum', 'np.sum', (['(X_centered ** 2)', '(0)'], {}), '(X_centered ** 2, 0)\n', (1593, 1613), True, 'import numpy as np\n'), ((1805, 1836), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['Sigma_xy'], {}), '(Sigma_xy)\n', (1826, 1836), True, 'import numpy as np\n'), ((2248, 2263), 'numpy.trace', 'np.trace', (['(D @ S)'], {}), '(D @ S)\n', (2256, 2263), True, 'import numpy as np\n'), ((1859, 1882), 'numpy.linalg.det', 'np.linalg.det', (['Sigma_xy'], {}), '(Sigma_xy)\n', (1872, 1882), True, 'import numpy as np\n'), ((1930, 1961), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['Sigma_xy'], {}), '(Sigma_xy)\n', (1951, 1961), True, 'import numpy as np\n'), ((2144, 2153), 'numpy.eye', 'np.eye', (['m'], {}), '(m)\n', (2150, 2153), True, 'import numpy as np\n'), ((2180, 2191), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (2188, 2191), 
True, 'import numpy as np\n'), ((1983, 1999), 'numpy.linalg.det', 'np.linalg.det', (['U'], {}), '(U)\n', (1996, 1999), True, 'import numpy as np\n'), ((2002, 2018), 'numpy.linalg.det', 'np.linalg.det', (['V'], {}), '(V)\n', (2015, 2018), True, 'import numpy as np\n')] |
import os
import numpy as np
from .filewrappers import *
from .from_binary import bioptigen_binary_reader as bioptigen
from .from_binary import heidelberg_binary_reader as heidelberg
from .from_binary import bioptigen_scan_type_map
from ..utilities import get_lut
from ..exceptions import FileLoadError
import matplotlib as mpl
mpl.use("Qt5Agg")
import matplotlib.pyplot as plt
##### Frames #####
class BaseOCTFrame:
    """Common interface for one OCT frame; subclasses supply the geometry."""

    def __init__(self, frame):
        # raw parsed frame record produced by the binary reader
        self.frame = frame

    def from_bytes(self, f):
        """Read this frame's raw uint16 pixels from open file handle ``f``."""
        f.seek(self.abs_pos, 0)
        return np.fromfile(f, dtype=np.uint16, count=self.count)

    def get_geom(self):
        pass

    def load(self, f):
        pass

    def __lt__(self, other):
        pass

    def __eq__(self, other):
        # NOTE(review): relies on `self.im`, which nothing here assigns — confirm
        return self.im == other
class E2EOCTFrame(BaseOCTFrame):
    """OCT frame stored in a Heidelberg .E2E file.

    Raw uint16 codes are decoded through a lookup table (LUT) and
    gamma-corrected when loaded.
    """
    def __init__(self,frame,lut=None,**kwargs):
        super().__init__(frame)
        # user-supplied LUT wins; otherwise fall back to the package default
        if lut is not None:
            self.lut = lut
        else:
            self.lut = get_lut()
        # self.gamma = 1.0/2.4
        # gamma exponent applied in normalize_float (earlier value kept above)
        self.gamma = 0.15
        self.get_geom()
    def get_geom(self):
        """Cache frame geometry, index and absolute file offset."""
        Ascans = self.frame.width
        depth = self.frame.height
        self.ind = self.frame.ind
        # load_geom is the on-disk layout; geom is the displayed orientation
        self.load_geom = (Ascans, depth)
        self.geom = (depth,Ascans)
        self.count = Ascans*depth
        self.abs_pos = self.frame.pos
    def load(self,f):
        """Read, LUT-decode, normalize and rotate the frame image from ``f``."""
        Ascans, depth = self.load_geom
        raveled = self.from_lut(f)
        im = raveled.reshape(Ascans,depth)
        im = self.normalize_float(im)
        # rotate 90 degrees so the image matches self.geom's orientation
        im = np.rot90(im,axes=(0,1))
        return im
    def from_lut(self,f):
        """Read raw uint16 codes at the frame offset and map them through the LUT."""
        def lut_to_uf16(f):
            im = np.fromfile(f, dtype=np.uint16, count=self.count)
            im = self.lut[im]
            return im
        f.seek(self.abs_pos,0)
        im = lut_to_uf16(f)
        return im
    def normalize_float(self,im,dtype='uint16'):
        """Scale ``im`` to [0, 1], apply gamma, and expand to the target range.

        ``dtype`` only chooses the output range (65535 for 'uint16', else
        255); the returned array stays floating point — no cast is done.
        """
        im = im*(1/np.max(im))
        if dtype=='uint16':
            im = 65535 * np.float_power(im, self.gamma)
        else:
            im = 255 * np.float_power(im, self.gamma)
        return im
    def __repr__(self):
        return f'Pos: {self.abs_pos}\nPixels: {self.count}\nBytes:{self.frame.size}'
class BioptigenOCTFrame(BaseOCTFrame):
    """OCT frame stored as plain uint16 pixels in a Bioptigen .OCT file."""

    def __init__(self, frame):
        super().__init__(frame)
        self.get_geom()

    def get_geom(self):
        """Cache frame geometry and file offset from the parsed image record."""
        image = self.frame.image
        self.geom = (image.columns, image.rows)
        self.count = image.totalpixels
        self.abs_pos = image.offset

    def load(self, f):
        """Read this frame from ``f`` and reshape it to (A-scans, depth)."""
        ascans, depth = self.geom
        flat = self.from_bytes(f)
        return flat.reshape(ascans, depth)
# Maps a binary-reader module to the frame class able to decode its frames.
framemap = {bioptigen:BioptigenOCTFrame,
            heidelberg: E2EOCTFrame}
class OCTFrame:
    """Factory namespace: builds the right frame wrapper for an OCT file type."""
    def convert(frame,type=bioptigen,**kwargs):
        # NOTE: `type` shadows the builtin; name kept for backward compatibility
        return framemap[type](frame,**kwargs)
#########################
##### Frame Managers #####
class BaseFrameManager:
    """Base class holding parsed OCT frames and their ordering bookkeeping.

    Subclasses implement `_parse_file`, `_construct_header` and `_get_frames`
    for a specific vendor format.
    """
    def __init__(self,octFile:cachedOCT):
        self.oct_data = None
        self.frames = None
        self.set_scale(1,1)
        self.octFile = octFile
        self.path = octFile.file.path.as_posix()
        self.geom = (0,0)
    def _construct_header(self):
        pass
    def _parse_file(self):
        ##put pointers to data into list of OCTFrame objects
        ##put expected values into .header attribute
        self.parsedOCT = self.octFile.file.binary_format.parse_file(self.path)
        # try:
        #     self.parsedOCT = self.octFile.file.binary_format.parse_file(self.path)
        # except Exception as e:
        #     raise(FileLoadError(e,f'{self.octFile.file.extension} not a supported OCT Filetype'))
    def _get_frames(self):
        pass
    def set_frameorder(self,indexArr):
        """Reorder frames by ``indexArr`` and record how to undo the reorder.

        NOTE(review): `_to_current_order` / `_to_original_order` are created
        in subclasses' `_get_frames`, so this must not run before that.
        """
        self.frames = self._reorder_frames(indexArr)
        self._to_current_order.append(indexArr)
        self._to_original_order = self._to_original_order[indexArr]
    def _reorder_frames(self,indexArr):
        # fancy-indexing on the ndarray of frame objects
        return self.frames[indexArr]
    def get_original_frame_order(self):
        """Return frames in acquisition order without changing internal state."""
        return self._reorder_frames(self._to_original_order)
    def set_original_frame_order(self):
        """Restore acquisition order and reset the reorder history."""
        self.set_frameorder(self._to_original_order)
        self._to_original_order = np.asarray(range(self.count))
        self._to_current_order = []
    def set_scale(self,xres = None, yres = None):
        """Set pixel resolutions; a ``None`` argument leaves that axis unchanged."""
        if xres is not None:
            self.xresolution = xres
        if yres is not None:
            self.yresolution = yres
class BioptigenFrameManager(BaseFrameManager):
    """Frame manager for Bioptigen .OCT files: parses the file, builds the
    header dict, wraps each frame and reorders frames into volume order."""
    def __init__(self,octFile:cachedOCT):
        super().__init__(octFile)
        self._parse_file()
        self._construct_header()
        self._get_frames()
        self._calc_oct_conversions()
        self.set_frameorder(self.indicies['to_vol'])
    def _parse_file(self):
        """Parse the binary file; record frame records and the scan type."""
        super()._parse_file()
        self.oct_data = self.parsedOCT.data
        self.oct_data = np.asarray(self.oct_data)
        self.scantype = self.octFile.scantype = bioptigen_scan_type_map[self.parsedOCT.header.scantype.value]
    def _struct_to_dict(self, S, pickleable=False):
        """Recursively convert a construct Container into a plain dict.

        With ``pickleable=True``, keys that should not be pickled
        ("_io", "pixels", "config") are dropped.
        """
        out = {}
        from construct import Container, ListContainer
        pickleableDict = pickleable
        for k,v in S.items():
            keyIsPickleable = (k!= "_io")&(k!='pixels')&(k!='config')
            if (pickleableDict&keyIsPickleable)|(not pickleableDict):
                if isinstance(v,Container)|issubclass(Container,type(v)):
                    v = self._struct_to_dict(v,pickleable)
                elif isinstance(v,ListContainer):
                    # list containers become index-keyed dicts
                    v = self._struct_to_dict({k:v for k,v in zip(range(len(v)),v)},pickleable)
                out[k] = v
        return out
    def _construct_header(self):
        """Copy scan geometry and counts from the parsed header into a plain dict."""
        header = self.octFile.header = self.parsedOCT.header
        self.octFile.header_out = self._struct_to_dict(header,pickleable=True)
        header_dict = {}
        header_dict['system'] = 'Bioptigen'
        header_dict['framecount'] = header.frames.value
        header_dict['scancount'] = header.scans.value
        header_dict['totalframes'] = header.framecount.value
        header_dict['xmin'] = header.xmin.value
        header_dict['xmax'] = header.xmax.value
        header_dict['ymin'] = header.ymin.value
        header_dict['ymax'] = header.ymax.value
        header_dict['scandepth'] = header.scandepth.value
        header_dict['scanlength'] = header.scanlength.value
        header_dict['elscanlength'] = header.elscanlength.value
        header_dict['scanangle'] = header.scanangle.value
        self.header = header_dict
    def _get_frames(self):
        """Wrap each parsed frame in an OCTFrame and reset the frame ordering."""
        oct_type = self.octFile.file.binary_format
        self.frames = np.asarray([OCTFrame.convert(frame,oct_type) for frame in self.oct_data])
        self.count = len(self.frames)
        self._to_original_order = np.asarray(range(self.count))
        self._to_current_order = []
        self.geom = self.frames[0].geom
    def _calc_oct_conversions(self):
        """Precompute index arrays converting between stack order and volume order."""
        dim4, dim3 = self.header['framecount'],self.header['scancount']
        stack_to_vol = np.asarray([i+j*(dim4) for i in range(0,dim4) for j in range(0,dim3)])
        vol_to_stack = np.asarray([i+j*(dim3) for i in range(0,dim3) for j in range(0,dim4)])
        self.indicies = {'to_vol':stack_to_vol,
                         'to_stack':vol_to_stack}
class E2EFrameManager(BaseFrameManager):
    """Frame manager for Heidelberg .E2E files: parses the file, wraps frames
    (decoded through a shared LUT) and reorders them into volume order."""
    def __init__(self,octFile):
        super().__init__(octFile)
        # shared lookup table reused by every E2EOCTFrame
        self.LUT = get_lut()
        self._parse_file()
        self._get_frames()
        self._construct_header()
        self._calc_oct_conversions()
        self.set_frameorder(self.indicies['to_vol'])
    def _parse_file(self):
        """Parse the binary file; E2E volumes are always treated as 'rect' scans."""
        super()._parse_file()
        self.oct_data = self.parsedOCT.frames
        self.oct_data = np.asarray(self.oct_data)
        self.scantype = self.octFile.scantype = 'rect'
    def _struct_to_dict(self, S, pickleable=False):
        """Recursively convert a construct Container into a plain dict.

        NOTE(review): near-duplicate of BioptigenFrameManager._struct_to_dict
        (this one adds a plain-list branch); consider hoisting to the base.
        """
        out = {}
        from construct import Container, ListContainer
        pickleableDict = pickleable
        for k,v in S.items():
            keyIsPickleable = (k!= "_io")&(k!='pixels')&(k!='config')
            if (pickleableDict&keyIsPickleable)|(not pickleableDict):
                if isinstance(v,Container)|issubclass(Container,type(v)):
                    v = self._struct_to_dict(v,pickleable)
                elif isinstance(v,ListContainer):
                    v = self._struct_to_dict({k:v for k,v in zip(range(len(v)),v)},pickleable)
                elif isinstance(v,list):
                    v = [self._struct_to_dict(x,pickleable) for x in v]
                out[k] = v
        return out
    def _construct_header(self):
        """Build the header dict; several values are constants inferred offline."""
        header = self.octFile.header = self.parsedOCT.header
        self.octFile.header_out = self._struct_to_dict(header,pickleable=True)
        header_dict = {}
        header_dict['system'] = 'Heidelberg'
        header_dict['framecount'] = header.frame_details.framecount
        header_dict['scancount'] = header.frame_details.scancount
        header_dict['totalframes'] = header.frame_details.totalframes
        ##Inferred from .xml files
        header_dict['scanlength'] = 2.9
        yres = header_dict['scanlength']/self.geom[1]
        self.set_scale(xres=3.9e-3,yres=yres)
        header_dict['scandepth'] = self.xresolution*self.geom[0]
        header_dict['elscanlength'] = 2.9
        header_dict['ymin'] = 0
        header_dict['ymax'] = header_dict['scanlength']
        header_dict['xmin'] = 0
        header_dict['xmax'] = header_dict['scandepth']
        # NOTE(review): framecount is overridden with a hard-coded 3 and
        # scancount rescaled accordingly — confirm this grouping assumption
        header_dict['framecount'] = 3
        header_dict['scancount'] = int(header_dict['scancount']/header_dict['framecount'])
        self.header = header_dict
    def _get_frames(self):
        """Wrap each parsed frame in an OCTFrame (sharing the LUT) and reset ordering."""
        self.frames = np.asarray([OCTFrame.convert(frame,self.octFile.oct_type,lut=self.LUT) for frame in self.oct_data])
        self.count = len(self.frames)
        self._to_original_order = np.asarray(range(self.count))
        self._to_current_order = []
        self.geom = self.frames[0].geom
    def _calc_oct_conversions(self):
        """Precompute index arrays converting between stack order and volume order."""
        dim4, dim3 = self.header['framecount'],self.header['scancount']
        stack_to_vol = np.asarray([i+j*(dim4) for i in range(0,dim4) for j in range(0,dim3)])
        vol_to_stack = np.asarray([i+j*(dim3) for i in range(0,dim3) for j in range(0,dim4)])
        self.indicies = {'to_vol':stack_to_vol,
                         'to_stack':vol_to_stack}
class FrameManager:
    """Factory: picks the frame-manager class matching the file's binary format."""
    frame_manager_map = {bioptigen:BioptigenFrameManager,
                         heidelberg: E2EFrameManager}
    def start(octFile:cachedOCT):
        # dispatch on the binary reader module associated with the file
        return FrameManager.frame_manager_map[octFile.file.binary_format](octFile)
#########################
if __name__=='__main__':
    # Manual smoke test: parse a bundled example file when run as a script.
    import time
    ##Heidelberg
    filename = f'E2E004-Dillon.E2E'
    filepath = f"{os.path.dirname(os.path.abspath(__file__))}{os.sep}ExampleFiles{os.sep}{os.sep}(unknown)"
    print('beginning parsing')
    file = cachedOCT(filepath)
    ##BIoptigen
    # filename = f'5663_OD_V_1x1_0_0000068.OCT'
    # filepath = f"{os.path.dirname(os.path.abspath(__file__))}{os.sep}ExampleFiles{os.sep}{os.sep}(unknown)"
| [
"numpy.fromfile",
"matplotlib.use",
"numpy.asarray",
"numpy.max",
"numpy.rot90",
"os.path.abspath",
"numpy.float_power"
] | [((328, 345), 'matplotlib.use', 'mpl.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (335, 345), True, 'import matplotlib as mpl\n'), ((549, 598), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.uint16', 'count': 'self.count'}), '(f, dtype=np.uint16, count=self.count)\n', (560, 598), True, 'import numpy as np\n'), ((1552, 1577), 'numpy.rot90', 'np.rot90', (['im'], {'axes': '(0, 1)'}), '(im, axes=(0, 1))\n', (1560, 1577), True, 'import numpy as np\n'), ((5022, 5047), 'numpy.asarray', 'np.asarray', (['self.oct_data'], {}), '(self.oct_data)\n', (5032, 5047), True, 'import numpy as np\n'), ((7895, 7920), 'numpy.asarray', 'np.asarray', (['self.oct_data'], {}), '(self.oct_data)\n', (7905, 7920), True, 'import numpy as np\n'), ((1679, 1728), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.uint16', 'count': 'self.count'}), '(f, dtype=np.uint16, count=self.count)\n', (1690, 1728), True, 'import numpy as np\n'), ((1936, 1946), 'numpy.max', 'np.max', (['im'], {}), '(im)\n', (1942, 1946), True, 'import numpy as np\n'), ((2001, 2031), 'numpy.float_power', 'np.float_power', (['im', 'self.gamma'], {}), '(im, self.gamma)\n', (2015, 2031), True, 'import numpy as np\n'), ((2069, 2099), 'numpy.float_power', 'np.float_power', (['im', 'self.gamma'], {}), '(im, self.gamma)\n', (2083, 2099), True, 'import numpy as np\n'), ((10935, 10960), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (10950, 10960), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Functions for generating spatial permutations
"""
import warnings
import numpy as np
from scipy import optimize, spatial
def _gen_rotation(seed=None):
"""
Generates random matrix for rotating spherical coordinates
Parameters
----------
seed : {int, np.random.RandomState instance, None}, optional
Seed for random number generation
Returns
-------
rotate_{l,r} : (3, 3) numpy.ndarray
Rotations for left and right hemisphere coordinates, respectively
"""
rs = np.random.default_rng(seed)
# for reflecting across Y-Z plane
reflect = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])
# generate rotation for left
rotate_l, temp = np.linalg.qr(rs.normal(size=(3, 3)))
rotate_l = rotate_l @ np.diag(np.sign(np.diag(temp)))
if np.linalg.det(rotate_l) < 0:
rotate_l[:, 0] = -rotate_l[:, 0]
# reflect the left rotation across Y-Z plane
rotate_r = reflect @ rotate_l @ reflect
return rotate_l, rotate_r
def gen_spinsamples(coords, hemiid, n_rotate=1000, check_duplicates=True,
                    method='original', exact=False, seed=None, verbose=False,
                    return_cost=False):
    """
    Returns a resampling array for `coords` obtained from rotations / spins

    Using the method initially proposed in [ST1]_ (and later modified + updated
    based on findings in [ST2]_ and [ST3]_), this function applies random
    rotations to the user-supplied `coords` in order to generate a resampling
    array that preserves its spatial embedding. Rotations are generated for one
    hemisphere and mirrored for the other (see `hemiid` for more information).

    Due to irregular sampling of `coords` and the randomness of the rotations
    it is possible that some "rotations" may resample with replacement (i.e.,
    will not be a true permutation). The likelihood of this can be reduced by
    either increasing the sampling density of `coords` or changing the
    ``method`` parameter (see Notes for more information on the latter).

    Parameters
    ----------
    coords : (N, 3) array_like
        X, Y, Z coordinates of `N` nodes/parcels/regions/vertices defined on a
        sphere
    hemiid : (N,) array_like
        Array denoting hemisphere designation of coordinates in `coords`, where
        values should be {0, 1} denoting the different hemispheres. Rotations
        are generated for one hemisphere and mirrored across the y-axis for the
        other hemisphere.
    n_rotate : int, optional
        Number of rotations to generate. Default: 1000
    check_duplicates : bool, optional
        Whether to check for and attempt to avoid duplicate resamplings. A
        warnings will be raised if duplicates cannot be avoided. Setting to
        True may increase the runtime of this function! Default: True
    method : {'original', 'vasa', 'hungarian'}, optional
        Method by which to match non- and rotated coordinates. Specifying
        'original' will use the method described in [ST1]_. Specfying 'vasa'
        will use the method described in [ST4]_. Specfying 'hungarian' will use
        the Hungarian algorithm to minimize the global cost of reassignment
        (will dramatically increase runtime). Default: 'original'
    exact : bool or {'vasa'}, optional
        Deprecated; use `method` instead. A truthy value raises a
        DeprecationWarning and is translated into the equivalent `method`.
        Default: False
    seed : {int, np.random.Generator instance, None}, optional
        Seed for random number generation. Default: None
    verbose : bool, optional
        Whether to print occasional status messages. Default: False
    return_cost : bool, optional
        Whether to return cost array (specified as Euclidean distance) for each
        coordinate for each rotation Default: False

    Returns
    -------
    spinsamples : (N, `n_rotate`) numpy.ndarray
        Resampling matrix to use in permuting data based on supplied `coords`.
    cost : (N, `n_rotate`,) numpy.ndarray
        Cost (specified as Euclidean distance) of re-assigning each coordinate
        for every rotation in `spinsamples`. Only provided if `return_cost` is
        True.

    Notes
    -----
    By default, this function uses the minimum Euclidean distance between the
    original coordinates and the new, rotated coordinates to generate a
    resampling array after each spin. Unfortunately, this can (with some
    frequency) lead to multiple coordinates being re-assigned the same value:

    >>> from netneurotools import stats as nnstats
    >>> coords = [[0, 0, 1], [1, 0, 0], [0, 0, 1], [1, 0, 0]]
    >>> hemi = [0, 0, 1, 1]
    >>> nnstats.gen_spinsamples(coords, hemi, n_rotate=1, seed=1,
    ...                         method='original', check_duplicates=False)
    array([[0],
           [0],
           [2],
           [3]], dtype=int32)

    While this is reasonable in most circumstances, if you feel incredibly
    strongly about having a perfect "permutation" (i.e., all indices appear
    once and exactly once in the resampling), you can set the ``method``
    parameter to either 'vasa' or 'hungarian':

    >>> nnstats.gen_spinsamples(coords, hemi, n_rotate=1, seed=1,
    ...                         method='vasa', check_duplicates=False)
    array([[1],
           [0],
           [2],
           [3]], dtype=int32)
    >>> nnstats.gen_spinsamples(coords, hemi, n_rotate=1, seed=1,
    ...                         method='hungarian', check_duplicates=False)
    array([[0],
           [1],
           [2],
           [3]], dtype=int32)

    Note that setting this parameter may increase the runtime of the function
    (especially for `method='hungarian'`). Refer to [ST1]_ for information on
    why the default (i.e., ``exact`` set to False) suffices in most cases.

    For the original MATLAB implementation of this function refer to [ST5]_.

    References
    ----------
    .. [ST1] <NAME>., <NAME>., <NAME>., <NAME>.,
       <NAME>., <NAME>., <NAME>., & <NAME>. (2018).
       On testing for spatial correspondence between maps of human brain
       structure and function. NeuroImage, 178, 540-51.
    .. [ST2] <NAME>., & <NAME>. (2016). Random Rotation Ensembles.
       Journal of Machine Learning Research, 17(4), 1–26.
    .. [ST3] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
       <NAME>., & <NAME>. (2018). SPANOL (SPectral ANalysis of Lobes):
       A Spectral Clustering Framework for Individual and Group Parcellation of
       Cortical Surfaces in Lobes. Frontiers in Neuroscience, 12, 354.
    .. [ST4] <NAME>., <NAME>., <NAME>., <NAME>.,
       <NAME>., <NAME>., ... & <NAME>. (2018). Adolescent
       tuning of association cortex in human structural brain networks.
       Cerebral Cortex, 28(1), 281-294.
    .. [ST5] https://github.com/spin-test/spin-test
    """
    methods = ['original', 'vasa', 'hungarian']
    if method not in methods:
        raise ValueError('Provided method "{}" invalid. Must be one of {}.'
                         .format(method, methods))
    if exact:
        warnings.warn('The `exact` parameter will no longer be supported in '
                      'an upcoming release. Please use the `method` parameter '
                      'instead.', DeprecationWarning, stacklevel=3)
        if exact == 'vasa' and method == 'original':
            method = 'vasa'
        elif exact and method == 'original':
            method = 'hungarian'
    # default_rng passes an existing Generator through unchanged, so every
    # rotation drawn below advances the same random stream
    seed = np.random.default_rng(seed)
    coords = np.asanyarray(coords)
    hemiid = np.squeeze(np.asanyarray(hemiid, dtype='int8'))
    # check supplied coordinate shape
    if coords.shape[-1] != 3 or coords.squeeze().ndim != 2:
        raise ValueError('Provided `coords` must be of shape (N, 3), not {}'
                         .format(coords.shape))
    # ensure hemisphere designation array is correct
    if hemiid.ndim != 1:
        raise ValueError('Provided `hemiid` array must be one-dimensional.')
    if len(coords) != len(hemiid):
        raise ValueError('Provided `coords` and `hemiid` must have the same '
                         'length. Provided lengths: coords = {}, hemiid = {}'
                         .format(len(coords), len(hemiid)))
    if np.max(hemiid) != 1 or np.min(hemiid) != 0:
        raise ValueError('Hemiid must have values in {0, 1} denoting left and '
                         'right hemisphere coordinates, respectively. '
                         + 'Provided array contains values: {}'
                         .format(np.unique(hemiid)))
    # empty array to store resampling indices
    # int32 should be enough; if you're ever providing `coords` with more than
    # 2147483647 rows please reconsider your life choices
    spinsamples = np.zeros((len(coords), n_rotate), dtype='int32')
    cost = np.zeros((len(coords), n_rotate))
    inds = np.arange(len(coords), dtype='int32')
    # generate rotations and resampling array!
    msg, warned = '', False
    for n in range(n_rotate):
        count, duplicated = 0, True
        if verbose:
            msg = 'Generating spin {:>5} of {:>5}'.format(n, n_rotate)
            print(msg, end='\r', flush=True)
        # retry (up to 500 times) until this spin is not a duplicate
        while duplicated and count < 500:
            count, duplicated = count + 1, False
            resampled = np.zeros(len(coords), dtype='int32')
            # rotate each hemisphere separately
            for h, rot in enumerate(_gen_rotation(seed=seed)):
                hinds = (hemiid == h)
                coor = coords[hinds]
                # if we need an "exact" mapping (i.e., each node needs to be
                # assigned EXACTLY once) then we have to calculate the full
                # distance matrix which is a nightmare with respect to memory
                # for anything that isn't parcellated data.
                # that is, don't do this with vertex coordinates!
                if method == 'vasa':
                    dist = spatial.distance_matrix(coor, coor @ rot)
                    # min of max a la Vasa et al., 2018
                    col = np.zeros(len(coor), dtype='int32')
                    for r in range(len(dist)):
                        # find parcel whose closest neighbor is farthest away
                        # overall; assign to that
                        row = dist.min(axis=1).argmax()
                        col[row] = dist[row].argmin()
                        cost[inds[hinds][row], n] = dist[row, col[row]]
                        # set to -inf and inf so they can't be assigned again
                        dist[row] = -np.inf
                        dist[:, col[row]] = np.inf
                # optimization of total cost using Hungarian algorithm. this
                # may result in certain parcels having higher cost than with
                # `method='vasa'` but should always result in the total cost
                # being lower #tradeoffs
                elif method == 'hungarian':
                    dist = spatial.distance_matrix(coor, coor @ rot)
                    row, col = optimize.linear_sum_assignment(dist)
                    cost[hinds, n] = dist[row, col]
                # if nodes can be assigned multiple targets, we can simply use
                # the absolute minimum of the distances (no optimization
                # required) which is _much_ lighter on memory
                # huge thanks to https://stackoverflow.com/a/47779290 for this
                # memory-efficient method
                elif method == 'original':
                    dist, col = spatial.cKDTree(coor @ rot).query(coor, 1)
                    cost[hinds, n] = dist
                resampled[hinds] = inds[hinds][col]
            # if we want to check for duplicates ensure that we don't have any
            if check_duplicates:
                if np.any(np.all(resampled[:, None] == spinsamples[:, :n], 0)):
                    duplicated = True
                # if our "spin" is identical to the input then that's no good
                elif np.all(resampled == inds):
                    duplicated = True
        # if we broke out because we tried 500 rotations and couldn't generate
        # a new one, warn that we're using duplicate rotations and give up.
        # this should only be triggered if check_duplicates is set to True
        if count == 500 and not warned:
            warnings.warn('Duplicate rotations used. Check resampling array '
                          'to determine real number of unique permutations.')
            warned = True
        spinsamples[:, n] = resampled
    if verbose:
        print(' ' * len(msg) + '\b' * len(msg), end='', flush=True)
    if return_cost:
        return spinsamples, cost
    return spinsamples
| [
"numpy.random.default_rng",
"numpy.unique",
"scipy.optimize.linear_sum_assignment",
"scipy.spatial.cKDTree",
"scipy.spatial.distance_matrix",
"numpy.linalg.det",
"numpy.asanyarray",
"numpy.array",
"numpy.max",
"numpy.diag",
"numpy.min",
"warnings.warn",
"numpy.all"
] | [((549, 576), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (570, 576), True, 'import numpy as np\n'), ((630, 674), 'numpy.array', 'np.array', (['[[-1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (638, 674), True, 'import numpy as np\n'), ((7397, 7424), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (7418, 7424), True, 'import numpy as np\n'), ((7439, 7460), 'numpy.asanyarray', 'np.asanyarray', (['coords'], {}), '(coords)\n', (7452, 7460), True, 'import numpy as np\n'), ((832, 855), 'numpy.linalg.det', 'np.linalg.det', (['rotate_l'], {}), '(rotate_l)\n', (845, 855), True, 'import numpy as np\n'), ((7008, 7185), 'warnings.warn', 'warnings.warn', (['"""The `exact` parameter will no longer be supported in an upcoming release. Please use the `method` parameter instead."""', 'DeprecationWarning'], {'stacklevel': '(3)'}), "(\n 'The `exact` parameter will no longer be supported in an upcoming release. Please use the `method` parameter instead.'\n , DeprecationWarning, stacklevel=3)\n", (7021, 7185), False, 'import warnings\n'), ((7485, 7520), 'numpy.asanyarray', 'np.asanyarray', (['hemiid'], {'dtype': '"""int8"""'}), "(hemiid, dtype='int8')\n", (7498, 7520), True, 'import numpy as np\n'), ((8160, 8174), 'numpy.max', 'np.max', (['hemiid'], {}), '(hemiid)\n', (8166, 8174), True, 'import numpy as np\n'), ((8183, 8197), 'numpy.min', 'np.min', (['hemiid'], {}), '(hemiid)\n', (8189, 8197), True, 'import numpy as np\n'), ((12279, 12403), 'warnings.warn', 'warnings.warn', (['"""Duplicate rotations used. Check resampling array to determine real number of unique permutations."""'], {}), "(\n 'Duplicate rotations used. 
Check resampling array to determine real number of unique permutations.'\n )\n", (12292, 12403), False, 'import warnings\n'), ((809, 822), 'numpy.diag', 'np.diag', (['temp'], {}), '(temp)\n', (816, 822), True, 'import numpy as np\n'), ((8453, 8470), 'numpy.unique', 'np.unique', (['hemiid'], {}), '(hemiid)\n', (8462, 8470), True, 'import numpy as np\n'), ((9859, 9900), 'scipy.spatial.distance_matrix', 'spatial.distance_matrix', (['coor', '(coor @ rot)'], {}), '(coor, coor @ rot)\n', (9882, 9900), False, 'from scipy import optimize, spatial\n'), ((11740, 11791), 'numpy.all', 'np.all', (['(resampled[:, None] == spinsamples[:, :n])', '(0)'], {}), '(resampled[:, None] == spinsamples[:, :n], 0)\n', (11746, 11791), True, 'import numpy as np\n'), ((11931, 11956), 'numpy.all', 'np.all', (['(resampled == inds)'], {}), '(resampled == inds)\n', (11937, 11956), True, 'import numpy as np\n'), ((10891, 10932), 'scipy.spatial.distance_matrix', 'spatial.distance_matrix', (['coor', '(coor @ rot)'], {}), '(coor, coor @ rot)\n', (10914, 10932), False, 'from scipy import optimize, spatial\n'), ((10964, 11000), 'scipy.optimize.linear_sum_assignment', 'optimize.linear_sum_assignment', (['dist'], {}), '(dist)\n', (10994, 11000), False, 'from scipy import optimize, spatial\n'), ((11463, 11490), 'scipy.spatial.cKDTree', 'spatial.cKDTree', (['(coor @ rot)'], {}), '(coor @ rot)\n', (11478, 11490), False, 'from scipy import optimize, spatial\n')] |
import numpy as np
import struct
class IdxFile:
    """Reader for the IDX vector/matrix file format (e.g. MNIST).

    An IDX file starts with a 4-byte magic number (two zero bytes, a type
    code, and the number of dimensions), followed by one big-endian 4-byte
    size per dimension, followed by the raw data in C order.
    """

    # IDX type code -> human-readable description
    TYPE_MAPPING = {b"\x08": "unsigned byte", b"\x09": "signed byte", b"\x0B": "short (2 bytes)",
                    b"\x0C": "int (4 bytes)", b"\x0D": "float (4 bytes)", b"\x0E": "double (8 bytes)"}
    # IDX type code -> bytes per element
    BYTE_MAPPING = {b"\x08": 1, b"\x09": 1, b"\x0B": 2, b"\x0C": 4, b"\x0D": 4, b"\x0E": 8}
    # IDX type code -> numpy dtype (data is stored MSB-first / big-endian)
    NP_TYPE_MAPPING = {b"\x08": np.uint8, b"\x09": np.int8, b"\x0B": np.short, b"\x0C": np.int32, b"\x0D": np.float32,
                       b"\x0E": np.double}

    def __init__(self, file_name, dimension, type_flag):
        self.file_name = file_name  # path to the IDX file on disk
        self.dimension = dimension  # tuple of sizes; dimension[0] is the sample count
        self.type_flag = type_flag  # one-byte IDX type code (e.g. b"\x08")

    def __len__(self):
        """Number of samples (size of the first dimension)."""
        return self.dimension[0]

    def _read_data_sample(self, shape, fptr):
        """Read ``prod(shape)`` elements from ``fptr`` at its current offset."""
        itemsize = self.BYTE_MAPPING[self.type_flag]
        raw = fptr.read(int(np.prod(shape)) * itemsize)
        # IDX stores multi-byte values MSB-first, so force a big-endian dtype
        # (the previous code always decoded as single unsigned bytes).
        dtype = np.dtype(self.NP_TYPE_MAPPING[self.type_flag]).newbyteorder(">")
        return np.frombuffer(raw, dtype=dtype).reshape(shape)

    def generator(self, batch_size=1):
        """Yield batches of samples in file order; the final batch may be smaller."""
        with open(self.file_name, "rb") as fptr:
            # Skip the header: 2 zero bytes + type code + dimension count
            # (4 bytes total), then one 4-byte size per dimension.
            # BUGFIX: the previous code issued three *absolute* seek() calls,
            # which do not accumulate, so reads started inside the header.
            fptr.seek(4 + 4 * len(self.dimension))
            shape = [batch_size]
            if len(self.dimension) > 1:
                shape += self.dimension[1:]
            for _ in range(int(np.ceil(len(self) / batch_size) - 1)):
                yield self._read_data_sample(shape, fptr)
            # last batch holds the remainder (or a full batch if it divides evenly)
            last_batch_size = len(self) % batch_size if len(self) % batch_size != 0 else batch_size
            shape[0] = last_batch_size
            yield self._read_data_sample(shape, fptr)

    @classmethod
    def from_file(cls, file_name):
        """
        THE IDX FILE FORMAT

        the IDX file format is a simple format for vectors and multidimensional matrices of various numerical types.
        The basic format is

        magic number
        size in dimension 0
        size in dimension 1
        size in dimension 2
        .....
        size in dimension N
        data

        The magic number is an integer (MSB first). The first 2 bytes are always 0.
        The third byte codes the type of the data:
        0x08: unsigned byte
        0x09: signed byte
        0x0B: short (2 bytes)
        0x0C: int (4 bytes)
        0x0D: float (4 bytes)
        0x0E: double (8 bytes)

        The 4-th byte codes the number of dimensions of the vector/matrix: 1 for vectors, 2 for matrices....
        The sizes in each dimension are 4-byte integers (MSB first, high endian, like in most non-Intel processors).
        The data is stored like in a C array, i.e. the index in the last dimension changes the fastest.
        """
        with open(file_name, "rb") as fptr:
            fptr.read(2)  # discard initial 0x00 0x00
            type_flag, ndim = struct.unpack("cb", fptr.read(2))
            sizes = []
            for _ in range(ndim):
                # each dimension size is a big-endian (MSB first) 4-byte int
                sizes.append(struct.unpack(">I", fptr.read(4))[0])
            return cls(file_name, tuple(sizes), type_flag)
| [
"numpy.prod",
"numpy.ndarray"
] | [((910, 944), 'numpy.ndarray', 'np.ndarray', (['shape', '"""B"""', 'data_bytes'], {}), "(shape, 'B', data_bytes)\n", (920, 944), True, 'import numpy as np\n'), ((862, 876), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (869, 876), True, 'import numpy as np\n')] |
from __future__ import division, absolute_import, print_function
import numpy as np
"""
A sript to generate van der Waals surface of molecules.
"""
# Van der Waals radii (in angstrom) are taken from GAMESS.
vdw_r = {'H': 1.20, 'HE': 1.20,
'LI': 1.37, 'BE': 1.45, 'B': 1.45, 'C': 1.50,
'N': 1.50, 'O': 1.40, 'F': 1.35, 'NE': 1.30,
'NA': 1.57, 'MG': 1.36, 'AL': 1.24, 'SI': 1.17,
'P': 1.80, 'S': 1.75, 'CL': 1.70}
def surface(n):
    """Computes approximately n points on unit sphere. Code adapted from GAMESS.

    Parameters
    ----------
    n : int
        approximate number of requested surface points

    Returns
    -------
    ndarray
        numpy array of xyz coordinates of surface points
    """
    u = []
    eps = 1e-10
    nequat = int(np.sqrt(np.pi * n))
    # BUG FIX: for n == 1, nequat == 1 and the old `nvert = int(nequat/2)`
    # gave 0, so `fi = np.pi*i/nvert` raised ZeroDivisionError. Clamp to 1.
    nvert = max(1, nequat // 2)
    nu = 0
    for i in range(nvert + 1):
        fi = np.pi * i / nvert
        z = np.cos(fi)          # height of the current latitude ring
        xy = np.sin(fi)         # radius of the current latitude ring
        # number of points on this ring, at least one (covers the poles)
        nhor = max(1, int(nequat * xy + eps))
        for j in range(nhor):
            fj = 2 * np.pi * j / nhor
            x = np.cos(fj) * xy
            y = np.sin(fj) * xy
            if nu >= n:         # stop once n points have been produced
                return np.array(u)
            nu += 1
            u.append([x, y, z])
    return np.array(u)
def vdw_surface(coordinates, elements, scale_factor, density, input_radii):
    """Computes points outside the van der Waals surface of molecules.

    Parameters
    ----------
    coordinates : ndarray
        cartesian coordinates of the nuclei, in units of angstrom
    elements : list
        The symbols (e.g. C, H) for the atoms
    scale_factor : float
        The points on the molecular surface are set at a distance of
        scale_factor * vdw_radius away from each of the atoms.
    density : float
        The (approximate) number of points to generate per square angstrom
        of surface area. 1.0 is the default recommended by Kollman & Singh.
    input_radii : dict
        dictionary of user's defined VDW radii

    Returns
    -------
    radii : dict
        A dictionary of scaled VDW radii
    surface_points : ndarray
        array of the coordinates of the points on the surface
    """
    radii = {}
    surface_points = []
    # Scale each element's radius once, preferring user-supplied values
    # over the built-in GAMESS table.
    for symbol in elements:
        if symbol in radii:
            continue
        if symbol in input_radii:
            radii[symbol] = input_radii[symbol] * scale_factor
        elif symbol in vdw_r:
            radii[symbol] = vdw_r[symbol] * scale_factor
        else:
            raise KeyError('%s is not a supported element; ' % symbol
                           + 'use the "VDW_RADII" option to add '
                           + 'its van der Waals radius.')
    # Generate a scaled, translated sphere of points around each atom and
    # keep only the points outside every other atom's scaled VDW radius.
    for i, center in enumerate(coordinates):
        # approximate number of ESP grid points from the sphere's area
        n_points = int(density * 4.0 * np.pi * np.power(radii[elements[i]], 2))
        dots = center + radii[elements[i]] * surface(n_points)
        for dot in dots:
            keep = True
            for k, other in enumerate(coordinates):
                if k == i:
                    continue
                if np.linalg.norm(dot - other) < radii[elements[k]]:
                    keep = False
                    break
            if keep:
                surface_points.append(dot)
    return np.array(surface_points), radii
| [
"numpy.sqrt",
"numpy.power",
"numpy.array",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin"
] | [((1273, 1284), 'numpy.array', 'np.array', (['u'], {}), '(u)\n', (1281, 1284), True, 'import numpy as np\n'), ((795, 813), 'numpy.sqrt', 'np.sqrt', (['(np.pi * n)'], {}), '(np.pi * n)\n', (802, 813), True, 'import numpy as np\n'), ((918, 928), 'numpy.cos', 'np.cos', (['fi'], {}), '(fi)\n', (924, 928), True, 'import numpy as np\n'), ((942, 952), 'numpy.sin', 'np.sin', (['fi'], {}), '(fi)\n', (948, 952), True, 'import numpy as np\n'), ((3619, 3643), 'numpy.array', 'np.array', (['surface_points'], {}), '(surface_points)\n', (3627, 3643), True, 'import numpy as np\n'), ((1107, 1117), 'numpy.cos', 'np.cos', (['fj'], {}), '(fj)\n', (1113, 1117), True, 'import numpy as np\n'), ((1137, 1147), 'numpy.sin', 'np.sin', (['fj'], {}), '(fj)\n', (1143, 1147), True, 'import numpy as np\n'), ((1198, 1209), 'numpy.array', 'np.array', (['u'], {}), '(u)\n', (1206, 1209), True, 'import numpy as np\n'), ((2877, 2908), 'numpy.power', 'np.power', (['radii[elements[i]]', '(2)'], {}), '(radii[elements[i]], 2)\n', (2885, 2908), True, 'import numpy as np\n'), ((3396, 3436), 'numpy.linalg.norm', 'np.linalg.norm', (['(dots[j] - coordinates[k])'], {}), '(dots[j] - coordinates[k])\n', (3410, 3436), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Library for computing features that describe the local boundary curvature
This module provides functions that one can use to obtain, visualise and describe local curvature of a given object.
Available Functions:
-circumradius:Finds the radius of a circumcircle
-local_radius_curvature: Computes local radius of curvature
-global_curvature_features: Obtain features describing an object's local curvatures
-prominant_curvature_features: Obtain prominant (peaks) local curvature
-measure_curvature_features: Computes all curvature features
"""
# Import libraries
import numpy as np
import pandas as pd
from math import degrees, sqrt
import matplotlib.pyplot as plt
from skimage.morphology import erosion
from scipy import signal
from skimage import measure
def circumradius(T, binary_mask: np.ndarray):
    """Finds the radius of a circumcircle.

    Calculates the radius of the circumcircle through three points.  The
    radius is signed: positive when the circumcenter falls on a foreground
    pixel of the binary image, negative otherwise.

    Args:
        T: (tuple) cartesian coordinates of three points: (x1, y1), (x2, y2), (x3, y3)
        binary_mask: (image_matrix) The foreground pixels have a value of one.

    Returns:
        Signed radius of the circumcircle, or False when it cannot be
        computed (e.g. the points are collinear).
    """
    (x1, y1), (x2, y2), (x3, y3) = T
    # Denominator of the circumcenter formula; zero for collinear points.
    D = 2 * (x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2))
    if D == 0:
        return False
    # Circumcenter coordinates.
    Ux = (((x1 ** 2 + y1 ** 2) * (y2 - y3))
          + ((x2 ** 2 + y2 ** 2) * (y3 - y1))
          + ((x3 ** 2 + y3 ** 2) * (y1 - y2))) / D
    Uy = (((x1 ** 2 + y1 ** 2) * (x3 - x2))
          + ((x2 ** 2 + y2 ** 2) * (x1 - x3))
          + ((x3 ** 2 + y3 ** 2) * (x2 - x1))) / D
    # Radius, offset by one pixel (matches the original implementation).
    r = sqrt((Ux - x2) ** 2 + (Uy - y2) ** 2) + 1
    # Sign: positive only when the circumcenter lands inside the mask's
    # bounds AND on a foreground pixel.
    px = int(np.floor(Ux))
    py = int(np.floor(Uy))
    inside = (0 <= px < binary_mask.shape[0]
              and 0 <= py < binary_mask.shape[1]
              and bool(binary_mask[px, py]))
    return r if inside else -r
def local_radius_curvature(binary_image: np.ndarray, step: int = 2, show_boundary: bool = False):
    """Computes local radius of curvature along an object's boundary.

    Args:
        binary_image: (image_matrix) binary region image of a segmented object.
        step: (integer) step size used to pick the circumcircle vertices;
            larger values give smoother curvature estimates.
        show_boundary: (Logical) true if the boundary should be plotted
            colored by its radius of curvature.

    Returns:
        Array of signed local radii of curvature, ordered along the boundary.
    """
    # Pad so the boundary never touches the image edge.
    mask = np.pad(binary_image > 0, pad_width=5, mode="constant", constant_values=0)
    # One-pixel-wide boundary: the mask minus its morphological erosion.
    edge = np.subtract(mask * 1, erosion(mask) * 1)
    nz = np.where(edge > 0)
    boundary_x, boundary_y = nz[0], nz[1]
    # Order the boundary pixels by polar angle around the centroid so they
    # form a sequential closed contour.
    cenx, ceny = np.mean(boundary_x), np.mean(boundary_y)
    order = np.arctan2(boundary_x - cenx, boundary_y - ceny).argsort()
    boundary_x, boundary_y = boundary_x[order[::-1]], boundary_y[order[::-1]]
    # Wrap the contour so every point has neighbours `step` pixels away,
    # then fit a circumcircle through each triple of points.
    coords = np.column_stack((boundary_x, boundary_y))
    wrapped = np.vstack((coords[-step:], coords, coords[:step]))
    r_c = np.array([
        circumradius((wrapped[i - step], wrapped[i], wrapped[i + step]), mask)
        for i in range(step, coords.shape[0] + step)
    ])
    if show_boundary:
        edge[boundary_x, boundary_y] = r_c
        plt.imshow(edge)
        plt.colorbar()
    return r_c
def global_curvature_features(local_curvatures: np.ndarray):
    """Obtain features describing an object's local curvatures.

    Summarises the distribution of ordered local curvatures overall and
    separately for the positive and negative parts.

    Args:
        local_curvatures: (Array) of ordered local curvatures

    Returns:
        dict of summary statistics; the per-sign entries are NaN when that
        sign does not occur.
    """
    def _signed_stats(tag, values):
        # Per-sign summary block; NaN placeholders when the sign is absent.
        if values.shape[0] == 0:
            return {"max_%s_curv" % tag: np.nan,
                    "avg_%s_curv" % tag: np.nan,
                    "med_%s_curv" % tag: np.nan,
                    "std_%s_curv" % tag: np.nan,
                    "sum_%s_curv" % tag: np.nan,
                    "len_%s_curv" % tag: np.nan}
        return {"max_%s_curv" % tag: np.max(values),
                "avg_%s_curv" % tag: np.mean(values),
                "med_%s_curv" % tag: np.median(values),
                "std_%s_curv" % tag: np.std(values),
                "sum_%s_curv" % tag: np.sum(values),
                "len_%s_curv" % tag: values.shape[0]}

    feat = {"avg_curvature": np.mean(local_curvatures),
            "std_curvature": np.std(local_curvatures),
            # number of sign flips along the ordered boundary
            "npolarity_changes": np.where(np.diff(np.sign(local_curvatures)))[0].shape[0]}
    feat.update(_signed_stats("posi", local_curvatures[local_curvatures > 0]))
    feat.update(_signed_stats("neg", np.abs(local_curvatures[local_curvatures < 0])))
    return feat
def prominant_curvature_features(
    local_curvatures:np.ndarray,
    show_plot:bool=False,
    min_prominance:float=0.1,
    min_width:int=5,
    dist_bwt_peaks:int=10,
):
    """Obtain prominant (peaks) local curvature.

    Finds positive and negative curvature peaks for a list of ordered
    local curvatures using scipy's signal module.

    Args:
        local_curvatures: (Array) of ordered local curvatures
        show_plot: (logical) true if the function should plot the identified peaks
        min_prominance: (numeric) minimal required prominance of peaks (Default=0.1)
        min_width: (numeric) minimum width required of peaks (Deafult=5)
        dist_bwt_peaks: (numeric) required minimum distance between peaks (Default=10)

    Returns:
        dict with counts, mean prominences, mean widths and mean heights of
        the positive and negative curvature peaks (NaN when no peak found).
    """
    # Find positive peaks directly and negative peaks on the negated signal.
    pos_peaks, pos_prop = signal.find_peaks(
        local_curvatures,
        prominence=min_prominance,
        distance=dist_bwt_peaks,
        width=min_width,
    )
    neg_peaks, neg_prop = signal.find_peaks(
        [local_curvatures[x] * -1 for x in range(len(local_curvatures))],
        prominence=min_prominance,
        distance=dist_bwt_peaks,
        width=min_width,
    )
    # if specified show plot
    if show_plot:
        plt.plot(np.array(local_curvatures))
        plt.plot(pos_peaks, np.array(local_curvatures)[pos_peaks], "x")
        plt.plot(neg_peaks, np.array(local_curvatures)[neg_peaks], "x")
        # BUG FIX: plt.ylabel/plt.xlabel are functions; the original code
        # assigned strings to them (plt.ylabel = "Curvature"), which
        # clobbered the pyplot API instead of labelling the axes.
        plt.ylabel("Curvature")
        plt.xlabel("Boundary")
    # compute features
    num_prominant_positive_curvature = len(pos_peaks)
    if num_prominant_positive_curvature > 0:
        prominance_prominant_positive_curvature = np.mean(pos_prop["prominences"])
        width_prominant_positive_curvature = np.mean(pos_prop["widths"])
        prominant_positive_curvature = np.mean(
            [local_curvatures[p] for p in pos_peaks]
        )
    else:
        prominance_prominant_positive_curvature = np.nan
        width_prominant_positive_curvature = np.nan
        prominant_positive_curvature = np.nan
    num_prominant_negative_curvature = len(neg_peaks)
    if num_prominant_negative_curvature > 0:
        prominance_prominant_negative_curvature = np.mean(neg_prop["prominences"])
        width_prominant_negative_curvature = np.mean(neg_prop["widths"])
        prominant_negative_curvature = np.mean(
            [local_curvatures[p] for p in neg_peaks]
        )
    else:
        prominance_prominant_negative_curvature = np.nan
        width_prominant_negative_curvature = np.nan
        prominant_negative_curvature = np.nan
    feat = { "num_prominant_pos_curv" : num_prominant_positive_curvature,
             "prominance_prominant_pos_curv" : prominance_prominant_positive_curvature,
             "width_prominant_pos_curv" : width_prominant_positive_curvature,
             "prominant_pos_curv": prominant_positive_curvature,
             "num_prominant_neg_curv" : num_prominant_negative_curvature,
             "prominance_prominant_neg_curv" : prominance_prominant_negative_curvature,
             "width_prominant_neg_curv" : width_prominant_negative_curvature,
             "prominant_neg_curv": prominant_negative_curvature,
           }
    return feat
def measure_curvature_features(
    binary_image:np.ndarray, step:int = 2, prominance:float = 0.1, width:int = 5, dist_bt_peaks:int =10):
    """Compute all curvature features for a binary image.

    Args:
        binary_image: (image_array) Binary image
        step: (integer) Step size used to obtain the vertices, use larger values for a smoother curvatures
        prominance: (numeric) minimal required prominance of peaks (Default=0.1)
        width: (numeric) minimum width required of peaks (Deafult=5)
        dist_bt_peaks: (numeric) required minimum distance between peaks (Default=10)

    Returns:
        Single-row pandas dataframe with all curvature features for the image.
    """
    r_c = local_radius_curvature(binary_image, step, False)
    # Curvature is the reciprocal radius; a zero radius maps to zero curvature.
    local_curvature = np.array([np.divide(1, r) if r != 0 else 0 for r in r_c])
    feat = {}
    feat.update(global_curvature_features(local_curvatures=local_curvature))
    feat.update(prominant_curvature_features(local_curvatures=local_curvature,
                                             min_prominance=prominance,
                                             min_width=width,
                                             dist_bwt_peaks=dist_bt_peaks))
    frame = pd.DataFrame([feat])
    # Normalise the curvature counts by the object's perimeter.
    props = measure.regionprops_table(binary_image.astype(int), properties=["perimeter"])
    perimeter = float(props["perimeter"])
    frame["frac_peri_w_posi_curvature"] = frame["len_posi_curv"].replace(to_replace="NA", value=0) / perimeter
    frame["frac_peri_w_neg_curvature"] = frame["len_neg_curv"].replace(to_replace="NA", value=0) / perimeter
    frame["frac_peri_w_polarity_changes"] = frame["npolarity_changes"] / perimeter
    return frame
| [
"math.sqrt",
"numpy.column_stack",
"numpy.array",
"numpy.arctan2",
"numpy.divide",
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.where",
"skimage.morphology.erosion",
"numpy.max",
"numpy.vstack",
"scipy.signal.find_peaks",
"pandas.DataFrame",
"numpy.abs",
"numpy.floor",
"numpy.sign"... | [((3099, 3158), 'numpy.pad', 'np.pad', (['bw'], {'pad_width': '(5)', 'mode': '"""constant"""', 'constant_values': '(0)'}), "(bw, pad_width=5, mode='constant', constant_values=0)\n", (3105, 3158), True, 'import numpy as np\n'), ((3577, 3618), 'numpy.column_stack', 'np.column_stack', (['(boundary_x, boundary_y)'], {}), '((boundary_x, boundary_y))\n', (3592, 3618), True, 'import numpy as np\n'), ((3636, 3683), 'numpy.vstack', 'np.vstack', (['(cords[-step:], cords, cords[:step])'], {}), '((cords[-step:], cords, cords[:step]))\n', (3645, 3683), True, 'import numpy as np\n'), ((4553, 4599), 'numpy.abs', 'np.abs', (['local_curvatures[local_curvatures < 0]'], {}), '(local_curvatures[local_curvatures < 0])\n', (4559, 4599), True, 'import numpy as np\n'), ((7605, 7714), 'scipy.signal.find_peaks', 'signal.find_peaks', (['local_curvatures'], {'prominence': 'min_prominance', 'distance': 'dist_bwt_peaks', 'width': 'min_width'}), '(local_curvatures, prominence=min_prominance, distance=\n dist_bwt_peaks, width=min_width)\n', (7622, 7714), False, 'from scipy import signal\n'), ((11428, 11448), 'pandas.DataFrame', 'pd.DataFrame', (['[feat]'], {}), '([feat])\n', (11440, 11448), True, 'import pandas as pd\n'), ((1930, 1967), 'math.sqrt', 'sqrt', (['((Ux - x2) ** 2 + (Uy - y2) ** 2)'], {}), '((Ux - x2) ** 2 + (Uy - y2) ** 2)\n', (1934, 1967), False, 'from math import degrees, sqrt\n'), ((3302, 3321), 'numpy.mean', 'np.mean', (['boundary_x'], {}), '(boundary_x)\n', (3309, 3321), True, 'import numpy as np\n'), ((3323, 3342), 'numpy.mean', 'np.mean', (['boundary_y'], {}), '(boundary_y)\n', (3330, 3342), True, 'import numpy as np\n'), ((4044, 4060), 'matplotlib.pyplot.imshow', 'plt.imshow', (['edge'], {}), '(edge)\n', (4054, 4060), True, 'import matplotlib.pyplot as plt\n'), ((4069, 4083), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (4081, 4083), True, 'import matplotlib.pyplot as plt\n'), ((4631, 4656), 'numpy.mean', 'np.mean', (['local_curvatures'], 
{}), '(local_curvatures)\n', (4638, 4656), True, 'import numpy as np\n'), ((4689, 4713), 'numpy.std', 'np.std', (['local_curvatures'], {}), '(local_curvatures)\n', (4695, 4713), True, 'import numpy as np\n'), ((8423, 8455), 'numpy.mean', 'np.mean', (["pos_prop['prominences']"], {}), "(pos_prop['prominences'])\n", (8430, 8455), True, 'import numpy as np\n'), ((8501, 8528), 'numpy.mean', 'np.mean', (["pos_prop['widths']"], {}), "(pos_prop['widths'])\n", (8508, 8528), True, 'import numpy as np\n'), ((8980, 9012), 'numpy.mean', 'np.mean', (["neg_prop['prominences']"], {}), "(neg_prop['prominences'])\n", (8987, 9012), True, 'import numpy as np\n'), ((9058, 9085), 'numpy.mean', 'np.mean', (["neg_prop['widths']"], {}), "(neg_prop['widths'])\n", (9065, 9085), True, 'import numpy as np\n'), ((3190, 3201), 'skimage.morphology.erosion', 'erosion', (['bw'], {}), '(bw)\n', (3197, 3201), False, 'from skimage.morphology import erosion\n'), ((3239, 3257), 'numpy.where', 'np.where', (['(edge > 0)'], {}), '(edge > 0)\n', (3247, 3257), True, 'import numpy as np\n'), ((3262, 3280), 'numpy.where', 'np.where', (['(edge > 0)'], {}), '(edge > 0)\n', (3270, 3280), True, 'import numpy as np\n'), ((3358, 3406), 'numpy.arctan2', 'np.arctan2', (['(boundary_x - cenx)', '(boundary_y - ceny)'], {}), '(boundary_x - cenx, boundary_y - ceny)\n', (3368, 3406), True, 'import numpy as np\n'), ((4915, 4936), 'numpy.max', 'np.max', (['pos_curvature'], {}), '(pos_curvature)\n', (4921, 4936), True, 'import numpy as np\n'), ((4978, 5000), 'numpy.mean', 'np.mean', (['pos_curvature'], {}), '(pos_curvature)\n', (4985, 5000), True, 'import numpy as np\n'), ((5042, 5066), 'numpy.median', 'np.median', (['pos_curvature'], {}), '(pos_curvature)\n', (5051, 5066), True, 'import numpy as np\n'), ((5108, 5129), 'numpy.std', 'np.std', (['pos_curvature'], {}), '(pos_curvature)\n', (5114, 5129), True, 'import numpy as np\n'), ((5171, 5192), 'numpy.sum', 'np.sum', (['pos_curvature'], {}), '(pos_curvature)\n', (5177, 5192), 
True, 'import numpy as np\n'), ((5685, 5706), 'numpy.max', 'np.max', (['neg_curvature'], {}), '(neg_curvature)\n', (5691, 5706), True, 'import numpy as np\n'), ((5747, 5769), 'numpy.mean', 'np.mean', (['neg_curvature'], {}), '(neg_curvature)\n', (5754, 5769), True, 'import numpy as np\n'), ((5810, 5834), 'numpy.median', 'np.median', (['neg_curvature'], {}), '(neg_curvature)\n', (5819, 5834), True, 'import numpy as np\n'), ((5875, 5896), 'numpy.std', 'np.std', (['neg_curvature'], {}), '(neg_curvature)\n', (5881, 5896), True, 'import numpy as np\n'), ((5937, 5958), 'numpy.sum', 'np.sum', (['neg_curvature'], {}), '(neg_curvature)\n', (5943, 5958), True, 'import numpy as np\n'), ((8032, 8058), 'numpy.array', 'np.array', (['local_curvatures'], {}), '(local_curvatures)\n', (8040, 8058), True, 'import numpy as np\n'), ((2103, 2115), 'numpy.floor', 'np.floor', (['Ux'], {}), '(Ux)\n', (2111, 2115), True, 'import numpy as np\n'), ((2140, 2152), 'numpy.floor', 'np.floor', (['Uy'], {}), '(Uy)\n', (2148, 2152), True, 'import numpy as np\n'), ((8088, 8114), 'numpy.array', 'np.array', (['local_curvatures'], {}), '(local_curvatures)\n', (8096, 8114), True, 'import numpy as np\n'), ((8160, 8186), 'numpy.array', 'np.array', (['local_curvatures'], {}), '(local_curvatures)\n', (8168, 8186), True, 'import numpy as np\n'), ((10953, 10973), 'numpy.divide', 'np.divide', (['(1)', 'r_c[x]'], {}), '(1, r_c[x])\n', (10962, 10973), True, 'import numpy as np\n'), ((4766, 4791), 'numpy.sign', 'np.sign', (['local_curvatures'], {}), '(local_curvatures)\n', (4773, 4791), True, 'import numpy as np\n')] |
'''
TIEGCM Kamodo reader, adapted to new structure for satellite flythrough software
Initial version - <NAME> (?)
Initial version of model_varnames contributed by <NAME>
New code: <NAME> (June 2021 and on)
NOTE: The current logic for variables that depend on imlev slices off self._imlev coordinate
This only works because there is one variable that depends on imlev: H_imlev
The logic on lines 311-313 will have to be reworked a bit if other variables depend on imlev later.
Remaining tasks:
- check variable dictionary for proper naming, and use of units in Kamodo
'''
from numpy import vectorize
from datetime import datetime, timezone, timedelta
### Make a dict of the possible variable names in TIEGCM
### the intended format is: "Output VarName":['Latex_Varname', 'Latex_Unit' ]
### The constituent species are output in units of mass mixing ratio (mmr).
### Denoted by \psi_i, mmr is the fractional contribution of
### a species i to the total mass density \rho_{total}, and
### is calculated as \psi_i = \rho_i / \rho_{total}, where
### \rho_i is the mass density of a constituent species.
model_varnames={
### 4D Variables, vertical coordinate on midpoint levels (lev)
"ZGMID" : ["H_ilev",'variable description',0,'GDZ','sph',['time','lon','lat','ilev'],"cm"], # geometric height- interpolated to the mid points
"TN" : ["T_n",'variable description',1,'GDZ','sph',['time','lon','lat','ilev'],"K"], # neutral temperature
"O2" : ["psi_O2",'variable description',2,'GDZ','sph',['time','lon','lat','ilev'],""], # molecular oxygen, mmr
"O1" : ["psi_O",'variable description',3,'GDZ','sph',['time','lon','lat','ilev'],""], # atomic oxygen , mmr
"N2" : ["psi_N2",'variable description',4,'GDZ','sph',['time','lon','lat','ilev'],""], # molecular nitrogen,mmr
"HE" : ["psi_He",'variable description',5,'GDZ','sph',['time','lon','lat','ilev'],""], # helium , mmr
"NO" : ["psi_NO",'variable description',6,'GDZ','sph',['time','lon','lat','ilev'],""], # nitric oxide , mmr
"N4S" : ["psi_N4S",'variable description',7,'GDZ','sph',['time','lon','lat','ilev'],""], # N4S ?,mmr
"N2D" : ["psi_N2D", 'variable description',8,'GDZ','sph',['time','lon','lat','ilev'],""], # N(2D) mmr
"TE" : ["T_e",'variable description',9,'GDZ','sph',['time','lon','lat','ilev'],"K"], # ELECTRON TEMPERATURE,
"TI" : ["T_i",'variable description',10,'GDZ','sph',['time','lon','lat','ilev'],"K"], # ION TEMPERATURE
"O2P" : ["N_O2plus",'variable description',11,'GDZ','sph',['time','lon','lat','ilev'],"1/cm**3"], # O2+ ION
"OP" : ["N_Oplus",'variable description',12,'GDZ','sph',['time','lon','lat','ilev'],"1/cm**3"], # O+ ION
"N2N" : ["N_N2",'variable description',13,'GDZ','sph',['time','lon','lat','ilev'],"1/cm**3"], # molecular nitrogen (maybe number density),mmr
"CO2_COOL" : ["Q_CO2cool",'variable description',14,'GDZ','sph',['time','lon','lat','ilev'],"erg/g/s"], # CO2 cooling rates
"NO_COOL" : ["Q_NOcool",'variable description',15,'GDZ','sph',['time','lon','lat','ilev'],"erg/g/s"], # NO cooling rates
"UN" : ["u_n",'variable description',16,'GDZ','sph',['time','lon','lat','ilev'],"cm/s"], # neutral ZONAL wind (+EAST)
"VN" : ["v_n",'variable description',17,'GDZ','sph',['time','lon','lat','ilev'],"cm/s"], # neutral MERIDIONAL wind (+NORTH)
"O2P_ELD" : ['O2P_ELD','variable description',18,'GDZ','sph',['time','lon','lat','ilev'],''], #NO DESCRIPTION GIVEN
"N2P_ELD" :['N2P_ELD','variable description',19,'GDZ','sph',['time','lon','lat','ilev'],''], #NO DESCRIPTION GIVEN
"NPLUS" :['N_Nplus','variable description',20,'GDZ','sph',['time','lon','lat','ilev'],'1/cm**3'], #GUESS ONLY based on other number densities
"NOP_ELD" :['NOP_ELD','variable description',21,'GDZ','sph',['time','lon','lat','ilev'],''], #NO DESCRIPTION GIVEN
"SIGMA_PED" :['Sigma_P','variable description',22,'GDZ','sph',['time','lon','lat','ilev'],'S/m'], #Pedersen Conductivity
"SIGMA_HAL" :['Sigma_H','variable description',23,'GDZ','sph',['time','lon','lat','ilev'],'S/m'], #Hall Conductivity
"QJOULE" :['Q_Joule','variable description',24,'GDZ','sph',['time','lon','lat','ilev'],'erg/g/s'], #Joule Heating
"O_N2" :['psi_ON2','variable description',25,'GDZ','sph',['time','lon','lat','ilev'],''], #O/N2 RATIO
"N2D_ELD" :['N2D_ELD','variable description',26,'GDZ','sph',['time','lon','lat','ilev'],''], #NO DESCRIPTION GIVEN
"O2N" :['r_OtoN','variable description',27,'GDZ','sph',['time','lon','lat','ilev'],'1/cm**3'], #GUESS ONLY
#
### 4D Variables, vertical coordinate on interface levels (ilev)
"DEN" :["rho",'variable description',28,'GDZ','sph',['time','lon','lat','ilev1'],"g/cm**3"], # total neutral mass density
"ZG" :["H_ilev1",'variable description',29,'GDZ','sph',['time','lon','lat','ilev1'],"cm"], # geometric height
"Z" :["H_geopot",'variable description',30,'GDZ','sph',['time','lon','lat','ilev1'],"cm"], # geopotential height (cm)
"NE" : ["N_e",'variable description',31,'GDZ','sph',['time','lon','lat','ilev1'],"1/cm**3"], # ELECTRON DENSITY
"OMEGA" : ["omega",'variable description',32,'GDZ','sph',['time','lon','lat','ilev1'],"1/s"], # VERTICAL MOTION
"POTEN" : ["V",'variable description',33,'GDZ','sph',['time','lon','lat','ilev1'],"V"], # ELECTRIC POTENTIAL
"UI_ExB" : ["u_iExB",'variable description',34,'GDZ','sph',['time','lon','lat','ilev1'],'cm/s'], #Zonal ExB Velocity
"VI_ExB" :["v_iExB",'variable description',35,'GDZ','sph',['time','lon','lat','ilev1'],'cm/s'], #Meridional ExB Velocity
"WI_ExB" :["w_iExB", 'variable description',36,'GDZ','sph',['time','lon','lat','ilev1'], 'cm/s'], #Vertical ExB Velocity
### 4D Variables, vertical coordinate on interface mag levels (imlev)
"ZMAG" : ["H_mag",'variable description',37,'MAG','sph',['time','mlon','mlat','milev'],"km"], # Geopotential Height on Geomagnetic Grid
#
### 3D Variables, (time, lat, lon)
"TEC" : ["TEC",'variable description',38,'GDZ','sph',['time','lon','lat'],"1/cm**2"], # Total Electron Content
"TLBC" : ["T_nLBC",'variable description',39,'GDZ','sph',['time','lon','lat'],"K"], # Lower boundary condition for TN
"ULBC" : ["u_nLBC",'variable description',40,'GDZ','sph',['time','lon','lat'],"cm/s"], # Lower boundary condition for UN
"VLBC" : ["v_nLBC",'variable description',41,'GDZ','sph',['time','lon','lat'],"cm/s"], # Lower boundary condition for VN
"TLBC_NM" : ["T_nLBCNM",'variable description',42,'GDZ','sph',['time','lon','lat'],"K"], # Lower boundary condition for TN (TIME N-1)
"ULBC_NM" : ["u_nLBCNM",'variable description',43,'GDZ','sph',['time','lon','lat'],"cm/s"], # Lower boundary condition for UN (TIME N-1)
"VLBC_NM" : ["v_nLBCNM",'variable description',44,'GDZ','sph',['time','lon','lat'],"cm/s"], # Lower boundary condition for VN (TIME N-1)
"QJOULE_INTEG":["W_Joule",'variable description',45,'GDZ','sph',['time','lon','lat'],'erg/cm**2/s'], #Height-integrated Joule Heating
"EFLUX" :['Eflux_aurora','variable description',46,'GDZ','sph',['time','lon','lat'],'erg/cm**2/s'], #Aurora Energy Flux
"HMF2" :['HmF2','variable description',47,'GDZ','sph',['time','lon','lat'],'km'], # Height of the F2 Layer
"NMF2" :['NmF2','variable description',48,'GDZ','sph',['time','lon','lat'],'1/cm**3'], #Peak Density of the F2 Layer
}
#####--------------------------------------------------------------------------------------
##### Define some helpful functions for dealing with time systems
def dts_to_ts(file_dts):
    '''Get datetime timestamp in UTC from datetime string'''
    # Parse as naive, mark as UTC, then convert to a POSIX timestamp.
    utc_dt = datetime.strptime(file_dts, '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone.utc)
    return utc_dt.timestamp()
def year_mtime_todt0(year, mtime): #self.filedate
    '''Convert year and day to datetime object in UTC at midnight'''
    day, _hour, _minute = mtime  # only the day component is used here
    jan1 = datetime(int(year), 1, 1).replace(tzinfo=timezone.utc)
    return jan1 + timedelta(days=int(day - 1))
def year_mtime_todt(year, mtime):
    '''Convert year and [day,hour,minute] to datetime object in UTC'''
    day, hour, minute = mtime
    jan1 = datetime(int(year), 1, 1).replace(tzinfo=timezone.utc)
    # mtime's day-of-year is 1-based, hence the day-1 offset.
    return jan1 + timedelta(days=int(day - 1), hours=int(hour), minutes=int(minute))
def year_mtime_todts(year, mtime):
    '''Convert year and mtime to a datetime string'''
    # Format the UTC datetime as 'YYYY-MM-DD HH:MM:SS'.
    return year_mtime_todt(year, mtime).strftime('%Y-%m-%d %H:%M:%S')
def year_mtime_todate(year, mtime):
    '''Use year and mtime to determine the date in the file. Returns a datetime object.'''
    # Round-trip through a 'YYYY-MM-DD' string to drop the time-of-day part.
    date_string = year_mtime_todt(year, mtime).strftime('%Y-%m-%d')
    return datetime.strptime(date_string, '%Y-%m-%d').replace(tzinfo=timezone.utc)
@vectorize
def year_mtime_tohrs(year, day, hour, minute, filedate):
    '''Convert year and mtime to hours since midnight using predetermined datetime object.'''
    # Element-wise (via numpy.vectorize): hours elapsed since filedate.
    delta = year_mtime_todt(year, [day, hour, minute]) - filedate
    return delta.total_seconds() / 3600.
def ts_to_hrs(time_val, filedate):
    '''Convert utc timestamp to hours since midnight on filedate.'''
    # fromtimestamp(tz=utc) is equivalent to utcfromtimestamp + replace,
    # without the deprecated naive-datetime intermediate.
    utc_dt = datetime.fromtimestamp(time_val, tz=timezone.utc)
    return (utc_dt - filedate).total_seconds() / 3600.
def MODEL():
from time import perf_counter
from os.path import basename
from numpy import zeros, transpose, array, append, insert, where, unique
from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum
from numpy import pi as nppi
from netCDF4 import Dataset
from kamodo import Kamodo
#print('KAMODO IMPORTED!')
from kamodo.readers.reader_utilities import regdef_3D_interpolators, regdef_4D_interpolators
class MODEL(Kamodo):
'''TIEGCM model data reader.'''
def __init__(self, full_filename, variables_requested=[], runname="noname",
filetime=False, verbose=False, gridded_int=True, printfiles=False,
fulltime=True, **kwargs): #filename should include the full path
#### Use a super init so that your class inherits any methods from Kamodo
super(MODEL, self).__init__()
#store time information for satellite flythrough layer to choose the right file
t0 = perf_counter()
filename = basename(full_filename)
file_dir = full_filename.split(filename)[0]
cdf_data = Dataset(full_filename, 'r')
#calculate time information
year = array(cdf_data.variables['year'])
mtime = array(cdf_data.variables['mtime'])
day, hour, minute = mtime.T #only matters for the vectorized function
self.filedate = year_mtime_todt0(year[0], mtime[0]) #datetime object for file date at midnight UTC
self.datetimes = [year_mtime_todts(y, m) for y, m \
in zip([year[0], year[-1]],[mtime[0],mtime[-1]])] #strings in format = YYYY-MM-DD HH:MM:SS
self.filetimes=[dts_to_ts(file_dts) for file_dts in self.datetimes] #timestamps in UTC
time = year_mtime_tohrs(year, day, hour, minute, self.filedate)
#time = array([year_mtime_tohrs(y, m, self.filedate) for y, m in \
# zip(year, mtime)]) #hours since midnight of self.filedate
self.dt = diff(time).max()*3600. #time is in hours
if filetime and not fulltime: #(used when searching for neighboring files below)
return #return times as is to prevent recursion
#if variables are given as integers, convert to standard names
if len(variables_requested)>0:
if isinstance(variables_requested[0], int):
tmp_var = [value[0] for key, value in model_varnames.items()\
if value[2] in variables_requested]
variables_requested = tmp_var
if fulltime: #add boundary time (default value)
#find other files with same pattern
from glob import glob
file_pattern = file_dir+'s*.nc' #returns a string for tiegcm
files = sorted(glob(file_pattern))
filenames = unique([basename(f) for f in files])
#find closest file by utc timestamp
#tiegcm has an open time at the beginning, so need an end time from the previous file
#files are automatically sorted by YYMMDD, so previous file is previous in the list
current_idx = where(filenames==filename)[0]
if current_idx==0:
print('No earlier file available.')
filecheck = False
if filetime:
return
else:
min_filename = file_dir+filenames[current_idx-1][0] #-1 for adding a beginning time
kamodo_test = MODEL(min_filename, filetime=True, fulltime=False)
time_test = abs(kamodo_test.filetimes[1]-self.filetimes[0])
if time_test<=self.dt: #if nearest file time at least within one timestep (hrs)
filecheck = True
#time only version if returning time for searching
if filetime:
kamodo_neighbor = MODEL(min_filename, fulltime=False, filetime=True)
self.datetimes[0] = kamodo_neighbor.datetimes[1]
self.filetimes[0] = kamodo_neighbor.filetimes[1]
return #return object with additional time (for SF code)
#get kamodo object with same requested variables to add to each array below
if verbose: print(f'Took {perf_counter()-t0:.3f}s to find closest file.')
kamodo_neighbor = MODEL(min_filename, variables_requested=variables_requested,
fulltime=False)
self.datetimes[0] = kamodo_neighbor.datetimes[1]
self.filetimes[0] = kamodo_neighbor.filetimes[1]
short_data = kamodo_neighbor.short_data
if verbose: print(f'Took {perf_counter()-t0:.3f}s to get data from previous file.')
else:
print(f'No earlier file found within {self.dt:.1f}s')
filecheck = False
if filetime:
return
#These lists need to be the standardized variable name to match that above,
#not the names from the data file.
self.ilev1_list = [value[0] for key, value in model_varnames.items() if value[5][-1]=='ilev1']
self.ilev_list = [value[0] for key, value in model_varnames.items() if value[5][-1]=='ilev']
self.milev_list = [value[0] for key, value in model_varnames.items() if value[5][-1]=='milev']
#don't need an internal coord dict because there is only one lat/lon (other than magnetic)
#perform initial check on variables_requested list
if len(variables_requested)>0 and fulltime:
test_list = [value[0] for key, value in model_varnames.items()]
err_list = [item for item in variables_requested if item not in test_list]
if len(err_list)>0: print('Variable name(s) not recognized:', err_list)
#translate from standardized variables to names in file
#remove variables requested that are not in the file
if len(variables_requested)>0:
gvar_list = [key for key, value in model_varnames.items() \
if value[0] in variables_requested and \
key in cdf_data.variables.keys()] # file variable names
#check for variables requested but not available
if len(gvar_list)!=len(variables_requested):
err_list = [value[0] for key, value in model_varnames.items() \
if value[0] in variables_requested and \
key not in cdf_data.variables.keys()]
if len(err_list)>0: print('Some requested variables are not available:', err_list)
#check that the appropriate height variable is added for the variables requested
check_list = [key for key, value in model_varnames.items()\
if value[0] in self.ilev1_list and key in gvar_list]
if 'ZG' not in gvar_list and len(check_list)>0:
gvar_list.append('ZG') #force addition of H for conversion of ilev to H and back
check_list = [key for key, value in model_varnames.items()\
if value[0] in self.ilev_list and key in gvar_list]
if 'ZGMID' not in gvar_list and len(check_list)>0:
gvar_list.append('ZGMID')
check_list = [key for key, value in model_varnames.items()\
if value[0] in self.milev_list and key in gvar_list]
if 'ZMAG' not in gvar_list and len(check_list)>0:
gvar_list.append('ZMAG')
else: #only input variables on the avoid_list if specifically requested
avoid_list = ['TLBC','ULBC','VLBC','TLBC_NM','ULBC_NM','VLBC_NM',
'NOP_ELD','O2P_ELD','N2P_ELD','N2D_ELD']
gvar_list = [key for key in cdf_data.variables.keys() \
if key in model_varnames.keys() and \
key not in avoid_list]
# Store the requested variables into a dictionary
variables = {model_varnames[key][0]:{'units':model_varnames[key][-1],
'data':array(cdf_data.variables[key])}\
for key in gvar_list} #store with key = standardized name
#prepare and return data only for last timestamp
if not fulltime:
cdf_data.close()
variables['time'] = self.filetimes[1] #utc timestamp
self.short_data = variables
return
#### Store our inputs as class attributes to the class
self.filename = full_filename
self.runname = runname
self.missing_value = NaN
self._registered = 0
self.variables = dict()
self.modelname = 'TIEGCM'
if printfiles:
print('Files:', self.filename)
#### Store coordinate data as class attributes
if filecheck: #new_time iis a utc timestamp
new_time = ts_to_hrs(short_data['time'], self.filedate) #new time in hours since midnight
self._time = insert(time, 0, new_time) #insert new value
else:
self._time = time
#store coordinates
lat = array(cdf_data.variables['lat']) #NOT FULL RANGE IN LATITIUDE!!!
lat = insert(lat, 0, -90) #insert a grid point at beginning (before -87.5)
self._lat = append(lat, 90.) #and at the end (after 87.5)
lon = array(cdf_data.variables['lon']) #NOT WRAPPED IN LONGITUDE!!!!!
self._lon = append(lon, 180.) #add 180. to end of array
self._ilev = array(cdf_data.variables['lev'])
self._ilev1 = array(cdf_data.variables['ilev'])
self._milev = array(cdf_data.variables['imlev']) #'imlev' isn't used by any of the variables except H_imlev
self._mlat = array(cdf_data.variables['mlat'])
self._mlon = array(cdf_data.variables['mlon']) #-180 to 180
#close file
cdf_data.close()
if verbose: print(f'Took {perf_counter()-t0:.6f}s to read in data')
# register interpolators for each requested variable
varname_list, self.variables = [key for key in variables.keys()], {} #store original list b/c gridded interpolators
t_reg = perf_counter()
for varname in varname_list:
if len(variables[varname]['data'].shape)==3:
if filecheck: #if neighbor found
#append data for first time stamp, transpose and register
data_shape = list(variables[varname]['data'].shape)
data_shape[0]+=1 #add space for time
new_data = zeros(data_shape)
new_data[1:,:,:] = variables[varname]['data'] #put in current data
new_data[0,:,:] = short_data[varname]['data'][-1,:,:] #add in data for additional time
variable = transpose(new_data, (0,2,1)) #(t,lat,lon) -> (t,lon,lat)
else:
variable = transpose(variables[varname]['data'], (0,2,1)) #(t,lat,lon) -> (t,lon,lat)
self.variables[varname] = dict(units = variables[varname]['units'], data = variable)
self.register_3D_variable(self.variables[varname]['units'],
self.variables[varname]['data'], varname,
gridded_int)
elif len(variables[varname]['data'].shape)==4:
if filecheck:
#append data for first time stamp, transpose and register
data_shape = list(variables[varname]['data'].shape)
data_shape[0]+=1 #add space for time
new_data = zeros(data_shape)
new_data[1:,:,:,:] = variables[varname]['data'] #put in current data
new_data[0,:,:,:] = short_data[varname]['data'][-1,:,:,:] #add in data for additional time
variable = transpose(new_data, (0,3,2,1)) #(t,h,lat,lon) -> (t,lon,lat,h)
else:
variable = transpose(variables[varname]['data'], (0,3,2,1)) #(t,h,lat,lon) -> (t,lon,lat,h)
self.variables[varname] = dict(units = variables[varname]['units'], data = variable)
self.register_4D_variable(self.variables[varname]['units'],
self.variables[varname]['data'], varname,
gridded_int)
if verbose: print(f'Took {perf_counter()-t_reg:.5f}s to register '+\
f'{len(varname_list)} variables.')
if verbose: print(f'Took a total of {perf_counter()-t0:.5f}s to kamodofy '+\
f'{len(varname_list)} variables.')
def wrap_3Dlatlon(self, varname, variable):
    """Wrap a 3D data array in longitude (-180=180) and latitude (pole rows).

    Parameters
    ----------
    varname : str
        Standardized variable name; the wrapped array is stored back into
        self.variables[varname]['data'].
    variable : ndarray
        Data with shape (time, lon, lat) -- already transposed from the
        file's (time, lat, lon) ordering by the caller.

    Returns
    -------
    ndarray of shape (time, lon+1, lat+2): one extra wrapped longitude
    column and one extra latitude row at each pole.
    """
    shape_list = list(variable.shape)  # (time, lon, lat)
    shape_list[2] += 2  # two more places in latitude (one per pole)
    shape_list[1] += 1  # one more place in longitude (wrap -180=180)
    tmp_arr = zeros(shape_list)  # array to set up wrapped data in
    tmp_arr[:, :-1, 1:-1] = variable  # copy data into the interior
    # Scalar pole values: longitude average of the first real latitude row.
    top = mean(tmp_arr[:, :-1, 1], axis=1)  # same shape as time axis
    tmp_arr[:, :-1, 0] = broadcast_to(top, (shape_list[1]-1, shape_list[0])).T
    # Same for the bottom pole, reusing variable names.
    top = mean(tmp_arr[:, :-1, -2], axis=1)  # same shape as time axis
    tmp_arr[:, :-1, -1] = broadcast_to(top, (shape_list[1]-1, shape_list[0])).T
    # Wrap in longitude AFTER averaging to prevent double counting.
    # BUGFIX: copy from tmp_arr (which already has the pole rows), not from
    # `variable` -- variable[:,0,:] has `lat` entries while tmp_arr[:,-1,:]
    # has `lat+2`, so the original assignment raised a broadcast ValueError.
    # This now mirrors wrap_4Dlatlon's longitude wrap.
    tmp_arr[:, -1, :] = tmp_arr[:, 0, :]
    self.variables[varname]['data'] = tmp_arr  # store result
    return tmp_arr
def wrap_4Dlatlon(self, varname, variable):
    """Wrap a 4D (time, lon, lat, level) array in longitude (-180=180) and
    latitude (pole rows appended at index 0 and -1).

    Scalar variables get each pole filled with the longitude average of the
    adjacent data row; the horizontal wind/drift components are combined
    into a net vector at the pole via self.vector_average4D. The wrapped
    array is stored in self.variables[varname]['data'] and returned.
    """
    dims = list(variable.shape)  # (time, lon, lat, ilev)
    dims[2] += 2   # room for the two pole rows
    dims[1] += 1   # room for the wrapped longitude column
    wrapped = zeros(dims)
    wrapped[:, :-1, 1:-1, :] = variable  # interior = original data
    vector_vars = ['u_n', 'v_n', 'u_iExB', 'v_iExB']
    if varname not in vector_vars:
        # Scalar (and cartesian/radial) variables: each pole value is the
        # longitude average of the neighboring data row.
        for pole_idx, near_idx in ((0, 1), (-1, -2)):
            lon_avg = mean(wrapped[:, :-1, near_idx, :], axis=1)
            wrapped[:, :-1, pole_idx, :] = transpose(
                broadcast_to(lon_avg, (dims[1]-1, dims[0], dims[3])),
                (1, 0, 2))
    else:
        # Relevant vector components: net vector magnitude at each pole.
        wrapped[:, :-1, 0, :] = self.vector_average4D(
            wrapped[:, :-1, 1, :], dims, varname, self._lat[0])
        wrapped[:, :-1, -1, :] = self.vector_average4D(
            wrapped[:, :-1, -2, :], dims, varname, self._lat[-1])
    wrapped[:, -1, :, :] = wrapped[:, 0, :, :]  # wrap value in longitude
    self.variables[varname]['data'] = wrapped   # store result
    return wrapped
def vector_average4D(self, top, shape_list, varname, latval):
    '''find vector average at pole for array with shape (time, lon, height)

    top        : the variable at the grid row adjacent to the pole,
                 shape (time, lon, height).
    shape_list : wrapped array shape [time, lon+1, lat+2, height].
    varname    : used only to pick the unit-vector conversion ('u' -> east,
                 'v' -> north component).
    latval     : latitude of the pole row, used in the theta_hat conversion.
    Returns the pole row, shape (time, lon, height).
    '''
    # Decompose each longitude's component into cartesian x/y parts and sum
    # around the pole; final array shapes are time, lon, and height/ilev.
    lon_arr = transpose(broadcast_to(self._lon[:-1], (shape_list[0],
                        shape_list[3], shape_list[1]-1)), (0,2,1))
    # need to put 'old' shape at end in broadcast_to call ^
    xval = sum(top*cos((lon_arr+180.)*nppi/180.), axis=1)  #same shape as
    yval = sum(top*sin((lon_arr+180.)*nppi/180.), axis=1)  #time and vertical
    xarr = transpose(broadcast_to(xval, (shape_list[1]-1,shape_list[0],
                     shape_list[3])), (1,0,2))
    yarr = transpose(broadcast_to(yval, (shape_list[1]-1,shape_list[0],
                     shape_list[3])), (1,0,2))
    # Convert the net vector back into the proper local unit vector
    # (see spherical-coordinate unit-vector definitions).
    if 'u' in varname:  # zonal/east components -> psi_hat vector (longitude)
        # -x*sin(psi) + y*cos(psi), psi = longitude (0 to 360)
        new_top = -xarr*sin((lon_arr+180.)*nppi/180.)+yarr*cos((lon_arr+180.)*nppi/180.)
    elif 'v' in varname:  # meridional/north -> theta_hat vector (latitude)
        # x*cos(psi)*cos(theta) + y*sin(psi)*cos(theta); sin(theta) term is
        # always zero at the poles. theta = latitude (0-180), psi = lon (0-360)
        new_top = xarr*cos((lon_arr+180.)*nppi/180.)*cos((90.-latval)*nppi/180.)+\
            yarr*sin((lon_arr+180.)*nppi/180.)*cos((90.-latval)*nppi/180.)
    # NOTE(review): if varname contains neither 'u' nor 'v', new_top is never
    # bound and the lines below raise NameError -- callers only pass the four
    # vector variable names, but consider an explicit error here.
    # Flip halves so values match longitude location (keeps zero reference
    # point true after the +180 shift above).
    zero_idx = min(where(self._lon>=0.)[0])  # find splitting index
    top = zeros((shape_list[0],shape_list[1]-1,shape_list[3]))  # destination
    div = (shape_list[1]-1)%2  # handle even/odd count of non-wrapped longitudes
    top[:,:zero_idx-div,:] = new_top[:,zero_idx:,:]  # move last half to first half
    top[:,zero_idx-div:,:] = new_top[:,:zero_idx,:]  # and vice versa
    return top
##### Define and register a 3D variable -----------------------------------------
def register_3D_variable(self, units, variable, varname, gridded_int):
    """Register a (time, lon, lat) interpolator for one 3D variable.

    units       : units string attached to the kamodofied function.
    variable    : data array with shape (time, lon, lat).
    varname     : standardized variable name.
    gridded_int : whether to also register a gridded interpolator.
    """
    # Coordinate dependencies for the 3D signature.
    coord_units = {'time': 'hr', 'lon': 'deg', 'lat': 'deg'}
    # Wrap in longitude/latitude before building the interpolator.
    data_wrapped = self.wrap_3Dlatlon(varname, variable)
    self = regdef_3D_interpolators(self, units, data_wrapped, self._time,
                                   self._lon, self._lat, varname,
                                   coord_units, gridded_int)
    return
#### Define and register a 4D variable -----------------------------------------
def register_4D_variable(self, units, variable, varname, gridded_int):
    """Registers a 4d interpolator with 4d signature.

    units       : units string attached to the kamodofied function.
    variable    : data array with shape (time, lon, lat, height).
    varname     : standardized variable name; selects the vertical coordinate.
    gridded_int : whether to also register a gridded interpolator.
    """
    #### Get the correct coordinates for this variable's vertical grid
    if varname in self.ilev1_list:
        h = self._ilev1
        coord_lat, coord_lon = self._lat, self._lon
        xvec_dependencies = {'time':'hr','lon':'deg','lat':'deg','ilev1':'m/m'}
    elif varname in self.ilev_list:
        h = self._ilev
        coord_lat, coord_lon = self._lat, self._lon
        xvec_dependencies = {'time':'hr','lon':'deg','lat':'deg','ilev':'m/m'}
    elif varname in self.milev_list:
        # magnetic coordinates (mlat/mlon/milev)
        h = self._milev
        coord_lat, coord_lon = self._mlat, self._mlon
        xvec_dependencies = {'time':'hr','mlon':'deg','mlat':'deg','milev':'m/m'}
    else:
        print(varname, 'error')
        # NOTE(review): this branch leaves h/coord_lat/coord_lon/
        # xvec_dependencies unbound, so the code below raises NameError for
        # an unrecognized varname -- consider raising an explicit error.
    #### define and register the interpolators
    if 'lat' in xvec_dependencies.keys():
        # geographic grid: wrap in longitude and latitude first
        wrapped_data = self.wrap_4Dlatlon(varname, variable)
    else:
        # magnetic grid: no lat/lon wrap; instead strip any fully-undefined
        # (all 1e+36 fill) top levels before registering.
        top_shape = list(variable[:,:,:,-1].shape)
        top_size = top_shape[0]*top_shape[1]*top_shape[2]  # points in top level
        # idx_top counts the fill-value points at the top vertical level;
        # it equals top_size only when EVERY point there exceeds 1e+35.
        idx_top = where(variable[:,:,:,-1]>1e+35)[0]
        tmp_data = variable
        while top_size==len(idx_top):  # then top row is undefined! Remove it.
            print(f'All values at max milev are 1e+36 for {varname}. Slicing off top array.')
            # keep the shared self._milev grid in sync with the sliced data
            if self._milev.shape[0]==len(tmp_data[0,0,0,:]):
                self._milev = self._milev[0:-1]
            tmp_data = tmp_data[:,:,:,0:-1]
            top_shape = list(tmp_data[:,:,:,-1].shape)
            top_size = top_shape[0]*top_shape[1]*top_shape[2]  # points in top level
            idx_top = where(tmp_data[:,:,:,-1]>1e+35)[0]
        wrapped_data = tmp_data
        h = self._milev  # re-read: may have been sliced above
    self = regdef_4D_interpolators(self, units, wrapped_data,
                                   self._time, coord_lon, coord_lat, h,
                                   varname, xvec_dependencies, gridded_int)
    return
return MODEL | [
"datetime.datetime.utcfromtimestamp",
"numpy.array",
"numpy.sin",
"numpy.mean",
"numpy.where",
"netCDF4.Dataset",
"time.perf_counter",
"numpy.diff",
"glob.glob",
"numpy.abs",
"kamodo.readers.reader_utilities.regdef_4D_interpolators",
"kamodo.readers.reader_utilities.regdef_3D_interpolators",
... | [((9885, 9927), 'datetime.datetime.strptime', 'datetime.strptime', (['date_string', '"""%Y-%m-%d"""'], {}), "(date_string, '%Y-%m-%d')\n", (9902, 9927), False, 'from datetime import datetime, timezone, timedelta\n'), ((11507, 11521), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (11519, 11521), False, 'from time import perf_counter\n'), ((11545, 11568), 'os.path.basename', 'basename', (['full_filename'], {}), '(full_filename)\n', (11553, 11568), False, 'from os.path import basename\n'), ((11660, 11687), 'netCDF4.Dataset', 'Dataset', (['full_filename', '"""r"""'], {}), "(full_filename, 'r')\n", (11667, 11687), False, 'from netCDF4 import Dataset\n'), ((11760, 11793), 'numpy.array', 'array', (["cdf_data.variables['year']"], {}), "(cdf_data.variables['year'])\n", (11765, 11793), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((11814, 11848), 'numpy.array', 'array', (["cdf_data.variables['mtime']"], {}), "(cdf_data.variables['mtime'])\n", (11819, 11848), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((20670, 20702), 'numpy.array', 'array', (["cdf_data.variables['lat']"], {}), "(cdf_data.variables['lat'])\n", (20675, 20702), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((20754, 20773), 'numpy.insert', 'insert', (['lat', '(0)', '(-90)'], {}), '(lat, 0, -90)\n', (20760, 20773), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((20848, 20865), 'numpy.append', 'append', (['lat', '(90.0)'], {}), '(lat, 90.0)\n', (20854, 20865), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((20914, 20946), 'numpy.array', 'array', (["cdf_data.variables['lon']"], {}), "(cdf_data.variables['lon'])\n", (20919, 20946), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((21003, 21021), 'numpy.append', 'append', (['lon', '(180.0)'], {}), '(lon, 180.0)\n', 
(21009, 21021), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((21085, 21117), 'numpy.array', 'array', (["cdf_data.variables['lev']"], {}), "(cdf_data.variables['lev'])\n", (21090, 21117), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((21144, 21177), 'numpy.array', 'array', (["cdf_data.variables['ilev']"], {}), "(cdf_data.variables['ilev'])\n", (21149, 21177), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((21204, 21238), 'numpy.array', 'array', (["cdf_data.variables['imlev']"], {}), "(cdf_data.variables['imlev'])\n", (21209, 21238), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((21324, 21357), 'numpy.array', 'array', (["cdf_data.variables['mlat']"], {}), "(cdf_data.variables['mlat'])\n", (21329, 21357), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((21383, 21416), 'numpy.array', 'array', (["cdf_data.variables['mlon']"], {}), "(cdf_data.variables['mlon'])\n", (21388, 21416), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((21790, 21804), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (21802, 21804), False, 'from time import perf_counter\n'), ((24849, 24866), 'numpy.zeros', 'zeros', (['shape_list'], {}), '(shape_list)\n', (24854, 24866), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((25083, 25115), 'numpy.mean', 'mean', (['tmp_arr[:, :-1, 1]'], {'axis': '(1)'}), '(tmp_arr[:, :-1, 1], axis=1)\n', (25087, 25115), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((25294, 25327), 'numpy.mean', 'mean', (['tmp_arr[:, :-1, -2]'], {'axis': '(1)'}), '(tmp_arr[:, :-1, -2], axis=1)\n', (25298, 25327), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((26051, 26068), 'numpy.zeros', 'zeros', 
(['shape_list'], {}), '(shape_list)\n', (26056, 26068), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((29757, 29813), 'numpy.zeros', 'zeros', (['(shape_list[0], shape_list[1] - 1, shape_list[3])'], {}), '((shape_list[0], shape_list[1] - 1, shape_list[3]))\n', (29762, 29813), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((30645, 30774), 'kamodo.readers.reader_utilities.regdef_3D_interpolators', 'regdef_3D_interpolators', (['self', 'units', 'wrapped_data', 'self._time', 'self._lon', 'self._lat', 'varname', 'xvec_dependencies', 'gridded_int'], {}), '(self, units, wrapped_data, self._time, self._lon,\n self._lat, varname, xvec_dependencies, gridded_int)\n', (30668, 30774), False, 'from kamodo.readers.reader_utilities import regdef_3D_interpolators, regdef_4D_interpolators\n'), ((33029, 33161), 'kamodo.readers.reader_utilities.regdef_4D_interpolators', 'regdef_4D_interpolators', (['self', 'units', 'wrapped_data', 'self._time', 'coord_lon', 'coord_lat', 'h', 'varname', 'xvec_dependencies', 'gridded_int'], {}), '(self, units, wrapped_data, self._time, coord_lon,\n coord_lat, h, varname, xvec_dependencies, gridded_int)\n', (33052, 33161), False, 'from kamodo.readers.reader_utilities import regdef_3D_interpolators, regdef_4D_interpolators\n'), ((8742, 8790), 'datetime.datetime.strptime', 'datetime.strptime', (['file_dts', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(file_dts, '%Y-%m-%d %H:%M:%S')\n", (8759, 8790), False, 'from datetime import datetime, timezone, timedelta\n'), ((20506, 20531), 'numpy.insert', 'insert', (['time', '(0)', 'new_time'], {}), '(time, 0, new_time)\n', (20512, 20531), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((25170, 25223), 'numpy.broadcast_to', 'broadcast_to', (['top', '(shape_list[1] - 1, shape_list[0])'], {}), '(top, (shape_list[1] - 1, shape_list[0]))\n', (25182, 25223), False, 'from numpy import NaN, diff, abs, mean, 
broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((25383, 25436), 'numpy.broadcast_to', 'broadcast_to', (['top', '(shape_list[1] - 1, shape_list[0])'], {}), '(top, (shape_list[1] - 1, shape_list[0]))\n', (25395, 25436), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((26442, 26477), 'numpy.mean', 'mean', (['tmp_arr[:, :-1, 1, :]'], {'axis': '(1)'}), '(tmp_arr[:, :-1, 1, :], axis=1)\n', (26446, 26477), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((26757, 26793), 'numpy.mean', 'mean', (['tmp_arr[:, :-1, -2, :]'], {'axis': '(1)'}), '(tmp_arr[:, :-1, -2, :], axis=1)\n', (26761, 26793), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((28090, 28169), 'numpy.broadcast_to', 'broadcast_to', (['self._lon[:-1]', '(shape_list[0], shape_list[3], shape_list[1] - 1)'], {}), '(self._lon[:-1], (shape_list[0], shape_list[3], shape_list[1] - 1))\n', (28102, 28169), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((28489, 28558), 'numpy.broadcast_to', 'broadcast_to', (['xval', '(shape_list[1] - 1, shape_list[0], shape_list[3])'], {}), '(xval, (shape_list[1] - 1, shape_list[0], shape_list[3]))\n', (28501, 28558), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((28645, 28714), 'numpy.broadcast_to', 'broadcast_to', (['yval', '(shape_list[1] - 1, shape_list[0], shape_list[3])'], {}), '(yval, (shape_list[1] - 1, shape_list[0], shape_list[3]))\n', (28657, 28714), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((13498, 13516), 'glob.glob', 'glob', (['file_pattern'], {}), '(file_pattern)\n', (13502, 13516), False, 'from glob import glob\n'), ((13884, 13912), 'numpy.where', 'where', (['(filenames == filename)'], {}), '(filenames == filename)\n', (13889, 13912), False, 'from numpy import zeros, transpose, array, 
append, insert, where, unique\n'), ((14359, 14408), 'numpy.abs', 'abs', (['(kamodo_test.filetimes[1] - self.filetimes[0])'], {}), '(kamodo_test.filetimes[1] - self.filetimes[0])\n', (14362, 14408), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((19460, 19490), 'numpy.array', 'array', (['cdf_data.variables[key]'], {}), '(cdf_data.variables[key])\n', (19465, 19490), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((26547, 26615), 'numpy.broadcast_to', 'broadcast_to', (['top', '(shape_list[1] - 1, shape_list[0], shape_list[3])'], {}), '(top, (shape_list[1] - 1, shape_list[0], shape_list[3]))\n', (26559, 26615), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((26864, 26932), 'numpy.broadcast_to', 'broadcast_to', (['top', '(shape_list[1] - 1, shape_list[0], shape_list[3])'], {}), '(top, (shape_list[1] - 1, shape_list[0], shape_list[3]))\n', (26876, 26932), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((28318, 28355), 'numpy.cos', 'cos', (['((lon_arr + 180.0) * nppi / 180.0)'], {}), '((lon_arr + 180.0) * nppi / 180.0)\n', (28321, 28355), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((28400, 28437), 'numpy.sin', 'sin', (['((lon_arr + 180.0) * nppi / 180.0)'], {}), '((lon_arr + 180.0) * nppi / 180.0)\n', (28403, 28437), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((29691, 29714), 'numpy.where', 'where', (['(self._lon >= 0.0)'], {}), '(self._lon >= 0.0)\n', (29696, 29714), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((32292, 32328), 'numpy.where', 'where', (['(variable[:, :, :, -1] > 1e+35)'], {}), '(variable[:, :, :, -1] > 1e+35)\n', (32297, 32328), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((12594, 12604), 
'numpy.diff', 'diff', (['time'], {}), '(time)\n', (12598, 12604), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((13554, 13565), 'os.path.basename', 'basename', (['f'], {}), '(f)\n', (13562, 13565), False, 'from os.path import basename\n'), ((22216, 22233), 'numpy.zeros', 'zeros', (['data_shape'], {}), '(data_shape)\n', (22221, 22233), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((22489, 22519), 'numpy.transpose', 'transpose', (['new_data', '(0, 2, 1)'], {}), '(new_data, (0, 2, 1))\n', (22498, 22519), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((22607, 22655), 'numpy.transpose', 'transpose', (["variables[varname]['data']", '(0, 2, 1)'], {}), "(variables[varname]['data'], (0, 2, 1))\n", (22616, 22655), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((29049, 29086), 'numpy.sin', 'sin', (['((lon_arr + 180.0) * nppi / 180.0)'], {}), '((lon_arr + 180.0) * nppi / 180.0)\n', (29052, 29086), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((29084, 29121), 'numpy.cos', 'cos', (['((lon_arr + 180.0) * nppi / 180.0)'], {}), '((lon_arr + 180.0) * nppi / 180.0)\n', (29087, 29121), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((32903, 32939), 'numpy.where', 'where', (['(tmp_data[:, :, :, -1] > 1e+35)'], {}), '(tmp_data[:, :, :, -1] > 1e+35)\n', (32908, 32939), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((10352, 10387), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['time_val'], {}), '(time_val)\n', (10377, 10387), False, 'from datetime import datetime, timezone, timedelta\n'), ((23359, 23376), 'numpy.zeros', 'zeros', (['data_shape'], {}), '(data_shape)\n', (23364, 23376), False, 'from numpy import zeros, transpose, array, append, insert, where, 
unique\n'), ((23639, 23672), 'numpy.transpose', 'transpose', (['new_data', '(0, 3, 2, 1)'], {}), '(new_data, (0, 3, 2, 1))\n', (23648, 23672), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((23763, 23814), 'numpy.transpose', 'transpose', (["variables[varname]['data']", '(0, 3, 2, 1)'], {}), "(variables[varname]['data'], (0, 3, 2, 1))\n", (23772, 23814), False, 'from numpy import zeros, transpose, array, append, insert, where, unique\n'), ((29442, 29477), 'numpy.cos', 'cos', (['((90.0 - latval) * nppi / 180.0)'], {}), '((90.0 - latval) * nppi / 180.0)\n', (29445, 29477), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((29527, 29562), 'numpy.cos', 'cos', (['((90.0 - latval) * nppi / 180.0)'], {}), '((90.0 - latval) * nppi / 180.0)\n', (29530, 29562), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((21529, 21543), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (21541, 21543), False, 'from time import perf_counter\n'), ((29412, 29449), 'numpy.cos', 'cos', (['((lon_arr + 180.0) * nppi / 180.0)'], {}), '((lon_arr + 180.0) * nppi / 180.0)\n', (29415, 29449), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((29497, 29534), 'numpy.sin', 'sin', (['((lon_arr + 180.0) * nppi / 180.0)'], {}), '((lon_arr + 180.0) * nppi / 180.0)\n', (29500, 29534), False, 'from numpy import NaN, diff, abs, mean, broadcast_to, cos, sin, repeat, sqrt, sum\n'), ((24207, 24221), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (24219, 24221), False, 'from time import perf_counter\n'), ((24364, 24378), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (24376, 24378), False, 'from time import perf_counter\n'), ((15213, 15227), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (15225, 15227), False, 'from time import perf_counter\n'), ((15688, 15702), 'time.perf_counter', 'perf_counter', ([], {}), 
'()\n', (15700, 15702), False, 'from time import perf_counter\n')] |
"""
Script for calculating the IPO index in each ensemble member
Author : <NAME>
Date : 17 September 2021
Version : 12
"""
### Import packages
import sys
import math
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import palettable.cubehelix as cm
import palettable.cartocolors.qualitative as cc
import palettable.scientific.sequential as sss
import cmocean as cmocean
import calc_Utilities as UT
import calc_dataFunctions as df
import calc_Stats as dSS
import scipy.stats as sts
import scipy.sparse as sparse
import scipy.sparse.linalg as linalg
import matplotlib
import cmasher as cmr
from eofs.standard import Eof
### Plotting defaults
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
###############################################################################
###############################################################################
###############################################################################
### Data preliminaries
modelGCMs = ['CESM2-LE']   # large ensemble(s) to analyze
dataset_obs = 'ERA5'       # observational/reanalysis comparison dataset
allDataLabels = modelGCMs
letters = ["a","b","c","d","e","f","g","h","i","j","k","l","m"]  # panel labels
datasetsingle = ['CESM2le']
monthlychoiceq = ['annual']   # temporal averaging of the input fields
variables = ['SST']           # variable(s) to read in
reg_name = 'SMILEGlobe'       # region key resolved via UT.regions below
level = 'surface'
###############################################################################
###############################################################################
randomalso = False
timeper = 'hiatus'     # time-period flag passed through to the data readers
shuffletype = 'GAUSS'
###############################################################################
###############################################################################
land_only = False
ocean_only = False
###############################################################################
###############################################################################
baseline = np.arange(1951,1980+1,1)   # baseline climatology years (1951-1980)
###############################################################################
###############################################################################
### Rolling-window length in years (0 = no standard-deviation processing)
window = 0
if window == 0:
    rm_standard_dev = False
    ravel_modelens = False
    ravelmodeltime = False
else:
    rm_standard_dev = True
    ravelmodeltime = False
    ravel_modelens = True
yearsall = np.arange(1979+window,2099+1,1)   # model years (offset by window)
yearsobs = np.arange(1979+window,2020+1,1)   # observation years
###############################################################################
###############################################################################
numOfEns = 40             # number of ensemble members to read
lentime = len(yearsall)
###############################################################################
###############################################################################
dataset = datasetsingle[0]
lat_bounds,lon_bounds = UT.regions(reg_name)  # lat/lon limits for the region
###############################################################################
###############################################################################
ravelyearsbinary = False
ravelbinary = False
lensalso = True
###############################################################################
###############################################################################
### Processing data steps
rm_ensemble_mean = True   # remove ensemble mean from each member (see below)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
def read_primary_dataset(variq,dataset,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
    """Read the model ensemble for `variq` and subset it to the requested
    lat/lon region. Returns (data, lats, lons) for the regional slice."""
    full_field, lats, lons = df.readFiles(variq, dataset, monthlychoice,
                                          numOfEns, lensalso, randomalso,
                                          ravelyearsbinary, ravelbinary,
                                          shuffletype, timeper)
    regional, lats, lons = df.getRegion(full_field, lats, lons,
                                        lat_bounds, lon_bounds)
    # NOTE: the shape printed here is the full (pre-subset) array, not the
    # regional slice that is returned.
    print('\nOur dataset: ',dataset,' is shaped',full_field.shape)
    return regional, lats, lons
def read_obs_dataset(variq,dataset_obs,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
    """Read the observational dataset and slice it to the lat/lon box.

    NOTE(review): unlike read_primary_dataset, `monthlychoice` and `timeper`
    are not parameters here — they are read from module globals at call time.
    Confirm both are defined before this function is called.
    """
    obs_full, obs_lats, obs_lons = df.readFiles(variq, dataset_obs,
                                                monthlychoice, numOfEns,
                                                lensalso, randomalso,
                                                ravelyearsbinary, ravelbinary,
                                                shuffletype, timeper)
    obs_region, obs_lats, obs_lons = df.getRegion(obs_full, obs_lats, obs_lons,
                                                  lat_bounds, lon_bounds)
    print('our OBS dataset: ', dataset_obs, ' is shaped', obs_region.shape)
    return obs_region, obs_lats, obs_lons
### Call functions
vv = 0   # index into `variables` (variable to analyze)
mo = 0   # index into `monthlychoiceq` (month/season selection)
variq = variables[vv]
monthlychoice = monthlychoiceq[mo]
directoryfigure = '/Users/zlabe/Desktop/GmstTrendPrediction/ANN_v2/PDO/'
saveData = monthlychoice + '_' + variq + '_' + reg_name + '_' + dataset_obs
print('*Filename == < %s >' % saveData)

### Read data
models,lats,lons = read_primary_dataset(variq,dataset,monthlychoice,numOfEns,
                                        lensalso,randomalso,ravelyearsbinary,
                                        ravelbinary,shuffletype,timeper,
                                        lat_bounds,lon_bounds)
obs,lats_obs,lons_obs = read_obs_dataset(variq,dataset_obs,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds)

### Slice for number of years to begin hiatus
# Restrict both model and observed time axes to years >= AGWstart.
AGWstart = 1990
yearsq_m = np.where((yearsall >= AGWstart))[0]
yearsq_o = np.where((yearsobs >= AGWstart))[0]
models_slice = models[:,yearsq_m,:,:]   # (ensemble, year, lat, lon)
obs_slice = obs[yearsq_o,:,:]           # (year, lat, lon)

### Calculate global mean temperature
lon2,lat2 = np.meshgrid(lons,lats)
# Area-weighted (cos-latitude) spatial averages; UT is a project utility.
modelsm = UT.calc_weightedAve(models_slice,lat2)
obsm = UT.calc_weightedAve(obs_slice,lat2)
###############################################################################
### Calculate ensemble spread statistics
meaens = np.nanmean(modelsm[:,:],axis=0)
maxens = np.nanmax(modelsm[:,:],axis=0)
minens = np.nanmin(modelsm[:,:],axis=0)
spread = maxens - minens
###############################################################################
### Remove ensemble mean
if rm_ensemble_mean == True:
    # dSS is a project module; flags were configured at the top of the script.
    # NOTE(review): models_var is only defined when rm_ensemble_mean is True,
    # but it is used unconditionally below — confirm the flag is always True.
    models_var = dSS.remove_ensemble_mean(models_slice,ravel_modelens,
                                          ravelmodeltime,rm_standard_dev,
                                          numOfEns)
###############################################################################
###############################################################################
###############################################################################
### Calculate SST regions
### Region 1 of the tripole IPO index (North Pacific).
region1 = models_var.copy() # 25°N–45°N, 140°E–145°W
latr1 = np.where((lats >= 25) & (lats <= 45))[0]
# Longitudes run 0–360, so 145°W is expressed as 180 + (180 - 145) = 215°E.
lonr1 = np.where((lons >= 140) & (lons <= (180+(180-145))))[0]
latRegion1 = lats[latr1]
lonRegion1 = lons[lonr1]
lon2r1,lat2r1 = np.meshgrid(lonRegion1,latRegion1)
sstregion1lat = region1[:,:,latr1,:]
sstregion1 = sstregion1lat[:,:,:,lonr1]
mean_r1 = UT.calc_weightedAve(sstregion1,lat2r1)
### Region 2 (equatorial Pacific).
region2 = models_var.copy() # 10°S–10°N, 170°E–90°W
latr2 = np.where((lats >= -10) & (lats <= 10))[0]
lonr2 = np.where((lons >= 170) & (lons <= (180+(180-90))))[0]
latRegion2 = lats[latr2]
lonRegion2 = lons[lonr2]
lon2r2,lat2r2 = np.meshgrid(lonRegion2,latRegion2)
sstregion2lat = region2[:,:,latr2,:]
sstregion2 = sstregion2lat[:,:,:,lonr2]
mean_r2 = UT.calc_weightedAve(sstregion2,lat2r2)
### Region 3 (South Pacific).
region3 = models_var.copy() # 50°S–15°S, 150°E–160°W
latr3 = np.where((lats >= -50) & (lats <= -15))[0]
lonr3 = np.where((lons >= 150) & (lons <= (180+(180-160))))[0]
latRegion3 = lats[latr3]
lonRegion3 = lons[lonr3]
lon2r3,lat2r3 = np.meshgrid(lonRegion3,latRegion3)
sstregion3lat = region3[:,:,latr3,:]
sstregion3 = sstregion3lat[:,:,:,lonr3]
mean_r3 = UT.calc_weightedAve(sstregion3,lat2r3)
###############################################################################
### Calculate IPO
# Tripole index: central region minus the mean of the two flanking regions,
# then standardized (z-scores) over ensembles and time.
IPOindex = mean_r2 - ((mean_r1 + mean_r3)/2)
IPOindexz = (IPOindex - np.mean(IPOindex))/np.std(IPOindex)

### Save IPO index
directoryoutput = '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/IPO/'
np.savetxt(directoryoutput + 'IPO_CESM2LE_1990-2099.txt',IPOindexz )
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Read in PDO index
directorydata = '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/'
# File is (time, ensemble) when unpacked; transpose to (ensemble, time).
PDO = np.genfromtxt(directorydata + '/PDO/PDO_CESM2LE_1990-2099.txt',unpack=True).transpose()
###############################################################################
### Compare PDO and IPO correlations
# Per-ensemble-member Pearson correlation between the IPO and PDO series.
corr = np.empty((models_var.shape[0]))
p = np.empty((models_var.shape[0]))
for i in range(models_var.shape[0]):
    corr[i],p[i] = sts.pearsonr(IPOindexz[i],PDO[i])
###############################################################################
###############################################################################
###############################################################################
### Create plot of correlations for the ensembles
fig = plt.figure()
ax = plt.subplot(111)
def adjust_spines(ax, spines):
    """Keep only the named spines (offset outward by 5 pt); hide the rest.

    Tick marks are shown only on the axes whose spine ('left'/'bottom') is
    kept; otherwise the ticks are removed entirely.
    """
    for name, spine in ax.spines.items():
        if name in spines:
            # Push the retained spine 5 points away from the plot area.
            spine.set_position(('outward', 5))
        else:
            spine.set_color('none')
    if 'left' in spines:
        ax.yaxis.set_ticks_position('left')
    else:
        ax.yaxis.set_ticks([])
    if 'bottom' in spines:
        ax.xaxis.set_ticks_position('bottom')
    else:
        ax.xaxis.set_ticks([])
# Style the axes: keep left/bottom spines only, dim-grey theme.
adjust_spines(ax, ['left', 'bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_color('dimgrey')
ax.spines['bottom'].set_color('dimgrey')
ax.spines['left'].set_linewidth(2)
ax.spines['bottom'].set_linewidth(2)
ax.tick_params('both',length=4,width=2,which='major',color='dimgrey')
ax.tick_params(axis='x',labelsize=7,pad=4)
ax.tick_params(axis='y',labelsize=7,pad=4)
ax.yaxis.grid(zorder=1,color='dimgrey',alpha=0.35,clip_on=False)
# One marker per ensemble member; `corr` computed above via pearsonr.
plt.plot(corr,color='maroon',linewidth=3,clip_on=False,alpha=1,
         marker='o',markersize=7,zorder=10)
plt.xlabel(r'\textbf{CESM2-LE Ensemble Member \#}',fontsize=8,color='dimgrey')
plt.ylabel(r'\textbf{IPO \& PDO: Correlation Coefficient}',fontsize=8,color='dimgrey')
# x ticks labeled 1..40 every 3 members; y ticks 0..1 every 0.1.
plt.yticks(np.arange(0,1.1,0.1),map(str,np.round(np.arange(0,1.1,0.1),2)))
plt.xticks(np.arange(0,39+1,3),map(str,np.arange(1,40+1,3)))
plt.xlim([0,39])
plt.ylim([0,1])
plt.subplots_adjust(bottom=0.15)
plt.savefig(directoryfigure + 'IPO-PDO_CorrelationCESM2le.png',
            dpi=600)
| [
"matplotlib.pyplot.ylabel",
"calc_dataFunctions.getRegion",
"numpy.nanmean",
"scipy.stats.pearsonr",
"numpy.nanmin",
"numpy.genfromtxt",
"numpy.arange",
"numpy.mean",
"numpy.where",
"calc_Utilities.calc_weightedAve",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"calc_Stats.remove_en... | [((782, 809), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (788, 809), True, 'import matplotlib.pyplot as plt\n'), ((809, 882), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **{'family': 'sans-serif', 'sans-serif': ['Avant Garde']})\n", (815, 882), True, 'import matplotlib.pyplot as plt\n'), ((1986, 2014), 'numpy.arange', 'np.arange', (['(1951)', '(1980 + 1)', '(1)'], {}), '(1951, 1980 + 1, 1)\n', (1995, 2014), True, 'import numpy as np\n'), ((2377, 2414), 'numpy.arange', 'np.arange', (['(1979 + window)', '(2099 + 1)', '(1)'], {}), '(1979 + window, 2099 + 1, 1)\n', (2386, 2414), True, 'import numpy as np\n'), ((2420, 2457), 'numpy.arange', 'np.arange', (['(1979 + window)', '(2020 + 1)', '(1)'], {}), '(1979 + window, 2020 + 1, 1)\n', (2429, 2457), True, 'import numpy as np\n'), ((2861, 2881), 'calc_Utilities.regions', 'UT.regions', (['reg_name'], {}), '(reg_name)\n', (2871, 2881), True, 'import calc_Utilities as UT\n'), ((5728, 5751), 'numpy.meshgrid', 'np.meshgrid', (['lons', 'lats'], {}), '(lons, lats)\n', (5739, 5751), True, 'import numpy as np\n'), ((5761, 5800), 'calc_Utilities.calc_weightedAve', 'UT.calc_weightedAve', (['models_slice', 'lat2'], {}), '(models_slice, lat2)\n', (5780, 5800), True, 'import calc_Utilities as UT\n'), ((5807, 5843), 'calc_Utilities.calc_weightedAve', 'UT.calc_weightedAve', (['obs_slice', 'lat2'], {}), '(obs_slice, lat2)\n', (5826, 5843), True, 'import calc_Utilities as UT\n'), ((5984, 6017), 'numpy.nanmean', 'np.nanmean', (['modelsm[:, :]'], {'axis': '(0)'}), '(modelsm[:, :], axis=0)\n', (5994, 6017), True, 'import numpy as np\n'), ((6025, 6057), 'numpy.nanmax', 'np.nanmax', (['modelsm[:, :]'], {'axis': '(0)'}), '(modelsm[:, :], axis=0)\n', (6034, 6057), True, 'import numpy as np\n'), ((6065, 6097), 'numpy.nanmin', 'np.nanmin', (['modelsm[:, :]'], {'axis': '(0)'}), '(modelsm[:, :], axis=0)\n', (6074, 6097), True, 'import numpy as 
np\n'), ((6958, 6993), 'numpy.meshgrid', 'np.meshgrid', (['lonRegion1', 'latRegion1'], {}), '(lonRegion1, latRegion1)\n', (6969, 6993), True, 'import numpy as np\n'), ((7080, 7119), 'calc_Utilities.calc_weightedAve', 'UT.calc_weightedAve', (['sstregion1', 'lat2r1'], {}), '(sstregion1, lat2r1)\n', (7099, 7119), True, 'import calc_Utilities as UT\n'), ((7351, 7386), 'numpy.meshgrid', 'np.meshgrid', (['lonRegion2', 'latRegion2'], {}), '(lonRegion2, latRegion2)\n', (7362, 7386), True, 'import numpy as np\n'), ((7473, 7512), 'calc_Utilities.calc_weightedAve', 'UT.calc_weightedAve', (['sstregion2', 'lat2r2'], {}), '(sstregion2, lat2r2)\n', (7492, 7512), True, 'import calc_Utilities as UT\n'), ((7747, 7782), 'numpy.meshgrid', 'np.meshgrid', (['lonRegion3', 'latRegion3'], {}), '(lonRegion3, latRegion3)\n', (7758, 7782), True, 'import numpy as np\n'), ((7869, 7908), 'calc_Utilities.calc_weightedAve', 'UT.calc_weightedAve', (['sstregion3', 'lat2r3'], {}), '(sstregion3, lat2r3)\n', (7888, 7908), True, 'import calc_Utilities as UT\n'), ((8214, 8282), 'numpy.savetxt', 'np.savetxt', (["(directoryoutput + 'IPO_CESM2LE_1990-2099.txt')", 'IPOindexz'], {}), "(directoryoutput + 'IPO_CESM2LE_1990-2099.txt', IPOindexz)\n", (8224, 8282), True, 'import numpy as np\n'), ((8921, 8950), 'numpy.empty', 'np.empty', (['models_var.shape[0]'], {}), '(models_var.shape[0])\n', (8929, 8950), True, 'import numpy as np\n'), ((8957, 8986), 'numpy.empty', 'np.empty', (['models_var.shape[0]'], {}), '(models_var.shape[0])\n', (8965, 8986), True, 'import numpy as np\n'), ((9380, 9392), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9390, 9392), True, 'import matplotlib.pyplot as plt\n'), ((9398, 9414), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (9409, 9414), True, 'import matplotlib.pyplot as plt\n'), ((10323, 10432), 'matplotlib.pyplot.plot', 'plt.plot', (['corr'], {'color': '"""maroon"""', 'linewidth': '(3)', 'clip_on': '(False)', 'alpha': '(1)', 'marker': 
'"""o"""', 'markersize': '(7)', 'zorder': '(10)'}), "(corr, color='maroon', linewidth=3, clip_on=False, alpha=1, marker=\n 'o', markersize=7, zorder=10)\n", (10331, 10432), True, 'import matplotlib.pyplot as plt\n'), ((10432, 10518), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""\\\\textbf{CESM2-LE Ensemble Member \\\\#}"""'], {'fontsize': '(8)', 'color': '"""dimgrey"""'}), "('\\\\textbf{CESM2-LE Ensemble Member \\\\#}', fontsize=8, color=\n 'dimgrey')\n", (10442, 10518), True, 'import matplotlib.pyplot as plt\n'), ((10511, 10604), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""\\\\textbf{IPO \\\\& PDO: Correlation Coefficient}"""'], {'fontsize': '(8)', 'color': '"""dimgrey"""'}), "('\\\\textbf{IPO \\\\& PDO: Correlation Coefficient}', fontsize=8,\n color='dimgrey')\n", (10521, 10604), True, 'import matplotlib.pyplot as plt\n'), ((10734, 10751), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 39]'], {}), '([0, 39])\n', (10742, 10751), True, 'import matplotlib.pyplot as plt\n'), ((10754, 10770), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (10762, 10770), True, 'import matplotlib.pyplot as plt\n'), ((10771, 10803), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.15)'}), '(bottom=0.15)\n', (10790, 10803), True, 'import matplotlib.pyplot as plt\n'), ((10804, 10876), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(directoryfigure + 'IPO-PDO_CorrelationCESM2le.png')"], {'dpi': '(600)'}), "(directoryfigure + 'IPO-PDO_CorrelationCESM2le.png', dpi=600)\n", (10815, 10876), True, 'import matplotlib.pyplot as plt\n'), ((3883, 4015), 'calc_dataFunctions.readFiles', 'df.readFiles', (['variq', 'dataset', 'monthlychoice', 'numOfEns', 'lensalso', 'randomalso', 'ravelyearsbinary', 'ravelbinary', 'shuffletype', 'timeper'], {}), '(variq, dataset, monthlychoice, numOfEns, lensalso, randomalso,\n ravelyearsbinary, ravelbinary, shuffletype, timeper)\n', (3895, 4015), True, 'import calc_dataFunctions as df\n'), ((4025, 4079), 
'calc_dataFunctions.getRegion', 'df.getRegion', (['data', 'lats', 'lons', 'lat_bounds', 'lon_bounds'], {}), '(data, lats, lons, lat_bounds, lon_bounds)\n', (4037, 4079), True, 'import calc_dataFunctions as df\n'), ((4355, 4491), 'calc_dataFunctions.readFiles', 'df.readFiles', (['variq', 'dataset_obs', 'monthlychoice', 'numOfEns', 'lensalso', 'randomalso', 'ravelyearsbinary', 'ravelbinary', 'shuffletype', 'timeper'], {}), '(variq, dataset_obs, monthlychoice, numOfEns, lensalso,\n randomalso, ravelyearsbinary, ravelbinary, shuffletype, timeper)\n', (4367, 4491), True, 'import calc_dataFunctions as df\n'), ((4512, 4578), 'calc_dataFunctions.getRegion', 'df.getRegion', (['data_obs', 'lats_obs', 'lons_obs', 'lat_bounds', 'lon_bounds'], {}), '(data_obs, lats_obs, lons_obs, lat_bounds, lon_bounds)\n', (4524, 4578), True, 'import calc_dataFunctions as df\n'), ((5526, 5556), 'numpy.where', 'np.where', (['(yearsall >= AGWstart)'], {}), '(yearsall >= AGWstart)\n', (5534, 5556), True, 'import numpy as np\n'), ((5573, 5603), 'numpy.where', 'np.where', (['(yearsobs >= AGWstart)'], {}), '(yearsobs >= AGWstart)\n', (5581, 5603), True, 'import numpy as np\n'), ((6275, 6376), 'calc_Stats.remove_ensemble_mean', 'dSS.remove_ensemble_mean', (['models_slice', 'ravel_modelens', 'ravelmodeltime', 'rm_standard_dev', 'numOfEns'], {}), '(models_slice, ravel_modelens, ravelmodeltime,\n rm_standard_dev, numOfEns)\n', (6299, 6376), True, 'import calc_Stats as dSS\n'), ((6788, 6825), 'numpy.where', 'np.where', (['((lats >= 25) & (lats <= 45))'], {}), '((lats >= 25) & (lats <= 45))\n', (6796, 6825), True, 'import numpy as np\n'), ((6837, 6890), 'numpy.where', 'np.where', (['((lons >= 140) & (lons <= 180 + (180 - 145)))'], {}), '((lons >= 140) & (lons <= 180 + (180 - 145)))\n', (6845, 6890), True, 'import numpy as np\n'), ((7181, 7219), 'numpy.where', 'np.where', (['((lats >= -10) & (lats <= 10))'], {}), '((lats >= -10) & (lats <= 10))\n', (7189, 7219), True, 'import numpy as np\n'), ((7231, 
7283), 'numpy.where', 'np.where', (['((lons >= 170) & (lons <= 180 + (180 - 90)))'], {}), '((lons >= 170) & (lons <= 180 + (180 - 90)))\n', (7239, 7283), True, 'import numpy as np\n'), ((7575, 7614), 'numpy.where', 'np.where', (['((lats >= -50) & (lats <= -15))'], {}), '((lats >= -50) & (lats <= -15))\n', (7583, 7614), True, 'import numpy as np\n'), ((7626, 7679), 'numpy.where', 'np.where', (['((lons >= 150) & (lons <= 180 + (180 - 160)))'], {}), '((lons >= 150) & (lons <= 180 + (180 - 160)))\n', (7634, 7679), True, 'import numpy as np\n'), ((8095, 8111), 'numpy.std', 'np.std', (['IPOindex'], {}), '(IPOindex)\n', (8101, 8111), True, 'import numpy as np\n'), ((9045, 9079), 'scipy.stats.pearsonr', 'sts.pearsonr', (['IPOindexz[i]', 'PDO[i]'], {}), '(IPOindexz[i], PDO[i])\n', (9057, 9079), True, 'import scipy.stats as sts\n'), ((10609, 10631), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.1)'], {}), '(0, 1.1, 0.1)\n', (10618, 10631), True, 'import numpy as np\n'), ((10684, 10707), 'numpy.arange', 'np.arange', (['(0)', '(39 + 1)', '(3)'], {}), '(0, 39 + 1, 3)\n', (10693, 10707), True, 'import numpy as np\n'), ((8076, 8093), 'numpy.mean', 'np.mean', (['IPOindex'], {}), '(IPOindex)\n', (8083, 8093), True, 'import numpy as np\n'), ((8708, 8784), 'numpy.genfromtxt', 'np.genfromtxt', (["(directorydata + '/PDO/PDO_CESM2LE_1990-2099.txt')"], {'unpack': '(True)'}), "(directorydata + '/PDO/PDO_CESM2LE_1990-2099.txt', unpack=True)\n", (8721, 8784), True, 'import numpy as np\n'), ((10712, 10735), 'numpy.arange', 'np.arange', (['(1)', '(40 + 1)', '(3)'], {}), '(1, 40 + 1, 3)\n', (10721, 10735), True, 'import numpy as np\n'), ((10647, 10669), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.1)'], {}), '(0, 1.1, 0.1)\n', (10656, 10669), True, 'import numpy as np\n')] |
import argparse
import logging
import os
import shutil
import sys
import tensorboardX as tb
import numpy as np
import torch
import yaml
from configs import *
from runners import *
def parse_args_and_config():
    """Parse the command line, load the experiment config, and set up folders,
    logging, the tensorboard writer, the torch device and RNG seeds.

    Returns:
        (args, config): the augmented argparse namespace and the config object
        produced by the project-defined `get_config`.
    """
    parser = argparse.ArgumentParser(description=globals()['__doc__'])
    parser.add_argument('--config', type=str, required=True, help='Path to the config file')
    parser.add_argument('--seed', type=int, default=1234, help='Random seed')
    parser.add_argument('--exp', type=str, default='/content/drive/MyDrive/AdvSM/exp', help='Path for saving running related data.')
    parser.add_argument('--doc', type=str, required=True, help='A string for documentation purpose. '
                                                               'Will be the name of the log folder.')
    parser.add_argument('--comment', type=str, default='', help='A string for experiment comment')
    parser.add_argument('--verbose', type=str, default='warning', help='Verbose level: info | debug | warning | critical')

    # Task to do (exactly one is expected; enforced later by get_runner)
    parser.add_argument('--train', action='store_true', help='Whether to train the model')
    parser.add_argument('--sample', action='store_true', help='Whether to produce samples from the model')
    parser.add_argument('--fast_fid', action='store_true', help='Whether to evaluate the FID metric')
    parser.add_argument('--inception', action='store_true', help='Whether to evaluate the inception metric')
    parser.add_argument('--eval', action='store_true', help='')
    parser.add_argument('--stackedmnist', action='store_true', help='Whether to execute the StackedMNIST task')
    parser.add_argument('--resume_training', action='store_true', help='Whether to resume training')
    parser.add_argument('-i', '--image_folder', type=str, default='/content/drive/MyDrive/AdvSM/exp/images', help="The folder name of samples")
    parser.add_argument('--data_folder', default='/Datasets')
    parser.add_argument('--ni', action='store_true', help="No interaction. Suitable for Slurm Job launcher")
    parser.add_argument('--fid_folder', default='/scratch', help='where to store FID results')

    # Override parameters (0 / falsy means "use the value from the yml config")
    parser.add_argument('--consistent', action='store_true', help='If True, overwrite yml file and use consistent sampling')
    parser.add_argument('--nsigma', type=int, default=0, help='number of steps per sigmas (or multiplier on number of sigmas if consistent=true)')
    parser.add_argument('--step_lr', type=float, default=0, help='sampling.step_lr')
    parser.add_argument('--batch_size', type=int, default=0, help='sampling/fid batch size')
    parser.add_argument('--begin_ckpt', type=int, default=0, help='sampling.begin_ckpt')
    parser.add_argument('--end_ckpt', type=int, default=0, help='sampling.end_ckpt')
    parser.add_argument('--fid_num_samples', type=int, default=0, help='fast_fid.num_samples')
    parser.add_argument('--adam', action='store_true', help='If True, uses adam_beta instead of the ones in config')
    parser.add_argument('--adam_beta', nargs=2, type=float, default=(0.9, 0.999))
    parser.add_argument('--D_adam', action='store_true', help='If True, uses D_adam_beta with Discriminator instead of the ones in config')
    parser.add_argument('--D_adam_beta', nargs=2, type=float, default=(0.9, 0.999))
    parser.add_argument('--D_steps', type=int, default=0, help='Number of discriminator per score network steps')

    # Adversarial option
    parser.add_argument('--adversarial', action='store_true', help='Adversarial Denoising autoencoder')

    # Loss functions options
    parser.add_argument('--target', type=str, default='gaussian', help='dae: predict x and apply a sigmoid, gaussian:(x - x_tilde)/sigma')
    parser.add_argument('--loss', type=str, default='l2', help='Amongst "l1", "l2" and "l1_msssim"')

    # Model
    parser.add_argument('--std', action='store_true', help='divide score network by variance (so its always standardized)')
    parser.add_argument('--no_dilation', action='store_true',
                        help='If True, do not use dilation anywhere in architecture. This should reduces memory requirement and increase speed significantly.')

    # Find best hyperameters
    parser.add_argument('--compute_approximate_sigma_max', action='store_true',
                        help='sigma_max must be the maximum pairwise eucledian distance between points, we can get a really rough estimate by only looking at mini-batches pairwise distances. This will run for one epoch.')

    args = parser.parse_args()
    args.log_path = os.path.join(args.exp, 'logs', args.doc)

    #### Specific to ComputeCanada Beluga server
    #args.data_folder = os.environ["SLURM_TMPDIR"] + "/" + args.data_folder
    #args.fid_folder = args.fid_folder + "/" + os.environ["USER"] + "/Output" + "/Extra"
    os.makedirs(args.fid_folder, exist_ok=True)

    # Project-defined: loads/merges the YAML config referenced by --config.
    config = get_config(args)
    args.image_folder_denoised = args.image_folder
    tb_path = os.path.join(args.exp, 'tensorboard', args.doc)

    # Map the textual verbose level ('info', 'debug', ...) to a logging constant;
    # getattr returns None for unknown levels, which validate_args rejects.
    args.level = getattr(logging, args.verbose.upper(), None)
    validate_args(args)

    if args.train:
        if not args.resume_training:
            # Fresh run: (re)create the log and tensorboard folders.
            ask_overwrite_folder(args.log_path, no_interactions=args.ni)
            ask_overwrite_folder(tb_path, no_interactions=args.ni)
        # NOTE(review): nesting reconstructed from context — confirm whether
        # the config dump belongs inside the `not resume_training` branch.
        with open(os.path.join(args.log_path, 'config.yml'), 'w') as f:
            yaml.dump(config, f, default_flow_style=False)
        args.tb_logger = tb.SummaryWriter(log_dir=tb_path)
        args.log_sample_path = os.path.join(args.log_path, 'samples')
        os.makedirs(args.log_sample_path, exist_ok=True)
    elif not args.eval:
        # Sampling / FID / inception tasks write images under `image_samples`.
        path_dir = 'image_samples'
        os.makedirs(os.path.join(args.exp, path_dir), exist_ok=True)
        args.image_folder = os.path.join(args.exp, path_dir, args.image_folder)
        args.image_folder_denoised = os.path.join(args.exp, path_dir + '_denoised', args.image_folder_denoised)
        ask_overwrite_folder(args.image_folder, no_interactions=args.ni)
        ask_overwrite_folder(args.image_folder_denoised, no_interactions=args.ni)

    # setup logger
    logger = logging.getLogger()
    logger.setLevel(args.level)
    handlers = [logging.StreamHandler()]
    if args.train:
        handlers += [logging.FileHandler(os.path.join(args.log_path, 'stdout.txt'))]
    formatter = logging.Formatter('%(levelname)s - %(filename)s - %(asctime)s - %(message)s')
    for h in handlers:
        h.setFormatter(formatter)
        logger.addHandler(h)

    # add device
    # device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
    args.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    logging.info("Using device: {}".format(args.device))  # TODO can i do that instead of args.device

    # set random seed (both torch and numpy, plus all CUDA devices if present)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

    torch.backends.cudnn.benchmark = True

    return args, config
def ask_overwrite_folder(folder, no_interactions, fatal=True):
    """Create `folder`, wiping any existing one after (optional) confirmation.

    If the folder does not exist it is simply created. If it exists and
    `no_interactions` is True it is removed and recreated without asking.
    Otherwise the user is prompted; answering anything but 'Y' either halts
    the program (when `fatal`) or leaves the folder untouched.
    """
    if not os.path.exists(folder):
        os.makedirs(folder)
        return
    # Folder already exists: decide whether we may wipe it.
    if no_interactions:
        overwrite = True
    else:
        answer = input(f"Folder '{folder}' already exists. Overwrite? (Y/N)")
        overwrite = answer.upper() == 'Y'
    if overwrite:
        shutil.rmtree(folder)
        os.makedirs(folder)
    elif fatal:
        # Only reachable after an interactive 'no' answer.
        print("Output image folder exists. Program halted.")
        sys.exit(0)
def validate_args(args):
    """Fail fast on invalid CLI argument combinations.

    Raises explicit ValueErrors instead of using `assert`, because asserts
    are stripped when Python runs with optimization (-O), which would
    silently disable this validation.

    Raises:
        ValueError: if --target is not 'gaussian'/'dae', or if --verbose did
            not map to a valid logging level (args.level is then None).
    """
    if args.target not in ("gaussian", "dae"):
        raise ValueError(
            f"Invalid --target {args.target!r}; expected 'gaussian' or 'dae'")
    if not isinstance(args.level, int):
        raise ValueError(
            f"Invalid --verbose level; resolved logging level is {args.level!r}")
def get_runner(args):
    """Return the runner class matching the single active task flag.

    Exactly one of the task flags must be set (train/sample/fast_fid/
    inception/eval/stackedmnist); training is the fall-through default.
    """
    flags = (args.train, args.sample, args.fast_fid, args.inception,
             args.eval, args.stackedmnist)
    assert sum(flags) == 1
    if args.sample:
        return SampleRunner
    if args.fast_fid:
        return FidRunner
    if args.eval:
        return EvalRunner
    if args.inception:
        return InceptionRunner
    if args.stackedmnist:
        return StackedMNISTRunner
    return TrainRunner
def main():
    """Entry point: parse args/config, pick the task runner, and execute it."""
    args, config = parse_args_and_config()
    print_config(config)
    runner_cls = get_runner(args)
    runner_cls(args, config).run()
    return 0


if __name__ == '__main__':
    sys.exit(main())
| [
"logging.getLogger",
"torch.manual_seed",
"torch.cuda.manual_seed_all",
"logging.StreamHandler",
"os.path.exists",
"tensorboardX.SummaryWriter",
"os.makedirs",
"yaml.dump",
"sys.exit",
"logging.Formatter",
"os.path.join",
"torch.cuda.is_available",
"numpy.random.seed",
"shutil.rmtree",
"... | [((4556, 4596), 'os.path.join', 'os.path.join', (['args.exp', '"""logs"""', 'args.doc'], {}), "(args.exp, 'logs', args.doc)\n", (4568, 4596), False, 'import os\n'), ((4821, 4864), 'os.makedirs', 'os.makedirs', (['args.fid_folder'], {'exist_ok': '(True)'}), '(args.fid_folder, exist_ok=True)\n', (4832, 4864), False, 'import os\n'), ((4963, 5010), 'os.path.join', 'os.path.join', (['args.exp', '"""tensorboard"""', 'args.doc'], {}), "(args.exp, 'tensorboard', args.doc)\n", (4975, 5010), False, 'import os\n'), ((6134, 6153), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (6151, 6153), False, 'import logging\n'), ((6349, 6426), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s - %(filename)s - %(asctime)s - %(message)s"""'], {}), "('%(levelname)s - %(filename)s - %(asctime)s - %(message)s')\n", (6366, 6426), False, 'import logging\n'), ((6845, 6873), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (6862, 6873), False, 'import torch\n'), ((6878, 6903), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (6892, 6903), True, 'import numpy as np\n'), ((6911, 6936), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6934, 6936), False, 'import torch\n'), ((5461, 5494), 'tensorboardX.SummaryWriter', 'tb.SummaryWriter', ([], {'log_dir': 'tb_path'}), '(log_dir=tb_path)\n', (5477, 5494), True, 'import tensorboardX as tb\n'), ((5527, 5565), 'os.path.join', 'os.path.join', (['args.log_path', '"""samples"""'], {}), "(args.log_path, 'samples')\n", (5539, 5565), False, 'import os\n'), ((5574, 5622), 'os.makedirs', 'os.makedirs', (['args.log_sample_path'], {'exist_ok': '(True)'}), '(args.log_sample_path, exist_ok=True)\n', (5585, 5622), False, 'import os\n'), ((6203, 6226), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (6224, 6226), False, 'import logging\n'), ((6665, 6690), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6688, 
6690), False, 'import torch\n'), ((6641, 6661), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (6653, 6661), False, 'import torch\n'), ((6696, 6715), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6708, 6715), False, 'import torch\n'), ((6946, 6983), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (6972, 6983), False, 'import torch\n'), ((7128, 7150), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (7142, 7150), False, 'import os\n'), ((7160, 7179), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (7171, 7179), False, 'import os\n'), ((5781, 5832), 'os.path.join', 'os.path.join', (['args.exp', 'path_dir', 'args.image_folder'], {}), '(args.exp, path_dir, args.image_folder)\n', (5793, 5832), False, 'import os\n'), ((5870, 5944), 'os.path.join', 'os.path.join', (['args.exp', "(path_dir + '_denoised')", 'args.image_folder_denoised'], {}), "(args.exp, path_dir + '_denoised', args.image_folder_denoised)\n", (5882, 5944), False, 'import os\n'), ((7214, 7235), 'shutil.rmtree', 'shutil.rmtree', (['folder'], {}), '(folder)\n', (7227, 7235), False, 'import shutil\n'), ((7244, 7263), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (7255, 7263), False, 'import os\n'), ((5388, 5434), 'yaml.dump', 'yaml.dump', (['config', 'f'], {'default_flow_style': '(False)'}), '(config, f, default_flow_style=False)\n', (5397, 5434), False, 'import yaml\n'), ((5704, 5736), 'os.path.join', 'os.path.join', (['args.exp', 'path_dir'], {}), '(args.exp, path_dir)\n', (5716, 5736), False, 'import os\n'), ((6288, 6329), 'os.path.join', 'os.path.join', (['args.log_path', '"""stdout.txt"""'], {}), "(args.log_path, 'stdout.txt')\n", (6300, 6329), False, 'import os\n'), ((7402, 7423), 'shutil.rmtree', 'shutil.rmtree', (['folder'], {}), '(folder)\n', (7415, 7423), False, 'import shutil\n'), ((7436, 7455), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', 
(7447, 7455), False, 'import os\n'), ((5318, 5359), 'os.path.join', 'os.path.join', (['args.log_path', '"""config.yml"""'], {}), "(args.log_path, 'config.yml')\n", (5330, 5359), False, 'import os\n'), ((7553, 7564), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7561, 7564), False, 'import sys\n')] |
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)
# @file
# @author <NAME>
import numpy as np
import pytest
import scipp as sc
def test_shape():
    """Dataset.shape collects the extents of all dimensions across items."""
    scalar_ds = sc.Dataset(data={'a': sc.Variable(value=1)})
    assert scalar_ds.shape == []
    var_x = sc.Variable(['x'], shape=[2])
    var_yz = sc.Variable(['y', 'z'], shape=[3, 4])
    ds = sc.Dataset(data={'a': var_x, 'b': var_yz})
    assert not bool(set(ds.shape) - set([2, 3, 4]))
def test_sizes():
    """Dataset.sizes maps each dimension label to its extent."""
    assert sc.Dataset(data={'a': sc.scalar(value=1)}).sizes == {}
    ds = sc.Dataset(data={'a': sc.Variable(['x'], shape=[2]),
                          'b': sc.Variable(['y', 'z'], shape=[3, 4])})
    assert ds.sizes == {'x': 2, 'y': 3, 'z': 4}
def test_create_empty():
    """A default-constructed Dataset has no items, coords, or dims."""
    empty = sc.Dataset()
    assert len(empty) == 0
    assert len(empty.coords) == 0
    assert len(empty.dims) == 0
def test_create():
    """A Dataset built from data items plus coords exposes both faithfully."""
    coord_x = sc.Variable(dims=['x'], values=np.arange(3))
    coord_y = sc.Variable(dims=['y'], values=np.arange(4))
    data_xy = sc.Variable(dims=['x', 'y'], values=np.arange(12).reshape(3, 4))
    ds = sc.Dataset({'xy': data_xy, 'x': coord_x},
                    coords={'x': coord_x, 'y': coord_y})
    assert len(ds) == 2
    assert sc.identical(ds.coords['x'], coord_x)
    assert sc.identical(ds.coords['y'], coord_y)
    assert sc.identical(ds['xy'].data, data_xy)
    assert sc.identical(ds['x'].data, coord_x)
    assert set(ds.dims) == {'x', 'y'}
def test_create_from_data_array():
    """Wrapping a DataArray in a Dataset preserves it under its name."""
    var = sc.Variable(dims=['x'], values=np.arange(4))
    array = sc.DataArray(var, coords={'x': var, 'aux': var})
    ds = sc.Dataset({array.name: array})
    assert sc.identical(ds[''], array)
def test_create_from_data_arrays():
    """Inserting DataArrays item by item matches direct Dataset construction."""
    data = sc.Variable(dims=['x'], values=np.arange(4))
    aux = sc.Variable(dims=['x'], values=np.ones(4))
    first = sc.DataArray(data, coords={'x': data, 'aux': aux})
    second = sc.DataArray(data, coords={'x': data, 'aux': aux})
    ds = sc.Dataset()
    ds['a'] = first
    ds['b'] = second
    expected = sc.Dataset(data={'a': data, 'b': data},
                          coords={'x': data, 'aux': aux})
    assert sc.identical(ds, expected)
def test_create_from_data_array_and_variable_mix():
    """DataArray and plain Variable items can coexist in one Dataset."""
    values_a = sc.Variable(dims=['x'], values=np.arange(4))
    values_b = sc.Variable(dims=['x'], values=np.arange(4))
    array = sc.DataArray(data=values_a, coords={'x': values_a, 'aux': values_a})
    ds = sc.Dataset({'array': array, 'variable': values_b})
    assert sc.identical(ds['array'], array)
    assert sc.identical(ds['variable'].data, values_b)
def test_create_with_data_array_and_additional_coords():
    """Extra dataset coords get attached to items made from DataArrays."""
    var = sc.Variable(dims=['x'], values=np.arange(4))
    extra = sc.Variable(dims=['x'], values=np.arange(4))
    array = sc.DataArray(data=var, coords={'x': var, 'aux': var})
    ds = sc.Dataset(data={'array': array}, coords={'y': extra})
    # Mirror the dataset-level coord on the standalone array for comparison.
    array.coords['y'] = extra
    assert sc.identical(ds['array'], array)
    assert sc.identical(ds.coords['y'], extra)
    assert sc.identical(ds.coords['x'], var)
    assert sc.identical(ds.coords['aux'], var)
def test_clear():
    """clear() drops all data items from a Dataset."""
    ds = sc.Dataset()
    ds['a'] = sc.Variable(dims=['x'], values=np.arange(3))
    assert 'a' in ds
    ds.clear()
    assert len(ds) == 0
def test_setitem():
    """Assigning a Variable inserts a new item without adding coords."""
    ds = sc.Dataset()
    ds['a'] = sc.Variable(1.0)
    assert len(ds) == 1
    assert sc.identical(ds['a'].data, sc.Variable(1.0))
    assert len(ds.coords) == 0
def test_del_item():
    """`del` removes an existing item from the Dataset."""
    ds = sc.Dataset()
    ds['a'] = sc.Variable(1.0)
    assert 'a' in ds
    del ds['a']
    assert 'a' not in ds
def test_del_item_missing():
    """Deleting a non-existent item raises RuntimeError."""
    ds = sc.Dataset()
    with pytest.raises(RuntimeError):
        del ds['not an item']
def test_coord_setitem():
    """Coords cannot be added through a slice — only on the full dataset."""
    var = sc.Variable(dims=['x'], values=np.arange(4))
    ds = sc.Dataset({'a': var}, coords={'x': var})
    with pytest.raises(RuntimeError):
        ds['x', 2:3].coords['y'] = sc.Variable(1.0)
    assert 'y' not in ds.coords
    # Adding the coord on the unsliced dataset succeeds.
    ds.coords['y'] = sc.Variable(1.0)
    assert len(ds) == 1
    assert len(ds.coords) == 2
    assert sc.identical(ds.coords['y'], sc.Variable(1.0))
def test_contains_coord():
    """Coord membership reflects insertion."""
    ds = sc.Dataset()
    assert 'x' not in ds.coords
    ds.coords['x'] = sc.Variable(1.0)
    assert 'x' in ds.coords
def test_coords_keys():
    """coords.keys() exposes inserted coord names."""
    d = sc.Dataset()
    d.coords['x'] = sc.Variable(1.0)
    assert len(d.coords.keys()) == 1
    assert 'x' in d.coords.keys()
def test_slice_item():
    """Slicing an item slices both its data and its dimension coord."""
    d = sc.Dataset(
        coords={'x': sc.Variable(dims=['x'], values=np.arange(4, 8))})
    d['a'] = sc.Variable(dims=['x'], values=np.arange(4))
    assert sc.identical(d['a']['x', 2:4].data,
                        sc.Variable(dims=['x'], values=np.arange(2, 4)))
    assert sc.identical(d['a']['x', 2:4].coords['x'],
                        sc.Variable(dims=['x'], values=np.arange(6, 8)))
def test_set_item_slice_from_numpy():
    """Assigning a numpy array into an item slice writes through to the data."""
    d = sc.Dataset(
        coords={'x': sc.Variable(dims=['x'], values=np.arange(4, 8))})
    d['a'] = sc.Variable(dims=['x'], values=np.arange(4))
    d['a']['x', 2:4] = np.arange(2)
    assert sc.identical(d['a'].data,
                        sc.Variable(dims=['x'], values=np.array([0, 1, 0, 1])))
def test_set_item_slice_with_variances_from_numpy():
    """values and variances of an item slice are independently assignable."""
    d = sc.Dataset(
        coords={'x': sc.Variable(dims=['x'], values=np.arange(4, 8))})
    d['a'] = sc.Variable(dims=['x'],
                        values=np.arange(4.0),
                        variances=np.arange(4.0))
    d['a']['x', 2:4].values = np.arange(2)
    d['a']['x', 2:4].variances = np.arange(2, 4)
    assert np.array_equal(d['a'].values, np.array([0.0, 1.0, 0.0, 1.0]))
    assert np.array_equal(d['a'].variances, np.array([0.0, 1.0, 2.0, 3.0]))
def test_iadd_slice():
    """In-place add of one single-element slice onto another."""
    d = sc.Dataset(
        coords={'x': sc.Variable(dims=['x'], values=np.arange(4, 8))})
    d['a'] = sc.Variable(dims=['x'], values=np.arange(4))
    d['a']['x', 1] += d['a']['x', 2]
    assert sc.identical(d['a'].data,
                        sc.Variable(dims=['x'], values=np.array([0, 3, 2, 3])))
def test_iadd_range():
    """In-place add of a range slice requires matching coords."""
    d = sc.Dataset(
        coords={'x': sc.Variable(dims=['x'], values=np.arange(4, 8))})
    d['a'] = sc.Variable(dims=['x'], values=np.arange(4))
    # Mismatched coord ranges ('x' 6:8 vs 5:7) must be rejected.
    with pytest.raises(RuntimeError):
        d['a']['x', 2:4] += d['a']['x', 1:3]
    d['a']['x', 2:4] += d['a']['x', 2:4]
    assert sc.identical(d['a'].data,
                        sc.Variable(dims=['x'], values=np.array([0, 1, 4, 6])))
def test_contains():
    """`in` on a Dataset reflects item insertion, per item name."""
    d = sc.Dataset()
    assert 'a' not in d
    d['a'] = sc.Variable(1.0)
    assert 'a' in d
    assert 'b' not in d
    d['b'] = sc.Variable(1.0)
    assert 'a' in d
    assert 'b' in d
def test_slice():
    """Slicing a Dataset at a point turns the dim coord into an attr."""
    d = sc.Dataset(
        {
            'a': sc.Variable(dims=['x'], values=np.arange(10.0)),
            'b': sc.Variable(1.0)
        },
        coords={'x': sc.Variable(dims=['x'], values=np.arange(10.0))})
    expected = sc.Dataset({
        'a':
        sc.DataArray(1.0 * sc.units.one, attrs={'x': 1.0 * sc.units.one}),
        'b':
        sc.Variable(1.0)
    })
    assert sc.identical(d['x', 1], expected)
def test_chained_slicing():
    """Chained point-slices in two dims yield the expected 1-D result."""
    x = sc.Variable(dims=['x'], values=np.arange(11.0))
    y = sc.Variable(dims=['y'], values=np.arange(11.0))
    z = sc.Variable(dims=['z'], values=np.arange(11.0))
    d = sc.Dataset(
        {
            'a':
            sc.Variable(dims=['z', 'y', 'x'],
                        values=np.arange(1000.0).reshape(10, 10, 10)),
            'b':
            sc.Variable(dims=['x', 'z'],
                        values=np.arange(0.0, 10.0, 0.1).reshape(10, 10))
        },
        coords={
            'x': x,
            'y': y,
            'z': z
        })
    expected = sc.Dataset()
    expected['a'] = sc.Variable(dims=['y'],
                                values=np.arange(501.0, 600.0, 10.0))
    expected['b'] = sc.Variable(1.5)
    # Point-slicing converts the sliced dim coords into (bin-edge) attrs.
    expected['a'].attrs['x'] = x['x', 1:3]
    expected['b'].attrs['x'] = x['x', 1:3]
    expected['a'].attrs['z'] = z['z', 5:7]
    expected['b'].attrs['z'] = z['z', 5:7]
    expected.coords['y'] = sc.Variable(dims=['y'], values=np.arange(11.0))
    assert sc.identical(d['x', 1]['z', 5], expected)
def test_coords_view_comparison_operators():
    """Coord views of equal datasets compare equal with `==`."""
    d = sc.Dataset(
        {
            'a': sc.Variable(dims=['x'], values=np.arange(10.0)),
            'b': sc.Variable(1.0)
        },
        coords={'x': sc.Variable(dims=['x'], values=np.arange(10.0))})
    d1 = sc.Dataset(
        {
            'a': sc.Variable(dims=['x'], values=np.arange(10.0)),
            'b': sc.Variable(1.0)
        },
        coords={'x': sc.Variable(dims=['x'], values=np.arange(10.0))})
    assert d1['a'].coords == d['a'].coords
def test_sum_mean():
    """sum/mean over 'y' reduce that dim and drop 'y'-dependent coords."""
    d = sc.Dataset(
        {
            'a':
            sc.Variable(dims=['x', 'y'],
                        values=np.arange(6, dtype=np.int64).reshape(2, 3)),
            'b':
            sc.Variable(dims=['y'], values=np.arange(3, dtype=np.int64))
        },
        coords={
            'x':
            sc.Variable(dims=['x'], values=np.arange(2, dtype=np.int64)),
            'y':
            sc.Variable(dims=['y'], values=np.arange(3, dtype=np.int64)),
            'l1':
            sc.Variable(dims=['x', 'y'],
                        values=np.arange(6, dtype=np.int64).reshape(2, 3)),
            'l2':
            sc.Variable(dims=['x'], values=np.arange(2, dtype=np.int64))
        })
    # Reference: only coords without a 'y' dependence ('x', 'l2') survive.
    d_ref = sc.Dataset(
        {
            'a': sc.Variable(dims=['x'],
                             values=np.array([3, 12], dtype=np.int64)),
            'b': sc.Variable(3)
        },
        coords={
            'x': sc.Variable(dims=['x'], values=np.arange(2, dtype=np.int64)),
            'l2': sc.Variable(dims=['x'], values=np.arange(2, dtype=np.int64))
        })
    assert sc.identical(sc.sum(d, 'y'), d_ref)
    assert (sc.mean(d, 'y')['a'].values == [1.0, 4.0]).all()
    assert sc.mean(d, 'y')['b'].value == 1.0
def test_sum_masked():
    """sum skips masked elements: 1+4+1 = 6 (the two 5s are masked)."""
    d = sc.Dataset({
        'a':
        sc.Variable(dims=['x'],
                    values=np.array([1, 5, 4, 5, 1], dtype=np.int64))
    })
    d['a'].masks['m1'] = sc.Variable(dims=['x'],
                                    values=np.array(
                                        [False, True, False, True, False]))
    d_ref = sc.Dataset({'a': sc.Variable(np.int64(6))})
    result = sc.sum(d, 'x')['a']
    assert sc.identical(result, d_ref['a'])
def test_sum_all():
    """sum with no dim reduces all dims; identical via DataArray or Dataset."""
    da = sc.DataArray(sc.Variable(['x', 'y'], values=np.ones(10).reshape(5,
                                                                           2)))
    ds = sc.Dataset({'a': da})
    assert sc.identical(sc.sum(da).data, sc.Variable(value=10.0))
    assert sc.identical(sc.sum(da), sc.sum(ds)['a'])
def test_nansum_masked():
    """nansum skips both masked elements and NaNs: 1+1 = 2."""
    d = sc.Dataset({
        'a':
        sc.Variable(dims=['x'],
                    values=np.array([1, 5, np.nan, np.nan, 1],
                                    dtype=np.float64))
    })
    d['a'].masks['m1'] = sc.Variable(dims=['x'],
                                    values=np.array(
                                        [False, True, False, True, False]))
    d_ref = sc.Dataset({'a': sc.Variable(np.float64(2))})
    result = sc.nansum(d, 'x')['a']
    assert sc.identical(result, d_ref['a'])
def test_nansum_all():
    """nansum over all dims ignores the NaN that makes plain sum NaN."""
    da = sc.DataArray(sc.Variable(['x', 'y'], values=np.ones(10).reshape(5,
                                                                           2)))
    da.data.values[0, 0] = np.nan
    ds = sc.Dataset({'a': da})
    assert np.isnan(sc.sum(da).data.value)  # sanity check
    assert sc.identical(sc.nansum(da).data, sc.Variable(value=9.0))
    assert sc.identical(sc.nansum(da), sc.nansum(ds)['a'])
def test_mean_masked():
    """mean/nanmean skip masked elements: mean of 1, 4, 1 is 2.0."""
    d = sc.Dataset({
        'a':
        sc.Variable(dims=['x'],
                    values=np.array([1, 5, 4, 5, 1]),
                    dtype=sc.dtype.float64)
    })
    d['a'].masks['m1'] = sc.Variable(dims=['x'],
                                    values=np.array(
                                        [False, True, False, True, False]))
    d_ref = sc.Dataset({'a': sc.Variable(2.0)})
    assert sc.identical(sc.mean(d, 'x')['a'], d_ref['a'])
    assert sc.identical(sc.nanmean(d, 'x')['a'], d_ref['a'])
def test_mean_all():
    """mean over all dims skips masked elements (2.0 is masked here)."""
    var = sc.Variable(['x', 'y'], values=np.arange(4.0).reshape(2, 2))
    mask = sc.Variable(['x', 'y'],
                       values=np.array([[False, False], [True, False]]))
    da = sc.DataArray(var, masks={'m': mask})  # Add masks
    assert sc.sum(da).data.value == 0 + 1 + 3  # 2.0 masked
    # BUG FIX: this comparison's result was previously discarded (a bare
    # `==` expression statement), so the mean was never actually verified.
    assert sc.mean(da).data.value == 4 / 3
def test_nanmean_all():
    """nanmean over all dims skips both masked elements and NaNs."""
    var = sc.Variable(['x', 'y'], values=np.arange(4.0).reshape(2, 2))
    var['x', 0]['y', 1].value = np.nan
    mask = sc.Variable(['x', 'y'],
                       values=np.array([[False, False], [True, False]]))
    da = sc.DataArray(var, masks={'m': mask})  # Add masks
    assert sc.nansum(da).data.value == 0 + 3  # 2.0 masked, 1.0 is nan
    # BUG FIX: was `sc.mean(da).data.value == 3 / 2` with the result
    # discarded. Plain mean of NaN-containing data is NaN, so the intended
    # check (matching this test's name and the nansum line above) is on
    # nanmean, and it must be asserted.
    assert sc.nanmean(da).data.value == 3 / 2
def test_dataset_merge():
    """merge of disjoint datasets keeps both items intact."""
    a = sc.Dataset({'d1': sc.Variable(dims=['x'], values=np.array([1, 2, 3]))})
    b = sc.Dataset({'d2': sc.Variable(dims=['x'], values=np.array([4, 5, 6]))})
    c = sc.merge(a, b)
    assert sc.identical(a['d1'], c['d1'])
    assert sc.identical(b['d2'], c['d2'])
def test_dataset_concatenate():
    """concatenate along 'x' joins both data and the 'x' coord."""
    a = sc.Dataset(
        {'data': sc.Variable(dims=['x'], values=np.array([11, 12]))},
        coords={'x': sc.Variable(dims=['x'], values=np.array([1, 2]))})
    b = sc.Dataset(
        {'data': sc.Variable(dims=['x'], values=np.array([13, 14]))},
        coords={'x': sc.Variable(dims=['x'], values=np.array([3, 4]))})
    c = sc.concatenate(a, b, 'x')
    assert np.array_equal(c.coords['x'].values, np.array([1, 2, 3, 4]))
    assert np.array_equal(c['data'].values, np.array([11, 12, 13, 14]))
def test_dataset_set_data():
    """Assigning dataset items copies data (and coords on first insert)."""
    d1 = sc.Dataset(
        {
            'a': sc.Variable(dims=['x', 'y'], values=np.random.rand(2, 3)),
            'b': sc.Variable(1.0)
        },
        coords={
            'x': sc.Variable(dims=['x'],
                             values=np.arange(2.0),
                             unit=sc.units.m),
            'y': sc.Variable(dims=['y'],
                             values=np.arange(3.0),
                             unit=sc.units.m),
            'aux': sc.Variable(dims=['y'], values=np.arange(3))
        })
    d2 = sc.Dataset(
        {
            'a': sc.Variable(dims=['x', 'y'], values=np.random.rand(2, 3)),
            'b': sc.Variable(1.0)
        },
        coords={
            'x': sc.Variable(dims=['x'],
                             values=np.arange(2.0),
                             unit=sc.units.m),
            'y': sc.Variable(dims=['y'],
                             values=np.arange(3.0),
                             unit=sc.units.m),
            'aux': sc.Variable(dims=['y'], values=np.arange(3))
        })
    # Inserting an item from another dataset carries data and coords along.
    d3 = sc.Dataset()
    d3['b'] = d1['a']
    assert sc.identical(d3['b'].data, d1['a'].data)
    assert d3['b'].coords == d1['a'].coords
    d1['a'] = d2['a']
    d1['c'] = d2['a']
    assert sc.identical(d2['a'].data, d1['a'].data)
    assert sc.identical(d2['a'].data, d1['c'].data)
    # Inserting a slice of an item into a new dataset copies the slice.
    d = sc.Dataset()
    d['a'] = sc.Variable(dims=['row'],
                         values=np.arange(10.0),
                         variances=np.arange(10.0))
    d['b'] = sc.Variable(dims=['row'], values=np.arange(10.0, 20.0))
    d.coords['row'] = sc.Variable(dims=['row'], values=np.arange(10.0))
    d1 = d['row', 0:1]
    d2 = sc.Dataset({'a': d1['a'].data}, coords={'row': d1['a'].coords['row']})
    d2['b'] = d1['b']
    expected = sc.Dataset()
    expected['a'] = sc.Variable(dims=['row'],
                                values=np.arange(1.0),
                                variances=np.arange(1.0))
    expected['b'] = sc.Variable(dims=['row'], values=np.arange(10.0, 11.0))
    expected.coords['row'] = sc.Variable(dims=['row'], values=np.arange(1.0))
    assert sc.identical(d2, expected)
def test_binary_with_broadcast():
    """Subtracting a point slice broadcasts; `-` and `-=` agree."""
    d = sc.Dataset(
        {'data': sc.Variable(dims=['x'], values=np.arange(10.0))},
        coords={'x': sc.Variable(dims=['x'], values=np.arange(10.0))})
    d2 = d - d['x', 0]
    d -= d['x', 0]
    assert sc.identical(d, d2)
def test_binary__with_dataarray():
    """In-place ops with a DataArray invert each other, restoring the data."""
    da = sc.DataArray(
        data=sc.Variable(dims=['x'], values=np.arange(1.0, 10.0)),
        coords={'x': sc.Variable(dims=['x'], values=np.arange(1.0, 10.0))})
    ds = sc.Dataset({da.name: da.copy()})
    orig = ds.copy()
    ds += da
    ds -= da
    ds *= da
    ds /= da
    assert sc.identical(ds, orig)
def test_binary_of_item_with_variable():
    """In-place ops on an item with unit-carrying scalars cancel out."""
    d = sc.Dataset(
        {'data': sc.Variable(dims=['x'], values=np.arange(10.0))},
        coords={'x': sc.Variable(dims=['x'], values=np.arange(10.0))})
    copy = d.copy()
    # (x + 2) * 2 - 4, all over 2, returns x.
    d['data'] += 2.0 * sc.units.dimensionless
    d['data'] *= 2.0 * sc.units.m
    d['data'] -= 4.0 * sc.units.m
    d['data'] /= 2.0 * sc.units.m
    assert sc.identical(d, copy)
def test_in_place_binary_with_scalar():
    """In-place ops on a whole Dataset with plain Python scalars cancel out."""
    d = sc.Dataset({'data': sc.Variable(dims=['x'], values=[10.0])},
                   coords={'x': sc.Variable(dims=['x'], values=[10])})
    copy = d.copy()
    d += 2
    d *= 2
    d -= 4
    d /= 2
    assert sc.identical(d, copy)
def test_view_in_place_binary_with_scalar():
    """In-place ops on an item view with plain Python scalars cancel out."""
    d = sc.Dataset({'data': sc.Variable(dims=['x'], values=[10.0])},
                   coords={'x': sc.Variable(dims=['x'], values=[10])})
    copy = d.copy()
    d['data'] += 2
    d['data'] *= 2
    d['data'] -= 4
    d['data'] /= 2
    assert sc.identical(d, copy)
def test_add_sum_of_columns():
    """`a + b` assigned as a new item equals `a` after `a += b`."""
    d = sc.Dataset({
        'a': sc.Variable(dims=['x'], values=np.arange(10.0)),
        'b': sc.Variable(dims=['x'], values=np.ones(10))
    })
    d['sum'] = d['a'] + d['b']
    d['a'] += d['b']
    assert sc.identical(d['sum'], d['a'])
def test_name():
    """Item views keep their name through slicing; arithmetic results don't."""
    d = sc.Dataset(
        {
            'a': sc.Variable(dims=['x'], values=np.arange(10.0)),
            'b': sc.Variable(dims=['x'], values=np.ones(10))
        },
        coords={'x': sc.Variable(dims=['x'], values=np.arange(10))})
    assert d['a'].name == 'a'
    assert d['b'].name == 'b'
    assert d['x', 2]['b'].name == 'b'
    assert d['b']['x', 2].name == 'b'
    array = d['a'] + d['b']
    assert array.name == ''
def make_simple_dataset(dim1='x', dim2='y', seed=None):
    """Return a small test dataset with one 2-D and one scalar item.

    Items: 'a' (random, dims [dim1, dim2], shape 2x3) and 'b' (scalar 1.0).
    Coords: dim1 and dim2 (arange values, unit m) plus a random 'aux'
    along dim2. If seed is given, numpy's RNG is seeded first so the
    random contents are reproducible.
    """
    if seed is not None:
        np.random.seed(seed)
    # Keep the np.random call order (rand(2, 3), then rand(3)) so seeded
    # datasets match those built by the original inline construction.
    data = {
        'a': sc.Variable(dims=[dim1, dim2], values=np.random.rand(2, 3)),
        'b': sc.Variable(1.0),
    }
    coords = {
        dim1: sc.Variable(dims=[dim1], values=np.arange(2.0),
                          unit=sc.units.m),
        dim2: sc.Variable(dims=[dim2], values=np.arange(3.0),
                          unit=sc.units.m),
        'aux': sc.Variable(dims=[dim2], values=np.random.rand(3)),
    }
    return sc.Dataset(data, coords=coords)
def test_dataset_view_set_variance():
    """variances on an item view can be set from numpy and cleared with None."""
    d = make_simple_dataset()
    variances = np.arange(6).reshape(2, 3)
    assert d['a'].variances is None
    d['a'].variances = variances
    assert d['a'].variances is not None
    np.testing.assert_array_equal(d['a'].variances, variances)
    d['a'].variances = None
    assert d['a'].variances is None
def test_sort():
    """sort by a string item reorders data and 'x'-dependent coords."""
    d = sc.Dataset(
        {
            'a': sc.Variable(dims=['x', 'y'],
                             values=np.arange(6).reshape(2, 3)),
            'b': sc.Variable(dims=['x'], values=['b', 'a'])
        },
        coords={
            'x': sc.Variable(dims=['x'],
                             values=np.arange(2.0),
                             unit=sc.units.m),
            'y': sc.Variable(dims=['y'],
                             values=np.arange(3.0),
                             unit=sc.units.m),
            'aux': sc.Variable(dims=['x'], values=np.arange(2.0))
        })
    # Sorting by 'b' (['b', 'a']) flips the 'x' order of everything x-based.
    expected = sc.Dataset(
        {
            'a':
            sc.Variable(dims=['x', 'y'],
                        values=np.flip(np.arange(6).reshape(2, 3), axis=0)),
            'b':
            sc.Variable(dims=['x'], values=['a', 'b'])
        },
        coords={
            'x': sc.Variable(dims=['x'], values=[1.0, 0.0], unit=sc.units.m),
            'y': sc.Variable(dims=['y'],
                             values=np.arange(3.0),
                             unit=sc.units.m),
            'aux': sc.Variable(dims=['x'], values=[1.0, 0.0])
        })
    assert sc.identical(sc.sort(d, d['b'].data), expected)
def test_rename_dims():
    """rename_dims works positionally and via the dims_dict keyword."""
    d = make_simple_dataset('x', 'y', seed=0)
    d.rename_dims({'y': 'z'})
    assert sc.identical(d, make_simple_dataset('x', 'z', seed=0))
    d.rename_dims(dims_dict={'x': 'y', 'z': 'x'})
    assert sc.identical(d, make_simple_dataset('y', 'x', seed=0))
def test_coord_delitem():
    """Deleting a coord restores equality with the pre-insertion copy."""
    var = sc.Variable(dims=['x'], values=np.arange(4))
    d = sc.Dataset({'a': var}, coords={'x': var})
    dref = d.copy()
    d.coords['y'] = sc.Variable(1.0)
    assert dref != d
    del d.coords['y']
    assert sc.identical(dref, d)
def test_coords_delitem():
    """Deleting a coord differs from replacing it; re-adding restores it."""
    var = sc.Variable(dims=['x'], values=np.arange(4))
    d = sc.Dataset({'a': var}, coords={'x': var})
    dref = d.copy()
    dref.coords['x'] = sc.Variable(dims=['x'], values=np.arange(1, 5))
    assert not sc.identical(d, dref)
    # Removing the changed coord still differs from d (coord now missing).
    del dref.coords['x']
    assert not sc.identical(d, dref)
    dref.coords['x'] = d.coords['x']
    assert sc.identical(d, dref)
def test_masks_delitem():
    """Deleting an item mask restores equality with the pre-mask copy."""
    var = sc.Variable(dims=['x'], values=np.array([True, True, False]))
    d = sc.Dataset({'a': var}, coords={'x': var})
    dref = d.copy()
    d['a'].masks['masks'] = var
    assert not sc.identical(d, dref)
    del d['a'].masks['masks']
    assert sc.identical(d, dref)
def test_replace():
    """Assigning to an item's .data replaces the underlying variable."""
    v1 = sc.Variable(['x'], values=np.array([1, 2, 3]))
    d = sc.Dataset({'a': v1})
    # BUG FIX: the original body used bare `d['a'].data == v1` / `== v2`
    # expression statements whose results were discarded, and it never
    # performed the replacement the test is named for — so it asserted
    # nothing. Assert the initial data, replace it, then assert the result.
    assert sc.identical(d['a'].data, v1)
    v2 = sc.Variable(['x'], values=np.array([4, 5, 6]))
    d['a'].data = v2
    assert sc.identical(d['a'].data, v2)
def test_rebin():
    """rebin onto width-2 bins merges pairs of unit counts into 2s."""
    dataset = sc.Dataset()
    dataset['data'] = sc.Variable(['x'],
                                 values=np.array(10 * [1.0]),
                                 unit=sc.units.counts)
    dataset.coords['x'] = sc.Variable(['x'], values=np.arange(11.0))
    new_coord = sc.Variable(dims=['x'], values=np.arange(0.0, 11, 2))
    dataset = sc.rebin(dataset, 'x', new_coord)
    np.testing.assert_array_equal(dataset['data'].values, np.array(5 * [2]))
    np.testing.assert_array_equal(dataset.coords['x'].values,
                                  np.arange(0, 11, 2))
def _is_copy_of(orig, copy):
    """Assert copy is a distinct object sharing orig's data (shallow copy):
    mutating orig is still visible through copy."""
    assert sc.identical(orig, copy)
    assert not id(orig) == id(copy)
    orig += 1
    assert sc.identical(orig, copy)
def _is_deep_copy_of(orig, copy):
    """Assert copy is a distinct object with independent data (deep copy):
    mutating orig does not affect copy."""
    assert sc.identical(orig, copy)
    assert not id(orig) == id(copy)
    orig += 1
    assert not sc.identical(orig, copy)
def test_copy():
    """copy(deep=False)/copy.copy are shallow; copy()/deepcopy are deep."""
    import copy
    a = sc.Dataset()
    a['x'] = sc.Variable(value=1)
    _is_copy_of(a, a.copy(deep=False))
    _is_deep_copy_of(a, a.copy())
    _is_copy_of(a, copy.copy(a))
    _is_deep_copy_of(a, copy.deepcopy(a))
def test_correct_temporaries():
    """Rebinding d1 to successive slices of itself yields correct values."""
    N = 6
    M = 4
    d1 = sc.Dataset()
    d1['x'] = sc.Variable(['x'], values=np.arange(N).astype(np.float64))
    d1['y'] = sc.Variable(['y'], values=np.arange(M).astype(np.float64))
    arr1 = np.arange(N * M).reshape(N, M).astype(np.float64) + 1
    d1['A'] = sc.Variable(['x', 'y'], values=arr1)
    # Each slice view must keep the sliced-away temporary alive.
    d1 = d1['x', 1:2]
    assert d1['A'].values.tolist() == [[5.0, 6.0, 7.0, 8.0]]
    d1 = d1['y', 2:3]
    assert list(d1['A'].values) == [7]
def test_iteration():
    """Iterating a Dataset yields its item names."""
    var = sc.Variable(value=1)
    d = sc.Dataset(data={'a': var, 'b': var})
    expected = ['a', 'b']
    for k in d:
        assert k in expected
| [
"numpy.random.rand",
"scipp.DataArray",
"numpy.array",
"copy.deepcopy",
"copy.copy",
"numpy.arange",
"numpy.int64",
"scipp.mean",
"numpy.float64",
"scipp.merge",
"scipp.sum",
"scipp.concatenate",
"numpy.random.seed",
"numpy.testing.assert_array_equal",
"scipp.Dataset",
"scipp.rebin",
... | [((213, 233), 'scipp.Variable', 'sc.Variable', ([], {'value': '(1)'}), '(value=1)\n', (224, 233), True, 'import scipp as sc\n'), ((242, 267), 'scipp.Dataset', 'sc.Dataset', ([], {'data': "{'a': a}"}), "(data={'a': a})\n", (252, 267), True, 'import scipp as sc\n'), ((301, 330), 'scipp.Variable', 'sc.Variable', (["['x']"], {'shape': '[2]'}), "(['x'], shape=[2])\n", (312, 330), True, 'import scipp as sc\n'), ((339, 376), 'scipp.Variable', 'sc.Variable', (["['y', 'z']"], {'shape': '[3, 4]'}), "(['y', 'z'], shape=[3, 4])\n", (350, 376), True, 'import scipp as sc\n'), ((385, 418), 'scipp.Dataset', 'sc.Dataset', ([], {'data': "{'a': a, 'b': b}"}), "(data={'a': a, 'b': b})\n", (395, 418), True, 'import scipp as sc\n'), ((574, 603), 'scipp.Variable', 'sc.Variable', (["['x']"], {'shape': '[2]'}), "(['x'], shape=[2])\n", (585, 603), True, 'import scipp as sc\n'), ((612, 649), 'scipp.Variable', 'sc.Variable', (["['y', 'z']"], {'shape': '[3, 4]'}), "(['y', 'z'], shape=[3, 4])\n", (623, 649), True, 'import scipp as sc\n'), ((658, 691), 'scipp.Dataset', 'sc.Dataset', ([], {'data': "{'a': a, 'b': b}"}), "(data={'a': a, 'b': b})\n", (668, 691), True, 'import scipp as sc\n'), ((774, 786), 'scipp.Dataset', 'sc.Dataset', ([], {}), '()\n', (784, 786), True, 'import scipp as sc\n'), ((1077, 1132), 'scipp.Dataset', 'sc.Dataset', (["{'xy': xy, 'x': x}"], {'coords': "{'x': x, 'y': y}"}), "({'xy': xy, 'x': x}, coords={'x': x, 'y': y})\n", (1087, 1132), True, 'import scipp as sc\n'), ((1167, 1197), 'scipp.identical', 'sc.identical', (["d.coords['x']", 'x'], {}), "(d.coords['x'], x)\n", (1179, 1197), True, 'import scipp as sc\n'), ((1209, 1239), 'scipp.identical', 'sc.identical', (["d.coords['y']", 'y'], {}), "(d.coords['y'], y)\n", (1221, 1239), True, 'import scipp as sc\n'), ((1251, 1281), 'scipp.identical', 'sc.identical', (["d['xy'].data", 'xy'], {}), "(d['xy'].data, xy)\n", (1263, 1281), True, 'import scipp as sc\n'), ((1293, 1321), 'scipp.identical', 'sc.identical', 
(["d['x'].data", 'x'], {}), "(d['x'].data, x)\n", (1305, 1321), True, 'import scipp as sc\n'), ((1465, 1513), 'scipp.DataArray', 'sc.DataArray', (['var'], {'coords': "{'x': var, 'aux': var}"}), "(var, coords={'x': var, 'aux': var})\n", (1477, 1513), True, 'import scipp as sc\n'), ((1522, 1547), 'scipp.Dataset', 'sc.Dataset', (['{da.name: da}'], {}), '({da.name: da})\n', (1532, 1547), True, 'import scipp as sc\n'), ((1559, 1582), 'scipp.identical', 'sc.identical', (["d['']", 'da'], {}), "(d[''], da)\n", (1571, 1582), True, 'import scipp as sc\n'), ((1741, 1792), 'scipp.DataArray', 'sc.DataArray', (['var1'], {'coords': "{'x': var1, 'aux': var2}"}), "(var1, coords={'x': var1, 'aux': var2})\n", (1753, 1792), True, 'import scipp as sc\n'), ((1803, 1854), 'scipp.DataArray', 'sc.DataArray', (['var1'], {'coords': "{'x': var1, 'aux': var2}"}), "(var1, coords={'x': var1, 'aux': var2})\n", (1815, 1854), True, 'import scipp as sc\n'), ((1863, 1875), 'scipp.Dataset', 'sc.Dataset', ([], {}), '()\n', (1873, 1875), True, 'import scipp as sc\n'), ((2325, 2384), 'scipp.DataArray', 'sc.DataArray', ([], {'data': 'var_1', 'coords': "{'x': var_1, 'aux': var_1}"}), "(data=var_1, coords={'x': var_1, 'aux': var_1})\n", (2337, 2384), True, 'import scipp as sc\n'), ((2393, 2437), 'scipp.Dataset', 'sc.Dataset', (["{'array': da, 'variable': var_2}"], {}), "({'array': da, 'variable': var_2})\n", (2403, 2437), True, 'import scipp as sc\n'), ((2449, 2477), 'scipp.identical', 'sc.identical', (["d['array']", 'da'], {}), "(d['array'], da)\n", (2461, 2477), True, 'import scipp as sc\n'), ((2489, 2528), 'scipp.identical', 'sc.identical', (["d['variable'].data", 'var_2'], {}), "(d['variable'].data, var_2)\n", (2501, 2528), True, 'import scipp as sc\n'), ((2709, 2762), 'scipp.DataArray', 'sc.DataArray', ([], {'data': 'var', 'coords': "{'x': var, 'aux': var}"}), "(data=var, coords={'x': var, 'aux': var})\n", (2721, 2762), True, 'import scipp as sc\n'), ((2771, 2822), 'scipp.Dataset', 'sc.Dataset', ([], 
{'data': "{'array': da}", 'coords': "{'y': coord}"}), "(data={'array': da}, coords={'y': coord})\n", (2781, 2822), True, 'import scipp as sc\n'), ((2861, 2889), 'scipp.identical', 'sc.identical', (["d['array']", 'da'], {}), "(d['array'], da)\n", (2873, 2889), True, 'import scipp as sc\n'), ((2901, 2935), 'scipp.identical', 'sc.identical', (["d.coords['y']", 'coord'], {}), "(d.coords['y'], coord)\n", (2913, 2935), True, 'import scipp as sc\n'), ((2947, 2979), 'scipp.identical', 'sc.identical', (["d.coords['x']", 'var'], {}), "(d.coords['x'], var)\n", (2959, 2979), True, 'import scipp as sc\n'), ((2991, 3025), 'scipp.identical', 'sc.identical', (["d.coords['aux']", 'var'], {}), "(d.coords['aux'], var)\n", (3003, 3025), True, 'import scipp as sc\n'), ((3054, 3066), 'scipp.Dataset', 'sc.Dataset', ([], {}), '()\n', (3064, 3066), True, 'import scipp as sc\n'), ((3212, 3224), 'scipp.Dataset', 'sc.Dataset', ([], {}), '()\n', (3222, 3224), True, 'import scipp as sc\n'), ((3238, 3254), 'scipp.Variable', 'sc.Variable', (['(1.0)'], {}), '(1.0)\n', (3249, 3254), True, 'import scipp as sc\n'), ((3394, 3406), 'scipp.Dataset', 'sc.Dataset', ([], {}), '()\n', (3404, 3406), True, 'import scipp as sc\n'), ((3420, 3436), 'scipp.Variable', 'sc.Variable', (['(1.0)'], {}), '(1.0)\n', (3431, 3436), True, 'import scipp as sc\n'), ((3535, 3547), 'scipp.Dataset', 'sc.Dataset', ([], {}), '()\n', (3545, 3547), True, 'import scipp as sc\n'), ((3706, 3747), 'scipp.Dataset', 'sc.Dataset', (["{'a': var}"], {'coords': "{'x': var}"}), "({'a': var}, coords={'x': var})\n", (3716, 3747), True, 'import scipp as sc\n'), ((3888, 3904), 'scipp.Variable', 'sc.Variable', (['(1.0)'], {}), '(1.0)\n', (3899, 3904), True, 'import scipp as sc\n'), ((4052, 4064), 'scipp.Dataset', 'sc.Dataset', ([], {}), '()\n', (4062, 4064), True, 'import scipp as sc\n'), ((4116, 4132), 'scipp.Variable', 'sc.Variable', (['(1.0)'], {}), '(1.0)\n', (4127, 4132), True, 'import scipp as sc\n'), ((4194, 4206), 'scipp.Dataset', 
'sc.Dataset', ([], {}), '()\n', (4204, 4206), True, 'import scipp as sc\n'), ((4227, 4243), 'scipp.Variable', 'sc.Variable', (['(1.0)'], {}), '(1.0)\n', (4238, 4243), True, 'import scipp as sc\n'), ((4948, 4960), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (4957, 4960), True, 'import numpy as np\n'), ((5390, 5402), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (5399, 5402), True, 'import numpy as np\n'), ((5436, 5451), 'numpy.arange', 'np.arange', (['(2)', '(4)'], {}), '(2, 4)\n', (5445, 5451), True, 'import numpy as np\n'), ((6375, 6387), 'scipp.Dataset', 'sc.Dataset', ([], {}), '()\n', (6385, 6387), True, 'import scipp as sc\n'), ((6425, 6441), 'scipp.Variable', 'sc.Variable', (['(1.0)'], {}), '(1.0)\n', (6436, 6441), True, 'import scipp as sc\n'), ((6499, 6515), 'scipp.Variable', 'sc.Variable', (['(1.0)'], {}), '(1.0)\n', (6510, 6515), True, 'import scipp as sc\n'), ((6960, 6993), 'scipp.identical', 'sc.identical', (["d['x', 1]", 'expected'], {}), "(d['x', 1], expected)\n", (6972, 6993), True, 'import scipp as sc\n'), ((7602, 7614), 'scipp.Dataset', 'sc.Dataset', ([], {}), '()\n', (7612, 7614), True, 'import scipp as sc\n'), ((7749, 7765), 'scipp.Variable', 'sc.Variable', (['(1.5)'], {}), '(1.5)\n', (7760, 7765), True, 'import scipp as sc\n'), ((8025, 8066), 'scipp.identical', 'sc.identical', (["d['x', 1]['z', 5]", 'expected'], {}), "(d['x', 1]['z', 5], expected)\n", (8037, 8066), True, 'import scipp as sc\n'), ((10287, 10319), 'scipp.identical', 'sc.identical', (['result', "d_ref['a']"], {}), "(result, d_ref['a'])\n", (10299, 10319), True, 'import scipp as sc\n'), ((10505, 10526), 'scipp.Dataset', 'sc.Dataset', (["{'a': da}"], {}), "({'a': da})\n", (10515, 10526), True, 'import scipp as sc\n'), ((11152, 11184), 'scipp.identical', 'sc.identical', (['result', "d_ref['a']"], {}), "(result, d_ref['a'])\n", (11164, 11184), True, 'import scipp as sc\n'), ((11407, 11428), 'scipp.Dataset', 'sc.Dataset', (["{'a': da}"], {}), "({'a': da})\n", (11417, 
11428), True, 'import scipp as sc\n'), ((12370, 12406), 'scipp.DataArray', 'sc.DataArray', (['var'], {'masks': "{'m': mask}"}), "(var, masks={'m': mask})\n", (12382, 12406), True, 'import scipp as sc\n'), ((12769, 12805), 'scipp.DataArray', 'sc.DataArray', (['var'], {'masks': "{'m': mask}"}), "(var, masks={'m': mask})\n", (12781, 12805), True, 'import scipp as sc\n'), ((13122, 13136), 'scipp.merge', 'sc.merge', (['a', 'b'], {}), '(a, b)\n', (13130, 13136), True, 'import scipp as sc\n'), ((13148, 13178), 'scipp.identical', 'sc.identical', (["a['d1']", "c['d1']"], {}), "(a['d1'], c['d1'])\n", (13160, 13178), True, 'import scipp as sc\n'), ((13190, 13220), 'scipp.identical', 'sc.identical', (["b['d2']", "c['d2']"], {}), "(b['d2'], c['d2'])\n", (13202, 13220), True, 'import scipp as sc\n'), ((13588, 13613), 'scipp.concatenate', 'sc.concatenate', (['a', 'b', '"""x"""'], {}), "(a, b, 'x')\n", (13602, 13613), True, 'import scipp as sc\n'), ((14849, 14861), 'scipp.Dataset', 'sc.Dataset', ([], {}), '()\n', (14859, 14861), True, 'import scipp as sc\n'), ((14895, 14935), 'scipp.identical', 'sc.identical', (["d3['b'].data", "d1['a'].data"], {}), "(d3['b'].data, d1['a'].data)\n", (14907, 14935), True, 'import scipp as sc\n'), ((15035, 15075), 'scipp.identical', 'sc.identical', (["d2['a'].data", "d1['a'].data"], {}), "(d2['a'].data, d1['a'].data)\n", (15047, 15075), True, 'import scipp as sc\n'), ((15087, 15127), 'scipp.identical', 'sc.identical', (["d2['a'].data", "d1['c'].data"], {}), "(d2['a'].data, d1['c'].data)\n", (15099, 15127), True, 'import scipp as sc\n'), ((15137, 15149), 'scipp.Dataset', 'sc.Dataset', ([], {}), '()\n', (15147, 15149), True, 'import scipp as sc\n'), ((15463, 15533), 'scipp.Dataset', 'sc.Dataset', (["{'a': d1['a'].data}"], {'coords': "{'row': d1['a'].coords['row']}"}), "({'a': d1['a'].data}, coords={'row': d1['a'].coords['row']})\n", (15473, 15533), True, 'import scipp as sc\n'), ((15571, 15583), 'scipp.Dataset', 'sc.Dataset', ([], {}), '()\n', (15581, 
15583), True, 'import scipp as sc\n'), ((15908, 15934), 'scipp.identical', 'sc.identical', (['d2', 'expected'], {}), '(d2, expected)\n', (15920, 15934), True, 'import scipp as sc\n'), ((16183, 16202), 'scipp.identical', 'sc.identical', (['d', 'd2'], {}), '(d, d2)\n', (16195, 16202), True, 'import scipp as sc\n'), ((16532, 16554), 'scipp.identical', 'sc.identical', (['ds', 'orig'], {}), '(ds, orig)\n', (16544, 16554), True, 'import scipp as sc\n'), ((16936, 16957), 'scipp.identical', 'sc.identical', (['d', 'copy'], {}), '(d, copy)\n', (16948, 16957), True, 'import scipp as sc\n'), ((17216, 17237), 'scipp.identical', 'sc.identical', (['d', 'copy'], {}), '(d, copy)\n', (17228, 17237), True, 'import scipp as sc\n'), ((17533, 17554), 'scipp.identical', 'sc.identical', (['d', 'copy'], {}), '(d, copy)\n', (17545, 17554), True, 'import scipp as sc\n'), ((17798, 17828), 'scipp.identical', 'sc.identical', (["d['sum']", "d['a']"], {}), "(d['sum'], d['a'])\n", (17810, 17828), True, 'import scipp as sc\n'), ((19157, 19215), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["d['a'].variances", 'variances'], {}), "(d['a'].variances, variances)\n", (19186, 19215), True, 'import numpy as np\n'), ((20882, 20923), 'scipp.Dataset', 'sc.Dataset', (["{'a': var}"], {'coords': "{'x': var}"}), "({'a': var}, coords={'x': var})\n", (20892, 20923), True, 'import scipp as sc\n'), ((20964, 20980), 'scipp.Variable', 'sc.Variable', (['(1.0)'], {}), '(1.0)\n', (20975, 20980), True, 'import scipp as sc\n'), ((21035, 21056), 'scipp.identical', 'sc.identical', (['dref', 'd'], {}), '(dref, d)\n', (21047, 21056), True, 'import scipp as sc\n'), ((21149, 21190), 'scipp.Dataset', 'sc.Dataset', (["{'a': var}"], {'coords': "{'x': var}"}), "({'a': var}, coords={'x': var})\n", (21159, 21190), True, 'import scipp as sc\n'), ((21429, 21450), 'scipp.identical', 'sc.identical', (['d', 'dref'], {}), '(d, dref)\n', (21441, 21450), True, 'import scipp as sc\n'), ((21559, 21600), 'scipp.Dataset', 
'sc.Dataset', (["{'a': var}"], {'coords': "{'x': var}"}), "({'a': var}, coords={'x': var})\n", (21569, 21600), True, 'import scipp as sc\n'), ((21731, 21752), 'scipp.identical', 'sc.identical', (['d', 'dref'], {}), '(d, dref)\n', (21743, 21752), True, 'import scipp as sc\n'), ((21839, 21860), 'scipp.Dataset', 'sc.Dataset', (["{'a': v1}"], {}), "({'a': v1})\n", (21849, 21860), True, 'import scipp as sc\n'), ((21995, 22007), 'scipp.Dataset', 'sc.Dataset', ([], {}), '()\n', (22005, 22007), True, 'import scipp as sc\n'), ((22321, 22354), 'scipp.rebin', 'sc.rebin', (['dataset', '"""x"""', 'new_coord'], {}), "(dataset, 'x', new_coord)\n", (22329, 22354), True, 'import scipp as sc\n'), ((22591, 22615), 'scipp.identical', 'sc.identical', (['orig', 'copy'], {}), '(orig, copy)\n', (22603, 22615), True, 'import scipp as sc\n'), ((22677, 22701), 'scipp.identical', 'sc.identical', (['orig', 'copy'], {}), '(orig, copy)\n', (22689, 22701), True, 'import scipp as sc\n'), ((22749, 22773), 'scipp.identical', 'sc.identical', (['orig', 'copy'], {}), '(orig, copy)\n', (22761, 22773), True, 'import scipp as sc\n'), ((22907, 22919), 'scipp.Dataset', 'sc.Dataset', ([], {}), '()\n', (22917, 22919), True, 'import scipp as sc\n'), ((22933, 22953), 'scipp.Variable', 'sc.Variable', ([], {'value': '(1)'}), '(value=1)\n', (22944, 22953), True, 'import scipp as sc\n'), ((23165, 23177), 'scipp.Dataset', 'sc.Dataset', ([], {}), '()\n', (23175, 23177), True, 'import scipp as sc\n'), ((23403, 23439), 'scipp.Variable', 'sc.Variable', (["['x', 'y']"], {'values': 'arr1'}), "(['x', 'y'], values=arr1)\n", (23414, 23439), True, 'import scipp as sc\n'), ((23618, 23638), 'scipp.Variable', 'sc.Variable', ([], {'value': '(1)'}), '(value=1)\n', (23629, 23638), True, 'import scipp as sc\n'), ((23647, 23684), 'scipp.Dataset', 'sc.Dataset', ([], {'data': "{'a': var, 'b': var}"}), "(data={'a': var, 'b': var})\n", (23657, 23684), True, 'import scipp as sc\n'), ((1954, 2026), 'scipp.Dataset', 'sc.Dataset', ([], 
{'data': "{'a': var1, 'b': var1}", 'coords': "{'x': var1, 'aux': var2}"}), "(data={'a': var1, 'b': var1}, coords={'x': var1, 'aux': var2})\n", (1964, 2026), True, 'import scipp as sc\n'), ((3315, 3331), 'scipp.Variable', 'sc.Variable', (['(1.0)'], {}), '(1.0)\n', (3326, 3331), True, 'import scipp as sc\n'), ((3557, 3584), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3570, 3584), False, 'import pytest\n'), ((3757, 3784), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3770, 3784), False, 'import pytest\n'), ((3820, 3836), 'scipp.Variable', 'sc.Variable', (['(1.0)'], {}), '(1.0)\n', (3831, 3836), True, 'import scipp as sc\n'), ((3997, 4013), 'scipp.Variable', 'sc.Variable', (['(1.0)'], {}), '(1.0)\n', (4008, 4013), True, 'import scipp as sc\n'), ((5493, 5523), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0, 1.0]'], {}), '([0.0, 1.0, 0.0, 1.0])\n', (5501, 5523), True, 'import numpy as np\n'), ((5569, 5599), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0, 3.0]'], {}), '([0.0, 1.0, 2.0, 3.0])\n', (5577, 5599), True, 'import numpy as np\n'), ((6112, 6139), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (6125, 6139), False, 'import pytest\n'), ((9708, 9722), 'scipp.sum', 'sc.sum', (['d', '"""y"""'], {}), "(d, 'y')\n", (9714, 9722), True, 'import scipp as sc\n'), ((10256, 10270), 'scipp.sum', 'sc.sum', (['d', '"""x"""'], {}), "(d, 'x')\n", (10262, 10270), True, 'import scipp as sc\n'), ((10568, 10591), 'scipp.Variable', 'sc.Variable', ([], {'value': '(10.0)'}), '(value=10.0)\n', (10579, 10591), True, 'import scipp as sc\n'), ((10617, 10627), 'scipp.sum', 'sc.sum', (['da'], {}), '(da)\n', (10623, 10627), True, 'import scipp as sc\n'), ((11118, 11135), 'scipp.nansum', 'sc.nansum', (['d', '"""x"""'], {}), "(d, 'x')\n", (11127, 11135), True, 'import scipp as sc\n'), ((11532, 11554), 'scipp.Variable', 'sc.Variable', ([], {'value': '(9.0)'}), '(value=9.0)\n', (11543, 11554), True, 'import 
scipp as sc\n'), ((11580, 11593), 'scipp.nansum', 'sc.nansum', (['da'], {}), '(da)\n', (11589, 11593), True, 'import scipp as sc\n'), ((13663, 13685), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (13671, 13685), True, 'import numpy as np\n'), ((13731, 13757), 'numpy.array', 'np.array', (['[11, 12, 13, 14]'], {}), '([11, 12, 13, 14])\n', (13739, 13757), True, 'import numpy as np\n'), ((18368, 18388), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (18382, 18388), True, 'import numpy as np\n'), ((20472, 20495), 'scipp.sort', 'sc.sort', (['d', "d['b'].data"], {}), "(d, d['b'].data)\n", (20479, 20495), True, 'import scipp as sc\n'), ((21297, 21318), 'scipp.identical', 'sc.identical', (['d', 'dref'], {}), '(d, dref)\n', (21309, 21318), True, 'import scipp as sc\n'), ((21359, 21380), 'scipp.identical', 'sc.identical', (['d', 'dref'], {}), '(d, dref)\n', (21371, 21380), True, 'import scipp as sc\n'), ((21668, 21689), 'scipp.identical', 'sc.identical', (['d', 'dref'], {}), '(d, dref)\n', (21680, 21689), True, 'import scipp as sc\n'), ((22413, 22430), 'numpy.array', 'np.array', (['(5 * [2])'], {}), '(5 * [2])\n', (22421, 22430), True, 'import numpy as np\n'), ((22528, 22547), 'numpy.arange', 'np.arange', (['(0)', '(11)', '(2)'], {}), '(0, 11, 2)\n', (22537, 22547), True, 'import numpy as np\n'), ((22839, 22863), 'scipp.identical', 'sc.identical', (['orig', 'copy'], {}), '(orig, copy)\n', (22851, 22863), True, 'import scipp as sc\n'), ((23046, 23058), 'copy.copy', 'copy.copy', (['a'], {}), '(a)\n', (23055, 23058), False, 'import copy\n'), ((23084, 23100), 'copy.deepcopy', 'copy.deepcopy', (['a'], {}), '(a)\n', (23097, 23100), False, 'import copy\n'), ((928, 940), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (937, 940), True, 'import numpy as np\n'), ((981, 993), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (990, 993), True, 'import numpy as np\n'), ((1442, 1454), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', 
(1451, 1454), True, 'import numpy as np\n'), ((1663, 1675), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (1672, 1675), True, 'import numpy as np\n'), ((1719, 1729), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (1726, 1729), True, 'import numpy as np\n'), ((2245, 2257), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (2254, 2257), True, 'import numpy as np\n'), ((2302, 2314), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (2311, 2314), True, 'import numpy as np\n'), ((2629, 2641), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (2638, 2641), True, 'import numpy as np\n'), ((2686, 2698), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (2695, 2698), True, 'import numpy as np\n'), ((3111, 3123), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (3120, 3123), True, 'import numpy as np\n'), ((3684, 3696), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (3693, 3696), True, 'import numpy as np\n'), ((4475, 4487), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (4484, 4487), True, 'import numpy as np\n'), ((4911, 4923), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (4920, 4923), True, 'import numpy as np\n'), ((5293, 5307), 'numpy.arange', 'np.arange', (['(4.0)'], {}), '(4.0)\n', (5302, 5307), True, 'import numpy as np\n'), ((5344, 5358), 'numpy.arange', 'np.arange', (['(4.0)'], {}), '(4.0)\n', (5353, 5358), True, 'import numpy as np\n'), ((5761, 5773), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (5770, 5773), True, 'import numpy as np\n'), ((6089, 6101), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (6098, 6101), True, 'import numpy as np\n'), ((6689, 6705), 'scipp.Variable', 'sc.Variable', (['(1.0)'], {}), '(1.0)\n', (6700, 6705), True, 'import scipp as sc\n'), ((6837, 6902), 'scipp.DataArray', 'sc.DataArray', (['(1.0 * sc.units.one)'], {'attrs': "{'x': 1.0 * sc.units.one}"}), "(1.0 * sc.units.one, attrs={'x': 1.0 * sc.units.one})\n", (6849, 6902), True, 'import scipp as sc\n'), ((6925, 6941), 
'scipp.Variable', 'sc.Variable', (['(1.0)'], {}), '(1.0)\n', (6936, 6941), True, 'import scipp as sc\n'), ((7063, 7078), 'numpy.arange', 'np.arange', (['(11.0)'], {}), '(11.0)\n', (7072, 7078), True, 'import numpy as np\n'), ((7119, 7134), 'numpy.arange', 'np.arange', (['(11.0)'], {}), '(11.0)\n', (7128, 7134), True, 'import numpy as np\n'), ((7175, 7190), 'numpy.arange', 'np.arange', (['(11.0)'], {}), '(11.0)\n', (7184, 7190), True, 'import numpy as np\n'), ((7698, 7727), 'numpy.arange', 'np.arange', (['(501.0)', '(600.0)', '(10.0)'], {}), '(501.0, 600.0, 10.0)\n', (7707, 7727), True, 'import numpy as np\n'), ((7996, 8011), 'numpy.arange', 'np.arange', (['(11.0)'], {}), '(11.0)\n', (8005, 8011), True, 'import numpy as np\n'), ((8227, 8243), 'scipp.Variable', 'sc.Variable', (['(1.0)'], {}), '(1.0)\n', (8238, 8243), True, 'import scipp as sc\n'), ((8441, 8457), 'scipp.Variable', 'sc.Variable', (['(1.0)'], {}), '(1.0)\n', (8452, 8457), True, 'import scipp as sc\n'), ((9471, 9485), 'scipp.Variable', 'sc.Variable', (['(3)'], {}), '(3)\n', (9482, 9485), True, 'import scipp as sc\n'), ((10098, 10141), 'numpy.array', 'np.array', (['[False, True, False, True, False]'], {}), '([False, True, False, True, False])\n', (10106, 10141), True, 'import numpy as np\n'), ((10551, 10561), 'scipp.sum', 'sc.sum', (['da'], {}), '(da)\n', (10557, 10561), True, 'import scipp as sc\n'), ((10629, 10639), 'scipp.sum', 'sc.sum', (['ds'], {}), '(ds)\n', (10635, 10639), True, 'import scipp as sc\n'), ((10958, 11001), 'numpy.array', 'np.array', (['[False, True, False, True, False]'], {}), '([False, True, False, True, False])\n', (10966, 11001), True, 'import numpy as np\n'), ((11512, 11525), 'scipp.nansum', 'sc.nansum', (['da'], {}), '(da)\n', (11521, 11525), True, 'import scipp as sc\n'), ((11595, 11608), 'scipp.nansum', 'sc.nansum', (['ds'], {}), '(ds)\n', (11604, 11608), True, 'import scipp as sc\n'), ((11905, 11948), 'numpy.array', 'np.array', (['[False, True, False, True, False]'], {}), 
'([False, True, False, True, False])\n', (11913, 11948), True, 'import numpy as np\n'), ((12021, 12037), 'scipp.Variable', 'sc.Variable', (['(2.0)'], {}), '(2.0)\n', (12032, 12037), True, 'import scipp as sc\n'), ((12064, 12079), 'scipp.mean', 'sc.mean', (['d', '"""x"""'], {}), "(d, 'x')\n", (12071, 12079), True, 'import scipp as sc\n'), ((12122, 12140), 'scipp.nanmean', 'sc.nanmean', (['d', '"""x"""'], {}), "(d, 'x')\n", (12132, 12140), True, 'import scipp as sc\n'), ((12318, 12359), 'numpy.array', 'np.array', (['[[False, False], [True, False]]'], {}), '([[False, False], [True, False]])\n', (12326, 12359), True, 'import numpy as np\n'), ((12717, 12758), 'numpy.array', 'np.array', (['[[False, False], [True, False]]'], {}), '([[False, False], [True, False]])\n', (12725, 12758), True, 'import numpy as np\n'), ((13914, 13930), 'scipp.Variable', 'sc.Variable', (['(1.0)'], {}), '(1.0)\n', (13925, 13930), True, 'import scipp as sc\n'), ((14439, 14455), 'scipp.Variable', 'sc.Variable', (['(1.0)'], {}), '(1.0)\n', (14450, 14455), True, 'import scipp as sc\n'), ((15221, 15236), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (15230, 15236), True, 'import numpy as np\n'), ((15273, 15288), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (15282, 15288), True, 'import numpy as np\n'), ((15336, 15357), 'numpy.arange', 'np.arange', (['(10.0)', '(20.0)'], {}), '(10.0, 20.0)\n', (15345, 15357), True, 'import numpy as np\n'), ((15414, 15429), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (15423, 15429), True, 'import numpy as np\n'), ((15669, 15683), 'numpy.arange', 'np.arange', (['(1.0)'], {}), '(1.0)\n', (15678, 15683), True, 'import numpy as np\n'), ((15727, 15741), 'numpy.arange', 'np.arange', (['(1.0)'], {}), '(1.0)\n', (15736, 15741), True, 'import numpy as np\n'), ((15796, 15817), 'numpy.arange', 'np.arange', (['(10.0)', '(11.0)'], {}), '(10.0, 11.0)\n', (15805, 15817), True, 'import numpy as np\n'), ((15881, 15895), 'numpy.arange', 
'np.arange', (['(1.0)'], {}), '(1.0)\n', (15890, 15895), True, 'import numpy as np\n'), ((17028, 17066), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': '[10.0]'}), "(dims=['x'], values=[10.0])\n", (17039, 17066), True, 'import scipp as sc\n'), ((17313, 17351), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': '[10.0]'}), "(dims=['x'], values=[10.0])\n", (17324, 17351), True, 'import scipp as sc\n'), ((18517, 18533), 'scipp.Variable', 'sc.Variable', (['(1.0)'], {}), '(1.0)\n', (18528, 18533), True, 'import scipp as sc\n'), ((19017, 19029), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (19026, 19029), True, 'import numpy as np\n'), ((19457, 19499), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': "['b', 'a']"}), "(dims=['x'], values=['b', 'a'])\n", (19468, 19499), True, 'import scipp as sc\n'), ((20086, 20128), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': "['a', 'b']"}), "(dims=['x'], values=['a', 'b'])\n", (20097, 20128), True, 'import scipp as sc\n'), ((20860, 20872), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (20869, 20872), True, 'import numpy as np\n'), ((21127, 21139), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (21136, 21139), True, 'import numpy as np\n'), ((21265, 21280), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (21274, 21280), True, 'import numpy as np\n'), ((21520, 21549), 'numpy.array', 'np.array', (['[True, True, False]'], {}), '([True, True, False])\n', (21528, 21549), True, 'import numpy as np\n'), ((21810, 21829), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (21818, 21829), True, 'import numpy as np\n'), ((21918, 21937), 'numpy.array', 'np.array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (21926, 21937), True, 'import numpy as np\n'), ((22090, 22110), 'numpy.array', 'np.array', (['(10 * [1.0])'], {}), '(10 * [1.0])\n', (22098, 22110), True, 'import numpy as np\n'), ((22220, 22235), 'numpy.arange', 'np.arange', 
(['(11.0)'], {}), '(11.0)\n', (22229, 22235), True, 'import numpy as np\n'), ((22284, 22305), 'numpy.arange', 'np.arange', (['(0.0)', '(11)', '(2)'], {}), '(0.0, 11, 2)\n', (22293, 22305), True, 'import numpy as np\n'), ((520, 538), 'scipp.scalar', 'sc.scalar', ([], {'value': '(1)'}), '(value=1)\n', (529, 538), True, 'import scipp as sc\n'), ((4591, 4606), 'numpy.arange', 'np.arange', (['(2)', '(4)'], {}), '(2, 4)\n', (4600, 4606), True, 'import numpy as np\n'), ((4718, 4733), 'numpy.arange', 'np.arange', (['(6)', '(8)'], {}), '(6, 8)\n', (4727, 4733), True, 'import numpy as np\n'), ((5053, 5075), 'numpy.array', 'np.array', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (5061, 5075), True, 'import numpy as np\n'), ((5904, 5926), 'numpy.array', 'np.array', (['[0, 3, 2, 3]'], {}), '([0, 3, 2, 3])\n', (5912, 5926), True, 'import numpy as np\n'), ((6319, 6341), 'numpy.array', 'np.array', (['[0, 1, 4, 6]'], {}), '([0, 1, 4, 6])\n', (6327, 6341), True, 'import numpy as np\n'), ((9803, 9818), 'scipp.mean', 'sc.mean', (['d', '"""y"""'], {}), "(d, 'y')\n", (9810, 9818), True, 'import scipp as sc\n'), ((10227, 10238), 'numpy.int64', 'np.int64', (['(6)'], {}), '(6)\n', (10235, 10238), True, 'import numpy as np\n'), ((11087, 11100), 'numpy.float64', 'np.float64', (['(2)'], {}), '(2)\n', (11097, 11100), True, 'import numpy as np\n'), ((11449, 11459), 'scipp.sum', 'sc.sum', (['da'], {}), '(da)\n', (11455, 11459), True, 'import scipp as sc\n'), ((12431, 12441), 'scipp.sum', 'sc.sum', (['da'], {}), '(da)\n', (12437, 12441), True, 'import scipp as sc\n'), ((12484, 12495), 'scipp.mean', 'sc.mean', (['da'], {}), '(da)\n', (12491, 12495), True, 'import scipp as sc\n'), ((12830, 12843), 'scipp.nansum', 'sc.nansum', (['da'], {}), '(da)\n', (12839, 12843), True, 'import scipp as sc\n'), ((12894, 12905), 'scipp.mean', 'sc.mean', (['da'], {}), '(da)\n', (12901, 12905), True, 'import scipp as sc\n'), ((17101, 17137), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': '[10]'}), 
"(dims=['x'], values=[10])\n", (17112, 17137), True, 'import scipp as sc\n'), ((17386, 17422), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': '[10]'}), "(dims=['x'], values=[10])\n", (17397, 17422), True, 'import scipp as sc\n'), ((20174, 20233), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': '[1.0, 0.0]', 'unit': 'sc.units.m'}), "(dims=['x'], values=[1.0, 0.0], unit=sc.units.m)\n", (20185, 20233), True, 'import scipp as sc\n'), ((20394, 20436), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': '[1.0, 0.0]'}), "(dims=['x'], values=[1.0, 0.0])\n", (20405, 20436), True, 'import scipp as sc\n'), ((1040, 1053), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (1049, 1053), True, 'import numpy as np\n'), ((6654, 6669), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (6663, 6669), True, 'import numpy as np\n'), ((8192, 8207), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (8201, 8207), True, 'import numpy as np\n'), ((8406, 8421), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (8415, 8421), True, 'import numpy as np\n'), ((8830, 8858), 'numpy.arange', 'np.arange', (['(3)'], {'dtype': 'np.int64'}), '(3, dtype=np.int64)\n', (8839, 8858), True, 'import numpy as np\n'), ((9418, 9451), 'numpy.array', 'np.array', (['[3, 12]'], {'dtype': 'np.int64'}), '([3, 12], dtype=np.int64)\n', (9426, 9451), True, 'import numpy as np\n'), ((9955, 9996), 'numpy.array', 'np.array', (['[1, 5, 4, 5, 1]'], {'dtype': 'np.int64'}), '([1, 5, 4, 5, 1], dtype=np.int64)\n', (9963, 9996), True, 'import numpy as np\n'), ((10767, 10820), 'numpy.array', 'np.array', (['[1, 5, np.nan, np.nan, 1]'], {'dtype': 'np.float64'}), '([1, 5, np.nan, np.nan, 1], dtype=np.float64)\n', (10775, 10820), True, 'import numpy as np\n'), ((11734, 11759), 'numpy.array', 'np.array', (['[1, 5, 4, 5, 1]'], {}), '([1, 5, 4, 5, 1])\n', (11742, 11759), True, 'import numpy as np\n'), ((12223, 12237), 'numpy.arange', 'np.arange', 
(['(4.0)'], {}), '(4.0)\n', (12232, 12237), True, 'import numpy as np\n'), ((12583, 12597), 'numpy.arange', 'np.arange', (['(4.0)'], {}), '(4.0)\n', (12592, 12597), True, 'import numpy as np\n'), ((13011, 13030), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (13019, 13030), True, 'import numpy as np\n'), ((13091, 13110), 'numpy.array', 'np.array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (13099, 13110), True, 'import numpy as np\n'), ((13323, 13341), 'numpy.array', 'np.array', (['[11, 12]'], {}), '([11, 12])\n', (13331, 13341), True, 'import numpy as np\n'), ((13485, 13503), 'numpy.array', 'np.array', (['[13, 14]'], {}), '([13, 14])\n', (13493, 13503), True, 'import numpy as np\n'), ((13874, 13894), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)'], {}), '(2, 3)\n', (13888, 13894), True, 'import numpy as np\n'), ((14399, 14419), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)'], {}), '(2, 3)\n', (14413, 14419), True, 'import numpy as np\n'), ((16039, 16054), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (16048, 16054), True, 'import numpy as np\n'), ((16307, 16327), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (16316, 16327), True, 'import numpy as np\n'), ((16666, 16681), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (16675, 16681), True, 'import numpy as np\n'), ((17653, 17668), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (17662, 17668), True, 'import numpy as np\n'), ((17715, 17726), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (17722, 17726), True, 'import numpy as np\n'), ((17926, 17941), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (17935, 17941), True, 'import numpy as np\n'), ((17992, 18003), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (17999, 18003), True, 'import numpy as np\n'), ((18477, 18497), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)'], {}), '(2, 3)\n', (18491, 18497), True, 'import numpy as np\n'), ((23218, 23230), 
'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (23227, 23230), True, 'import numpy as np\n'), ((23291, 23303), 'numpy.arange', 'np.arange', (['M'], {}), '(M)\n', (23300, 23303), True, 'import numpy as np\n'), ((4412, 4427), 'numpy.arange', 'np.arange', (['(4)', '(8)'], {}), '(4, 8)\n', (4421, 4427), True, 'import numpy as np\n'), ((4848, 4863), 'numpy.arange', 'np.arange', (['(4)', '(8)'], {}), '(4, 8)\n', (4857, 4863), True, 'import numpy as np\n'), ((5205, 5220), 'numpy.arange', 'np.arange', (['(4)', '(8)'], {}), '(4, 8)\n', (5214, 5220), True, 'import numpy as np\n'), ((5698, 5713), 'numpy.arange', 'np.arange', (['(4)', '(8)'], {}), '(4, 8)\n', (5707, 5713), True, 'import numpy as np\n'), ((6026, 6041), 'numpy.arange', 'np.arange', (['(4)', '(8)'], {}), '(4, 8)\n', (6035, 6041), True, 'import numpy as np\n'), ((6769, 6784), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (6778, 6784), True, 'import numpy as np\n'), ((8307, 8322), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (8316, 8322), True, 'import numpy as np\n'), ((8521, 8536), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (8530, 8536), True, 'import numpy as np\n'), ((8948, 8976), 'numpy.arange', 'np.arange', (['(2)'], {'dtype': 'np.int64'}), '(2, dtype=np.int64)\n', (8957, 8976), True, 'import numpy as np\n'), ((9039, 9067), 'numpy.arange', 'np.arange', (['(3)'], {'dtype': 'np.int64'}), '(3, dtype=np.int64)\n', (9048, 9067), True, 'import numpy as np\n'), ((9266, 9294), 'numpy.arange', 'np.arange', (['(2)'], {'dtype': 'np.int64'}), '(2, dtype=np.int64)\n', (9275, 9294), True, 'import numpy as np\n'), ((9562, 9590), 'numpy.arange', 'np.arange', (['(2)'], {'dtype': 'np.int64'}), '(2, dtype=np.int64)\n', (9571, 9590), True, 'import numpy as np\n'), ((9642, 9670), 'numpy.arange', 'np.arange', (['(2)'], {'dtype': 'np.int64'}), '(2, dtype=np.int64)\n', (9651, 9670), True, 'import numpy as np\n'), ((9743, 9758), 'scipp.mean', 'sc.mean', (['d', '"""y"""'], {}), "(d, 
'y')\n", (9750, 9758), True, 'import scipp as sc\n'), ((10395, 10406), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (10402, 10406), True, 'import numpy as np\n'), ((11263, 11274), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (11270, 11274), True, 'import numpy as np\n'), ((13397, 13413), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (13405, 13413), True, 'import numpy as np\n'), ((13559, 13575), 'numpy.array', 'np.array', (['[3, 4]'], {}), '([3, 4])\n', (13567, 13575), True, 'import numpy as np\n'), ((14036, 14050), 'numpy.arange', 'np.arange', (['(2.0)'], {}), '(2.0)\n', (14045, 14050), True, 'import numpy as np\n'), ((14176, 14190), 'numpy.arange', 'np.arange', (['(3.0)'], {}), '(3.0)\n', (14185, 14190), True, 'import numpy as np\n'), ((14289, 14301), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (14298, 14301), True, 'import numpy as np\n'), ((14561, 14575), 'numpy.arange', 'np.arange', (['(2.0)'], {}), '(2.0)\n', (14570, 14575), True, 'import numpy as np\n'), ((14701, 14715), 'numpy.arange', 'np.arange', (['(3.0)'], {}), '(3.0)\n', (14710, 14715), True, 'import numpy as np\n'), ((14814, 14826), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (14823, 14826), True, 'import numpy as np\n'), ((16110, 16125), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (16119, 16125), True, 'import numpy as np\n'), ((16382, 16402), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (16391, 16402), True, 'import numpy as np\n'), ((16737, 16752), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (16746, 16752), True, 'import numpy as np\n'), ((18068, 18081), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (18077, 18081), True, 'import numpy as np\n'), ((18642, 18656), 'numpy.arange', 'np.arange', (['(2.0)'], {}), '(2.0)\n', (18651, 18656), True, 'import numpy as np\n'), ((18786, 18800), 'numpy.arange', 'np.arange', (['(3.0)'], {}), '(3.0)\n', (18795, 18800), True, 'import numpy as np\n'), 
((18901, 18918), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (18915, 18918), True, 'import numpy as np\n'), ((19605, 19619), 'numpy.arange', 'np.arange', (['(2.0)'], {}), '(2.0)\n', (19614, 19619), True, 'import numpy as np\n'), ((19745, 19759), 'numpy.arange', 'np.arange', (['(3.0)'], {}), '(3.0)\n', (19754, 19759), True, 'import numpy as np\n'), ((19858, 19872), 'numpy.arange', 'np.arange', (['(2.0)'], {}), '(2.0)\n', (19867, 19872), True, 'import numpy as np\n'), ((20312, 20326), 'numpy.arange', 'np.arange', (['(3.0)'], {}), '(3.0)\n', (20321, 20326), True, 'import numpy as np\n'), ((23335, 23351), 'numpy.arange', 'np.arange', (['(N * M)'], {}), '(N * M)\n', (23344, 23351), True, 'import numpy as np\n'), ((7316, 7333), 'numpy.arange', 'np.arange', (['(1000.0)'], {}), '(1000.0)\n', (7325, 7333), True, 'import numpy as np\n'), ((7445, 7470), 'numpy.arange', 'np.arange', (['(0.0)', '(10.0)', '(0.1)'], {}), '(0.0, 10.0, 0.1)\n', (7454, 7470), True, 'import numpy as np\n'), ((8725, 8753), 'numpy.arange', 'np.arange', (['(6)'], {'dtype': 'np.int64'}), '(6, dtype=np.int64)\n', (8734, 8753), True, 'import numpy as np\n'), ((19411, 19423), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (19420, 19423), True, 'import numpy as np\n'), ((9160, 9188), 'numpy.arange', 'np.arange', (['(6)'], {'dtype': 'np.int64'}), '(6, dtype=np.int64)\n', (9169, 9188), True, 'import numpy as np\n'), ((20019, 20031), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (20028, 20031), True, 'import numpy as np\n')] |
import numpy as np
try:
import PyQt4.QtGui as QtGui
import PyQt4.QtCore as QtCore
except:
import PyQt5.QtGui as QtGui
import PyQt5.QtCore as QtCore
class SetFittingVariablesHandler(object):
    """
    Drive the "set fitting variables" UI: render the 2D table of a selected
    fitting variable (d_spacing, sigma, alpha, a1, a2, a5, a6) with a green
    color scale plus a legend, and push user-entered values back into the
    parent's ``table_dictionary``.

    NOTE(review): ``parent`` is assumed to be the main application window
    owning ``table_dictionary``, ``fitting_selection`` and the
    ``fitting_set_variables_ui`` widgets — confirm against callers.
    """

    # Number of rows shown in the colorscale legend table.
    colorscale_nbr_row = 15
    # Pixel size of each legend cell.
    colorscale_cell_size = {'width': 75,
                            'height': 30}

    def __init__(self, parent=None):
        self.parent = parent

    def get_variable_selected(self):
        """Return the name of the variable whose radio button is checked.

        Returns ``None`` when no button is checked (should not happen with
        a proper radio-button group).
        """
        ui = self.parent.fitting_set_variables_ui.ui
        if ui.d_spacing_button.isChecked():
            return 'd_spacing'
        elif ui.sigma_button.isChecked():
            return 'sigma'
        elif ui.alpha_button.isChecked():
            return 'alpha'
        elif ui.a1_button.isChecked():
            return 'a1'
        elif ui.a2_button.isChecked():
            return 'a2'
        elif ui.a5_button.isChecked():
            return 'a5'
        elif ui.a6_button.isChecked():
            return 'a6'

    def populate_table_with_variable(self, variable='d_spacing'):
        """Fill the variable table with the 2D values of *variable*.

        Each cell is colored on a green scale between the array min/max.
        Locked bins get a red radial highlight, active bins a blue one and
        fixed bins a white one; values above the mid point get a white
        foreground for readability.
        """
        array_2d_values = self.create_array_of_variable(variable=variable)

        # retrieve min and max values (undefined when any bin is NaN)
        if np.isnan(array_2d_values).any():
            # np.nan, not np.NaN: the upper-case alias was removed in numpy 2.0
            min_value = np.nan
            max_value = np.nan
            mid_point = np.nan
        else:
            min_value = np.nanmin(array_2d_values)
            max_value = np.nanmax(array_2d_values)
            mid_point = np.mean([min_value, max_value])

        # define colorscale legend table
        self.initialize_colorscale_table(min_value=min_value, max_value=max_value)

        [nbr_row, nbr_column] = np.shape(array_2d_values)
        for _row in np.arange(nbr_row):
            for _col in np.arange(nbr_column):
                _value = array_2d_values[_row, _col]
                _color = self.get_color_for_this_value(min_value=min_value,
                                                       max_value=max_value,
                                                       value=_value)
                _item = QtGui.QTableWidgetItem(str(_value))
                # setBackground()/setForeground() exist in both PyQt4 and
                # PyQt5; setBackgroundColor()/setTextColor() are PyQt4-only
                # and crash under the PyQt5 fallback import.
                _item.setBackground(_color)

                # bins are stored column-major in table_dictionary
                bin_index = _row + nbr_row * _col

                if self.is_bin_locked(bin_index=bin_index):
                    # red radial highlight for locked bins
                    _gradient = QtGui.QRadialGradient(10, 10, 10, 20, 20)
                    _gradient.setColorAt(1, _color)
                    if self.is_bin_fixed(bin_index=bin_index, variable_name=variable):
                        _gradient.setColorAt(0.5, QtGui.QColor(255, 255, 255))
                    _gradient.setColorAt(0, QtGui.QColor(255, 0, 0, alpha=255))
                    _item.setBackground(QtGui.QBrush(_gradient))
                elif self.is_bin_activated(bin_index=bin_index):
                    # blue radial highlight for the active bin
                    _gradient = QtGui.QRadialGradient(10, 10, 10, 20, 20)
                    _gradient.setColorAt(1, _color)
                    if self.is_bin_fixed(bin_index=bin_index, variable_name=variable):
                        _gradient.setColorAt(0.5, QtGui.QColor(255, 255, 255))
                    _gradient.setColorAt(0, QtGui.QColor(0, 0, 255, alpha=255))
                    _item.setBackground(QtGui.QBrush(_gradient))
                elif self.is_bin_fixed(bin_index=bin_index, variable_name=variable):
                    # white radial highlight for fixed (unlocked, inactive) bins
                    _gradient = QtGui.QRadialGradient(10, 10, 10, 20, 20)
                    _gradient.setColorAt(1, _color)
                    _gradient.setColorAt(0, QtGui.QColor(255, 255, 255))
                    _item.setBackground(QtGui.QBrush(_gradient))

                if _value > mid_point:
                    # dark cell background -> switch to white text
                    _item.setForeground(QtGui.QColor(255, 255, 255, alpha=255))

                self.parent.fitting_set_variables_ui.ui.variable_table.setItem(_row, _col, _item)

    def is_bin_fixed(self, bin_index=0, variable_name='d_spacing'):
        """Return True when *variable_name* is flagged 'fixed' for this bin."""
        table_dictionary = self.parent.table_dictionary
        return table_dictionary[str(bin_index)][variable_name]['fixed']

    def is_bin_locked(self, bin_index=0):
        """Return True when this bin is locked against editing."""
        table_dictionary = self.parent.table_dictionary
        return table_dictionary[str(bin_index)]['lock']

    def is_bin_activated(self, bin_index=0):
        """Return True when this bin is part of the active selection."""
        table_dictionary = self.parent.table_dictionary
        return table_dictionary[str(bin_index)]['active']

    def clear_colorscale_table(self):
        """Remove every row from the colorscale legend table."""
        colorscale_table = self.parent.fitting_set_variables_ui.ui.colorscale_table
        nbr_row = colorscale_table.rowCount()
        for _row in np.arange(nbr_row):
            colorscale_table.removeRow(0)

    def initialize_colorscale_table(self, min_value=0, max_value=1):
        """Rebuild the colorscale legend from max (top) down to min (bottom).

        A single row is shown when min == max or the range is undefined
        (NaN step).
        """
        self.clear_colorscale_table()
        nbr_row = self.colorscale_nbr_row
        step = (float(max_value) - float(min_value)) / (nbr_row - 1)
        # plain floor division: np.int() was removed in numpy 1.24
        mid_point = nbr_row // 2
        _row = 0
        if min_value == max_value:
            nbr_row = 1
        if np.isnan(step):
            nbr_row = 1
        colorscale_table = self.parent.fitting_set_variables_ui.ui.colorscale_table
        for _index in np.arange(nbr_row - 1, -1, -1):
            colorscale_table.insertRow(_row)
            colorscale_table.setRowHeight(_row,
                                          self.colorscale_cell_size['height'])
            # NOTE(review): setColumnWidth is called with the row counter as
            # the column index (effective only for _row == 0 in this
            # single-column table) — kept as in the original behavior.
            colorscale_table.setColumnWidth(_row,
                                            self.colorscale_cell_size['width'])
            if np.isnan(step):
                _value = np.nan
            else:
                _value = min_value + _index * step
            _color = self.get_color_for_this_value(min_value=min_value,
                                                   max_value=max_value,
                                                   value=_value)
            if np.isnan(_value):
                _item = QtGui.QTableWidgetItem("nan")
            else:
                _item = QtGui.QTableWidgetItem("{:04.2f}".format(_value))
            _item.setBackground(_color)
            _item.setTextAlignment(QtCore.Qt.AlignRight)
            if (_row < mid_point) and (nbr_row != 1):
                # top (dark) half of the legend -> white text
                _foreground_color = QtGui.QColor(255, 255, 255, alpha=255)
                _item.setForeground(_foreground_color)
            colorscale_table.setItem(_row, 0, _item)
            _row += 1

    def get_color_for_this_value(self, min_value=0, max_value=1, value=0):
        """Map *value* to a QColor on a green scale.

        NaN -> semi-transparent white, degenerate range -> red, otherwise
        green intensity grows as *value* approaches *min_value*.
        """
        if np.isnan(value):
            return QtGui.QColor(255, 255, 255, alpha=100)  # white
        elif max_value == min_value:
            return QtGui.QColor(250, 0, 0, alpha=255)  # red
        _ratio = 1 - (float(value) - float(min_value)) / (float(max_value) - float(min_value))
        # PyQt5 rejects float channel values; truncate to int explicitly
        return QtGui.QColor(0, int(_ratio * 255), 0, alpha=255)

    def create_array_of_variable(self, variable='d_spacing'):
        """Collect *variable* values from table_dictionary into a 2D array.

        Returns:
            float ndarray of shape (nbr_row, nbr_column), as given by the
            parent's ``fitting_selection``.
        """
        table_dictionary = self.parent.table_dictionary
        _table_selection = self.parent.fitting_selection
        nbr_column = _table_selection['nbr_column']
        nbr_row = _table_selection['nbr_row']
        _array = np.zeros((nbr_row, nbr_column), dtype=float)
        for _entry in table_dictionary.keys():
            row_index = table_dictionary[_entry]['row_index']
            column_index = table_dictionary[_entry]['column_index']
            value = table_dictionary[_entry][variable]['val']
            _array[row_index, column_index] = value
        return _array

    def set_new_value_to_selected_bins(self, selection=None,
                                       variable_name='d_spacing',
                                       variable_value=0,
                                       table_nbr_row=0):
        """Apply *variable_value* to every unlocked bin in *selection*,
        clear the table selection, then refresh the display.

        Args:
            selection: list of QTableWidgetSelectionRange objects
                (defaults to no selection).
            variable_name: which fitting variable to update.
            variable_value: new value (stored as float); the stored error
                is reset to NaN.
            table_nbr_row: number of table rows; bins are stored
                column-major (index = row + col * nbr_row).
        """
        if selection is None:  # avoid the mutable-default-argument pitfall
            selection = []
        table_dictionary = self.parent.table_dictionary
        nbr_row = table_nbr_row
        for _select in selection:
            _left_column = _select.leftColumn()
            _right_column = _select.rightColumn()
            _top_row = _select.topRow()
            _bottom_row = _select.bottomRow()

            for _row in np.arange(_top_row, _bottom_row + 1):
                for _col in np.arange(_left_column, _right_column + 1):
                    _index = str(_row + _col * nbr_row)
                    if not table_dictionary[_index]['lock']:
                        table_dictionary[_index][variable_name]['val'] = float(variable_value)
                        table_dictionary[_index][variable_name]['err'] = np.nan

            self.parent.fitting_set_variables_ui.ui.variable_table.setRangeSelected(_select, False)

        self.parent.table_dictionary = table_dictionary
        self.populate_table_with_variable(variable=variable_name)
"numpy.mean",
"PyQt5.QtGui.QTableWidgetItem",
"PyQt5.QtGui.QColor",
"PyQt5.QtGui.QBrush",
"PyQt5.QtGui.QRadialGradient",
"numpy.zeros",
"numpy.isnan",
"numpy.nanmax",
"numpy.nanmin",
"numpy.shape",
"numpy.int",
"numpy.arange"
] | [((1834, 1859), 'numpy.shape', 'np.shape', (['array_2d_values'], {}), '(array_2d_values)\n', (1842, 1859), True, 'import numpy as np\n'), ((1880, 1898), 'numpy.arange', 'np.arange', (['nbr_row'], {}), '(nbr_row)\n', (1889, 1898), True, 'import numpy as np\n'), ((4770, 4788), 'numpy.arange', 'np.arange', (['nbr_row'], {}), '(nbr_row)\n', (4779, 4788), True, 'import numpy as np\n'), ((5108, 5127), 'numpy.int', 'np.int', (['(nbr_row / 2)'], {}), '(nbr_row / 2)\n', (5114, 5127), True, 'import numpy as np\n'), ((5235, 5249), 'numpy.isnan', 'np.isnan', (['step'], {}), '(step)\n', (5243, 5249), True, 'import numpy as np\n'), ((5306, 5336), 'numpy.arange', 'np.arange', (['(nbr_row - 1)', '(-1)', '(-1)'], {}), '(nbr_row - 1, -1, -1)\n', (5315, 5336), True, 'import numpy as np\n'), ((6951, 6966), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (6959, 6966), True, 'import numpy as np\n'), ((7249, 7292), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(0)', '(_ratio * 255)', '(0)'], {'alpha': '(255)'}), '(0, _ratio * 255, 0, alpha=255)\n', (7261, 7292), True, 'import PyQt5.QtGui as QtGui\n'), ((7609, 7653), 'numpy.zeros', 'np.zeros', (['(nbr_row, nbr_column)'], {'dtype': 'float'}), '((nbr_row, nbr_column), dtype=float)\n', (7617, 7653), True, 'import numpy as np\n'), ((1533, 1559), 'numpy.nanmin', 'np.nanmin', (['array_2d_values'], {}), '(array_2d_values)\n', (1542, 1559), True, 'import numpy as np\n'), ((1584, 1610), 'numpy.nanmax', 'np.nanmax', (['array_2d_values'], {}), '(array_2d_values)\n', (1593, 1610), True, 'import numpy as np\n'), ((1635, 1666), 'numpy.mean', 'np.mean', (['[min_value, max_value]'], {}), '([min_value, max_value])\n', (1642, 1666), True, 'import numpy as np\n'), ((1924, 1945), 'numpy.arange', 'np.arange', (['nbr_column'], {}), '(nbr_column)\n', (1933, 1945), True, 'import numpy as np\n'), ((5858, 5872), 'numpy.isnan', 'np.isnan', (['step'], {}), '(step)\n', (5866, 5872), True, 'import numpy as np\n'), ((6206, 6222), 'numpy.isnan', 'np.isnan', 
(['_value'], {}), '(_value)\n', (6214, 6222), True, 'import numpy as np\n'), ((6987, 7025), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {'alpha': '(100)'}), '(255, 255, 255, alpha=100)\n', (6999, 7025), True, 'import PyQt5.QtGui as QtGui\n'), ((8574, 8610), 'numpy.arange', 'np.arange', (['_top_row', '(_bottom_row + 1)'], {}), '(_top_row, _bottom_row + 1)\n', (8583, 8610), True, 'import numpy as np\n'), ((1369, 1394), 'numpy.isnan', 'np.isnan', (['array_2d_values'], {}), '(array_2d_values)\n', (1377, 1394), True, 'import numpy as np\n'), ((6248, 6277), 'PyQt5.QtGui.QTableWidgetItem', 'QtGui.QTableWidgetItem', (['"""nan"""'], {}), "('nan')\n", (6270, 6277), True, 'import PyQt5.QtGui as QtGui\n'), ((6622, 6660), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {'alpha': '(255)'}), '(255, 255, 255, alpha=255)\n', (6634, 6660), True, 'import PyQt5.QtGui as QtGui\n'), ((7090, 7124), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(250)', '(0)', '(0)'], {'alpha': '(255)'}), '(250, 0, 0, alpha=255)\n', (7102, 7124), True, 'import PyQt5.QtGui as QtGui\n'), ((8638, 8680), 'numpy.arange', 'np.arange', (['_left_column', '(_right_column + 1)'], {}), '(_left_column, _right_column + 1)\n', (8647, 8680), True, 'import numpy as np\n'), ((2497, 2538), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(10)', '(10)', '(10)', '(20)', '(20)'], {}), '(10, 10, 10, 20, 20)\n', (2518, 2538), True, 'import PyQt5.QtGui as QtGui\n'), ((3897, 3935), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {'alpha': '(255)'}), '(255, 255, 255, alpha=255)\n', (3909, 3935), True, 'import PyQt5.QtGui as QtGui\n'), ((2803, 2837), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(0)', '(0)'], {'alpha': '(255)'}), '(255, 0, 0, alpha=255)\n', (2815, 2837), True, 'import PyQt5.QtGui as QtGui\n'), ((2877, 2900), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['_gradient'], {}), '(_gradient)\n', (2889, 2900), True, 'import PyQt5.QtGui as QtGui\n'), ((3001, 
3042), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(10)', '(10)', '(10)', '(20)', '(20)'], {}), '(10, 10, 10, 20, 20)\n', (3022, 3042), True, 'import PyQt5.QtGui as QtGui\n'), ((2730, 2757), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (2742, 2757), True, 'import PyQt5.QtGui as QtGui\n'), ((3307, 3341), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(0)', '(0)', '(255)'], {'alpha': '(255)'}), '(0, 0, 255, alpha=255)\n', (3319, 3341), True, 'import PyQt5.QtGui as QtGui\n'), ((3383, 3406), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['_gradient'], {}), '(_gradient)\n', (3395, 3406), True, 'import PyQt5.QtGui as QtGui\n'), ((3555, 3596), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(10)', '(10)', '(10)', '(20)', '(20)'], {}), '(10, 10, 10, 20, 20)\n', (3576, 3596), True, 'import PyQt5.QtGui as QtGui\n'), ((3234, 3261), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (3246, 3261), True, 'import PyQt5.QtGui as QtGui\n'), ((3701, 3728), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (3713, 3728), True, 'import PyQt5.QtGui as QtGui\n'), ((3774, 3797), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['_gradient'], {}), '(_gradient)\n', (3786, 3797), True, 'import PyQt5.QtGui as QtGui\n')] |
"""
Utility functions for dealing with Human3.6M dataset.
Some functions are adapted from https://github.com/una-dinosauria/3d-pose-baseline
"""
import os
import numpy as np
import copy
import logging
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import libs.dataset.h36m.cameras as cameras
import libs.dataset.h36m.pth_dataset as dataset
# Human3.6M subject IDs used for the training / testing split.
TRAIN_SUBJECTS = [1, 5, 6, 7, 8]
TEST_SUBJECTS = [9, 11]
# Represent 3D regression targets in the camera coordinate system.
camera_frame = True

# H3.6M skeleton: the raw data carries 32 joints, but only 17 of them move.
# Slots that do not correspond to a moving joint keep the empty string.
H36M_NAMES = [''] * 32
_H36M_MOVING_JOINTS = {
    0: 'Hip',
    1: 'RHip',
    2: 'RKnee',
    3: 'RFoot',
    6: 'LHip',
    7: 'LKnee',
    8: 'LFoot',
    12: 'Spine',
    13: 'Thorax',
    14: 'Neck/Nose',
    15: 'Head',
    17: 'LShoulder',
    18: 'LElbow',
    19: 'LWrist',
    25: 'RShoulder',
    26: 'RElbow',
    27: 'RWrist',
}
for _joint_idx, _joint_name in _H36M_MOVING_JOINTS.items():
    H36M_NAMES[_joint_idx] = _joint_name

# Parent/child joint index pairs describing the bones of the kinematic tree.
parent_indices = np.array([0, 1, 2, 0, 6, 7, 0, 12, 13, 14, 13, 17, 18, 13, 25, 26])
children_indices = np.array([1, 2, 3, 6, 7, 8, 12, 13, 14, 15, 17, 18, 19, 25, 26, 27])

# Stacked Hourglass produces 16 joints, ordered as below.
SH_NAMES = [
    'RFoot', 'RKnee', 'RHip', 'LHip', 'LKnee', 'LFoot',
    'Hip', 'Spine', 'Thorax', 'Head',
    'RWrist', 'RElbow', 'RShoulder', 'LShoulder', 'LElbow', 'LWrist',
]
# The .h5 suffix in pose sequence name is just inherited from the original
# naming convention. The '.sh' suffix means stacked hourglass key-point detector
# used in previous works. Here we just use '.sh' to represent key-points obtained
# from any heat-map regression model. We used high-resolution net instead of
# stacked-hourglass model.
def list_remove(list_a, list_b):
    """
    Find all elements of list A that do not exist in list B.
    Args
        list_a: list A
        list_b: list B
    Returns
        list_c: elements of list_a absent from list_b, in original order
    """
    # Build a set once so membership tests are O(1); the original scanned
    # list_b for every element of list_a, i.e. O(len(a) * len(b)).
    # Callers pass lists of int dimension indices, which are hashable.
    excluded = set(list_b)
    return [item for item in list_a if item not in excluded]
def add_virtual_cams(cams, visualize=False):
    """
    Deprecated. Add virtual cameras.

    Synthesizes extra camera views by rotating camera 1 of each subject
    around the z-axis and appends them to a copy of the camera dictionary.

    Args
        cams: dictionary mapping (subject, camera_idx) to parameter tuples
            (R, T, f, c, k, p, name)
        visualize: if True, scatter-plot hip positions of the train/test
            sets and draw every camera's coordinate system (blocks briefly)
    Returns
        new_cams: copy of cams extended with the rotated virtual cameras
    """
    # add more cameras to the scene
    #R, T, f, c, k, p, name = cams[ (1,1) ]
    # plot the position of human subjects
    old_cam_num = 4  # Human3.6M provides 4 real cameras per subject
    def add_coordinate_system(ax, origin, system, length=300, new=False):
        # draw a coordinate system at a specified origin
        origin = origin.reshape(3, 1)
        start_points = np.repeat(origin, 3, axis=1)
        # system: [v1, v2, v3]
        end_points = start_points + system*length
        color = ['g', 'y', 'k'] # color for v1, v2 and v3
        if new:
            # virtual cameras are drawn with a different palette
            color = ['b', 'r', 'g']
        def get_args(start_points, end_points):
            # split a (start, end) column pair into x/y/z plot arguments
            x = [start_points[0], end_points[0]]
            y = [start_points[1], end_points[1]]
            z = [start_points[2], end_points[2]]
            return x, y, z
        for i in range(3):
            x, y, z = get_args(start_points[:,i], end_points[:,i])
            ax.plot(x, y, z, lw=2, c=color[i])
        return
    def get_new_camera(system, center, rotation = [0,0,90.]):
        # Rotate a camera basis (columns of `system`) and its center by the
        # given Euler angles (degrees), returning the rotated basis/center.
        from scipy.spatial.transform import Rotation as Rotation
        center = center.reshape(3, 1)
        start_points = np.repeat(center, 3, axis=1)
        end_points = start_points + system
        r = Rotation.from_euler('xyz', rotation, degrees=True)
        # NOTE(review): Rotation.as_dcm() was removed in SciPy >= 1.6
        # (renamed to as_matrix()) — this only runs on old SciPy. Confirm.
        start_points_new = r.as_dcm() @ start_points
        end_points_new = r.as_dcm() @ end_points
        new_system = [(end_points_new[:,i] - start_points_new[:,i]).reshape(3,1) for i in range(3)]
        new_system = np.hstack(new_system)
        return new_system, start_points_new[:,0]
    # the new cameras are added by rotating one existing camera
    # TODO: more rotations
    new_cams = cams.copy()
    for key in cams.keys():
        subject, camera_idx = key
        if camera_idx != 1: # only rotate the first camera
            continue
        R, T, f, c, k, p, name = cams[key]
        # z-axis rotation angles (degrees) for the four virtual views
        angles = [80., 130., 270., 320.]
        for angle_idx in range(len(angles)):
            angle = angles[angle_idx]
            new_R, new_T = get_new_camera(R.T, T, [0., 0., angle])
            # virtual cameras get indices 5..8 and a 'new<k>' name suffix
            new_cams[(subject, old_cam_num + angle_idx + 1)]\
            = (new_R.T, new_T.reshape(3,1), f, c, k, p, name+'new'+str(angle_idx+1))
    # visualize cameras used
    if visualize:
        train_set_3d = np.load('../data/human3.6M/h36m/numpy/threeDPose_train.npy').item()
        test_set_3d = np.load('../data/human3.6M/h36m/numpy/threeDPose_test.npy').item()
        hips_train = np.vstack(list(train_set_3d.values()))
        hips_test = np.vstack(list(test_set_3d.values()))
        ax = plt.subplot(111, projection='3d')
        # plot 1000 random hip positions from each split
        chosen = np.random.choice(len(hips_train), 1000, replace=False)
        chosen_hips = hips_train[chosen, :3]
        ax.plot(chosen_hips[:,0], chosen_hips[:,1], chosen_hips[:,2], 'bo')
        chosen = np.random.choice(len(hips_test), 1000, replace=False)
        chosen_hips = hips_test[chosen, :3]
        ax.plot(chosen_hips[:,0], chosen_hips[:,1], chosen_hips[:,2], 'ro')
        ax.set_xlabel("x");ax.set_ylabel("y");ax.set_zlabel("z")
        plt.title('Blue dots: Hip positions in the h36m training set. \
        Red dots: testing set. \
        Old camera coordinates: x-green, y-yellow, z-black \
        New camera coordinates: x-blue, y-red, z-green')
        plt.pause(0.1)
        for key in new_cams.keys():
            R, T, f, c, k, p, name = new_cams[key]
            # R gives camera basis vectors row-by-row, T gives camera center
            if 'new' in name:
                new = True
            else:
                new = False
            add_coordinate_system(ax, T, R.T, new=new)
        RADIUS = 3000 # space around the subject
        xroot, yroot, zroot = 0., 0., 500.
        ax.set_xlim3d([-RADIUS+xroot, RADIUS+xroot])
        ax.set_zlim3d([-RADIUS+zroot, RADIUS+zroot])
        ax.set_ylim3d([-RADIUS+yroot, RADIUS+yroot])
        ax.set_aspect("equal")
    return new_cams
def down_sample_training_data(train_dict, opt):
    """
    Down-sample the training data.

    Args
        train_dict: dictionary of training poses keyed by
            (subject, action, sequence-name) tuples
        opt: experiment options; opt.ws_name selects the setting
    Returns
        train_dict/sampled_dict: a dictionary holding the retained subset
    Raises
        ValueError: if opt.ws_name is not a recognized setting
    """
    setting = opt.ws_name
    if setting in ('S1', 'S15', 'S156'):
        # Keep only the subjects named in the setting, e.g. 'S15' keeps
        # subjects 1 and 5. The input dictionary is pruned in place and
        # returned, matching the original behaviour.
        kept_subjects = [int(digit) for digit in setting[1:]]
        for key in [k for k in train_dict if k[0] not in kept_subjects]:
            del train_dict[key]
        return train_dict
    if setting in ('0.001S1', '0.01S1', '0.05S1', '0.1S1', '0.5S1'):
        # Randomly keep a fraction of subject 1's frames; all other
        # subjects are dropped. A new dictionary of copies is returned.
        fraction = float(setting.split('S')[0])
        sampled_dict = {}
        for key, poses in train_dict.items():
            if key[0] != 1:
                continue
            keep_count = int(fraction * len(poses))
            picked = np.random.choice(len(poses), keep_count, replace=False)
            sampled_dict[key] = poses[picked].copy()
        return sampled_dict
    raise ValueError('Unknown experiment setting.')
def get_train_dict_3d(opt):
    """
    Get the training 3d skeletons as a Python dictionary.
    Args
        opt: experiment options
    Returns
        train_dict_3d: a dictionary containing training 3d poses
    """
    raw_path = os.path.join(opt.data_dir, 'threeDPose_train.npy')
    # For real 2D detections, down-sampling and data augmentation are
    # performed later in get_train_dict_2d, so just return the raw data.
    if opt.twoD_source != 'synthetic':
        return np.load(raw_path).item()
    # Synthetic 2D detections (Protocol P1*): down-sampling happens here
    # and any augmentation is assumed to be pre-computed on disk.
    if opt.evolved_path is not None:
        # Pre-augmented ("evolved") training data.
        return np.load(opt.evolved_path).item()
    train_dict_3d = np.load(raw_path).item()
    if opt.ws:
        # Weakly-supervised setting: shrink the raw S15678 data to
        # simulate an environment with scarce training annotations.
        train_dict_3d = down_sample_training_data(train_dict_3d, opt)
    return train_dict_3d
def get_test_dict_3d(opt):
    """
    Get the testing 3d skeletons as a Python dictionary.
    Args
        opt: experiment options
    Returns
        test_dict_3d: a dictionary containing testing 3d poses
    Raises
        NotImplementedError: if opt.test_source is not 'h36m'
    """
    # Only the Human3.6M test split is currently supported.
    if opt.test_source != 'h36m':
        raise NotImplementedError
    test_path = os.path.join(opt.data_dir, 'threeDPose_test.npy')
    return np.load(test_path).item()
def get_dict_2d(train_dict_3d, test_dict_3d, rcams, ncams, opt):
    """
    Prepare 2D training and testing data as Python dictionaries.
    Args
        train_dict_3d: dictionary containing training 3d poses
        test_dict_3d: dictionary containing testing 3d poses
        rcams: camera parameters
        ncams: number of camera to use
        opt: experiment options
    Returns
        train_dict_2d: a dictionary containing training 2d poses
        test_dict_2d: a dictionary containing testing 2d poses
        train_dict_3d: the dictionary containing training 3d poses, which may be
        updated
    """
    if opt.twoD_source == 'synthetic':
        # project the 3D key-points to 2D ones
        # This type of key-points is used to validate the performance of
        # 2D-to-3D networks and the noise of 2D key-point detector is ignored.
        # In fact, these 2D key-points are used as ground-truth to train the
        # first stage of TAG-net.
        if opt.virtual_cams:
            # virtual cameras double the number of views to project into
            ncams *= 2
        train_dict_2d = project_to_cameras(train_dict_3d, rcams, ncams=ncams)
        test_dict_2d = project_to_cameras(test_dict_3d, rcams, ncams=ncams)
    elif opt.twoD_source == 'HRN':
        # The 2D key-point detections obtained by the heatmap regression model.
        # The model uses high-resolution net as backbone and pixel-shuffle super-resolution
        # to regress high-resolution heatmaps.
        train_dict_2d = np.load(os.path.join(opt.data_dir, 'twoDPose_HRN_train.npy')).item()
        test_dict_2d = np.load(os.path.join(opt.data_dir, 'twoDPose_HRN_test.npy')).item()
    # NOTE(review): `delete` and `replace` are defined but never invoked in
    # this function — presumably leftovers from earlier experiments.
    def delete(dic, actions):
        # remove all entries whose action is not in `actions` (in place)
        keys_to_delete = []
        for key in dic.keys():
            sub, act, name = key
            if act not in actions:
                keys_to_delete.append(key)
        for key in keys_to_delete:
            del dic[key]
        return dic
    def replace(dic, temp):
        # overwrite a random half of each sequence with the values from
        # `temp` (keyed without the 3-character camera suffix)
        for key in dic.keys():
            sub, act, name = key
            temp_key = (sub, act, name[:-3])
            synthetic = temp[temp_key]
            assert len(dic[key]) == len(synthetic)
            indices = np.random.choice(len(synthetic), int(0.5*len(synthetic)), replace=False)
            dic[key][indices] = synthetic[indices].copy()
        return dic
    # # weakly-supervised experiment
    def remove_keys(dic, name_list):
        # keep only entries whose subject id appears in name_list (in place)
        keys_to_delete = []
        for key in dic.keys():
            if key[0] not in name_list:
                keys_to_delete.append(key)
        for key in keys_to_delete:
            del dic[key]
        return dic
    # down-sample the data for weakly-supervised experiment
    if opt.ws and opt.ws_name in ['S1', 'S15', 'S156']:
        # e.g. 'S15' -> subjects [1, 5]
        sub_list = [int(opt.ws_name[i]) for i in range(1, len(opt.ws_name))]
        remove_keys(train_dict_3d, sub_list)
        remove_keys(train_dict_2d, sub_list)
    # data augmentation with evolved data
    if opt.evolved_path is not None:
        evolved_dict_3d = np.load(opt.evolved_path).item()
        evolved_dict_2d = project_to_cameras(evolved_dict_3d, rcams, ncams=ncams)
        # combine the synthetic 2D-3D pair with the real 2D-3D pair
        train_dict_3d = {**train_dict_3d, **evolved_dict_3d}
        train_dict_2d = {**train_dict_2d, **evolved_dict_2d}
    return train_dict_2d, test_dict_2d, train_dict_3d
def prepare_data_dict(rcams,
                      opt,
                      ncams=4,
                      predict_14=False,
                      use_nose=True
                      ):
    """
    Prepare 2D and 3D data as Python dictionaries.
    Args
        rcams: camera parameters
        opt: experiment options
        ncams: number of camera to use
        predict_14: whether to predict 14 joints or not
        use_nose: whether to use nose joint or not
    Returns
        data_dic: a dictionary containing training and testing data
        data_stats: statistics computed from training data
    """
    assert opt.twoD_source in ['synthetic', 'HRN'], 'Unknown 2D key-point type.'
    data_dic = {}
    # get 3D skeleton data
    train_dict_3d = get_train_dict_3d(opt)
    test_dict_3d = get_test_dict_3d(opt)
    # get 2D key-point data (train_dict_3d may be updated, e.g. augmented)
    train_dict_2d, test_dict_2d, train_dict_3d = get_dict_2d(train_dict_3d,
                                                             test_dict_3d,
                                                             rcams,
                                                             ncams,
                                                             opt)
    # compute normalization statistics and normalize the 2D data
    # (statistics come from the training split only)
    complete_train_2d = copy.deepcopy(np.vstack(list(train_dict_2d.values())))
    data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = \
    normalization_stats(complete_train_2d,
                        dim=2,
                        norm_twoD=opt.norm_twoD,
                        use_nose=use_nose)
    data_dic['train_set_2d'] = normalize_data(train_dict_2d,
                                              data_mean_2d,
                                              data_std_2d,
                                              dim_to_use_2d,
                                              norm_single = opt.norm_single)
    data_dic['test_set_2d'] = normalize_data(test_dict_2d,
                                             data_mean_2d,
                                             data_std_2d,
                                             dim_to_use_2d,
                                             norm_single = opt.norm_single)
    # The 3D joint position is represented in the world coordinate,
    # which is converted to camera coordinate system as the regression target
    train_dict_3d = transform_world_to_camera(train_dict_3d, rcams, ncams=ncams)
    test_dict_3d = transform_world_to_camera(test_dict_3d, rcams, ncams=ncams)
    # apply 3d post-processing (centering around root)
    train_dict_3d, train_root_positions = postprocess_3d(train_dict_3d)
    test_dict_3d, test_root_positions = postprocess_3d(test_dict_3d)
    # compute normalization statistics and normalize the 3D data
    # (must happen after the camera transform and root-centering above)
    complete_train_3d = copy.deepcopy(np.vstack(list(train_dict_3d.values())))
    data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d =\
    normalization_stats(complete_train_3d, dim=3, predict_14=predict_14)
    data_dic['train_set_3d'] = normalize_data(train_dict_3d,
                                              data_mean_3d,
                                              data_std_3d,
                                              dim_to_use_3d)
    data_dic['test_set_3d'] = normalize_data(test_dict_3d,
                                             data_mean_3d,
                                             data_std_3d,
                                             dim_to_use_3d)
    # some joints are not used during training
    dim_use_2d = list_remove([i for i in range(len(data_mean_2d))],
                             list(dim_to_ignore_2d))
    dim_use_3d = list_remove([i for i in range(len(data_mean_3d))],
                             list(dim_to_ignore_3d))
    # assemble a dictionary for data statistics
    data_stats = {'mean_2d':data_mean_2d,
                  'std_2d':data_std_2d,
                  'mean_3d':data_mean_3d,
                  'std_3d':data_std_3d,
                  'dim_ignore_2d':dim_to_ignore_2d,
                  'dim_ignore_3d':dim_to_ignore_3d,
                  'dim_use_2d':dim_use_2d,
                  'dim_use_3d':dim_use_3d}
    return data_dic, data_stats
def select_action(dic_2d, dic_3d, action, twoD_source):
    """
    Construct sub-dictionaries restricted to a single action.
    Args
        dic_2d: dictionary containing 2d poses
        dic_3d: dictionary containing 3d poses
        action: the action to keep
        twoD_source: how the key-points were generated (synthetic or real)
    Returns
        dic_2d_action: 2d poses for the specified action (copies)
        dic_3d_action: matching 3d poses for the specified action (copies)
    """
    chosen_2d, chosen_3d = {}, {}
    for key2d, poses2d in dic_2d.items():
        if key2d[1] != action:
            continue
        chosen_2d[key2d] = poses2d.copy()
        # Synthetic data shares keys between the 2D and 3D dictionaries;
        # real detections carry a 3-character suffix on the sequence name
        # that must be stripped to recover the 3D key.
        if twoD_source == 'synthetic':
            key3d = key2d
        else:
            key3d = (key2d[0], key2d[1], key2d[2][:-3])
        chosen_3d[key3d] = dic_3d[key3d].copy()
    return chosen_2d, chosen_3d
def split_action(dic_2d, dic_3d, actions, camera_frame, opt, input_size, output_size):
    """
    Generate one evaluation dataset per action.
    Args
        dic_2d: dictionary containing 2d poses
        dic_3d: dictionary containing 3d poses
        actions: list of defined actions
        camera_frame: whether the 3d data is in camera coordinates
        opt: experiment options
        input_size: input vector length
        output_size: output vector length
    Returns
        action_dataset_list: list of datasets, one per action in `actions`
    """
    action_dataset_list = []
    for action in actions:
        # Restrict both dictionaries to the current action, then flatten
        # them into numpy arrays for the dataset object.
        sub_2d, sub_3d = select_action(dic_2d, dic_3d, action, opt.twoD_source)
        eval_input, eval_output = get_all_data(sub_2d,
                                               sub_3d,
                                               camera_frame,
                                               norm_twoD=opt.norm_twoD,
                                               input_size=input_size,
                                               output_size=output_size)
        action_dataset_list.append(dataset.PoseDataset(eval_input,
                                                       eval_output,
                                                       'eval',
                                                       action_name=action,
                                                       refine_3d=opt.refine_3d))
    return action_dataset_list
def normalization_stats(complete_data,
                        dim,
                        predict_14=False,
                        norm_twoD=False,
                        use_nose=False
                        ):
    """
    Computes normalization statistics: mean and stdev, dimensions used and ignored
    Args
        complete_data: nxd np array with poses
        dim. integer={2,3} dimensionality of the data
        predict_14. boolean. Whether to use only 14 joints
        norm_twoD: whether the 2D inputs are root-centred (drops the hip)
        use_nose: whether to use nose or not
    Returns
        data_mean: np vector with the mean of the data
        data_std: np vector with the standard deviation of the data
        dimensions_to_ignore: list of dimensions not used in the model
        dimensions_to_use: list of dimensions used in the model
    Raises
        ValueError: if dim is not 2 or 3
    """
    if dim not in (2, 3):
        # BUG FIX: the original `raise(ValueError, msg)` raised a tuple,
        # which produces a TypeError in Python 3 instead of the intended
        # ValueError.
        raise ValueError('dim must be 2 or 3')
    data_mean = np.mean(complete_data, axis=0)
    data_std = np.std(complete_data, axis=0)
    # Encodes which 17 (or 14) 2d-3d pairs we are predicting
    if dim == 2:
        if not use_nose:
            dimensions_to_use = np.where(np.array([x != '' and x != 'Neck/Nose' for x in H36M_NAMES]))[0]
        else:
            dimensions_to_use = np.where(np.array([x != '' for x in H36M_NAMES]))[0]
        if norm_twoD:
            # inputs are hip-centred, so the (constant) hip joint is dropped
            dimensions_to_use = np.delete(dimensions_to_use, 0)
        # expand joint indices to interleaved (x, y) coordinate indices
        dimensions_to_use = np.sort(np.hstack((dimensions_to_use*2,
                                              dimensions_to_use*2+1)))
        dimensions_to_ignore = np.delete(np.arange(len(H36M_NAMES)*2),
                                         dimensions_to_use)
    else:
        dimensions_to_use = np.where(np.array([x != '' for x in H36M_NAMES]))[0]
        # hip is deleted
        # spine and neck are also deleted if predict_14
        dimensions_to_use = np.delete(dimensions_to_use, [0, 7, 9] if predict_14 else 0)
        # expand joint indices to interleaved (x, y, z) coordinate indices
        dimensions_to_use = np.sort(np.hstack((dimensions_to_use*3,
                                              dimensions_to_use*3+1,
                                              dimensions_to_use*3+2)))
        dimensions_to_ignore = np.delete(np.arange(len(H36M_NAMES)*3),
                                         dimensions_to_use)
    return data_mean, data_std, dimensions_to_ignore, dimensions_to_use
def transform_world_to_camera(poses_set, cams, ncams=4):
    """
    Transform 3d poses from world coordinate to camera coordinate system
    Args
        poses_set: dictionary with 3d poses keyed by (subject, action, seqname)
        cams: dictionary with cameras keyed by (subject, camera_index)
        ncams: number of cameras per subject
    Return:
        t3d_camera: dictionary with 3d poses in camera coordinate
    """
    t3d_camera = {}
    for t3dk in sorted(poses_set.keys()):
        subj, action, seqname = t3dk
        t3d_world = poses_set[t3dk]
        # BUG FIX: the original used `c` both as the loop index and as the
        # unpacked camera-centre parameter, so the index was clobbered every
        # iteration (it only worked because `for` re-binds from the iterator).
        # Use a dedicated index name, consistent with project_to_cameras.
        for cam in range(ncams):
            R, T, f, c, k, p, name = cams[(subj, cam+1)]
            camera_coord = cameras.world_to_camera_frame(np.reshape(t3d_world, [-1, 3]), R, T)
            camera_coord = np.reshape(camera_coord, [-1, len(H36M_NAMES)*3])
            sname = seqname[:-3]+"."+name+".h5" # e.g.: Waiting 1.58860488.h5
            t3d_camera[(subj, action, sname)] = camera_coord
    return t3d_camera
def normalize_data(data, data_mean, data_std, dim_to_use, norm_single=False):
"""
Normalizes a dictionary of poses
Args
data: dictionary where values are
data_mean: np vector with the mean of the data
data_std: np vector with the standard deviation of the data
dim_to_use: list of dimensions to keep in the data
norm_single: whether to perform normalization independently for each
sample
Returns
data_out: dictionary with same keys as data, but values have been normalized
"""
data_out = {}
for key in data.keys():
data[ key ] = data[ key ][ :, dim_to_use ]
if norm_single:
# does not use statistics over the whole dataset
temp = data[key]
temp = temp.reshape(len(temp), -1, 2)
mean_x = np.mean(temp[:,:,0], axis=1).reshape(len(temp), 1)
std_x = np.std(temp[:,:,0], axis=1)
mean_y = np.mean(temp[:,:,1], axis=1).reshape(len(temp), 1)
std_y = np.std(temp[:,:,1], axis=1)
denominator = (0.5*(std_x + std_y)).reshape(len(std_x), 1)
temp[:,:,0] = (temp[:,:,0] - mean_x)/denominator
temp[:,:,1] = (temp[:,:,1] - mean_y)/denominator
data_out[key] = temp.reshape(len(temp), -1)
else:
mu = data_mean[dim_to_use]
stddev = data_std[dim_to_use]
data_out[ key ] = np.divide( (data[key] - mu), stddev )
return data_out
def unNormalizeData(normalized_data, data_mean, data_std, dimensions_to_ignore):
    """
    Un-normalizes a matrix whose mean has been subtracted and that has been
    divided by standard deviation, re-inserting zero columns for the
    dimensions that were removed from the original data.

    Args
        normalized_data: nxd matrix to unnormalize
        data_mean: np vector with the mean of the data
        data_std: np vector with the standard deviation of the data
        dimensions_to_ignore: dimensions removed from the original data
    Returns
        orig_data: the unnormalized data, shape (n, len(data_mean))
    """
    n_samples = normalized_data.shape[0]
    full_dim = data_mean.shape[0]
    ignored = set(int(d) for d in dimensions_to_ignore)
    kept = np.array([d for d in range(full_dim) if d not in ignored])
    # Scatter the normalized columns back into a full-width zero matrix.
    orig_data = np.zeros((n_samples, full_dim), dtype=np.float32)
    orig_data[:, kept] = normalized_data
    # Undo z-score normalization: x * std + mean, broadcast over rows.
    return orig_data * data_std.reshape(1, full_dim) + data_mean.reshape(1, full_dim)
def define_actions(action):
    """
    Given an action string, returns a list of corresponding actions.
    Args
        action: String. either "all"/"All" or one of the 15 h36m actions
    Returns
        actions: List of strings. Actions to use.
    Raises
        ValueError: if the action is not a valid action in Human 3.6M
    """
    actions = ["Directions",
               "Discussion",
               "Eating",
               "Greeting",
               "Phoning",
               "Photo",
               "Posing",
               "Purchases",
               "Sitting",
               "SittingDown",
               "Smoking",
               "Waiting",
               "WalkDog",
               "Walking",
               "WalkTogether"
              ]
    if action == "All" or action == "all":
        return actions
    if action not in actions:
        # BUG FIX: the original `raise(ValueError, msg)` raised a tuple,
        # which is a TypeError in Python 3 rather than the intended
        # ValueError.
        raise ValueError("Unrecognized action: %s" % action)
    return [action]
def project_to_cameras(poses_set, cams, ncams=4):
    """
    Project 3d poses into 2d using camera parameters.
    Args
        poses_set: dictionary containing 3d poses
        cams: dictionary containing camera parameters
        ncams: number of cameras per subject
    Returns
        t2d: dictionary with 2d poses keyed by (subject, action, seqname)
    """
    t2d = {}
    for key3d in sorted(poses_set.keys()):
        subj, act, seqname = key3d
        points3d = poses_set[key3d]
        for cam in range(ncams):
            R, T, f, c, k, p, name = cams[(subj, cam+1)]
            # Project every joint of every frame through this camera.
            pts2d, _, _, _, _ = cameras.project_point_radial(
                np.reshape(points3d, [-1, 3]), R, T, f, c, k, p)
            pts2d = np.reshape(pts2d, [-1, len(H36M_NAMES)*2])
            # e.g.: "Waiting 1.58860488.h5"
            sname = seqname[:-3] + "." + name + ".h5"
            t2d[(subj, act, sname)] = pts2d
    return t2d
def postprocess_3d(poses_set):
    """
    Center 3d points around root.
    Args
        poses_set: dictionary with 3d data; updated in place
    Returns
        poses_set: dictionary with 3d data centred around root (center hip) joint
        root_positions: dictionary with the original 3d position of each pose
    """
    root_positions = {}
    for key, poses in poses_set.items():
        # Keep track of the global (root) trajectory before removing it.
        root_positions[key] = copy.deepcopy(poses[:, :3])
        # Subtract the root joint (first 3 coordinates) from every joint.
        poses_set[key] = poses - np.tile(poses[:, :3], [1, len(H36M_NAMES)])
    return poses_set, root_positions
def postprocess_2d(poses_set):
    """
    Center 2d points around root.
    Args
        poses_set: dictionary with 2d data; updated in place
    Returns
        poses_set: dictionary with 2d data centred around root (center hip) joint
        root_positions: dictionary with the original 2d position of each pose
    """
    root_positions = {}
    for key, poses in poses_set.items():
        # Keep track of the global (root) trajectory before removing it.
        root_positions[key] = copy.deepcopy(poses[:, :2])
        # Subtract the root joint (first 2 coordinates) from every joint.
        poses_set[key] = poses - np.tile(poses[:, :2], [1, len(H36M_NAMES)])
    return poses_set, root_positions
def get_all_data(data_x,
                 data_y,
                 camera_frame,
                 norm_twoD=False,
                 input_size=32,
                 output_size=48
                 ):
    """
    Stack dictionary data into contiguous numpy arrays for the network.
    Args
        data_x: dictionary with 2d inputs
        data_y: dictionary with 3d expected outputs
        camera_frame: whether the 3d data is in camera coordinates
        norm_twoD: inputs are root-centred, so two input dims are dropped
        input_size: input vector length for each sample
        output_size: output vector length for each sample
    Returns
        encoder_inputs: numpy array for the input data
        decoder_outputs: numpy array for the output data
    """
    if norm_twoD:
        # root-centred inputs omit the (constant-zero) hip coordinates
        input_size -= 2
    # Total number of frames across all sequences.
    total = sum(arr.shape[0] for arr in data_x.values())
    encoder_inputs = np.zeros((total, input_size), dtype=np.float32)
    decoder_outputs = np.zeros((total, output_size), dtype=np.float32)
    row = 0
    for key2d in data_x.keys():
        subj, act, fname = key2d
        # 2d and 3d keys coincide when the 3d data is already in camera
        # coordinates; otherwise strip the camera suffix from the name.
        if camera_frame:
            key3d = key2d
        else:
            key3d = (subj, act, '{0}.h5'.format(fname.split('.')[0]))
        # a '-sh' suffix marks detected (not synthetic) key-points
        if camera_frame and fname.endswith('-sh'):
            key3d = (subj, act, fname[:-3])
        count = data_x[key2d].shape[0]
        encoder_inputs[row:row + count, :] = data_x[key2d]
        decoder_outputs[row:row + count, :] = data_y[key3d]
        row += count
    return encoder_inputs, decoder_outputs
def prepare_dataset(opt):
    """
    Prepare PyTorch dataset objects used for training 2D-to-3D deep network.
    Args
        opt: experiment options
    Returns
        train_dataset: training dataset as PyTorch dataset object
        eval_dataset: evaluation dataset as PyTorch dataset object
        data_stats: dataset statistics computed from the training dataset
        action_eval_list: a list of evaluation dataset objects where each
        corresponds to one action
    """
    # load camera parameters used to project the 3D skeletons
    rcams = np.load(os.path.join(opt.data_dir, 'cameras.npy')).item()
    if opt.virtual_cams:
        # produce more camera views by adding virtual cameras
        rcams = add_virtual_cams(rcams)
    # By default, all actions are used
    actions = define_actions(opt.actions)
    # assemble Python dictionaries containing 2D and 3D data
    data_dic, data_stats = prepare_data_dict(rcams,
                                             opt,
                                             predict_14=False)
    input_size = len(data_stats['dim_use_2d'])
    output_size = len(data_stats['dim_use_3d'])
    # flatten the dictionaries into numpy arrays
    train_input, train_output = get_all_data(data_dic['train_set_2d'],
                                             data_dic['train_set_3d'],
                                             camera_frame,
                                             norm_twoD=opt.norm_twoD,
                                             input_size=input_size,
                                             output_size=output_size)
    eval_input, eval_output = get_all_data(data_dic['test_set_2d'],
                                           data_dic['test_set_3d'],
                                           camera_frame,
                                           norm_twoD=opt.norm_twoD,
                                           input_size=input_size,
                                           output_size=output_size)
    # wrap the arrays in PyTorch dataset objects
    train_dataset = dataset.PoseDataset(train_input,
                                        train_output,
                                        'train',
                                        refine_3d=opt.refine_3d)
    eval_dataset = dataset.PoseDataset(eval_input,
                                       eval_output,
                                       'eval',
                                       refine_3d=opt.refine_3d)
    # one evaluation dataset per action for action-wise reporting
    action_eval_list = split_action(data_dic['test_set_2d'],
                                    data_dic['test_set_3d'],
                                    actions,
                                    camera_frame,
                                    opt,
                                    input_size=input_size,
                                    output_size=output_size)
    return train_dataset, eval_dataset, data_stats, action_eval_list
"numpy.hstack",
"numpy.array",
"copy.deepcopy",
"numpy.divide",
"numpy.mean",
"numpy.multiply",
"numpy.repeat",
"scipy.spatial.transform.Rotation.from_euler",
"numpy.reshape",
"numpy.delete",
"libs.dataset.h36m.pth_dataset.PoseDataset",
"numpy.random.choice",
"numpy.std",
"matplotlib.pyplo... | [((1093, 1160), 'numpy.array', 'np.array', (['[0, 1, 2, 0, 6, 7, 0, 12, 13, 14, 13, 17, 18, 13, 25, 26]'], {}), '([0, 1, 2, 0, 6, 7, 0, 12, 13, 14, 13, 17, 18, 13, 25, 26])\n', (1101, 1160), True, 'import numpy as np\n'), ((1180, 1248), 'numpy.array', 'np.array', (['[1, 2, 3, 6, 7, 8, 12, 13, 14, 15, 17, 18, 19, 25, 26, 27]'], {}), '([1, 2, 3, 6, 7, 8, 12, 13, 14, 15, 17, 18, 19, 25, 26, 27])\n', (1188, 1248), True, 'import numpy as np\n'), ((7967, 8017), 'os.path.join', 'os.path.join', (['opt.data_dir', '"""threeDPose_train.npy"""'], {}), "(opt.data_dir, 'threeDPose_train.npy')\n", (7979, 8017), False, 'import os\n'), ((20822, 20852), 'numpy.mean', 'np.mean', (['complete_data'], {'axis': '(0)'}), '(complete_data, axis=0)\n', (20829, 20852), True, 'import numpy as np\n'), ((20870, 20899), 'numpy.std', 'np.std', (['complete_data'], {'axis': '(0)'}), '(complete_data, axis=0)\n', (20876, 20899), True, 'import numpy as np\n'), ((25325, 25359), 'numpy.zeros', 'np.zeros', (['(T, D)'], {'dtype': 'np.float32'}), '((T, D), dtype=np.float32)\n', (25333, 25359), True, 'import numpy as np\n'), ((25640, 25668), 'numpy.repeat', 'np.repeat', (['stdMat', 'T'], {'axis': '(0)'}), '(stdMat, T, axis=0)\n', (25649, 25668), True, 'import numpy as np\n'), ((25723, 25752), 'numpy.repeat', 'np.repeat', (['meanMat', 'T'], {'axis': '(0)'}), '(meanMat, T, axis=0)\n', (25732, 25752), True, 'import numpy as np\n'), ((29866, 29909), 'numpy.zeros', 'np.zeros', (['(n, input_size)'], {'dtype': 'np.float32'}), '((n, input_size), dtype=np.float32)\n', (29874, 29909), True, 'import numpy as np\n'), ((29932, 29976), 'numpy.zeros', 'np.zeros', (['(n, output_size)'], {'dtype': 'np.float32'}), '((n, output_size), dtype=np.float32)\n', (29940, 29976), True, 'import numpy as np\n'), ((31175, 31212), 'os.path.join', 'os.path.join', (['data_dir', '"""cameras.npy"""'], {}), "(data_dir, 'cameras.npy')\n", (31187, 31212), False, 'import os\n'), ((32801, 32886), 
'libs.dataset.h36m.pth_dataset.PoseDataset', 'dataset.PoseDataset', (['train_input', 'train_output', '"""train"""'], {'refine_3d': 'opt.refine_3d'}), "(train_input, train_output, 'train', refine_3d=opt.refine_3d\n )\n", (32820, 32886), True, 'import libs.dataset.h36m.pth_dataset as dataset\n'), ((33025, 33102), 'libs.dataset.h36m.pth_dataset.PoseDataset', 'dataset.PoseDataset', (['eval_input', 'eval_output', '"""eval"""'], {'refine_3d': 'opt.refine_3d'}), "(eval_input, eval_output, 'eval', refine_3d=opt.refine_3d)\n", (33044, 33102), True, 'import libs.dataset.h36m.pth_dataset as dataset\n'), ((2814, 2842), 'numpy.repeat', 'np.repeat', (['origin', '(3)'], {'axis': '(1)'}), '(origin, 3, axis=1)\n', (2823, 2842), True, 'import numpy as np\n'), ((3607, 3635), 'numpy.repeat', 'np.repeat', (['center', '(3)'], {'axis': '(1)'}), '(center, 3, axis=1)\n', (3616, 3635), True, 'import numpy as np\n'), ((3691, 3741), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['"""xyz"""', 'rotation'], {'degrees': '(True)'}), "('xyz', rotation, degrees=True)\n", (3710, 3741), True, 'from scipy.spatial.transform import Rotation as Rotation\n'), ((3967, 3988), 'numpy.hstack', 'np.hstack', (['new_system'], {}), '(new_system)\n', (3976, 3988), True, 'import numpy as np\n'), ((5055, 5088), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {'projection': '"""3d"""'}), "(111, projection='3d')\n", (5066, 5088), True, 'import matplotlib.pyplot as plt\n'), ((5554, 5802), 'matplotlib.pyplot.title', 'plt.title', (['"""Blue dots: Hip positions in the h36m training set. Red dots: testing set. Old camera coordinates: x-green, y-yellow, z-black New camera coordinates: x-blue, y-red, z-green"""'], {}), "(\n 'Blue dots: Hip positions in the h36m training set. Red dots: testing set. 
Old camera coordinates: x-green, y-yellow, z-black New camera coordinates: x-blue, y-red, z-green'\n )\n", (5563, 5802), True, 'import matplotlib.pyplot as plt\n'), ((5807, 5821), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (5816, 5821), True, 'import matplotlib.pyplot as plt\n'), ((9476, 9525), 'os.path.join', 'os.path.join', (['opt.data_dir', '"""threeDPose_test.npy"""'], {}), "(opt.data_dir, 'threeDPose_test.npy')\n", (9488, 9525), False, 'import os\n'), ((19560, 19661), 'libs.dataset.h36m.pth_dataset.PoseDataset', 'dataset.PoseDataset', (['eval_input', 'eval_output', '"""eval"""'], {'action_name': 'action', 'refine_3d': 'opt.refine_3d'}), "(eval_input, eval_output, 'eval', action_name=action,\n refine_3d=opt.refine_3d)\n", (19579, 19661), True, 'import libs.dataset.h36m.pth_dataset as dataset\n'), ((21807, 21867), 'numpy.delete', 'np.delete', (['dimensions_to_use', '([0, 7, 9] if predict_14 else 0)'], {}), '(dimensions_to_use, [0, 7, 9] if predict_14 else 0)\n', (21816, 21867), True, 'import numpy as np\n'), ((25769, 25799), 'numpy.multiply', 'np.multiply', (['orig_data', 'stdMat'], {}), '(orig_data, stdMat)\n', (25780, 25799), True, 'import numpy as np\n'), ((28069, 28103), 'copy.deepcopy', 'copy.deepcopy', (['poses_set[k][:, :3]'], {}), '(poses_set[k][:, :3])\n', (28082, 28103), False, 'import copy\n'), ((28747, 28781), 'copy.deepcopy', 'copy.deepcopy', (['poses_set[k][:, :2]'], {}), '(poses_set[k][:, :2])\n', (28760, 28781), False, 'import copy\n'), ((21300, 21331), 'numpy.delete', 'np.delete', (['dimensions_to_use', '(0)'], {}), '(dimensions_to_use, 0)\n', (21309, 21331), True, 'import numpy as np\n'), ((21368, 21429), 'numpy.hstack', 'np.hstack', (['(dimensions_to_use * 2, dimensions_to_use * 2 + 1)'], {}), '((dimensions_to_use * 2, dimensions_to_use * 2 + 1))\n', (21377, 21429), True, 'import numpy as np\n'), ((21902, 21995), 'numpy.hstack', 'np.hstack', (['(dimensions_to_use * 3, dimensions_to_use * 3 + 1, dimensions_to_use * 3 + 
2)'], {}), '((dimensions_to_use * 3, dimensions_to_use * 3 + 1, \n dimensions_to_use * 3 + 2))\n', (21911, 21995), True, 'import numpy as np\n'), ((24074, 24103), 'numpy.std', 'np.std', (['temp[:, :, 0]'], {'axis': '(1)'}), '(temp[:, :, 0], axis=1)\n', (24080, 24103), True, 'import numpy as np\n'), ((24194, 24223), 'numpy.std', 'np.std', (['temp[:, :, 1]'], {'axis': '(1)'}), '(temp[:, :, 1], axis=1)\n', (24200, 24223), True, 'import numpy as np\n'), ((24596, 24629), 'numpy.divide', 'np.divide', (['(data[key] - mu)', 'stddev'], {}), '(data[key] - mu, stddev)\n', (24605, 24629), True, 'import numpy as np\n'), ((31358, 31379), 'numpy.load', 'np.load', (['cameras_path'], {}), '(cameras_path)\n', (31365, 31379), True, 'import numpy as np\n'), ((4759, 4819), 'numpy.load', 'np.load', (['"""../data/human3.6M/h36m/numpy/threeDPose_train.npy"""'], {}), "('../data/human3.6M/h36m/numpy/threeDPose_train.npy')\n", (4766, 4819), True, 'import numpy as np\n'), ((4849, 4908), 'numpy.load', 'np.load', (['"""../data/human3.6M/h36m/numpy/threeDPose_test.npy"""'], {}), "('../data/human3.6M/h36m/numpy/threeDPose_test.npy')\n", (4856, 4908), True, 'import numpy as np\n'), ((7512, 7563), 'numpy.random.choice', 'np.random.choice', (['total', 'sampled_num'], {'replace': '(False)'}), '(total, sampled_num, replace=False)\n', (7528, 7563), True, 'import numpy as np\n'), ((8274, 8292), 'numpy.load', 'np.load', (['dict_path'], {}), '(dict_path)\n', (8281, 8292), True, 'import numpy as np\n'), ((8659, 8684), 'numpy.load', 'np.load', (['opt.evolved_path'], {}), '(opt.evolved_path)\n', (8666, 8684), True, 'import numpy as np\n'), ((9550, 9568), 'numpy.load', 'np.load', (['dict_path'], {}), '(dict_path)\n', (9557, 9568), True, 'import numpy as np\n'), ((21653, 21694), 'numpy.array', 'np.array', (["[(x != '') for x in H36M_NAMES]"], {}), "([(x != '') for x in H36M_NAMES])\n", (21661, 21694), True, 'import numpy as np\n'), ((22897, 22927), 'numpy.reshape', 'np.reshape', (['t3d_world', '[-1, 3]'], {}), 
'(t3d_world, [-1, 3])\n', (22907, 22927), True, 'import numpy as np\n'), ((27374, 27398), 'numpy.reshape', 'np.reshape', (['t3d', '[-1, 3]'], {}), '(t3d, [-1, 3])\n', (27384, 27398), True, 'import numpy as np\n'), ((8942, 8960), 'numpy.load', 'np.load', (['dict_path'], {}), '(dict_path)\n', (8949, 8960), True, 'import numpy as np\n'), ((9126, 9144), 'numpy.load', 'np.load', (['dict_path'], {}), '(dict_path)\n', (9133, 9144), True, 'import numpy as np\n'), ((21074, 21136), 'numpy.array', 'np.array', (["[(x != '' and x != 'Neck/Nose') for x in H36M_NAMES]"], {}), "([(x != '' and x != 'Neck/Nose') for x in H36M_NAMES])\n", (21082, 21136), True, 'import numpy as np\n'), ((21194, 21235), 'numpy.array', 'np.array', (["[(x != '') for x in H36M_NAMES]"], {}), "([(x != '') for x in H36M_NAMES])\n", (21202, 21235), True, 'import numpy as np\n'), ((24003, 24033), 'numpy.mean', 'np.mean', (['temp[:, :, 0]'], {'axis': '(1)'}), '(temp[:, :, 0], axis=1)\n', (24010, 24033), True, 'import numpy as np\n'), ((24123, 24153), 'numpy.mean', 'np.mean', (['temp[:, :, 1]'], {'axis': '(1)'}), '(temp[:, :, 1], axis=1)\n', (24130, 24153), True, 'import numpy as np\n'), ((11116, 11168), 'os.path.join', 'os.path.join', (['opt.data_dir', '"""twoDPose_HRN_train.npy"""'], {}), "(opt.data_dir, 'twoDPose_HRN_train.npy')\n", (11128, 11168), False, 'import os\n'), ((11219, 11270), 'os.path.join', 'os.path.join', (['opt.data_dir', '"""twoDPose_HRN_test.npy"""'], {}), "(opt.data_dir, 'twoDPose_HRN_test.npy')\n", (11231, 11270), False, 'import os\n'), ((12803, 12828), 'numpy.load', 'np.load', (['opt.evolved_path'], {}), '(opt.evolved_path)\n', (12810, 12828), True, 'import numpy as np\n')] |
# This file is part of PyTMM.
#
# PyTMM is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyTMM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
#
#
# Copyright 2014-2015 <NAME> <<EMAIL>>
import numpy
import enum
class Polarization(enum.Enum):
    """Light polarization: s (TE, used as the default below) or p (TM)."""
    s = 0
    p = 1
class TransferMatrix:
    """
    Transfer-matrix method (TMM) for dielectric layer stacks.

    Structure matrices compose from bottom to top:

        | T |   |        | |        | |     |   | 1 |
        |   | = | Bottom | | Matrix | | Top | = |   |
        | 0 |   |        | |        | |     |   | R |
    """

    @staticmethod
    def structure(*args):
        """
        Compose several structure matrices into a single transfer matrix.

        Arguments are ordered left to right = bottom to top.

        :param args: individual TransferMatrix objects
        """
        product = numpy.identity(2, dtype=numpy.complex128)
        for tm in args:
            # Each successive matrix multiplies from the left
            product = numpy.dot(tm.matrix, product)
        return TransferMatrix(product)

    @staticmethod
    def layer(n, d, wavelength, theta=0, pol=Polarization.s):
        """
        Create an Air-DielectricLayer-Air transfer matrix.

        :param n: refractive index of the layer
        :param d: layer thickness
        :param wavelength: vacuum wavelength
        """
        entry = TransferMatrix.boundingLayer(1, n, theta, pol)
        exit_boundary = TransferMatrix.boundingLayer(n, 1, theta, pol)
        bulk = TransferMatrix.propagationLayer(n, d, wavelength, theta, pol)
        return TransferMatrix.structure(entry, bulk, exit_boundary)

    @staticmethod
    def boundingLayer(n1, n2, theta=0, pol=Polarization.s):
        """
        Create a DielectricLayer-DielectricLayer boundary transfer matrix.

        :param n1: refractive index on the incidence side
        :param n2: refractive index on the far side
        """
        # Snell's law; the complex arcsin also covers evanescent angles
        theta2 = numpy.arcsin((n1/n2)*numpy.sin(theta), dtype=numpy.complex128)

        if pol is Polarization.s:
            # TE polarization
            _n1 = n1*numpy.cos(theta)
            _n2 = n2*numpy.cos(theta2)
            a21 = 1
        elif pol is Polarization.p:
            # TM polarization
            _n1 = n1/numpy.cos(theta)
            _n2 = n2/numpy.cos(theta2)
            a21 = numpy.cos(theta2)/numpy.cos(theta)

        total = _n1 + _n2
        diff = _n2 - _n1
        boundary = 1/(2 * a21 * _n2) *numpy.array([[total, diff],
                                                  [diff, total]], dtype=numpy.complex128)
        return TransferMatrix(boundary)

    @staticmethod
    def propagationLayer(n, d, wavelength, theta=0, pol=Polarization.s):
        """
        Create a propagation transfer matrix: width d, refractive index n.

        :param n: refractive index
        :param d: propagation distance
        :param wavelength: vacuum wavelength
        """
        theta2 = numpy.arcsin((1/n)*numpy.sin(theta), dtype=numpy.complex128)
        # Accumulated optical phase across the layer
        phase = (1j * n * d * 2 * numpy.pi / wavelength) * numpy.cos(theta2)
        propagation = numpy.array([[numpy.exp(-phase), 0],
                                  [0, numpy.exp(phase)]],
                                 dtype=numpy.complex128)
        return TransferMatrix(propagation)

    def __init__(self, matrix):
        # 2x2 complex transfer matrix
        self.matrix = matrix

    def invert(self):
        """
        Invert the stored matrix in place.
        """
        self.matrix = numpy.linalg.inv(self.matrix)

    def appendLeft(self, matrix):
        """
        Multiply `matrix` onto this one from the left, in place.

        :param matrix:
        """
        self.matrix = numpy.dot(matrix.matrix, self.matrix)

    def appendRight(self, matrix):
        """
        Multiply `matrix` onto this one from the right, in place.

        :param matrix:
        """
        self.matrix = numpy.dot(self.matrix, matrix.matrix)
def solvePropagation(transferMatrix, incidentField=1.0):
    """Calculate reflectance and transmittance amplitudes.

    :param transferMatrix: TransferMatrix describing the whole structure
    :param incidentField: amplitude of the incoming field
    :return: (reflectance, transmittance) pair
    """
    m = transferMatrix.matrix
    # Unknowns: x[0] = reflectance, x[1] = transmittance
    lhs = numpy.array([[m[0, 1], -1],
                       [m[1, 1], 0]])
    rhs = numpy.multiply(numpy.array([-m[0, 0], -m[1, 0]]), incidentField)
    reflectance, transmittance = numpy.linalg.solve(lhs, rhs)
    return reflectance, transmittance
def findReciprocalTransferMatrix(transmittance, reflectance, bottomMat=None,
                                 topMat=None):  # , incidentField=1.0
    """
    Reconstruct the transfer matrix of a reciprocal structure from its
    complex transmittance and reflectance amplitudes.

    :param transmittance: complex transmission amplitude (must be non-zero)
    :param reflectance: complex reflection amplitude
    :param bottomMat: TransferMatrix bounding the structure from below
                      (defaults to the identity)
    :param topMat: TransferMatrix bounding the structure from above
                   (defaults to the identity)
    :return: the reconstructed TransferMatrix
    """
    # BUG FIX: the defaults were shared TransferMatrix instances created once
    # at definition time (mutable default arguments); build fresh identity
    # matrices on every call instead.
    if bottomMat is None:
        bottomMat = TransferMatrix(numpy.identity(2))
    if topMat is None:
        topMat = TransferMatrix(numpy.identity(2))
    assert transmittance != 0
    matrix = numpy.array([[1 / numpy.conj(transmittance), reflectance / transmittance],
                          [numpy.conj(reflectance / transmittance), 1 / transmittance]])
    # Strip the bounding layers off both sides
    matrix = numpy.dot(numpy.linalg.inv(bottomMat.matrix), matrix)
    matrix = numpy.dot(matrix, numpy.linalg.inv(topMat.matrix))
    return TransferMatrix(matrix)
def findReciprocalTransferMatrixLegacy(transmittance, reflectance, bottomMat=None,
                                       topMat=None):  # , incidentField=1.0
    """
    Legacy reconstruction of a reciprocal transfer matrix via a real 4x4
    linear system in the real/imaginary parts.

    :param transmittance: complex transmission amplitude
    :param reflectance: complex reflection amplitude
    :param bottomMat: TransferMatrix bounding the structure from below
                      (defaults to the identity)
    :param topMat: TransferMatrix bounding the structure from above
                   (defaults to the identity)
    :return: the reconstructed TransferMatrix
    """
    # BUG FIX: the defaults were shared TransferMatrix instances created once
    # at definition time (mutable default arguments); build fresh identity
    # matrices on every call instead.
    if bottomMat is None:
        bottomMat = TransferMatrix(numpy.identity(2))
    if topMat is None:
        topMat = TransferMatrix(numpy.identity(2))
    a = numpy.identity(2)
    b = numpy.array([[numpy.real(reflectance), numpy.imag(reflectance)],
                    [numpy.imag(reflectance), -numpy.real(reflectance)]])
    # Solve for the four real unknowns of the 2x2 complex matrix
    lhs = numpy.vstack((numpy.hstack((a, b)), numpy.hstack((b, a))))
    rhs = numpy.array([numpy.real(transmittance), numpy.imag(transmittance), 0, 0])
    res = numpy.linalg.solve(lhs, rhs)
    matrix = numpy.array([[res[0] + 1j * res[1], res[2] - 1j * res[3]],
                        [res[2] + 1j * res[3], res[0] - 1j * res[1]]])
    # Strip the bounding layers off both sides
    matrix = numpy.dot(numpy.linalg.inv(bottomMat.matrix), matrix)
    matrix = numpy.dot(matrix, numpy.linalg.inv(topMat.matrix))
    return TransferMatrix(matrix)
def findGeneralizedTransferMatrix(transmitance1, reflectance1, transmitance2, reflectance2,
                                  bottomMat1=None,
                                  topMat1=None,
                                  bottomMat2=None,
                                  topMat2=None):
    """
    Reconstruct a transfer matrix from two independent measurements of
    transmission/reflection amplitudes, each with its own bounding layers.

    :param transmitance1: complex transmission amplitude, measurement 1
    :param reflectance1: complex reflection amplitude, measurement 1
    :param transmitance2: complex transmission amplitude, measurement 2
    :param reflectance2: complex reflection amplitude, measurement 2
    :param bottomMat1: bottom bounding TransferMatrix, measurement 1 (identity)
    :param topMat1: top bounding TransferMatrix, measurement 1 (identity)
    :param bottomMat2: bottom bounding TransferMatrix, measurement 2 (identity)
    :param topMat2: top bounding TransferMatrix, measurement 2 (identity)
    :return: the reconstructed TransferMatrix
    """
    # BUG FIX: the defaults were shared TransferMatrix instances created once
    # at definition time (mutable default arguments); build fresh identity
    # matrices on every call instead.
    if bottomMat1 is None:
        bottomMat1 = TransferMatrix(numpy.identity(2))
    if topMat1 is None:
        topMat1 = TransferMatrix(numpy.identity(2))
    if bottomMat2 is None:
        bottomMat2 = TransferMatrix(numpy.identity(2))
    if topMat2 is None:
        topMat2 = TransferMatrix(numpy.identity(2))
    # Known left-hand-side vectors for each measurement
    a12 = numpy.dot(numpy.linalg.inv(bottomMat1.matrix), numpy.array([[transmitance1], [0]]))
    a34 = numpy.dot(numpy.linalg.inv(bottomMat2.matrix), numpy.array([[transmitance2], [0]]))
    # Known right-hand-side vectors for each measurement
    b12 = numpy.dot(topMat1.matrix, numpy.array([[1], [reflectance1]]))
    b34 = numpy.dot(topMat2.matrix, numpy.array([[1], [reflectance2]]))
    rhs = numpy.array([a12[0, 0], a34[0, 0], a12[1, 0], a34[1, 0]])
    bmat = numpy.array([[b12[0, 0], b12[1, 0]],
                        [b34[0, 0], b34[1, 0]]])
    # Block-diagonal system: one block per matrix row
    lhs = numpy.vstack((numpy.hstack((bmat, numpy.zeros((2, 2)))),
                       numpy.hstack((numpy.zeros((2, 2)), bmat))))
    res = numpy.linalg.solve(lhs, rhs)
    mat = numpy.array([[res[0], res[2]],
                       [res[1], res[3]]])
    return TransferMatrix(mat)
| [
"numpy.identity",
"numpy.multiply",
"numpy.linalg.solve",
"numpy.hstack",
"numpy.conj",
"numpy.array",
"numpy.dot",
"numpy.linalg.inv",
"numpy.real",
"numpy.cos",
"numpy.zeros",
"numpy.sin",
"numpy.imag"
] | [((4361, 4448), 'numpy.array', 'numpy.array', (['[[transferMatrix.matrix[0, 1], -1], [transferMatrix.matrix[1, 1], 0]]'], {}), '([[transferMatrix.matrix[0, 1], -1], [transferMatrix.matrix[1, 1\n ], 0]])\n', (4372, 4448), False, 'import numpy\n'), ((4477, 4550), 'numpy.array', 'numpy.array', (['[-transferMatrix.matrix[0, 0], -transferMatrix.matrix[1, 0]]'], {}), '([-transferMatrix.matrix[0, 0], -transferMatrix.matrix[1, 0]])\n', (4488, 4550), False, 'import numpy\n'), ((4561, 4595), 'numpy.multiply', 'numpy.multiply', (['rhs', 'incidentField'], {}), '(rhs, incidentField)\n', (4575, 4595), False, 'import numpy\n'), ((4606, 4634), 'numpy.linalg.solve', 'numpy.linalg.solve', (['lhs', 'rhs'], {}), '(lhs, rhs)\n', (4624, 4634), False, 'import numpy\n'), ((5774, 5791), 'numpy.identity', 'numpy.identity', (['(2)'], {}), '(2)\n', (5788, 5791), False, 'import numpy\n'), ((6103, 6131), 'numpy.linalg.solve', 'numpy.linalg.solve', (['lhs', 'rhs'], {}), '(lhs, rhs)\n', (6121, 6131), False, 'import numpy\n'), ((6145, 6263), 'numpy.array', 'numpy.array', (['[[res[0] + 1.0j * res[1], res[2] - 1.0j * res[3]], [res[2] + 1.0j * res[3],\n res[0] - 1.0j * res[1]]]'], {}), '([[res[0] + 1.0j * res[1], res[2] - 1.0j * res[3]], [res[2] + \n 1.0j * res[3], res[0] - 1.0j * res[1]]])\n', (6156, 6263), False, 'import numpy\n'), ((7414, 7471), 'numpy.array', 'numpy.array', (['[a12[0, 0], a34[0, 0], a12[1, 0], a34[1, 0]]'], {}), '([a12[0, 0], a34[0, 0], a12[1, 0], a34[1, 0]])\n', (7425, 7471), False, 'import numpy\n'), ((7484, 7545), 'numpy.array', 'numpy.array', (['[[b12[0, 0], b12[1, 0]], [b34[0, 0], b34[1, 0]]]'], {}), '([[b12[0, 0], b12[1, 0]], [b34[0, 0], b34[1, 0]]])\n', (7495, 7545), False, 'import numpy\n'), ((7716, 7744), 'numpy.linalg.solve', 'numpy.linalg.solve', (['lhs', 'rhs'], {}), '(lhs, rhs)\n', (7734, 7744), False, 'import numpy\n'), ((7756, 7805), 'numpy.array', 'numpy.array', (['[[res[0], res[2]], [res[1], res[3]]]'], {}), '([[res[0], res[2]], [res[1], res[3]]])\n', (7767, 
7805), False, 'import numpy\n'), ((1273, 1314), 'numpy.identity', 'numpy.identity', (['(2)'], {'dtype': 'numpy.complex128'}), '(2, dtype=numpy.complex128)\n', (1287, 1314), False, 'import numpy\n'), ((3816, 3845), 'numpy.linalg.inv', 'numpy.linalg.inv', (['self.matrix'], {}), '(self.matrix)\n', (3832, 3845), False, 'import numpy\n'), ((3951, 3988), 'numpy.dot', 'numpy.dot', (['matrix.matrix', 'self.matrix'], {}), '(matrix.matrix, self.matrix)\n', (3960, 3988), False, 'import numpy\n'), ((4095, 4132), 'numpy.dot', 'numpy.dot', (['self.matrix', 'matrix.matrix'], {}), '(self.matrix, matrix.matrix)\n', (4104, 4132), False, 'import numpy\n'), ((4813, 4830), 'numpy.identity', 'numpy.identity', (['(2)'], {}), '(2)\n', (4827, 4830), False, 'import numpy\n'), ((4888, 4905), 'numpy.identity', 'numpy.identity', (['(2)'], {}), '(2)\n', (4902, 4905), False, 'import numpy\n'), ((5284, 5318), 'numpy.linalg.inv', 'numpy.linalg.inv', (['bottomMat.matrix'], {}), '(bottomMat.matrix)\n', (5300, 5318), False, 'import numpy\n'), ((5359, 5390), 'numpy.linalg.inv', 'numpy.linalg.inv', (['topMat.matrix'], {}), '(topMat.matrix)\n', (5375, 5390), False, 'import numpy\n'), ((5520, 5537), 'numpy.identity', 'numpy.identity', (['(2)'], {}), '(2)\n', (5534, 5537), False, 'import numpy\n'), ((5601, 5618), 'numpy.identity', 'numpy.identity', (['(2)'], {}), '(2)\n', (5615, 5618), False, 'import numpy\n'), ((6301, 6335), 'numpy.linalg.inv', 'numpy.linalg.inv', (['bottomMat.matrix'], {}), '(bottomMat.matrix)\n', (6317, 6335), False, 'import numpy\n'), ((6376, 6407), 'numpy.linalg.inv', 'numpy.linalg.inv', (['topMat.matrix'], {}), '(topMat.matrix)\n', (6392, 6407), False, 'import numpy\n'), ((6597, 6614), 'numpy.identity', 'numpy.identity', (['(2)'], {}), '(2)\n', (6611, 6614), False, 'import numpy\n'), ((6674, 6691), 'numpy.identity', 'numpy.identity', (['(2)'], {}), '(2)\n', (6688, 6691), False, 'import numpy\n'), ((6754, 6771), 'numpy.identity', 'numpy.identity', (['(2)'], {}), '(2)\n', (6768, 
6771), False, 'import numpy\n'), ((6831, 6848), 'numpy.identity', 'numpy.identity', (['(2)'], {}), '(2)\n', (6845, 6848), False, 'import numpy\n'), ((7090, 7125), 'numpy.linalg.inv', 'numpy.linalg.inv', (['bottomMat1.matrix'], {}), '(bottomMat1.matrix)\n', (7106, 7125), False, 'import numpy\n'), ((7127, 7162), 'numpy.array', 'numpy.array', (['[[transmitance1], [0]]'], {}), '([[transmitance1], [0]])\n', (7138, 7162), False, 'import numpy\n'), ((7184, 7219), 'numpy.linalg.inv', 'numpy.linalg.inv', (['bottomMat2.matrix'], {}), '(bottomMat2.matrix)\n', (7200, 7219), False, 'import numpy\n'), ((7221, 7256), 'numpy.array', 'numpy.array', (['[[transmitance2], [0]]'], {}), '([[transmitance2], [0]])\n', (7232, 7256), False, 'import numpy\n'), ((7295, 7329), 'numpy.array', 'numpy.array', (['[[1], [reflectance1]]'], {}), '([[1], [reflectance1]])\n', (7306, 7329), False, 'import numpy\n'), ((7367, 7401), 'numpy.array', 'numpy.array', (['[[1], [reflectance2]]'], {}), '([[1], [reflectance2]])\n', (7378, 7401), False, 'import numpy\n'), ((1356, 1380), 'numpy.dot', 'numpy.dot', (['m.matrix', 'mat'], {}), '(m.matrix, mat)\n', (1365, 1380), False, 'import numpy\n'), ((2829, 2919), 'numpy.array', 'numpy.array', (['[[_n1 + _n2, _n2 - _n1], [_n2 - _n1, _n1 + _n2]]'], {'dtype': 'numpy.complex128'}), '([[_n1 + _n2, _n2 - _n1], [_n2 - _n1, _n1 + _n2]], dtype=numpy.\n complex128)\n', (2840, 2919), False, 'import numpy\n'), ((5964, 5984), 'numpy.hstack', 'numpy.hstack', (['(a, b)'], {}), '((a, b))\n', (5976, 5984), False, 'import numpy\n'), ((5986, 6006), 'numpy.hstack', 'numpy.hstack', (['(b, a)'], {}), '((b, a))\n', (5998, 6006), False, 'import numpy\n'), ((6032, 6057), 'numpy.real', 'numpy.real', (['transmittance'], {}), '(transmittance)\n', (6042, 6057), False, 'import numpy\n'), ((6059, 6084), 'numpy.imag', 'numpy.imag', (['transmittance'], {}), '(transmittance)\n', (6069, 6084), False, 'import numpy\n'), ((2422, 2438), 'numpy.sin', 'numpy.sin', (['theta'], {}), '(theta)\n', (2431, 
2438), False, 'import numpy\n'), ((2533, 2549), 'numpy.cos', 'numpy.cos', (['theta'], {}), '(theta)\n', (2542, 2549), False, 'import numpy\n'), ((2571, 2588), 'numpy.cos', 'numpy.cos', (['theta2'], {}), '(theta2)\n', (2580, 2588), False, 'import numpy\n'), ((3289, 3305), 'numpy.sin', 'numpy.sin', (['theta'], {}), '(theta)\n', (3298, 3305), False, 'import numpy\n'), ((5199, 5238), 'numpy.conj', 'numpy.conj', (['(reflectance / transmittance)'], {}), '(reflectance / transmittance)\n', (5209, 5238), False, 'import numpy\n'), ((5814, 5837), 'numpy.real', 'numpy.real', (['reflectance'], {}), '(reflectance)\n', (5824, 5837), False, 'import numpy\n'), ((5839, 5862), 'numpy.imag', 'numpy.imag', (['reflectance'], {}), '(reflectance)\n', (5849, 5862), False, 'import numpy\n'), ((5887, 5910), 'numpy.imag', 'numpy.imag', (['reflectance'], {}), '(reflectance)\n', (5897, 5910), False, 'import numpy\n'), ((2680, 2696), 'numpy.cos', 'numpy.cos', (['theta'], {}), '(theta)\n', (2689, 2696), False, 'import numpy\n'), ((2718, 2735), 'numpy.cos', 'numpy.cos', (['theta2'], {}), '(theta2)\n', (2727, 2735), False, 'import numpy\n'), ((2754, 2771), 'numpy.cos', 'numpy.cos', (['theta2'], {}), '(theta2)\n', (2763, 2771), False, 'import numpy\n'), ((2772, 2788), 'numpy.cos', 'numpy.cos', (['theta'], {}), '(theta)\n', (2781, 2788), False, 'import numpy\n'), ((5115, 5140), 'numpy.conj', 'numpy.conj', (['transmittance'], {}), '(transmittance)\n', (5125, 5140), False, 'import numpy\n'), ((5913, 5936), 'numpy.real', 'numpy.real', (['reflectance'], {}), '(reflectance)\n', (5923, 5936), False, 'import numpy\n'), ((7615, 7634), 'numpy.zeros', 'numpy.zeros', (['(2, 2)'], {}), '((2, 2))\n', (7626, 7634), False, 'import numpy\n'), ((7676, 7695), 'numpy.zeros', 'numpy.zeros', (['(2, 2)'], {}), '((2, 2))\n', (7687, 7695), False, 'import numpy\n'), ((3422, 3439), 'numpy.cos', 'numpy.cos', (['theta2'], {}), '(theta2)\n', (3431, 3439), False, 'import numpy\n'), ((3538, 3555), 'numpy.cos', 'numpy.cos', 
(['theta2'], {}), '(theta2)\n', (3547, 3555), False, 'import numpy\n')] |
"""
Various utilities used in the main code.
NOTE: there should be no autograd functions here, only plain numpy/scipy
"""
import numpy as np
from scipy.linalg import toeplitz
from scipy.optimize import brentq
def ftinv(ft_coeff, gvec, xgrid, ygrid):
    """
    Discrete inverse Fourier transform on a real-space mesh.

    Evaluates sum_G ft_coeff[G] * exp(1j * G . r) over the mesh spanned by
    'xgrid' and 'ygrid', where the reciprocal vectors G are the columns of
    'gvec'.  An FFT would be faster, but this form is more general since it
    does not have to deal with grid and lattice issues.
    """
    xmesh, ymesh = np.meshgrid(xgrid, ygrid)
    field = np.zeros(xmesh.shape, dtype=np.complex128)

    # Duplicate G-vectors would be double-counted; keep one representative
    # column index for each unique vector
    _, ind_unique = np.unique(gvec, return_index=True, axis=1)

    for ig in ind_unique:
        phase = 1j*gvec[0, ig]*xmesh + 1j*gvec[1, ig]*ymesh
        field += ft_coeff[ig]*np.exp(phase)

    return field
def ft2square(lattice, ft_coeff, gvec):
    """
    Make a square array of Fourier components given a number of them defined
    over a set of reciprocal vectors gvec.

    :param lattice: lattice object exposing `type`, `b1` and `b2`
    :param ft_coeff: Fourier coefficients, one per column of `gvec`
    :param gvec: 2 x N array of reciprocal vectors
    :return: (eps_ft, gx_grid, gy_grid)
    NB: function hasn't really been tested, just storing some code.
    """
    if lattice.type not in ['hexagonal', 'square']:
        raise NotImplementedError("ft2square probably only works for" \
                 "a lattice initialized as 'square' or 'hexagonal'")

    dgx = np.abs(lattice.b1[0])
    dgy = np.abs(lattice.b2[1])
    nx = np.int_(np.abs(np.max(gvec[0, :])/dgx))
    ny = np.int_(np.abs(np.max(gvec[1, :])/dgy))

    # The array covers G-indices -nx..nx and -ny..ny inclusive
    nxtot = 2*nx + 1
    nytot = 2*ny + 1
    eps_ft = np.zeros((nxtot, nytot), dtype=np.complex128)
    # BUG FIX: np.arange excludes its endpoint, so the grids were one entry
    # short of eps_ft's extent; include +nx / +ny
    gx_grid = np.arange(-nx, nx + 1)*dgx
    gy_grid = np.arange(-ny, ny + 1)*dgy

    for jG in range(gvec.shape[1]):
        nG = np.int_(gvec[:, jG]/[dgx, dgy])
        # BUG FIX: this line referenced an undefined name `nG1` (NameError);
        # the integer G-indices are in `nG`
        eps_ft[nx + nG[0], ny + nG[1]] = ft_coeff[jG]

    return (eps_ft, gx_grid, gy_grid)
def grad_num(fn, arg, step_size=1e-7):
    """ Numerically differentiate `fn` w.r.t. its argument `arg`.

    `arg` can be a numpy array of arbitrary shape; `step_size` can be a
    number or an array of the same shape as `arg`.
    """
    n_el = arg.size
    shape = arg.shape
    baseline = fn(arg)

    # Broadcast a scalar step to one entry per element
    if type(step_size) == float:
        steps = step_size*np.ones(n_el)
    else:
        steps = step_size.ravel()

    grad_flat = np.zeros((n_el,))
    for k in range(n_el):
        # Forward difference along the k-th flattened coordinate
        perturbed = arg.flatten()
        perturbed[k] += steps[k]
        grad_flat[k] = (fn(perturbed.reshape(shape)) - baseline) / steps[k]

    return grad_flat.reshape(shape)
def vjp_maker_num(fn, arg_inds, steps):
    """ Makes a vjp_maker for the numerical derivative of a function `fn`
    w.r.t. argument at position `arg_ind` using step sizes `steps` """

    def _maker_for(ia):
        # Bind the argument position handled by this maker
        arg_ind = arg_inds[ia]

        def vjp_maker(fn_out, *args):
            shape = args[arg_ind].shape
            num_p = args[arg_ind].size
            step = steps[ia]

            def vjp(v):
                # Finite-difference each component, then project onto v
                result = np.zeros(num_p)
                for ip in range(num_p):
                    perturbed_args = list(args)
                    flat = args[arg_ind].flatten()
                    flat[ip] += step
                    perturbed_args[arg_ind] = flat.reshape(shape)
                    dfn_darg = (fn(*perturbed_args) - fn_out)/step
                    result[ip] = np.sum(v * dfn_darg)
                return result

            return vjp

        return vjp_maker

    return tuple(_maker_for(ia=ia) for ia in range(len(arg_inds)))
def toeplitz_block(n, T1, T2):
    """
    Constructs a Hermitian Toeplitz-block-Toeplitz matrix with n blocks and
    T1 in the first row and T2 in the first column of every block in the first
    row of blocks.

    :param n: number of blocks per row/column
    :param T1: concatenated first rows of the blocks (length n*p)
    :param T2: concatenated first columns of the blocks (length n*p)
    :return: the (n*p) x (n*p) Hermitian matrix
    """
    ntot = T1.shape[0]
    p = int(ntot/n)  # Linear size of each block
    Tmat = np.zeros((ntot, ntot), dtype=T1.dtype)
    # Fill only the upper-triangular blocks; Hermitian symmetry supplies the rest
    for ind1 in range(n):
        for ind2 in range(ind1, n):
            toep1 = T1[(ind2-ind1)*p:(ind2-ind1+1)*p]
            toep2 = T2[(ind2-ind1)*p:(ind2-ind1+1)*p]
            Tmat[ind1*p:(ind1+1)*p, ind2*p:(ind2+1)*p] = \
                        toeplitz(toep2, toep1)
    # BUG FIX: this return statement was duplicated; the second copy was
    # unreachable dead code and has been removed
    return np.triu(Tmat) + np.conj(np.transpose(np.triu(Tmat, 1)))
def get_value(x):
    """
    This is for when using the 'autograd' backend and you want to detach an
    ArrayBox and just convert it to a numpy array.
    """
    # Compare on the type's repr to avoid importing autograd here
    is_arraybox = str(type(x)) == "<class 'autograd.numpy.numpy_boxes.ArrayBox'>"
    return x._value if is_arraybox else x
def fsolve(f, lb, ub, *args):
    """
    Solve for scalar f(x, *args) = 0 w.r.t. scalar x within lb < x < ub
    """
    # Detach any autograd boxes before handing off to scipy
    detached = tuple(get_value(arg) for arg in args)
    return brentq(f, lb, ub, args=detached)
def find_nearest(array, value, N):
    """
    Find the indexes of the N elements in an array nearest to a given value
    (Not the most efficient way but this is not a coding interview...)
    """
    distances = np.abs(array - value)
    order = np.argsort(distances)
    return order[:N]
def RedhefferStar(SA, SB):
    """Redheffer star product of two 2x2 scattering matrices SA and SB."""
    assert type(SA) == np.ndarray, 'not np.matrix'
    assert type(SB) == np.ndarray, 'not np.matrix'

    I = 1
    # Unpack the four scattering parameters of each matrix
    SA_11, SA_12, SA_21, SA_22 = SA[0, 0], SA[0, 1], SA[1, 0], SA[1, 1]
    SB_11, SB_12, SB_21, SB_22 = SB[0, 0], SB[0, 1], SB[1, 0], SB[1, 1]

    # Resummed interaction factors between the two scatterers
    D = 1.0/(I - SB_11*SA_22)
    F = 1.0/(I - SA_22*SB_11)

    return np.array([
        [SA_11 + SA_12*D*SB_11*SA_21, SA_12*D*SB_12],
        [SB_21*F*SA_21, SB_22 + SB_21*F*SA_22*SB_12],
    ])
def extend(vals, inds, shape):
    """ Makes an array of shape `shape` where indices `inds` have values `vals`
    """
    out = np.zeros(shape, dtype=vals.dtype)
    out[inds] = vals
    return out
| [
"numpy.abs",
"numpy.triu",
"numpy.unique",
"scipy.optimize.brentq",
"numpy.ones",
"numpy.max",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"scipy.linalg.toeplitz",
"numpy.sum",
"numpy.meshgrid",
"numpy.int_",
"numpy.arange"
] | [((658, 683), 'numpy.meshgrid', 'np.meshgrid', (['xgrid', 'ygrid'], {}), '(xgrid, ygrid)\n', (669, 683), True, 'import numpy as np\n'), ((696, 738), 'numpy.zeros', 'np.zeros', (['xmesh.shape'], {'dtype': 'np.complex128'}), '(xmesh.shape, dtype=np.complex128)\n', (704, 738), True, 'import numpy as np\n'), ((807, 849), 'numpy.unique', 'np.unique', (['gvec'], {'return_index': '(True)', 'axis': '(1)'}), '(gvec, return_index=True, axis=1)\n', (816, 849), True, 'import numpy as np\n'), ((2438, 2459), 'numpy.abs', 'np.abs', (['lattice.b1[0]'], {}), '(lattice.b1[0])\n', (2444, 2459), True, 'import numpy as np\n'), ((2470, 2491), 'numpy.abs', 'np.abs', (['lattice.b2[1]'], {}), '(lattice.b2[1])\n', (2476, 2491), True, 'import numpy as np\n'), ((2645, 2690), 'numpy.zeros', 'np.zeros', (['(nxtot, nytot)'], {'dtype': 'np.complex128'}), '((nxtot, nytot), dtype=np.complex128)\n', (2653, 2690), True, 'import numpy as np\n'), ((3229, 3243), 'numpy.zeros', 'np.zeros', (['(N,)'], {}), '((N,))\n', (3237, 3243), True, 'import numpy as np\n'), ((4965, 5003), 'numpy.zeros', 'np.zeros', (['(ntot, ntot)'], {'dtype': 'T1.dtype'}), '((ntot, ntot), dtype=T1.dtype)\n', (4973, 5003), True, 'import numpy as np\n'), ((5885, 5919), 'scipy.optimize.brentq', 'brentq', (['f', 'lb', 'ub'], {'args': 'args_value'}), '(f, lb, ub, args=args_value)\n', (5891, 5919), False, 'from scipy.optimize import brentq\n'), ((6794, 6840), 'numpy.array', 'np.array', (['[[SAB_11, SAB_12], [SAB_21, SAB_22]]'], {}), '([[SAB_11, SAB_12], [SAB_21, SAB_22]])\n', (6802, 6840), True, 'import numpy as np\n'), ((6984, 7017), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'vals.dtype'}), '(shape, dtype=vals.dtype)\n', (6992, 7017), True, 'import numpy as np\n'), ((2705, 2723), 'numpy.arange', 'np.arange', (['(-nx)', 'nx'], {}), '(-nx, nx)\n', (2714, 2723), True, 'import numpy as np\n'), ((2742, 2760), 'numpy.arange', 'np.arange', (['(-ny)', 'ny'], {}), '(-ny, ny)\n', (2751, 2760), True, 'import numpy as np\n'), ((2815, 
2848), 'numpy.int_', 'np.int_', (['(gvec[:, jG] / [dgx, dgy])'], {}), '(gvec[:, jG] / [dgx, dgy])\n', (2822, 2848), True, 'import numpy as np\n'), ((5288, 5301), 'numpy.triu', 'np.triu', (['Tmat'], {}), '(Tmat)\n', (5295, 5301), True, 'import numpy as np\n'), ((5355, 5368), 'numpy.triu', 'np.triu', (['Tmat'], {}), '(Tmat)\n', (5362, 5368), True, 'import numpy as np\n'), ((911, 978), 'numpy.exp', 'np.exp', (['(1.0j * gvec[0, indg] * xmesh + 1.0j * gvec[1, indg] * ymesh)'], {}), '(1.0j * gvec[0, indg] * xmesh + 1.0j * gvec[1, indg] * ymesh)\n', (917, 978), True, 'import numpy as np\n'), ((3323, 3333), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (3330, 3333), True, 'import numpy as np\n'), ((5253, 5275), 'scipy.linalg.toeplitz', 'toeplitz', (['toep2', 'toep1'], {}), '(toep2, toep1)\n', (5261, 5275), False, 'from scipy.linalg import toeplitz\n'), ((6131, 6152), 'numpy.abs', 'np.abs', (['(array - value)'], {}), '(array - value)\n', (6137, 6152), True, 'import numpy as np\n'), ((2516, 2534), 'numpy.max', 'np.max', (['gvec[0, :]'], {}), '(gvec[0, :])\n', (2522, 2534), True, 'import numpy as np\n'), ((2565, 2583), 'numpy.max', 'np.max', (['gvec[1, :]'], {}), '(gvec[1, :])\n', (2571, 2583), True, 'import numpy as np\n'), ((4067, 4082), 'numpy.zeros', 'np.zeros', (['num_p'], {}), '(num_p)\n', (4075, 4082), True, 'import numpy as np\n'), ((5325, 5341), 'numpy.triu', 'np.triu', (['Tmat', '(1)'], {}), '(Tmat, 1)\n', (5332, 5341), True, 'import numpy as np\n'), ((5392, 5408), 'numpy.triu', 'np.triu', (['Tmat', '(1)'], {}), '(Tmat, 1)\n', (5399, 5408), True, 'import numpy as np\n'), ((4420, 4440), 'numpy.sum', 'np.sum', (['(v * dfn_darg)'], {}), '(v * dfn_darg)\n', (4426, 4440), True, 'import numpy as np\n')] |
import numpy as np
from nltk.tokenize import word_tokenize
class Dataset(object):
    """Word co-occurrence dataset built from a plain-text corpus.

    Tokenizes `text_file`, keeps words appearing at least `vocab_min_count`
    times, and builds a co-occurrence matrix where a context word at
    distance j contributes a weight of 1/j.  Indexing the dataset yields
    (left_index, right_index, co-occurrence count) triples, one for every
    non-zero matrix entry.
    """

    def __init__(self, text_file, context_size, vocab_min_count):
        self.text_file = text_file
        self.context_size = context_size
        self.vocab_min_count = vocab_min_count

        self.vocab = []
        self.comat = None
        self.coocs = None

        # Load and process the data
        self.load()

    def load(self):
        """Read the corpus, build the vocabulary and the co-occurrence matrix."""
        # Use a context manager so the file handle is closed even on error
        with open(self.text_file, 'r') as f:
            text = f.read().lower()

        # Tokenize the text to create a word list
        word_list = word_tokenize(text)
        w_list_size = len(word_list)

        # Vocabulary: words occurring at least vocab_min_count times
        words, counts = np.unique(word_list, return_counts=True)
        self.vocab = [w for w, count in zip(words, counts)
                      if count >= self.vocab_min_count]
        self.vocab_size = len(self.vocab)
        word_to_idx = {w: i for i, w in enumerate(self.vocab)}

        # Construct a co-occurrence matrix weighted by 1/distance
        self.comat = np.zeros((self.vocab_size, self.vocab_size))
        for i in range(w_list_size):
            # Dict lookup is O(1); the previous `in self.vocab` test was O(V)
            idx = word_to_idx.get(word_list[i], None)
            if idx is None:
                continue
            for j in range(1, self.context_size + 1):
                # Words in the left context
                # BUG FIX: was `i - j > 0`, which skipped the word at
                # position 0; `>= 0` mirrors the right-context bound below
                if i - j >= 0:
                    left_idx = word_to_idx.get(word_list[i - j], None)
                    if left_idx is not None:
                        self.comat[idx, left_idx] += 1.0 / j
                # Words in the right context
                if i + j < w_list_size:
                    right_idx = word_to_idx.get(word_list[i + j], None)
                    if right_idx is not None:
                        self.comat[idx, right_idx] += 1.0 / j

        # Indices of all non-zero co-occurrences
        self.coocs = np.transpose(np.nonzero(self.comat))

    def __getitem__(self, index):
        """Return (left_idx, right_idx, co-occurrence count) for entry `index`."""
        i, j = self.coocs[index]
        return int(i), int(j), float(self.comat[i, j])

    def __len__(self):
        """Number of non-zero co-occurrence pairs."""
        return len(self.coocs)
| [
"numpy.zeros",
"numpy.unique",
"numpy.nonzero",
"nltk.tokenize.word_tokenize"
] | [((587, 606), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (600, 606), False, 'from nltk.tokenize import word_tokenize\n'), ((698, 738), 'numpy.unique', 'np.unique', (['word_list'], {'return_counts': '(True)'}), '(word_list, return_counts=True)\n', (707, 738), True, 'import numpy as np\n'), ((1061, 1105), 'numpy.zeros', 'np.zeros', (['(self.vocab_size, self.vocab_size)'], {}), '((self.vocab_size, self.vocab_size))\n', (1069, 1105), True, 'import numpy as np\n'), ((1908, 1930), 'numpy.nonzero', 'np.nonzero', (['self.comat'], {}), '(self.comat)\n', (1918, 1930), True, 'import numpy as np\n')] |
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Unit tests for the states.py submodule"""
import pytest
import numpy as np
from scipy.special import factorial as fac
from strawberryfields import backends
from strawberryfields import utils
# Shared test parameters: coherent-state amplitude `a`, squeezing magnitude
# `r` and squeezing phase `phi`
a = 0.3 + 0.1j
r = 0.23
phi = 0.123
class TestBackendStateCreation:
    """Verify that the backends construct well-formed state objects"""

    def test_full_state_creation(self, hbar, cutoff, setup_backend):
        """A state over all modes carries the right metadata"""
        backend = setup_backend(3)
        full_state = backend.state(modes=None)

        expected_names = {0: "q[0]", 1: "q[1]", 2: "q[2]"}
        assert full_state.num_modes == 3
        assert full_state.hbar == hbar
        assert full_state.mode_names == expected_names
        assert full_state.mode_indices == {name: idx for idx, name in expected_names.items()}

        if isinstance(backend, backends.BaseFock):
            assert full_state.cutoff_dim == cutoff

    def test_reduced_state_creation(self, setup_backend):
        """A reduced state keeps only the requested modes, renumbered"""
        backend = setup_backend(3)
        reduced = backend.state(modes=[0, 2])

        assert reduced.num_modes == 2
        assert reduced.mode_names == {0: "q[0]", 1: "q[2]"}
        assert reduced.mode_indices == {"q[0]": 0, "q[2]": 1}

    def test_reduced_state_fidelity(self, setup_backend, tol):
        """Tracing out the squeezed mode leaves the coherent state intact"""
        backend = setup_backend(2)

        mag, phase = np.abs(a), np.angle(a)
        backend.prepare_coherent_state(mag, phase, 0)
        backend.prepare_squeezed_state(r, phi, 1)

        reduced = backend.state(modes=[0])
        fidelity = reduced.fidelity_coherent([a])
        assert np.allclose(fidelity, 1, atol=tol, rtol=0)

    def test_reduced_state_fock_probs(self, cutoff, setup_backend, batch_size, tol):
        """Fock probabilities of the reduced mode match a coherent state"""
        backend = setup_backend(2)
        backend.prepare_coherent_state(np.abs(a), np.angle(a), 0)
        backend.prepare_squeezed_state(r, phi, 1)

        reduced = backend.state(modes=[0])
        probs = np.array([reduced.fock_prob([i]) for i in range(cutoff)]).T

        # Analytic coherent-state photon-number distribution |<n|alpha>|^2
        n = np.arange(cutoff)
        ref_state = np.exp(-0.5 * np.abs(a) ** 2) * a**n / np.sqrt(fac(n))
        ref_probs = np.abs(ref_state) ** 2
        if batch_size is not None:
            ref_probs = np.tile(ref_probs, batch_size)

        assert np.allclose(probs.flatten(), ref_probs.flatten(), atol=tol, rtol=0)
class TestBaseStateMeanPhotonNumber:
    """Tests for the mean photon number method"""

    def test_mean_photon_coherent(self, setup_backend, tol, batch_size):
        """Test that E(n) = |a|^2 and var(n) = |a|^2 for a coherent state"""
        if batch_size is not None:
            pytest.skip("Does not support batch mode")
        backend = setup_backend(1)
        backend.displacement(np.abs(a), np.angle(a), 0)
        state = backend.state()
        mean_photon, var = state.mean_photon(0)
        # Coherent states have Poissonian statistics: mean == variance == |a|^2.
        assert np.allclose(mean_photon, np.abs(a) ** 2, atol=tol, rtol=0)
        assert np.allclose(var, np.abs(a) ** 2, atol=tol, rtol=0)

    def test_mean_photon_squeezed(self, setup_backend, tol, batch_size):
        """Test that E(n)=sinh^2(r) and var(n)=2(sinh^2(r)+sinh^4(r)) for a squeezed state"""
        if batch_size is not None:
            pytest.skip("Does not support batch mode")
        backend = setup_backend(1)
        # Local squeezing magnitude (shadows the module-level r); the unused
        # local `a` from the original version has been removed.
        r = 0.1
        backend.squeeze(r, phi, 0)
        state = backend.state()
        mean_photon, var = state.mean_photon(0)
        assert np.allclose(mean_photon, np.sinh(r) ** 2, atol=tol, rtol=0)
        assert np.allclose(var, 2 * (np.sinh(r) ** 2 + np.sinh(r) ** 4), atol=tol, rtol=0)

    def test_mean_photon_displaced_squeezed(self, setup_backend, tol, batch_size):
        """Test that E(n) = sinh^2(r)+|a|^2 for a displaced squeezed state"""
        if batch_size is not None:
            pytest.skip("Does not support batch mode")
        backend = setup_backend(1)
        # Local displacement amplitude and squeezing magnitude; the unused
        # locals `nbar`, `mag_a` and `phi_a` have been removed.
        a = 0.12 - 0.05j
        r = 0.195
        backend.squeeze(r, phi, 0)
        backend.displacement(np.abs(a), np.angle(a), 0)
        state = backend.state()
        mean_photon, var = state.mean_photon(0)
        magnitude_squared = np.abs(a) ** 2
        mean_ex = magnitude_squared + np.sinh(r) ** 2
        # Analytic photon-number variance of a displaced squeezed state
        # (phi is the module-level squeezing angle used by squeeze() above).
        var_ex = (
            -magnitude_squared
            + magnitude_squared**2
            + 2 * magnitude_squared * np.cosh(2 * r)
            - np.exp(-1j * phi) * a**2 * np.cosh(r) * np.sinh(r)
            - np.exp(1j * phi) * np.conj(a) ** 2 * np.cosh(r) * np.sinh(r)
            + np.sinh(r) ** 4
            - (magnitude_squared + np.conj(np.sinh(r)) * np.sinh(r)) ** 2
            + np.cosh(r) * np.sinh(r) * np.sinh(2 * r)
        )
        assert np.allclose(mean_photon, mean_ex, atol=tol, rtol=0)
        assert np.allclose(var, var_ex, atol=tol, rtol=0)

    def test_mean_photon_displaced_thermal(self, setup_backend, tol, batch_size):
        """Test that E(n)=|a|^2+nbar and var(n)=var_th+|a|^2(1+2nbar)"""
        if batch_size is not None:
            pytest.skip("Does not support batch mode")
        backend = setup_backend(1)
        nbar = 0.123
        backend.prepare_thermal_state(nbar, 0)
        backend.displacement(np.abs(a), np.angle(a), 0)
        state = backend.state()
        mean_photon, var = state.mean_photon(0)
        mean_ex = np.abs(a) ** 2 + nbar
        var_ex = nbar**2 + nbar + np.abs(a) ** 2 * (1 + 2 * nbar)
        assert np.allclose(mean_photon, mean_ex, atol=tol, rtol=0)
        assert np.allclose(var, var_ex, atol=tol, rtol=0)
@pytest.mark.backends("fock", "tf", "gaussian")
class TestBaseFockKetDensityMatrix:
    """Tests for the ket, dm, and reduced density matrix function."""

    def test_rdm(self, setup_backend, tol, cutoff, batch_size):
        """Test reduced density matrix of a coherent state is as expected
        This is supported by all backends, since it returns
        the reduced density matrix of a single mode."""
        backend = setup_backend(2)
        backend.prepare_coherent_state(np.abs(a), np.angle(a), 0)
        backend.prepare_coherent_state(0.1, 0, 1)
        state = backend.state()
        rdm = state.reduced_dm(0, cutoff=cutoff)
        # Analytic coherent-state ket in the Fock basis: e^{-|a|^2/2} a^n / sqrt(n!)
        n = np.arange(cutoff)
        ket = np.exp(-0.5 * np.abs(a) ** 2) * a**n / np.sqrt(fac(n))
        rdm_exact = np.outer(ket, ket.conj())
        if batch_size is not None:
            # NOTE(review): the tiled array is discarded (rdm_exact is never
            # reassigned), unlike the analogous batching code in test_ket and
            # test_density_matrix_thermal_state below; the comparison then
            # relies on broadcasting — confirm whether this is intentional.
            np.tile(rdm_exact, [batch_size, 1])
        assert np.allclose(rdm, rdm_exact, atol=tol, rtol=0)

    def test_ket(self, setup_backend, pure, cutoff, batch_size, tol):
        """Test that the ket of a displaced state matches analytic result"""
        backend = setup_backend(2)
        backend.displacement(np.abs(a), np.angle(a), 0)
        state = backend.state()
        if not pure and backend.short_name != "gaussian":
            # Mixed-state simulations cannot return a ket; skip in that case.
            assert state.is_pure == False
            pytest.skip("Test only works with pure states.")
        assert state.is_pure == True
        # Collapse the mode-1 axis (presumably left in vacuum, so only the
        # |0> amplitude contributes) to recover the mode-0 amplitudes — TODO confirm.
        ket = np.sum(state.ket(cutoff=cutoff), axis=-1)
        n = np.arange(cutoff)
        expected = np.exp(-0.5 * np.abs(a) ** 2) * a**n / np.sqrt(fac(n))
        if batch_size is not None:
            ket = np.tile(ket, [batch_size, 1])
        assert np.allclose(ket, expected, atol=tol, rtol=0)

    def test_density_matrix_thermal_state(self, setup_backend, cutoff, batch_size, tol):
        """Test that a thermal state returns the correct density matrix, using
        both the dm() and reduced_dm() methods."""
        backend = setup_backend(1)
        backend.prepare_thermal_state(r, 0)
        state = backend.state()
        assert not state.is_pure
        rho1 = state.dm(cutoff=cutoff)
        rho2 = state.reduced_dm(0, cutoff=cutoff)
        # For a single-mode system, dm() and reduced_dm(0) must agree.
        assert np.allclose(rho1, rho2, atol=tol, rtol=0)
        # Diagonal thermal distribution p_n = r^n / (1+r)^(n+1), i.e. the
        # module-level r is used here as the thermal mean photon number.
        n = np.arange(cutoff)
        expected = np.diag((r**n) / ((1 + r) ** (n + 1)))
        if batch_size is not None:
            expected = np.tile(expected, [batch_size, 1]).reshape(-1, cutoff, cutoff)
        assert np.allclose(rho1, expected, atol=tol, rtol=0)
@pytest.mark.backends("gaussian")
class TestBaseGaussianMethods:
    """This tests state methods unique to the BaseGaussian
    class, including is_coherent, displacement, is_squeezed,
    and squeezing."""

    def test_coherent_methods(self, setup_backend, tol):
        """Test that the ket of a displaced state matches analytic result"""
        backend = setup_backend(2)
        alpha = 1 + 0.5j
        sq_r = 2
        sq_phi = -0.5
        backend.prepare_coherent_state(np.abs(alpha), np.angle(alpha), 0)
        backend.prepare_squeezed_state(sq_r, sq_phi, 1)
        state = backend.state()
        # Mode 0 is coherent; mode 1 is squeezed, hence not coherent.
        coherent_check = [state.is_coherent(mode) for mode in range(2)]
        alpha_list = state.displacement()
        assert np.all(coherent_check == [True, False])
        assert np.allclose(alpha_list, [alpha, 0.0], atol=tol, rtol=0)

    def test_squeezing_methods(self, setup_backend, tol):
        """Test that the ket of a displaced state matches analytic result"""
        backend = setup_backend(2)
        alpha = 1 + 0.5j
        sq_r = 2
        sq_phi = -0.5
        backend.prepare_coherent_state(np.abs(alpha), np.angle(alpha), 0)
        backend.prepare_squeezed_state(sq_r, sq_phi, 1)
        state = backend.state()
        # Mode 0 is coherent (unsqueezed); mode 1 carries the squeezing.
        squeezing_check = [state.is_squeezed(mode) for mode in range(2)]
        z_list = np.array(state.squeezing())
        assert np.all(squeezing_check == [False, True])
        assert np.allclose(z_list, [[0.0, 0.0], [sq_r, sq_phi]], atol=tol, rtol=0)
class TestQuadratureExpectations:
    """Test quad_expectation methods"""

    def test_vacuum(self, setup_backend, hbar, batch_size, tol):
        """Test vacuum state has zero mean and hbar/2 variance"""
        state = setup_backend(1).state()
        observed = np.array(state.quad_expectation(0, phi=np.pi / 4)).T
        expected = np.array([0, hbar / 2.0])
        if batch_size is not None:
            expected = np.tile(expected, batch_size)
        assert np.allclose(observed.flatten(), expected.flatten(), atol=tol, rtol=0)

    def test_squeezed_coherent(self, setup_backend, hbar, batch_size, tol):
        """Test squeezed coherent state has correct mean and variance"""
        backend = setup_backend(1)
        qphi = 0.78  # quadrature rotation angle
        backend.prepare_displaced_squeezed_state(np.abs(a), np.angle(a), r, phi, 0)
        state = backend.state()
        observed = np.array(state.quad_expectation(0, phi=qphi)).T
        # Analytic mean and variance of the rotated quadrature x_qphi.
        xphi_mean = (a.real * np.cos(qphi) + a.imag * np.sin(qphi)) * np.sqrt(2 * hbar)
        xphi_var = (np.cosh(2 * r) - np.cos(phi - 2 * qphi) * np.sinh(2 * r)) * hbar / 2
        expected = np.array([xphi_mean, xphi_var])
        if batch_size is not None:
            expected = np.tile(expected, batch_size)
        assert np.allclose(observed.flatten(), expected.flatten(), atol=tol, rtol=0)
@pytest.mark.backends("fock", "tf", "gaussian")
class TestNumberExpectation:
    """Multimode photon-number expectation value tests"""

    def test_number_expectation_vacuum(self, setup_backend, tol, batch_size):
        """Tests the expectation value of any photon number in vacuum is zero,
        and the variance is also 0."""
        if batch_size is not None:
            pytest.skip("Does not support batch mode")
        backend = setup_backend(2)
        state = backend.state()
        # number_expectation returns a (mean, variance) pair.
        expected = (0, 0)
        assert np.allclose(state.number_expectation([0, 1]), expected, atol=tol, rtol=0)
        assert np.allclose(state.number_expectation([0]), expected, atol=tol, rtol=0)
        assert np.allclose(state.number_expectation([1]), expected, atol=tol, rtol=0)

    def test_number_expectation_displaced_squeezed(self, setup_backend, tol, batch_size):
        """Tests the expectation value of photon numbers when there is no correlation"""
        if batch_size is not None:
            pytest.skip("Does not support batch mode")
        backend = setup_backend(2)
        state = backend.state()  # NOTE(review): overwritten below before use
        a0 = 0.2 + 0.1 * 1j
        r0 = 0.2
        phi0 = 0.6
        a1 = 0.1 + 0.1 * 1j
        r1 = 0.1
        phi1 = 0.4
        backend.prepare_displaced_squeezed_state(np.abs(a0), np.angle(a0), r0, phi0, 0)
        backend.prepare_displaced_squeezed_state(np.abs(a1), np.angle(a1), r1, phi1, 1)
        state = backend.state()
        # Mean photon numbers of each (independent) displaced squeezed mode.
        n0 = np.sinh(r0) ** 2 + np.abs(a0) ** 2
        n1 = np.sinh(r1) ** 2 + np.abs(a1) ** 2

        def squared_term(a, r, phi):
            # Analytic expression used to build E[n^2] for one displaced
            # squeezed mode.
            magnitude_squared = np.abs(a) ** 2
            squared_term = (
                -magnitude_squared
                + magnitude_squared**2
                + 2 * magnitude_squared * np.cosh(2 * r)
                - 2 * np.real(np.exp(-1j * phi) * a**2 * np.cosh(r) * np.sinh(r))
                + np.sinh(r) ** 4
                + np.cosh(r) * np.sinh(r) * np.sinh(2 * r)
            )
            return squared_term

        res = state.number_expectation([0, 1])
        # Independent modes: E[n0*n1] = E[n0]E[n1] and the variance follows
        # from E[(n0*n1)^2] - E[n0*n1]^2.
        var = squared_term(a0, r0, phi0) * squared_term(a1, r1, phi1) - n0**2 * n1**2
        assert np.allclose(res[0], n0 * n1, atol=tol, rtol=0)
        assert np.allclose(res[1], var, atol=tol, rtol=0)
        res = state.number_expectation([0])
        var = squared_term(a0, r0, phi0) - n0**2
        assert np.allclose(res[0], n0, atol=tol, rtol=0)
        assert np.allclose(res[1], var, atol=tol, rtol=0)
        res = state.number_expectation([1])
        var = squared_term(a1, r1, phi1) - n1**2
        assert np.allclose(res[0], n1, atol=tol, rtol=0)
        assert np.allclose(res[1], var, atol=tol, rtol=0)

    def test_number_expectation_repeated_modes(self, setup_backend, tol):
        """Tests that the correct exception is raised for repeated modes"""
        backend = setup_backend(2)
        state = backend.state()
        with pytest.raises(ValueError, match="There can be no duplicates in the modes specified."):
            state.number_expectation([0, 0])

    def test_number_expectation_two_mode_squeezed(self, setup_backend, tol, batch_size):
        """Tests the expectation value of photon numbers when there is correlation"""
        if batch_size is not None:
            pytest.skip("Does not support batch mode")
        backend = setup_backend(3)
        state = backend.state()  # NOTE(review): overwritten below before use
        r = 0.2
        phi = 0.0
        # Oppositely squeezed modes mixed on a beamsplitter — correlates
        # modes 0 and 2 (two-mode squeezing).
        backend.prepare_squeezed_state(r, phi, 0)
        backend.prepare_squeezed_state(-r, phi, 2)
        backend.beamsplitter(np.pi / 4, np.pi, 0, 2)
        state = backend.state()
        nbar = np.sinh(r) ** 2
        res = state.number_expectation([2, 0])
        # Correlated pair: E[n0*n2] = 2*nbar^2 + nbar, exceeding the
        # uncorrelated product nbar^2.
        assert np.allclose(res[0], 2 * nbar**2 + nbar, atol=tol, rtol=0)
        res = state.number_expectation([0])
        assert np.allclose(res[0], nbar, atol=tol, rtol=0)
        res = state.number_expectation([2])
        assert np.allclose(res[0], nbar, atol=tol, rtol=0)

    def test_number_expectation_photon_displaced_thermal(self, setup_backend, tol, batch_size):
        """Test that E(n)=|a|^2+nbar and var(n)=var_th+|a|^2(1+2nbar)
        for uncorrelated displaced thermal states."""
        if batch_size is not None:
            pytest.skip("Does not support batch mode")
        backend = setup_backend(2)
        a0 = 0.1
        nbar0 = 0.123
        a1 = 0.05
        nbar1 = 0.21
        backend.prepare_thermal_state(nbar0, 0)
        backend.displacement(a0, 0.0, 0)
        backend.prepare_thermal_state(nbar1, 1)
        backend.displacement(a1, 0.0, 1)
        state = backend.state()
        res = state.number_expectation([0])
        mean0 = np.abs(a0) ** 2 + nbar0
        var0 = nbar0**2 + nbar0 + np.abs(a0) ** 2 * (1 + 2 * nbar0)
        assert np.allclose(res[0], mean0, atol=tol, rtol=0)
        assert np.allclose(res[1], var0, atol=tol, rtol=0.01)
        res = state.number_expectation([1])
        mean1 = np.abs(a1) ** 2 + nbar1
        var1 = nbar1**2 + nbar1 + np.abs(a1) ** 2 * (1 + 2 * nbar1)
        assert np.allclose(res[0], mean1, atol=tol, rtol=0)
        assert np.allclose(res[1], var1, atol=tol, rtol=0.01)
        # test uncorrelated result
        res = state.number_expectation([0, 1])
        expected_mean = mean0 * mean1
        assert np.allclose(res[0], expected_mean, atol=tol, rtol=0)
        # Product of independent variables:
        # var(XY) = E[X]^2 var(Y) + E[Y]^2 var(X) + var(X) var(Y).
        expected_var = mean0**2 * var1 + mean1**2 * var0 + var0 * var1
        assert np.allclose(res[1], expected_var, atol=tol, rtol=0.01)

    @pytest.mark.backends("fock", "tf")
    def test_number_expectation_four_modes(self, setup_backend, tol, batch_size):
        """Tests the expectation value of photon numbers when there is correlation"""
        if batch_size is not None:
            pytest.skip("Does not support batch mode")
        backend = setup_backend(4)
        state = backend.state()  # NOTE(review): overwritten below before use
        r = 0.2
        phi = 0.0
        # Build two independent correlated pairs: (0, 1) and (2, 3).
        backend.prepare_squeezed_state(r, phi, 0)
        backend.prepare_squeezed_state(r, phi + np.pi, 1)
        backend.beamsplitter(np.pi / 4, np.pi, 0, 1)
        backend.prepare_squeezed_state(r, phi, 2)
        backend.prepare_squeezed_state(r, phi + np.pi, 3)
        backend.beamsplitter(np.pi / 4, np.pi, 2, 3)
        state = backend.state()
        nbar = np.sinh(r) ** 2
        assert np.allclose(
            state.number_expectation([0, 1, 2, 3])[0],
            (2 * nbar**2 + nbar) ** 2,
            atol=tol,
            rtol=0,
        )
        assert np.allclose(
            state.number_expectation([0, 1, 3])[0],
            nbar * (2 * nbar**2 + nbar),
            atol=tol,
            rtol=0,
        )
        assert np.allclose(
            state.number_expectation([3, 1, 2])[0],
            nbar * (2 * nbar**2 + nbar),
            atol=tol,
            rtol=0,
        )
class TestParityExpectation:
    """Tests of the photon-number parity expectation value.

    Each test originally fetched ``state = backend.state()`` before building
    the circuit and unconditionally overwrote it afterwards; those dead
    pre-circuit fetches have been removed.
    """

    @pytest.mark.backends("fock", "tf")
    def test_parity_fock(self, setup_backend, tol, batch_size):
        """Tests the parity operator for an even superposition of the first two number states"""
        if batch_size is not None:
            pytest.skip("Does not support batch mode")
        backend = setup_backend(2)
        n1 = 3
        n2 = 2
        backend.prepare_fock_state(n1, 0)
        backend.prepare_fock_state(n2, 1)
        # Mixing |3>|2> on a beamsplitter balances even and odd photon
        # numbers in mode 0, so the parity expectation is 0.
        backend.beamsplitter(np.pi / 4, 0, 0, 1)
        state = backend.state()
        assert np.allclose(state.parity_expectation([0]), 0, atol=tol, rtol=0)

    @pytest.mark.backends("fock", "tf")
    def test_two_mode_fock(self, setup_backend, tol, batch_size):
        """Tests the product of parity operators for two number states"""
        if batch_size is not None:
            pytest.skip("Does not support batch mode")
        backend = setup_backend(2)
        n1 = 3
        n2 = 5
        backend.prepare_fock_state(n1, 0)
        backend.prepare_fock_state(n2, 1)
        state = backend.state()
        # (-1)**n1 * (-1)**n2 = (-1)**8 = 1
        assert np.allclose(state.parity_expectation([0, 1]), 1, atol=tol, rtol=0)

    def test_coherent(self, setup_backend, tol, batch_size):
        """Tests the parity operator for a coherent state"""
        if batch_size is not None:
            pytest.skip("Does not support batch mode")
        backend = setup_backend(1)
        r = 0.2  # coherent amplitude (real)
        backend.prepare_coherent_state(r, 0, 0)
        state = backend.state()
        # For a coherent state, <parity> = exp(-2|alpha|^2).
        assert np.allclose(
            state.parity_expectation([0]), np.exp(-2 * (np.abs(r) ** 2)), atol=tol, rtol=0
        )

    def test_squeezed(self, setup_backend, tol, batch_size):
        """Tests the parity operator for a squeezed state"""
        if batch_size is not None:
            pytest.skip("Does not support batch mode")
        backend = setup_backend(1)
        r = 0.2
        phi = 0
        backend.prepare_squeezed_state(r, phi, 0)
        state = backend.state()
        # Squeezed vacuum populates only even photon numbers, so parity is 1.
        assert np.allclose(state.parity_expectation([0]), 1, atol=tol, rtol=0)

    def test_two_mode_squeezed(self, setup_backend, tol, batch_size):
        """Tests the parity operator for a two-mode squeezed state"""
        if batch_size is not None:
            pytest.skip("Does not support batch mode")
        backend = setup_backend(2)
        r = 0.2
        phi = 0
        backend.beamsplitter(np.sqrt(0.5), -np.sqrt(0.5), 0, 1)
        backend.prepare_squeezed_state(r, phi, 0)
        backend.prepare_squeezed_state(-1 * r, phi, 1)
        backend.beamsplitter(np.sqrt(0.5), -np.sqrt(0.5), 0, 1)
        state = backend.state()
        assert np.allclose(state.parity_expectation([0, 1]), 1, atol=tol, rtol=0)

    @pytest.mark.backends("fock", "tf")
    def test_thermal(self, setup_backend, tol, batch_size):
        """Tests the parity operator for a thermal state"""
        if batch_size is not None:
            pytest.skip("Does not support batch mode")
        backend = setup_backend(1)
        m = 0.2  # thermal mean photon number
        backend.prepare_thermal_state(m, 0)
        state = backend.state()
        assert np.allclose(state.parity_expectation([0]), (1 / ((2 * m) + 1)), atol=tol, rtol=0)
@pytest.mark.backends("fock", "tf", "gaussian")
class TestFidelities:
    """Fidelity tests."""

    def test_vacuum(self, setup_backend, tol):
        """A freshly initialised backend holds the vacuum state."""
        state = setup_backend(2).state()
        assert np.allclose(state.fidelity_vacuum(), 1, atol=tol, rtol=0)

    def test_coherent_fidelity(self, setup_backend, cutoff, tol, hbar):
        """Coherent-state fidelity against the analytic reference equals 1."""
        backend = setup_backend(2)
        mag, ang = np.abs(a), np.angle(a)
        backend.prepare_coherent_state(mag, ang, 0)
        backend.displacement(mag, ang, 1)
        state = backend.state()
        # Fock-capable backends need an explicit Fock-basis reference state.
        if isinstance(backend, backends.BaseFock):
            in_state = utils.coherent_state(mag, ang, basis="fock", fock_dim=cutoff, hbar=hbar)
        else:
            in_state = utils.coherent_state(mag, ang, basis="gaussian", hbar=hbar)
        for mode in (0, 1):
            assert np.allclose(state.fidelity(in_state, mode), 1, atol=tol, rtol=0)
        assert np.allclose(state.fidelity_coherent([a, a]), 1, atol=tol, rtol=0)

    def test_squeezed_fidelity(self, setup_backend, cutoff, tol, hbar):
        """Squeezed-state fidelity against the analytic reference equals 1."""
        backend = setup_backend(2)
        backend.prepare_squeezed_state(r, phi, 0)
        backend.squeeze(r, phi, 1)
        state = backend.state()
        if isinstance(backend, backends.BaseFock):
            in_state = utils.squeezed_state(r, phi, basis="fock", fock_dim=cutoff, hbar=hbar)
        else:
            in_state = utils.squeezed_state(r, phi, basis="gaussian", hbar=hbar)
        for mode in (0, 1):
            assert np.allclose(state.fidelity(in_state, mode), 1, atol=tol, rtol=0)

    def test_squeezed_coherent_fidelity(self, setup_backend, cutoff, tol, hbar):
        """Displaced-squeezed-state fidelity against the analytic reference equals 1."""
        backend = setup_backend(2)
        mag, ang = np.abs(a), np.angle(a)
        backend.prepare_displaced_squeezed_state(mag, ang, r, phi, 0)
        backend.squeeze(r, phi, 1)
        backend.displacement(mag, ang, 1)
        state = backend.state()
        if isinstance(backend, backends.BaseFock):
            in_state = utils.displaced_squeezed_state(
                mag, ang, r, phi, basis="fock", fock_dim=cutoff, hbar=hbar
            )
        else:
            in_state = utils.displaced_squeezed_state(
                mag, ang, r, phi, basis="gaussian", hbar=hbar
            )
        for mode in (0, 1):
            assert np.allclose(state.fidelity(in_state, mode), 1, atol=tol, rtol=0)
@pytest.mark.backends("bosonic")
class TestBosonicState:
    """Tests the bosonic state class."""

    def test_weights(self, setup_backend):
        """Test weights are correct for a Gaussian state represented using the bosonic backend."""
        backend = setup_backend(1)
        backend.prepare_coherent_state(1, 0, 0)
        weights = backend.state().weights()
        # A Gaussian state is represented by a single unit-weight component.
        assert len(weights) == 1
        assert weights == np.array([1])
        assert sum(weights) == 1

    def test_bosonic_purity(self, setup_backend, tol):
        """Test purities are correct for Gaussian states represented using the bosonic backend."""
        backend = setup_backend(1)
        backend.prepare_coherent_state(r, phi, 0)
        coherent_purity = backend.state().purity()
        backend.reset()
        backend.prepare_thermal_state(1, 0)
        thermal_purity = backend.state().purity()
        # Coherent states are pure; thermal states are mixed.
        assert np.allclose(coherent_purity, 1, atol=tol, rtol=0)
        assert thermal_purity < 1

    def test_displacement(self, setup_backend, tol):
        """Tests average displacement calculation."""
        backend = setup_backend(1)
        backend.prepare_coherent_state(r, phi, 0)
        measured = backend.state().displacement(0)
        expected = r * np.cos(phi) + 1j * r * np.sin(phi)
        assert np.allclose(expected, measured)

    def test_density_matrix(self, setup_backend, tol):
        """Tests vacuum density matrix."""
        backend = setup_backend(1)
        backend.prepare_vacuum_state(0)
        density = backend.state().dm()
        # Vacuum: all population sits in the |0><0| element.
        expected = np.zeros((10, 10))
        expected[0, 0] = 1
        assert np.allclose(density, expected)

    def test_is_vacuum(self, setup_backend):
        """Tests fidelity_vacuum method in BaseBosonicState."""
        backend = setup_backend(1)
        backend.prepare_vacuum_state(0)
        assert np.allclose(backend.state().fidelity_vacuum(), 1)
        # Removing the only mode should still report unit vacuum fidelity.
        backend.del_mode(0)
        assert np.allclose(backend.state().fidelity_vacuum(), 1)
| [
"numpy.sqrt",
"scipy.special.factorial",
"numpy.sinh",
"numpy.array",
"numpy.sin",
"pytest.mark.backends",
"numpy.arange",
"numpy.exp",
"pytest.skip",
"numpy.abs",
"numpy.tile",
"numpy.allclose",
"numpy.conj",
"strawberryfields.utils.squeezed_state",
"pytest.raises",
"numpy.cos",
"nu... | [((6225, 6271), 'pytest.mark.backends', 'pytest.mark.backends', (['"""fock"""', '"""tf"""', '"""gaussian"""'], {}), "('fock', 'tf', 'gaussian')\n", (6245, 6271), False, 'import pytest\n'), ((8725, 8757), 'pytest.mark.backends', 'pytest.mark.backends', (['"""gaussian"""'], {}), "('gaussian')\n", (8745, 8757), False, 'import pytest\n'), ((11643, 11689), 'pytest.mark.backends', 'pytest.mark.backends', (['"""fock"""', '"""tf"""', '"""gaussian"""'], {}), "('fock', 'tf', 'gaussian')\n", (11663, 11689), False, 'import pytest\n'), ((21828, 21874), 'pytest.mark.backends', 'pytest.mark.backends', (['"""fock"""', '"""tf"""', '"""gaussian"""'], {}), "('fock', 'tf', 'gaussian')\n", (21848, 21874), False, 'import pytest\n'), ((24384, 24415), 'pytest.mark.backends', 'pytest.mark.backends', (['"""bosonic"""'], {}), "('bosonic')\n", (24404, 24415), False, 'import pytest\n'), ((17128, 17162), 'pytest.mark.backends', 'pytest.mark.backends', (['"""fock"""', '"""tf"""'], {}), "('fock', 'tf')\n", (17148, 17162), False, 'import pytest\n'), ((18464, 18498), 'pytest.mark.backends', 'pytest.mark.backends', (['"""fock"""', '"""tf"""'], {}), "('fock', 'tf')\n", (18484, 18498), False, 'import pytest\n'), ((19098, 19132), 'pytest.mark.backends', 'pytest.mark.backends', (['"""fock"""', '"""tf"""'], {}), "('fock', 'tf')\n", (19118, 19132), False, 'import pytest\n'), ((21323, 21357), 'pytest.mark.backends', 'pytest.mark.backends', (['"""fock"""', '"""tf"""'], {}), "('fock', 'tf')\n", (21343, 21357), False, 'import pytest\n'), ((2197, 2232), 'numpy.allclose', 'np.allclose', (['f', '(1)'], {'atol': 'tol', 'rtol': '(0)'}), '(f, 1, atol=tol, rtol=0)\n', (2208, 2232), True, 'import numpy as np\n'), ((2682, 2699), 'numpy.arange', 'np.arange', (['cutoff'], {}), '(cutoff)\n', (2691, 2699), True, 'import numpy as np\n'), ((4793, 4802), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (4799, 4802), True, 'import numpy as np\n'), ((4819, 4830), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (4827, 
4830), True, 'import numpy as np\n'), ((5393, 5444), 'numpy.allclose', 'np.allclose', (['mean_photon', 'mean_ex'], {'atol': 'tol', 'rtol': '(0)'}), '(mean_photon, mean_ex, atol=tol, rtol=0)\n', (5404, 5444), True, 'import numpy as np\n'), ((5460, 5502), 'numpy.allclose', 'np.allclose', (['var', 'var_ex'], {'atol': 'tol', 'rtol': '(0)'}), '(var, var_ex, atol=tol, rtol=0)\n', (5471, 5502), True, 'import numpy as np\n'), ((6112, 6163), 'numpy.allclose', 'np.allclose', (['mean_photon', 'mean_ex'], {'atol': 'tol', 'rtol': '(0)'}), '(mean_photon, mean_ex, atol=tol, rtol=0)\n', (6123, 6163), True, 'import numpy as np\n'), ((6179, 6221), 'numpy.allclose', 'np.allclose', (['var', 'var_ex'], {'atol': 'tol', 'rtol': '(0)'}), '(var, var_ex, atol=tol, rtol=0)\n', (6190, 6221), True, 'import numpy as np\n'), ((6879, 6896), 'numpy.arange', 'np.arange', (['cutoff'], {}), '(cutoff)\n', (6888, 6896), True, 'import numpy as np\n'), ((7112, 7157), 'numpy.allclose', 'np.allclose', (['rdm', 'rdm_exact'], {'atol': 'tol', 'rtol': '(0)'}), '(rdm, rdm_exact, atol=tol, rtol=0)\n', (7123, 7157), True, 'import numpy as np\n'), ((7699, 7716), 'numpy.arange', 'np.arange', (['cutoff'], {}), '(cutoff)\n', (7708, 7716), True, 'import numpy as np\n'), ((7891, 7935), 'numpy.allclose', 'np.allclose', (['ket', 'expected'], {'atol': 'tol', 'rtol': '(0)'}), '(ket, expected, atol=tol, rtol=0)\n', (7902, 7935), True, 'import numpy as np\n'), ((8407, 8448), 'numpy.allclose', 'np.allclose', (['rho1', 'rho2'], {'atol': 'tol', 'rtol': '(0)'}), '(rho1, rho2, atol=tol, rtol=0)\n', (8418, 8448), True, 'import numpy as np\n'), ((8462, 8479), 'numpy.arange', 'np.arange', (['cutoff'], {}), '(cutoff)\n', (8471, 8479), True, 'import numpy as np\n'), ((8499, 8535), 'numpy.diag', 'np.diag', (['(r ** n / (1 + r) ** (n + 1))'], {}), '(r ** n / (1 + r) ** (n + 1))\n', (8506, 8535), True, 'import numpy as np\n'), ((8676, 8721), 'numpy.allclose', 'np.allclose', (['rho1', 'expected'], {'atol': 'tol', 'rtol': '(0)'}), '(rho1, 
expected, atol=tol, rtol=0)\n', (8687, 8721), True, 'import numpy as np\n'), ((9477, 9516), 'numpy.all', 'np.all', (['(coherent_check == [True, False])'], {}), '(coherent_check == [True, False])\n', (9483, 9516), True, 'import numpy as np\n'), ((9532, 9583), 'numpy.allclose', 'np.allclose', (['alpha_list', '[a, 0.0]'], {'atol': 'tol', 'rtol': '(0)'}), '(alpha_list, [a, 0.0], atol=tol, rtol=0)\n', (9543, 9583), True, 'import numpy as np\n'), ((10136, 10176), 'numpy.all', 'np.all', (['(squeezing_check == [False, True])'], {}), '(squeezing_check == [False, True])\n', (10142, 10176), True, 'import numpy as np\n'), ((10192, 10253), 'numpy.allclose', 'np.allclose', (['z_list', '[[0.0, 0.0], [r, phi]]'], {'atol': 'tol', 'rtol': '(0)'}), '(z_list, [[0.0, 0.0], [r, phi]], atol=tol, rtol=0)\n', (10203, 10253), True, 'import numpy as np\n'), ((10617, 10642), 'numpy.array', 'np.array', (['[0, hbar / 2.0]'], {}), '([0, hbar / 2.0])\n', (10625, 10642), True, 'import numpy as np\n'), ((11435, 11466), 'numpy.array', 'np.array', (['[xphi_mean, xphi_var]'], {}), '([xphi_mean, xphi_var])\n', (11443, 11466), True, 'import numpy as np\n'), ((13803, 13849), 'numpy.allclose', 'np.allclose', (['res[0]', '(n0 * n1)'], {'atol': 'tol', 'rtol': '(0)'}), '(res[0], n0 * n1, atol=tol, rtol=0)\n', (13814, 13849), True, 'import numpy as np\n'), ((13865, 13907), 'numpy.allclose', 'np.allclose', (['res[1]', 'var'], {'atol': 'tol', 'rtol': '(0)'}), '(res[1], var, atol=tol, rtol=0)\n', (13876, 13907), True, 'import numpy as np\n'), ((14017, 14058), 'numpy.allclose', 'np.allclose', (['res[0]', 'n0'], {'atol': 'tol', 'rtol': '(0)'}), '(res[0], n0, atol=tol, rtol=0)\n', (14028, 14058), True, 'import numpy as np\n'), ((14074, 14116), 'numpy.allclose', 'np.allclose', (['res[1]', 'var'], {'atol': 'tol', 'rtol': '(0)'}), '(res[1], var, atol=tol, rtol=0)\n', (14085, 14116), True, 'import numpy as np\n'), ((14226, 14267), 'numpy.allclose', 'np.allclose', (['res[0]', 'n1'], {'atol': 'tol', 'rtol': '(0)'}), 
'(res[0], n1, atol=tol, rtol=0)\n', (14237, 14267), True, 'import numpy as np\n'), ((14283, 14325), 'numpy.allclose', 'np.allclose', (['res[1]', 'var'], {'atol': 'tol', 'rtol': '(0)'}), '(res[1], var, atol=tol, rtol=0)\n', (14294, 14325), True, 'import numpy as np\n'), ((15336, 15395), 'numpy.allclose', 'np.allclose', (['res[0]', '(2 * nbar ** 2 + nbar)'], {'atol': 'tol', 'rtol': '(0)'}), '(res[0], 2 * nbar ** 2 + nbar, atol=tol, rtol=0)\n', (15347, 15395), True, 'import numpy as np\n'), ((15454, 15497), 'numpy.allclose', 'np.allclose', (['res[0]', 'nbar'], {'atol': 'tol', 'rtol': '(0)'}), '(res[0], nbar, atol=tol, rtol=0)\n', (15465, 15497), True, 'import numpy as np\n'), ((15558, 15601), 'numpy.allclose', 'np.allclose', (['res[0]', 'nbar'], {'atol': 'tol', 'rtol': '(0)'}), '(res[0], nbar, atol=tol, rtol=0)\n', (15569, 15601), True, 'import numpy as np\n'), ((16408, 16452), 'numpy.allclose', 'np.allclose', (['res[0]', 'mean0'], {'atol': 'tol', 'rtol': '(0)'}), '(res[0], mean0, atol=tol, rtol=0)\n', (16419, 16452), True, 'import numpy as np\n'), ((16468, 16514), 'numpy.allclose', 'np.allclose', (['res[1]', 'var0'], {'atol': 'tol', 'rtol': '(0.01)'}), '(res[1], var0, atol=tol, rtol=0.01)\n', (16479, 16514), True, 'import numpy as np\n'), ((16684, 16728), 'numpy.allclose', 'np.allclose', (['res[0]', 'mean1'], {'atol': 'tol', 'rtol': '(0)'}), '(res[0], mean1, atol=tol, rtol=0)\n', (16695, 16728), True, 'import numpy as np\n'), ((16744, 16790), 'numpy.allclose', 'np.allclose', (['res[1]', 'var1'], {'atol': 'tol', 'rtol': '(0.01)'}), '(res[1], var1, atol=tol, rtol=0.01)\n', (16755, 16790), True, 'import numpy as np\n'), ((16927, 16979), 'numpy.allclose', 'np.allclose', (['res[0]', 'expected_mean'], {'atol': 'tol', 'rtol': '(0)'}), '(res[0], expected_mean, atol=tol, rtol=0)\n', (16938, 16979), True, 'import numpy as np\n'), ((17067, 17121), 'numpy.allclose', 'np.allclose', (['res[1]', 'expected_var'], {'atol': 'tol', 'rtol': '(0.01)'}), '(res[1], expected_var, atol=tol, 
rtol=0.01)\n', (17078, 17121), True, 'import numpy as np\n'), ((25266, 25307), 'numpy.allclose', 'np.allclose', (['purity1', '(1)'], {'atol': 'tol', 'rtol': '(0)'}), '(purity1, 1, atol=tol, rtol=0)\n', (25277, 25307), True, 'import numpy as np\n'), ((25877, 25895), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (25885, 25895), True, 'import numpy as np\n'), ((25940, 25967), 'numpy.allclose', 'np.allclose', (['dm', 'dm_compare'], {}), '(dm, dm_compare)\n', (25951, 25967), True, 'import numpy as np\n'), ((2022, 2031), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (2028, 2031), True, 'import numpy as np\n'), ((2033, 2044), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (2041, 2044), True, 'import numpy as np\n'), ((2476, 2485), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (2482, 2485), True, 'import numpy as np\n'), ((2487, 2498), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (2495, 2498), True, 'import numpy as np\n'), ((2795, 2812), 'numpy.abs', 'np.abs', (['ref_state'], {}), '(ref_state)\n', (2801, 2812), True, 'import numpy as np\n'), ((2878, 2908), 'numpy.tile', 'np.tile', (['ref_probs', 'batch_size'], {}), '(ref_probs, batch_size)\n', (2885, 2908), True, 'import numpy as np\n'), ((3280, 3322), 'pytest.skip', 'pytest.skip', (['"""Does not support batch mode"""'], {}), "('Does not support batch mode')\n", (3291, 3322), False, 'import pytest\n'), ((3388, 3397), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (3394, 3397), True, 'import numpy as np\n'), ((3399, 3410), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (3407, 3410), True, 'import numpy as np\n'), ((3851, 3893), 'pytest.skip', 'pytest.skip', (['"""Does not support batch mode"""'], {}), "('Does not support batch mode')\n", (3862, 3893), False, 'import pytest\n'), ((4461, 4503), 'pytest.skip', 'pytest.skip', (['"""Does not support batch mode"""'], {}), "('Does not support batch mode')\n", (4472, 4503), False, 'import pytest\n'), ((4669, 4678), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', 
(4675, 4678), True, 'import numpy as np\n'), ((4680, 4691), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (4688, 4691), True, 'import numpy as np\n'), ((4860, 4869), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (4866, 4869), True, 'import numpy as np\n'), ((5706, 5748), 'pytest.skip', 'pytest.skip', (['"""Does not support batch mode"""'], {}), "('Does not support batch mode')\n", (5717, 5748), False, 'import pytest\n'), ((5883, 5892), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (5889, 5892), True, 'import numpy as np\n'), ((5894, 5905), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (5902, 5905), True, 'import numpy as np\n'), ((6707, 6716), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (6713, 6716), True, 'import numpy as np\n'), ((6718, 6729), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (6726, 6729), True, 'import numpy as np\n'), ((7060, 7095), 'numpy.tile', 'np.tile', (['rdm_exact', '[batch_size, 1]'], {}), '(rdm_exact, [batch_size, 1])\n', (7067, 7095), True, 'import numpy as np\n'), ((7370, 7379), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (7376, 7379), True, 'import numpy as np\n'), ((7381, 7392), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (7389, 7392), True, 'import numpy as np\n'), ((7542, 7590), 'pytest.skip', 'pytest.skip', (['"""Test only works with pure states."""'], {}), "('Test only works with pure states.')\n", (7553, 7590), False, 'import pytest\n'), ((7845, 7874), 'numpy.tile', 'np.tile', (['ket', '[batch_size, 1]'], {}), '(ket, [batch_size, 1])\n', (7852, 7874), True, 'import numpy as np\n'), ((9196, 9205), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (9202, 9205), True, 'import numpy as np\n'), ((9207, 9218), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (9215, 9218), True, 'import numpy as np\n'), ((9850, 9859), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (9856, 9859), True, 'import numpy as np\n'), ((9861, 9872), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (9869, 9872), True, 'import numpy as np\n'), ((10703, 10733), 
'numpy.tile', 'np.tile', (['res_exact', 'batch_size'], {}), '(res_exact, batch_size)\n', (10710, 10733), True, 'import numpy as np\n'), ((11107, 11116), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (11113, 11116), True, 'import numpy as np\n'), ((11118, 11129), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (11126, 11129), True, 'import numpy as np\n'), ((11308, 11325), 'numpy.sqrt', 'np.sqrt', (['(2 * hbar)'], {}), '(2 * hbar)\n', (11315, 11325), True, 'import numpy as np\n'), ((11527, 11557), 'numpy.tile', 'np.tile', (['res_exact', 'batch_size'], {}), '(res_exact, batch_size)\n', (11534, 11557), True, 'import numpy as np\n'), ((12021, 12063), 'pytest.skip', 'pytest.skip', (['"""Does not support batch mode"""'], {}), "('Does not support batch mode')\n", (12032, 12063), False, 'import pytest\n'), ((12646, 12688), 'pytest.skip', 'pytest.skip', (['"""Does not support batch mode"""'], {}), "('Does not support batch mode')\n", (12657, 12688), False, 'import pytest\n'), ((12933, 12943), 'numpy.abs', 'np.abs', (['a0'], {}), '(a0)\n', (12939, 12943), True, 'import numpy as np\n'), ((12945, 12957), 'numpy.angle', 'np.angle', (['a0'], {}), '(a0)\n', (12953, 12957), True, 'import numpy as np\n'), ((13021, 13031), 'numpy.abs', 'np.abs', (['a1'], {}), '(a1)\n', (13027, 13031), True, 'import numpy as np\n'), ((13033, 13045), 'numpy.angle', 'np.angle', (['a1'], {}), '(a1)\n', (13041, 13045), True, 'import numpy as np\n'), ((14557, 14647), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""There can be no duplicates in the modes specified."""'}), "(ValueError, match=\n 'There can be no duplicates in the modes specified.')\n", (14570, 14647), False, 'import pytest\n'), ((14912, 14954), 'pytest.skip', 'pytest.skip', (['"""Does not support batch mode"""'], {}), "('Does not support batch mode')\n", (14923, 14954), False, 'import pytest\n'), ((15257, 15267), 'numpy.sinh', 'np.sinh', (['r'], {}), '(r)\n', (15264, 15267), True, 'import numpy as np\n'), ((15870, 15912), 
'pytest.skip', 'pytest.skip', (['"""Does not support batch mode"""'], {}), "('Does not support batch mode')\n", (15881, 15912), False, 'import pytest\n'), ((17378, 17420), 'pytest.skip', 'pytest.skip', (['"""Does not support batch mode"""'], {}), "('Does not support batch mode')\n", (17389, 17420), False, 'import pytest\n'), ((17892, 17902), 'numpy.sinh', 'np.sinh', (['r'], {}), '(r)\n', (17899, 17902), True, 'import numpy as np\n'), ((18707, 18749), 'pytest.skip', 'pytest.skip', (['"""Does not support batch mode"""'], {}), "('Does not support batch mode')\n", (18718, 18749), False, 'import pytest\n'), ((19320, 19362), 'pytest.skip', 'pytest.skip', (['"""Does not support batch mode"""'], {}), "('Does not support batch mode')\n", (19331, 19362), False, 'import pytest\n'), ((19829, 19871), 'pytest.skip', 'pytest.skip', (['"""Does not support batch mode"""'], {}), "('Does not support batch mode')\n", (19840, 19871), False, 'import pytest\n'), ((20335, 20377), 'pytest.skip', 'pytest.skip', (['"""Does not support batch mode"""'], {}), "('Does not support batch mode')\n", (20346, 20377), False, 'import pytest\n'), ((20827, 20869), 'pytest.skip', 'pytest.skip', (['"""Does not support batch mode"""'], {}), "('Does not support batch mode')\n", (20838, 20869), False, 'import pytest\n'), ((20998, 21010), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (21005, 21010), True, 'import numpy as np\n'), ((21167, 21179), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (21174, 21179), True, 'import numpy as np\n'), ((21525, 21567), 'pytest.skip', 'pytest.skip', (['"""Does not support batch mode"""'], {}), "('Does not support batch mode')\n", (21536, 21567), False, 'import pytest\n'), ((22258, 22267), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (22264, 22267), True, 'import numpy as np\n'), ((22269, 22280), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (22277, 22280), True, 'import numpy as np\n'), ((22314, 22323), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (22320, 
22323), True, 'import numpy as np\n'), ((22325, 22336), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (22333, 22336), True, 'import numpy as np\n'), ((23212, 23282), 'strawberryfields.utils.squeezed_state', 'utils.squeezed_state', (['r', 'phi'], {'basis': '"""fock"""', 'fock_dim': 'cutoff', 'hbar': 'hbar'}), "(r, phi, basis='fock', fock_dim=cutoff, hbar=hbar)\n", (23232, 23282), False, 'from strawberryfields import utils\n'), ((23320, 23377), 'strawberryfields.utils.squeezed_state', 'utils.squeezed_state', (['r', 'phi'], {'basis': '"""gaussian"""', 'hbar': 'hbar'}), "(r, phi, basis='gaussian', hbar=hbar)\n", (23340, 23377), False, 'from strawberryfields import utils\n'), ((23699, 23708), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (23705, 23708), True, 'import numpy as np\n'), ((23710, 23721), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (23718, 23721), True, 'import numpy as np\n'), ((23798, 23807), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (23804, 23807), True, 'import numpy as np\n'), ((23809, 23820), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (23817, 23820), True, 'import numpy as np\n'), ((24810, 24823), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (24818, 24823), True, 'import numpy as np\n'), ((2767, 2773), 'scipy.special.factorial', 'fac', (['n'], {}), '(n)\n', (2770, 2773), True, 'from scipy.special import factorial as fac\n'), ((3536, 3545), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (3542, 3545), True, 'import numpy as np\n'), ((3602, 3611), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (3608, 3611), True, 'import numpy as np\n'), ((4126, 4136), 'numpy.sinh', 'np.sinh', (['r'], {}), '(r)\n', (4133, 4136), True, 'import numpy as np\n'), ((4914, 4924), 'numpy.sinh', 'np.sinh', (['r'], {}), '(r)\n', (4921, 4924), True, 'import numpy as np\n'), ((5352, 5366), 'numpy.sinh', 'np.sinh', (['(2 * r)'], {}), '(2 * r)\n', (5359, 5366), True, 'import numpy as np\n'), ((6009, 6018), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (6015, 6018), 
True, 'import numpy as np\n'), ((6958, 6964), 'scipy.special.factorial', 'fac', (['n'], {}), '(n)\n', (6961, 6964), True, 'from scipy.special import factorial as fac\n'), ((7783, 7789), 'scipy.special.factorial', 'fac', (['n'], {}), '(n)\n', (7786, 7789), True, 'from scipy.special import factorial as fac\n'), ((13105, 13116), 'numpy.sinh', 'np.sinh', (['r0'], {}), '(r0)\n', (13112, 13116), True, 'import numpy as np\n'), ((13124, 13134), 'numpy.abs', 'np.abs', (['a0'], {}), '(a0)\n', (13130, 13134), True, 'import numpy as np\n'), ((13153, 13164), 'numpy.sinh', 'np.sinh', (['r1'], {}), '(r1)\n', (13160, 13164), True, 'import numpy as np\n'), ((13172, 13182), 'numpy.abs', 'np.abs', (['a1'], {}), '(a1)\n', (13178, 13182), True, 'import numpy as np\n'), ((13258, 13267), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (13264, 13267), True, 'import numpy as np\n'), ((16301, 16311), 'numpy.abs', 'np.abs', (['a0'], {}), '(a0)\n', (16307, 16311), True, 'import numpy as np\n'), ((16577, 16587), 'numpy.abs', 'np.abs', (['a1'], {}), '(a1)\n', (16583, 16587), True, 'import numpy as np\n'), ((21013, 21025), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (21020, 21025), True, 'import numpy as np\n'), ((21182, 21194), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (21189, 21194), True, 'import numpy as np\n'), ((22486, 22495), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (22492, 22495), True, 'import numpy as np\n'), ((22497, 22508), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (22505, 22508), True, 'import numpy as np\n'), ((22623, 22632), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (22629, 22632), True, 'import numpy as np\n'), ((22634, 22645), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (22642, 22645), True, 'import numpy as np\n'), ((23980, 23989), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (23986, 23989), True, 'import numpy as np\n'), ((23991, 24002), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (23999, 24002), True, 'import numpy as np\n'), ((24152, 
24161), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (24158, 24161), True, 'import numpy as np\n'), ((24163, 24174), 'numpy.angle', 'np.angle', (['a'], {}), '(a)\n', (24171, 24174), True, 'import numpy as np\n'), ((5326, 5336), 'numpy.cosh', 'np.cosh', (['r'], {}), '(r)\n', (5333, 5336), True, 'import numpy as np\n'), ((5339, 5349), 'numpy.sinh', 'np.sinh', (['r'], {}), '(r)\n', (5346, 5349), True, 'import numpy as np\n'), ((6065, 6074), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (6071, 6074), True, 'import numpy as np\n'), ((8597, 8631), 'numpy.tile', 'np.tile', (['expected', '[batch_size, 1]'], {}), '(expected, [batch_size, 1])\n', (8604, 8631), True, 'import numpy as np\n'), ((11268, 11280), 'numpy.cos', 'np.cos', (['qphi'], {}), '(qphi)\n', (11274, 11280), True, 'import numpy as np\n'), ((11292, 11304), 'numpy.sin', 'np.sin', (['qphi'], {}), '(qphi)\n', (11298, 11304), True, 'import numpy as np\n'), ((11346, 11360), 'numpy.cosh', 'np.cosh', (['(2 * r)'], {}), '(2 * r)\n', (11353, 11360), True, 'import numpy as np\n'), ((13593, 13607), 'numpy.sinh', 'np.sinh', (['(2 * r)'], {}), '(2 * r)\n', (13600, 13607), True, 'import numpy as np\n'), ((16359, 16369), 'numpy.abs', 'np.abs', (['a0'], {}), '(a0)\n', (16365, 16369), True, 'import numpy as np\n'), ((16635, 16645), 'numpy.abs', 'np.abs', (['a1'], {}), '(a1)\n', (16641, 16645), True, 'import numpy as np\n'), ((25606, 25617), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (25612, 25617), True, 'import numpy as np\n'), ((25629, 25640), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (25635, 25640), True, 'import numpy as np\n'), ((4198, 4208), 'numpy.sinh', 'np.sinh', (['r'], {}), '(r)\n', (4205, 4208), True, 'import numpy as np\n'), ((4216, 4226), 'numpy.sinh', 'np.sinh', (['r'], {}), '(r)\n', (4223, 4226), True, 'import numpy as np\n'), ((5222, 5232), 'numpy.sinh', 'np.sinh', (['r'], {}), '(r)\n', (5229, 5232), True, 'import numpy as np\n'), ((11363, 11385), 'numpy.cos', 'np.cos', (['(phi - 2 * qphi)'], {}), 
'(phi - 2 * qphi)\n', (11369, 11385), True, 'import numpy as np\n'), ((11388, 11402), 'numpy.sinh', 'np.sinh', (['(2 * r)'], {}), '(2 * r)\n', (11395, 11402), True, 'import numpy as np\n'), ((13533, 13543), 'numpy.sinh', 'np.sinh', (['r'], {}), '(r)\n', (13540, 13543), True, 'import numpy as np\n'), ((13567, 13577), 'numpy.cosh', 'np.cosh', (['r'], {}), '(r)\n', (13574, 13577), True, 'import numpy as np\n'), ((13580, 13590), 'numpy.sinh', 'np.sinh', (['r'], {}), '(r)\n', (13587, 13590), True, 'import numpy as np\n'), ((20120, 20129), 'numpy.abs', 'np.abs', (['r'], {}), '(r)\n', (20126, 20129), True, 'import numpy as np\n'), ((2734, 2743), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (2740, 2743), True, 'import numpy as np\n'), ((5197, 5207), 'numpy.sinh', 'np.sinh', (['r'], {}), '(r)\n', (5204, 5207), True, 'import numpy as np\n'), ((5295, 5305), 'numpy.sinh', 'np.sinh', (['r'], {}), '(r)\n', (5302, 5305), True, 'import numpy as np\n'), ((6925, 6934), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (6931, 6934), True, 'import numpy as np\n'), ((7750, 7759), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (7756, 7759), True, 'import numpy as np\n'), ((5122, 5132), 'numpy.sinh', 'np.sinh', (['r'], {}), '(r)\n', (5129, 5132), True, 'import numpy as np\n'), ((5184, 5194), 'numpy.cosh', 'np.cosh', (['r'], {}), '(r)\n', (5191, 5194), True, 'import numpy as np\n'), ((5281, 5291), 'numpy.sinh', 'np.sinh', (['r'], {}), '(r)\n', (5288, 5291), True, 'import numpy as np\n'), ((13418, 13432), 'numpy.cosh', 'np.cosh', (['(2 * r)'], {}), '(2 * r)\n', (13425, 13432), True, 'import numpy as np\n'), ((5053, 5067), 'numpy.cosh', 'np.cosh', (['(2 * r)'], {}), '(2 * r)\n', (5060, 5067), True, 'import numpy as np\n'), ((5109, 5119), 'numpy.cosh', 'np.cosh', (['r'], {}), '(r)\n', (5116, 5119), True, 'import numpy as np\n'), ((5147, 5165), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (5153, 5165), True, 'import numpy as np\n'), ((13503, 13513), 'numpy.sinh', 'np.sinh', 
(['r'], {}), '(r)\n', (13510, 13513), True, 'import numpy as np\n'), ((5082, 5101), 'numpy.exp', 'np.exp', (['(-1.0j * phi)'], {}), '(-1.0j * phi)\n', (5088, 5101), True, 'import numpy as np\n'), ((5166, 5176), 'numpy.conj', 'np.conj', (['a'], {}), '(a)\n', (5173, 5176), True, 'import numpy as np\n'), ((13490, 13500), 'numpy.cosh', 'np.cosh', (['r'], {}), '(r)\n', (13497, 13500), True, 'import numpy as np\n'), ((13463, 13482), 'numpy.exp', 'np.exp', (['(-1.0j * phi)'], {}), '(-1.0j * phi)\n', (13469, 13482), True, 'import numpy as np\n')] |
import numpy as np
import requests
from io import BytesIO
from pathlib import Path
from PIL import Image
from urllib.parse import urlparse
# Use a Chrome-based user agent to avoid getting needlessly blocked.
# Sent as the `User-Agent` header on every HTTP request made by `open_image`.
USER_AGENT = (
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
    'Chrome/51.0.2704.103 Safari/537.36'
)
def open_image(uri):
    """Opens and returns image at `uri`.
    Always returns a HxWxC `np.array` containing the image, ready to be
    consumed by any Terran algorithm. If the image is grayscale or has an alpha
    channel, it'll get converted into `RGB`, so the number of channels will
    always be 3.
    Parameters
    ----------
    uri : str or pathlib.Path
        URI pointing to an image, which may be a filesystem location or a URL.
    Returns
    -------
    numpy.ndarray
        Array of size HxWxC containing the pixel values for the image, as
        numpy.uint8.
    """
    # Check if `uri` is a URL or a filesystem location.
    if isinstance(uri, Path):
        # Bug fix: expand `~` for `Path` inputs too, matching the `str` branch
        # below, which always called `expanduser()`.
        image = Image.open(uri.expanduser())
    elif urlparse(uri).scheme:
        response = requests.get(uri, headers={'User-Agent': USER_AGENT})
        image = Image.open(BytesIO(response.content))
    else:
        image = Image.open(Path(uri).expanduser())
    # `convert('RGB')` guarantees three channels, so `np.asarray` always yields
    # a rank-3 HxWx3 array; the old grayscale `np.stack` branch was unreachable.
    image = np.asarray(image.convert('RGB'))
    return image
def resolve_images(path, batch_size=None):
    """Collects the paths of all images under `path`, yielding them in batches.
    Ensures that the image is valid before returning it by attempting to open
    it with PIL.
    Parameters
    ----------
    path : str or pathlib.Path
        Path to recursively search for images in.
    batch_size : int or None
        If given, paths are yielded in lists of at most `batch_size` items;
        otherwise they are yielded one at a time.
    Yields
    ------
    pathlib.Path or [pathlib.Path]
        Path to every valid image found under `path`. If `batch_size` is
        `None`, will return a single `pathlib.Path`. Otherwise, returns a list.
    """
    if not isinstance(path, Path):
        path = Path(path).expanduser()
    batch = []
    for f in path.glob('**/*'):
        if not f.is_file():
            continue
        try:
            # `verify()` raises if the file is not a readable image.
            Image.open(f).verify()
        except OSError:
            continue
        # Bug fix: `glob` already yields paths prefixed with `path`, so the old
        # `path.joinpath(f)` duplicated the base directory for relative paths.
        if batch_size is None:
            yield f
            continue
        batch.append(f)
        if len(batch) >= batch_size:
            yield batch
            batch = []
    # Bug fix: yield the trailing partial batch, which was previously dropped.
    if batch:
        yield batch
| [
"PIL.Image.open",
"urllib.parse.urlparse",
"pathlib.Path",
"io.BytesIO",
"requests.get",
"numpy.stack"
] | [((1046, 1061), 'PIL.Image.open', 'Image.open', (['uri'], {}), '(uri)\n', (1056, 1061), False, 'from PIL import Image\n'), ((1437, 1467), 'numpy.stack', 'np.stack', (['([image] * 3)'], {'axis': '(-1)'}), '([image] * 3, axis=-1)\n', (1445, 1467), True, 'import numpy as np\n'), ((1071, 1084), 'urllib.parse.urlparse', 'urlparse', (['uri'], {}), '(uri)\n', (1079, 1084), False, 'from urllib.parse import urlparse\n'), ((1112, 1165), 'requests.get', 'requests.get', (['uri'], {'headers': "{'User-Agent': USER_AGENT}"}), "(uri, headers={'User-Agent': USER_AGENT})\n", (1124, 1165), False, 'import requests\n'), ((1193, 1218), 'io.BytesIO', 'BytesIO', (['response.content'], {}), '(response.content)\n', (1200, 1218), False, 'from io import BytesIO\n'), ((2089, 2099), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (2093, 2099), False, 'from pathlib import Path\n'), ((2236, 2249), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (2246, 2249), False, 'from PIL import Image\n'), ((1257, 1266), 'pathlib.Path', 'Path', (['uri'], {}), '(uri)\n', (1261, 1266), False, 'from pathlib import Path\n')] |
import qctests.Argo_global_range_check
import util.testingProfile
import numpy
from util import obs_utils
##### Argo_global_range_check ---------------------------------------------------
def test_Argo_global_range_check_temperature():
    '''
    Make sure AGRC is flagging temperature excursions
    '''
    # (temperatures, expected flag, failure message) — values just inside and
    # just outside the [-2.5, 40] C acceptance window.
    cases = [
        ([-2.500000001], True, 'failed to flag temperature slightly colder than -2.5 C'),
        ([-2.5], False, 'incorrectly flagging -2.5 C'),
        ([40], False, 'incorrectly flagging 40 C'),
        ([40.0000001], True, 'failed to flag temperature slightly warmer than 40 C'),
    ]
    for temperatures, should_flag, message in cases:
        p = util.testingProfile.fakeProfile(temperatures, [100], latitude=0.0)
        qc = qctests.Argo_global_range_check.test(p, None)
        truth = numpy.zeros(1, dtype=bool)
        truth[0] = should_flag
        assert numpy.array_equal(qc, truth), message
def test_Argo_global_range_check_pressure():
    '''
    Make sure AGRC is flagging pressure excursions
    '''
    # (pressures, expected flag, failure message) — values at and just below
    # the -5 dbar acceptance boundary.
    cases = [
        ([-5.00000001], True, 'failed to flag pressure slightly below -5 '),
        ([-5], False, 'incorrectly flagging pressure of -5'),
    ]
    for pressures, should_flag, message in cases:
        depths = obs_utils.pressure_to_depth(pressures, 0.0)
        p = util.testingProfile.fakeProfile([5], depths, latitude=0.0)
        qc = qctests.Argo_global_range_check.test(p, None)
        truth = numpy.zeros(1, dtype=bool)
        truth[0] = should_flag
        assert numpy.array_equal(qc, truth), message
| [
"numpy.zeros",
"numpy.array_equal",
"util.obs_utils.pressure_to_depth"
] | [((489, 515), 'numpy.zeros', 'numpy.zeros', (['(1)'], {'dtype': 'bool'}), '(1, dtype=bool)\n', (500, 515), False, 'import numpy\n'), ((547, 575), 'numpy.array_equal', 'numpy.array_equal', (['qc', 'truth'], {}), '(qc, truth)\n', (564, 575), False, 'import numpy\n'), ((786, 812), 'numpy.zeros', 'numpy.zeros', (['(1)'], {'dtype': 'bool'}), '(1, dtype=bool)\n', (797, 812), False, 'import numpy\n'), ((824, 852), 'numpy.array_equal', 'numpy.array_equal', (['qc', 'truth'], {}), '(qc, truth)\n', (841, 852), False, 'import numpy\n'), ((1032, 1058), 'numpy.zeros', 'numpy.zeros', (['(1)'], {'dtype': 'bool'}), '(1, dtype=bool)\n', (1043, 1058), False, 'import numpy\n'), ((1070, 1098), 'numpy.array_equal', 'numpy.array_equal', (['qc', 'truth'], {}), '(qc, truth)\n', (1087, 1098), False, 'import numpy\n'), ((1307, 1333), 'numpy.zeros', 'numpy.zeros', (['(1)'], {'dtype': 'bool'}), '(1, dtype=bool)\n', (1318, 1333), False, 'import numpy\n'), ((1365, 1393), 'numpy.array_equal', 'numpy.array_equal', (['qc', 'truth'], {}), '(qc, truth)\n', (1382, 1393), False, 'import numpy\n'), ((1783, 1809), 'numpy.zeros', 'numpy.zeros', (['(1)'], {'dtype': 'bool'}), '(1, dtype=bool)\n', (1794, 1809), False, 'import numpy\n'), ((1841, 1869), 'numpy.array_equal', 'numpy.array_equal', (['qc', 'truth'], {}), '(qc, truth)\n', (1858, 1869), False, 'import numpy\n'), ((2096, 2122), 'numpy.zeros', 'numpy.zeros', (['(1)'], {'dtype': 'bool'}), '(1, dtype=bool)\n', (2107, 2122), False, 'import numpy\n'), ((2134, 2162), 'numpy.array_equal', 'numpy.array_equal', (['qc', 'truth'], {}), '(qc, truth)\n', (2151, 2162), False, 'import numpy\n'), ((1652, 1699), 'util.obs_utils.pressure_to_depth', 'obs_utils.pressure_to_depth', (['[-5.00000001]', '(0.0)'], {}), '([-5.00000001], 0.0)\n', (1679, 1699), False, 'from util import obs_utils\n'), ((1974, 2012), 'util.obs_utils.pressure_to_depth', 'obs_utils.pressure_to_depth', (['[-5]', '(0.0)'], {}), '([-5], 0.0)\n', (2001, 2012), False, 'from util import 
obs_utils\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Generate website content
import pinklib.website as web
import os
from astropy.stats import median_absolute_deviation
import numpy as np
import importlib
import pandas as pd
# INPUT PARAMETERS
# Name of the binary file
cutouts_bin_name = 'cutouts_preprocessed'
# SOM parameters
# `layout` and `som_label` are passed to `web.SOM` below; `layout` describes
# the arrangement of the SOM grid.
layout = 'quadratic'
som_label = 'cyclic SOM'
# Outliers parameters
# Can be any number smaller than the total amount of cut-outs/sources in your binary file
number_of_outliers_to_show = 100
# Cutouts to website parameters
max_number_of_images_to_show = 10
# Paths and directories
SOM_directory = 'SOM_directory'
catalogue_path = os.path.join('SOM_directory', 'catalogue_LOTSS_DR1_final.pkl')
data_path = os.path.join('SOM_directory', 'LOTSS_DR1_final.bin')
# Trained-SOM binary and its data-to-SOM mapping (consumed below by
# `web.unpack_trained_som` and `web.load_som_mapping`).
trained_path = os.path.join('SOM_directory',
    'resultLOTSS_DR1_final_10x10x67_0.443146905982625_12.533141373155_ID14.bin')
mapping_path = os.path.join('SOM_directory',
    'mapping_resultLOTSS_DR1_final_10x10x67_0.443146905982625_12.533141373155_ID14.bin')
# Create the output directories for the generated website content.
website_path = 'website'
if not os.path.exists(website_path):
    os.mkdir(website_path)
outliers_path = os.path.join(website_path, 'outliers')
# Bug fix: the original re-checked and re-created `website_path` here instead
# of the outliers subdirectory, so `outliers_path` was never created.
if not os.path.exists(outliers_path):
    os.makedirs(outliers_path)
# Create SOM object (a data structure that holds the properties of a SOM)
data_som, number_of_channels, som_width, som_height, som_depth, neuron_width, neuron_height = web.unpack_trained_som(trained_path, layout)
my_som = web.SOM(data_som, number_of_channels, som_width, som_height, som_depth, layout,
    SOM_directory, '', som_label, neuron_width, 99)
# Plot SOM
web.plot_som(my_som, gap=2, save=True, save_dir=website_path, normalize=True, cmap='viridis')
# Spread of the sources over the SOM
# Create list of indexes to retrieve coordinates of the cut-outs
data_map, _, _, _, _ = web.load_som_mapping(mapping_path, my_som, verbose=True)
# Distance of every source to its best matching unit (BMU): the minimum
# distance over all prototypes.
distance_to_bmu = np.min(data_map, axis=1)
# Source indices ordered from largest to smallest BMU distance
# (biggest outliers first).
distance_to_bmu_sorted_down_id = np.argsort(distance_to_bmu)[::-1]
# Index of the closest prototype (BMU) for every source.
closest_prototype_id = np.argmin(data_map, axis=1)
# Convert the flat prototype index into (x, y) grid coordinates.
prototype_x = np.array([int(i % my_som.som_width) for i in closest_prototype_id])
prototype_y = np.array([int(i / my_som.som_width) for i in closest_prototype_id])
# Number of sources mapped to each prototype.
closest_to_prototype_count = np.bincount(closest_prototype_id)
print('Mean bincount:', np.mean(closest_to_prototype_count))
print('Standard deviation bincount:', np.std(closest_to_prototype_count))
print('median bincount:', np.median(closest_to_prototype_count))
# Bug fix: this value is the median absolute deviation, but it was printed
# under a duplicated 'Standard deviation bincount:' label.
print('Median absolute deviation bincount:', median_absolute_deviation(closest_to_prototype_count))
# Plot best matching unit heatmap
# Heatmap showing how many sources map to each SOM prototype.
web.plot_som_bmu_heatmap(my_som, closest_prototype_id, cbar=False, save=True, save_dir=website_path)
# Generate website content
catalogue = pd.read_pickle(catalogue_path)
# Save cut-out images of the sources that are furthest from their best
# matching unit, i.e. the most unusual sources.
outliers = web.plot_and_save_outliers(my_som, outliers_path, data_path,
    number_of_outliers_to_show, catalogue, closest_prototype_id,
    distance_to_bmu_sorted_down_id, clip_threshold=False, plot_border=False,
    debug=False, apply_clipping=False, overwrite=True, save=True)
# Plot outliers histogram
web.plot_distance_to_bmu_histogram(distance_to_bmu, number_of_outliers_to_show,
    outliers_path, xmax=150, save=True)
# Save all max_number_of_images_to_show
# For every prototype, save its image plus the closest cut-outs.
web.save_all_prototypes_and_cutouts(my_som, data_path, website_path, max_number_of_images_to_show,
    catalogue, catalogue, data_map,
    figsize=5, save=True, plot_border=False, plot_cutout_index=False, apply_clipping=False,
    clip_threshold=3, overwrite=True)
| [
"pinklib.website.unpack_trained_som",
"numpy.argsort",
"pinklib.website.SOM",
"pinklib.website.plot_som",
"pandas.read_pickle",
"os.path.exists",
"pinklib.website.plot_distance_to_bmu_histogram",
"numpy.mean",
"pinklib.website.plot_som_bmu_heatmap",
"os.mkdir",
"numpy.min",
"numpy.argmin",
"... | [((660, 722), 'os.path.join', 'os.path.join', (['"""SOM_directory"""', '"""catalogue_LOTSS_DR1_final.pkl"""'], {}), "('SOM_directory', 'catalogue_LOTSS_DR1_final.pkl')\n", (672, 722), False, 'import os\n'), ((735, 787), 'os.path.join', 'os.path.join', (['"""SOM_directory"""', '"""LOTSS_DR1_final.bin"""'], {}), "('SOM_directory', 'LOTSS_DR1_final.bin')\n", (747, 787), False, 'import os\n'), ((803, 918), 'os.path.join', 'os.path.join', (['"""SOM_directory"""', '"""resultLOTSS_DR1_final_10x10x67_0.443146905982625_12.533141373155_ID14.bin"""'], {}), "('SOM_directory',\n 'resultLOTSS_DR1_final_10x10x67_0.443146905982625_12.533141373155_ID14.bin'\n )\n", (815, 918), False, 'import os\n'), ((953, 1076), 'os.path.join', 'os.path.join', (['"""SOM_directory"""', '"""mapping_resultLOTSS_DR1_final_10x10x67_0.443146905982625_12.533141373155_ID14.bin"""'], {}), "('SOM_directory',\n 'mapping_resultLOTSS_DR1_final_10x10x67_0.443146905982625_12.533141373155_ID14.bin'\n )\n", (965, 1076), False, 'import os\n'), ((1194, 1232), 'os.path.join', 'os.path.join', (['website_path', '"""outliers"""'], {}), "(website_path, 'outliers')\n", (1206, 1232), False, 'import os\n'), ((1477, 1521), 'pinklib.website.unpack_trained_som', 'web.unpack_trained_som', (['trained_path', 'layout'], {}), '(trained_path, layout)\n', (1499, 1521), True, 'import pinklib.website as web\n'), ((1531, 1662), 'pinklib.website.SOM', 'web.SOM', (['data_som', 'number_of_channels', 'som_width', 'som_height', 'som_depth', 'layout', 'SOM_directory', '""""""', 'som_label', 'neuron_width', '(99)'], {}), "(data_som, number_of_channels, som_width, som_height, som_depth,\n layout, SOM_directory, '', som_label, neuron_width, 99)\n", (1538, 1662), True, 'import pinklib.website as web\n'), ((1695, 1793), 'pinklib.website.plot_som', 'web.plot_som', (['my_som'], {'gap': '(2)', 'save': '(True)', 'save_dir': 'website_path', 'normalize': '(True)', 'cmap': '"""viridis"""'}), "(my_som, gap=2, save=True, save_dir=website_path, 
normalize=\n True, cmap='viridis')\n", (1707, 1793), True, 'import pinklib.website as web\n'), ((1915, 1971), 'pinklib.website.load_som_mapping', 'web.load_som_mapping', (['mapping_path', 'my_som'], {'verbose': '(True)'}), '(mapping_path, my_som, verbose=True)\n', (1935, 1971), True, 'import pinklib.website as web\n'), ((1990, 2014), 'numpy.min', 'np.min', (['data_map'], {'axis': '(1)'}), '(data_map, axis=1)\n', (1996, 2014), True, 'import numpy as np\n'), ((2105, 2132), 'numpy.argmin', 'np.argmin', (['data_map'], {'axis': '(1)'}), '(data_map, axis=1)\n', (2114, 2132), True, 'import numpy as np\n'), ((2322, 2355), 'numpy.bincount', 'np.bincount', (['closest_prototype_id'], {}), '(closest_prototype_id)\n', (2333, 2355), True, 'import numpy as np\n'), ((2685, 2790), 'pinklib.website.plot_som_bmu_heatmap', 'web.plot_som_bmu_heatmap', (['my_som', 'closest_prototype_id'], {'cbar': '(False)', 'save': '(True)', 'save_dir': 'website_path'}), '(my_som, closest_prototype_id, cbar=False, save=\n True, save_dir=website_path)\n', (2709, 2790), True, 'import pinklib.website as web\n'), ((2827, 2857), 'pandas.read_pickle', 'pd.read_pickle', (['catalogue_path'], {}), '(catalogue_path)\n', (2841, 2857), True, 'import pandas as pd\n'), ((2869, 3137), 'pinklib.website.plot_and_save_outliers', 'web.plot_and_save_outliers', (['my_som', 'outliers_path', 'data_path', 'number_of_outliers_to_show', 'catalogue', 'closest_prototype_id', 'distance_to_bmu_sorted_down_id'], {'clip_threshold': '(False)', 'plot_border': '(False)', 'debug': '(False)', 'apply_clipping': '(False)', 'overwrite': '(True)', 'save': '(True)'}), '(my_som, outliers_path, data_path,\n number_of_outliers_to_show, catalogue, closest_prototype_id,\n distance_to_bmu_sorted_down_id, clip_threshold=False, plot_border=False,\n debug=False, apply_clipping=False, overwrite=True, save=True)\n', (2895, 3137), True, 'import pinklib.website as web\n'), ((3178, 3297), 'pinklib.website.plot_distance_to_bmu_histogram', 
'web.plot_distance_to_bmu_histogram', (['distance_to_bmu', 'number_of_outliers_to_show', 'outliers_path'], {'xmax': '(150)', 'save': '(True)'}), '(distance_to_bmu,\n number_of_outliers_to_show, outliers_path, xmax=150, save=True)\n', (3212, 3297), True, 'import pinklib.website as web\n'), ((3373, 3638), 'pinklib.website.save_all_prototypes_and_cutouts', 'web.save_all_prototypes_and_cutouts', (['my_som', 'data_path', 'website_path', 'max_number_of_images_to_show', 'catalogue', 'catalogue', 'data_map'], {'figsize': '(5)', 'save': '(True)', 'plot_border': '(False)', 'plot_cutout_index': '(False)', 'apply_clipping': '(False)', 'clip_threshold': '(3)', 'overwrite': '(True)'}), '(my_som, data_path, website_path,\n max_number_of_images_to_show, catalogue, catalogue, data_map, figsize=5,\n save=True, plot_border=False, plot_cutout_index=False, apply_clipping=\n False, clip_threshold=3, overwrite=True)\n', (3408, 3638), True, 'import pinklib.website as web\n'), ((1121, 1149), 'os.path.exists', 'os.path.exists', (['website_path'], {}), '(website_path)\n', (1135, 1149), False, 'import os\n'), ((1155, 1177), 'os.mkdir', 'os.mkdir', (['website_path'], {}), '(website_path)\n', (1163, 1177), False, 'import os\n'), ((1240, 1268), 'os.path.exists', 'os.path.exists', (['website_path'], {}), '(website_path)\n', (1254, 1268), False, 'import os\n'), ((1274, 1299), 'os.makedirs', 'os.makedirs', (['website_path'], {}), '(website_path)\n', (1285, 1299), False, 'import os\n'), ((2048, 2075), 'numpy.argsort', 'np.argsort', (['distance_to_bmu'], {}), '(distance_to_bmu)\n', (2058, 2075), True, 'import numpy as np\n'), ((2380, 2415), 'numpy.mean', 'np.mean', (['closest_to_prototype_count'], {}), '(closest_to_prototype_count)\n', (2387, 2415), True, 'import numpy as np\n'), ((2455, 2489), 'numpy.std', 'np.std', (['closest_to_prototype_count'], {}), '(closest_to_prototype_count)\n', (2461, 2489), True, 'import numpy as np\n'), ((2517, 2554), 'numpy.median', 'np.median', 
(['closest_to_prototype_count'], {}), '(closest_to_prototype_count)\n', (2526, 2554), True, 'import numpy as np\n'), ((2594, 2647), 'astropy.stats.median_absolute_deviation', 'median_absolute_deviation', (['closest_to_prototype_count'], {}), '(closest_to_prototype_count)\n', (2619, 2647), False, 'from astropy.stats import median_absolute_deviation\n')] |
from matplotlib import pyplot
from math import cos, sin, atan
import numpy as np
import time
import seaborn
from matplotlib.animation import FuncAnimation
import sys
import json
class Neuron():
    """A single drawable neuron located at (x, y) on the matplotlib canvas."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def draw(self, neuron_radius):
        """Draw this neuron as an unfilled circle and return the created patch."""
        patch = pyplot.Circle((self.x, self.y), radius=neuron_radius, fill=False)
        axes = pyplot.gca()
        axes.add_patch(patch)
        return patch
class Layer():
    """One horizontal row of neurons in the network diagram, plus the
    connection lines to the previous layer.

    Each layer is created with one extra neuron (index 0) that is drawn but
    receives no incoming connection lines -- presumably the bias unit for the
    following layer (TODO confirm against the weight producer).
    """
    def __init__(self, network, number_of_neurons, number_of_neurons_in_widest_layer, weights):
        # `weights`: flattened incoming connection weights for this layer
        # (empty for the input layer).
        self.weights = weights
        self.vertical_distance_between_layers = 6
        self.horizontal_distance_between_neurons = 2
        self.neuron_radius = 0.5
        # +1 accounts for the extra (bias) neuron added to every layer.
        self.number_of_neurons_in_widest_layer = number_of_neurons_in_widest_layer + 1
        self.previous_layer = self.__get_previous_layer(network)
        self.network = network
        self.y = self.__calculate_layer_y_position()
        self.neurons = self.__intialise_neurons(number_of_neurons + 1)
    # NOTE(review): method name has a typo ("intialise"); left as-is since it
    # is name-mangled and only used inside this class.
    def __intialise_neurons(self, number_of_neurons):
        """Create the layer's neurons left-to-right, horizontally centered."""
        neurons = []
        x = self.__calculate_left_margin_so_layer_is_centered(number_of_neurons)
        for iteration in range(number_of_neurons):
            neuron = Neuron(x, self.y)
            neurons.append(neuron)
            x += self.horizontal_distance_between_neurons
        return neurons
    def __calculate_left_margin_so_layer_is_centered(self, number_of_neurons):
        """Left margin that centers this layer relative to the widest layer."""
        return self.horizontal_distance_between_neurons * (
            self.number_of_neurons_in_widest_layer - number_of_neurons) / 2
    def __calculate_layer_y_position(self):
        """Stack layers vertically; the first layer sits at y = 0."""
        if self.previous_layer:
            return self.previous_layer.y + self.vertical_distance_between_layers
        else:
            return 0
    def __get_previous_layer(self, network):
        """Return the most recently added layer, or None for the first layer."""
        if len(network.layers) > 0:
            return network.layers[-1]
        else:
            return None
    def __line_between_two_neurons(self, neuron1, neuron2, thickness):
        """Draw a connection line between the edges of two neuron circles.

        `thickness` (a normalized weight) scales the line width; its sign
        selects the color channel: positive weights push the green channel up,
        negative weights push the red channel up.
        """
        angle = atan((neuron2.x - neuron1.x) / float(neuron2.y - neuron1.y))
        # Offset the endpoints so the line starts/ends on the circle edge,
        # not at the neuron center.
        x_adjustment = self.neuron_radius * sin(angle)
        y_adjustment = self.neuron_radius * cos(angle)
        line = pyplot.Line2D((neuron1.x - x_adjustment, neuron2.x + x_adjustment),
                              (neuron1.y - y_adjustment, neuron2.y + y_adjustment),
                              linewidth=thickness * 2,
                              color=(0.5 - min(0, thickness) / 2, 0.5 + max(0, thickness) / 2, 0))
        pyplot.gca().add_line(line)
        return line
    def draw(self, layerType=0):
        """Draw this layer's neurons, its incoming connections, and its label.

        `layerType`: 0 = input layer, -1 = output layer, any other value is a
        hidden-layer index.  Returns the list of created matplotlib artists.
        """
        artists = []
        i = 0
        j = 0
        # The extra neuron at index 0 is not drawn for the output layer.
        if layerType != -1:
            artists.append(self.neurons[0].draw(self.neuron_radius))
        for neuron in self.neurons[1:]:
            #print(len(self.neurons[1:]))
            j = 0
            artists.append(neuron.draw(self.neuron_radius))
            if self.previous_layer:
                for previous_layer_neuron in self.previous_layer.neurons:
                    #print(len(self.previous_layer.neurons))
                    # Weight for the connection from previous-layer neuron j to
                    # this layer's neuron i; assumes the flat weight vector has
                    # stride len(self.neurons[1:]) per previous-layer neuron --
                    # TODO confirm against the weight producer.
                    artists.append(self.__line_between_two_neurons(neuron, previous_layer_neuron,
                                                                   self.weights[i + j * len(self.neurons[1:])]))
                    j += 1
            i += 1
        # write Text
        x_text = self.number_of_neurons_in_widest_layer * self.horizontal_distance_between_neurons
        if layerType == 0:
            pyplot.text(x_text, self.y, 'Input Layer', fontsize=12)
        elif layerType == -1:
            pyplot.text(x_text, self.y, 'Output Layer', fontsize=12)
        else:
            pyplot.text(x_text, self.y, 'Hidden Layer ' + str(layerType), fontsize=12)
class NeuralNetwork():
    """Collects Layer objects and renders the complete network diagram."""
    def __init__(self, number_of_neurons_in_widest_layer, biggest_weight):
        self.number_of_neurons_in_widest_layer = number_of_neurons_in_widest_layer
        self.biggest_weight = biggest_weight
        self.layers = []
        self.layertype = 0
    def add_layer(self, number_of_neurons, weights):
        """Append a layer, normalising its weights by the largest weight."""
        scaled_weights = [weight / self.biggest_weight for weight in weights]
        new_layer = Layer(self, number_of_neurons, self.number_of_neurons_in_widest_layer, scaled_weights)
        self.layers.append(new_layer)
    def draw(self):
        """Draw every layer and return the collected matplotlib artists."""
        artists = []
        last_index = len(self.layers) - 1
        for index, layer in enumerate(self.layers):
            # The last layer is passed -1 so it is labelled as the output layer.
            layer_type = -1 if index == last_index else index
            artists.append(layer.draw(layer_type))
        pyplot.axis('scaled')
        pyplot.axis('off')
        pyplot.title('Neural Network architecture', fontsize=15)
        return artists
class DrawNN():
    """Derives layer sizes from a list of flattened per-layer weight vectors
    and draws the resulting network architecture.
    """
    def __init__(self, input_size, weights):
        # Infer each layer's neuron count from its flattened weight-vector
        # length, accounting for the extra bias unit in the preceding layer.
        self.neural_network = [input_size]
        for layer_weights in weights:
            previous_size = self.neural_network[-1]
            self.neural_network.append(int(len(layer_weights) / (previous_size + 1)))
        self.weights = weights
        # The input layer has no incoming weights.
        # NOTE: this mutates the caller's list in place.
        self.weights.insert(0, [])
    def draw(self):
        """Build a NeuralNetwork from the stored sizes/weights and draw it."""
        widest_layer = max(self.neural_network)
        biggest_weight = max([max([abs(value) for value in layer]) for layer in self.weights if layer != []])
        network = NeuralNetwork(widest_layer, biggest_weight)
        for size, layer_weights in zip(self.neural_network, self.weights):
            network.add_layer(size, layer_weights)
        return network.draw()
class NNVisualisation():
    """Animated visualisation of a neural network's weights.

    Two modes:
      * snapshot mode  -- constructed with (input_size, weights); calling
        draw(new_weights) animates a blend from the stored weights to the
        new ones over 10 frames.
      * sequence mode  -- constructed with (input_size, weights, biases)
        where both are lists of per-epoch snapshots; calling draw() steps
        through them one frame per snapshot.

    BUGFIX: the original defined __init__ and draw twice; Python keeps only
    the last definition, so the snapshot-mode constructor and
    draw(new_weights) were silently dead and calling them raised TypeError.
    Both pairs are merged here with optional arguments.
    """

    def __init__(self, input_size, weights, biases=None):
        self.input_size = input_size
        if biases is None:
            # snapshot mode: animate between old and new weights
            self.old_weights = weights
            self.new_weights = weights
            self.new_biases = None
            self.list_weights = None
            self.list_biases = None
        else:
            # sequence mode: one frame per stored snapshot
            self.old_weights = None
            self.new_weights = None
            self.new_biases = None
            self.list_weights = weights
            self.list_biases = biases
        self.fig = pyplot.figure()

    def update(self, i):
        """Frame callback (snapshot mode): draw a linear blend i/10 of the
        way from old_weights to new_weights."""
        pyplot.gca().clear()
        old_weight_mod = 1.0 - i * 0.1
        new_weight_mod = i * 0.1
        modded_weights = []
        for old_layer, new_layer in zip(self.old_weights, self.new_weights):
            modded_layer = []
            for old_weight, new_weight in zip(old_layer, new_layer):
                modded_layer.append(old_weight * old_weight_mod + new_weight * new_weight_mod)
            modded_weights.append(modded_layer)
        network = DrawNN(self.input_size, modded_weights)
        return network.draw()

    def update_lists(self, i):
        """Frame callback (sequence mode): draw snapshot i, prepending each
        layer's biases to its weights (DrawNN expects bias+weights flat)."""
        pyplot.gca().clear()
        self.old_weights = self.list_weights[i]
        self.new_weights = self.list_weights[i]
        self.new_biases = self.list_biases[i]
        modded_weights = []
        for bias_layer, new_layer in zip(self.new_biases, self.new_weights):
            modded_layer = []
            for bias in bias_layer:
                modded_layer.append(bias)
            for new_weight in new_layer:
                modded_layer.append(new_weight)
            modded_weights.append(modded_layer)
        network = DrawNN(self.input_size, modded_weights)
        return network.draw()

    def draw(self, new_weights=None):
        """Run the animation.

        With new_weights: snapshot mode (blend to the given weights, then
        store them as the new baseline).  Without: sequence mode.
        """
        if new_weights is None:
            # keep a reference to the animation while show() blocks
            anim = FuncAnimation(self.fig, self.update_lists,
                                 frames=np.arange(0, len(self.list_weights)), interval=500)
            pyplot.show()
        else:
            self.new_weights = new_weights
            anim = FuncAnimation(self.fig, self.update,
                                 frames=np.arange(0, 10), interval=200)
            pyplot.show()
            self.old_weights = new_weights
class NNVisualisationAdaptedFactory:
    """Factory producing NNVisualisationAdapted instances."""

    def createVisualisator(self, input_size, L, parameters):
        """Visualisator for a single parameter snapshot (L layers)."""
        return NNVisualisationAdapted(input_size, L, parameters)

    def createVisualisatorList(self, input_size, L, parameters_list):
        """Visualisator animating a list of parameter snapshots; the extra
        None argument selects the list-based constructor."""
        return NNVisualisationAdapted(input_size, L, parameters_list, None)
class NNVisualisationAdapted(NNVisualisation):
    """Adapter feeding parameter dicts ({"W1": ..., "b1": ...}, one matrix
    per layer) into NNVisualisation.

    BUGFIX: the original defined __init__ and draw twice; Python keeps only
    the last definition of each, so the single-snapshot constructor and
    draw(parameters, L) were dead code.  The dead definitions are removed;
    the surviving behaviour is unchanged.
    """

    def __init__(self, input_size, L, parameters, pass_var):
        """Build flattened per-snapshot weight/bias lists.

        parameters: iterable of parameter dicts (one per snapshot).
        pass_var is unused; it only distinguished this constructor from the
        (now removed) single-snapshot one.
        """
        nested_weights = []
        nested_biases = []
        for param in parameters:
            nested_weights.append(self.extract_weights_to_array_form_from_list(param, L))
            nested_biases.append(self.extract_biases_to_array_form_from_list(param, L))
        list_weights = []
        list_biases = []
        for i in range(len(nested_weights)):
            # flatten each layer's matrix into one flat list per layer
            list_weights.append([[item for row in layer for item in row]
                                 for layer in nested_weights[i]])
            list_biases.append([[item for row in layer for item in row]
                                for layer in nested_biases[i]])
        NNVisualisation.__init__(self, input_size, list_weights, list_biases)

    def draw(self):
        """Animate the stored snapshot sequence."""
        NNVisualisation.draw(self)

    def extract_weights_to_array_form(self, parameters, L):
        """Return [W1..W(L-1)] each flattened to a plain list.
        NOTE(review): range(1, L) stops at L-1 — confirm L counts layers
        inclusively in the callers' convention."""
        weights = []
        for i in range(1, L):
            weights.append(parameters["W" + str(i)].reshape(-1).tolist())
        return weights

    def extract_weights_to_array_form_from_list(self, parameters, L):
        """Return [W1..W(L-1)] as stored (matrices, not flattened)."""
        weights = []
        for i in range(1, L):
            weights.append(parameters["W" + str(i)])
        return weights

    def extract_biases_to_array_form_from_list(self, parameters, L):
        """Return [b1..b(L-1)] as stored (matrices, not flattened)."""
        weights = []
        for i in range(1, L):
            weights.append(parameters["b" + str(i)])
        return weights
if __name__ == "__main__":
    # Usage: python <script> parameters.json
    # The JSON file holds [input_size, L, snapshot0, snapshot1, ...].
    json_filename = sys.argv[1]
    with open(json_filename) as f:
        json_parameters = json.load(f)
    vis = NNVisualisationAdaptedFactory().createVisualisatorList(
        json_parameters[0], json_parameters[1], json_parameters[2:])
    # removed dead assignment `weights = json_filename` (never used)
    vis.draw()
| [
"matplotlib.pyplot.text",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.title",
"matplotlib.pyplot.gca",
"math.cos",
"matplotlib.pyplot.figure",
"json.load",
"matplotlib.pyplot.axis",
"math.sin",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((317, 382), 'matplotlib.pyplot.Circle', 'pyplot.Circle', (['(self.x, self.y)'], {'radius': 'neuron_radius', 'fill': '(False)'}), '((self.x, self.y), radius=neuron_radius, fill=False)\n', (330, 382), False, 'from matplotlib import pyplot\n'), ((4552, 4573), 'matplotlib.pyplot.axis', 'pyplot.axis', (['"""scaled"""'], {}), "('scaled')\n", (4563, 4573), False, 'from matplotlib import pyplot\n'), ((4582, 4600), 'matplotlib.pyplot.axis', 'pyplot.axis', (['"""off"""'], {}), "('off')\n", (4593, 4600), False, 'from matplotlib import pyplot\n'), ((4609, 4665), 'matplotlib.pyplot.title', 'pyplot.title', (['"""Neural Network architecture"""'], {'fontsize': '(15)'}), "('Neural Network architecture', fontsize=15)\n", (4621, 4665), False, 'from matplotlib import pyplot\n'), ((5571, 5586), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (5584, 5586), False, 'from matplotlib import pyplot\n'), ((5862, 5877), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (5875, 5877), False, 'from matplotlib import pyplot\n'), ((7256, 7269), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (7267, 7269), False, 'from matplotlib import pyplot\n'), ((7455, 7468), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (7466, 7468), False, 'from matplotlib import pyplot\n'), ((9848, 9860), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9857, 9860), False, 'import json\n'), ((2158, 2168), 'math.sin', 'sin', (['angle'], {}), '(angle)\n', (2161, 2168), False, 'from math import cos, sin, atan\n'), ((2213, 2223), 'math.cos', 'cos', (['angle'], {}), '(angle)\n', (2216, 2223), False, 'from math import cos, sin, atan\n'), ((3527, 3582), 'matplotlib.pyplot.text', 'pyplot.text', (['x_text', 'self.y', '"""Input Layer"""'], {'fontsize': '(12)'}), "(x_text, self.y, 'Input Layer', fontsize=12)\n", (3538, 3582), False, 'from matplotlib import pyplot\n'), ((391, 403), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (401, 403), False, 'from matplotlib import 
pyplot\n'), ((2550, 2562), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (2560, 2562), False, 'from matplotlib import pyplot\n'), ((3625, 3681), 'matplotlib.pyplot.text', 'pyplot.text', (['x_text', 'self.y', '"""Output Layer"""'], {'fontsize': '(12)'}), "(x_text, self.y, 'Output Layer', fontsize=12)\n", (3636, 3681), False, 'from matplotlib import pyplot\n'), ((5912, 5924), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (5922, 5924), False, 'from matplotlib import pyplot\n'), ((6482, 6494), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (6492, 6494), False, 'from matplotlib import pyplot\n'), ((7216, 7232), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (7225, 7232), True, 'import numpy as np\n')] |
# -*- coding: ascii -*-
# $Id: CNCCanvas.py,v 1.7 2014/10/15 15:04:06 bnv Exp $
#
# Author: <EMAIL>
# Date: 24-Aug-2014
import math
import time
import bmath
try:
from Tkinter import *
import Tkinter
except ImportError:
from tkinter import *
import tkinter as Tkinter
from CNC import Tab, CNC
import Utils
import Camera
import tkExtra
# Probe mapping we need PIL and numpy
try:
from PIL import Image, ImageTk
import numpy
# Resampling image based on PIL library and converting to RGB.
# options possible: NEAREST, BILINEAR, BICUBIC, ANTIALIAS
RESAMPLE = Image.NEAREST # resize type
#RESAMPLE = Image.BILINEAR # resize type
except:
numpy = None
RESAMPLE = None
# --- Projection views selectable for the canvas ---
VIEW_XY = 0
VIEW_XZ = 1
VIEW_YZ = 2
VIEW_ISO1 = 3
VIEW_ISO2 = 4
VIEW_ISO3 = 5
VIEWS = ["X-Y", "X-Z", "Y-Z", "ISO1", "ISO2", "ISO3"]

# --- Geometry of the gantry marker (canvas pixels) ---
INSERT_WIDTH2 = 3
GANTRY_R = 4
GANTRY_X = GANTRY_R*2 # 10
GANTRY_Y = GANTRY_R # 5
GANTRY_H = GANTRY_R*5 # 20

DRAW_TIME = 5 # Maximum draw time permitted (seconds)

# --- Colors for the various canvas items ---
INSERT_COLOR = "Blue"
GANTRY_COLOR = "Red"
MARGIN_COLOR = "Magenta"
GRID_COLOR = "Gray"
BOX_SELECT = "Cyan"
TAB_COLOR = "DarkOrange"
TABS_COLOR = "Orange"
WORK_COLOR = "Orange"
CAMERA_COLOR = "Cyan"
CANVAS_COLOR = "White"
ENABLE_COLOR = "Black"
DISABLE_COLOR = "LightGray"
SELECT_COLOR = "Blue"
SELECT2_COLOR = "DarkCyan"
PROCESS_COLOR = "Green"
MOVE_COLOR = "DarkCyan"
RULER_COLOR = "Green"
PROBE_TEXT_COLOR = "Green"
INFO_COLOR = "Gold"

# Canvas tags marking the current selection
SELECTION_TAGS = ("sel", "sel2", "sel3", "sel4")

# --- Mouse/tool action states ---
ACTION_SELECT = 0
ACTION_SELECT_SINGLE = 1
ACTION_SELECT_AREA = 2
ACTION_SELECT_DOUBLE = 3
ACTION_PAN = 10
ACTION_ORIGIN = 11
ACTION_MOVE = 20
ACTION_ROTATE = 21
ACTION_GANTRY = 22
ACTION_WPOS = 23
ACTION_RULER = 30
ACTION_ADDORIENT = 31
#ACTION_ADDTAB = 40

# --- Keyboard modifier masks (Tk event.state bits) ---
SHIFT_MASK = 1
CONTROL_MASK = 4
ALT_MASK = 8
CONTROLSHIFT_MASK = SHIFT_MASK | CONTROL_MASK

CLOSE_DISTANCE = 5 # snap/hit-test distance in screen pixels
MAXDIST = 10000
ZOOM = 1.25 # zoom step per wheel click

# Precomputed factors for the 60-degree isometric projections
S60 = math.sin(math.radians(60))
C60 = math.cos(math.radians(60))

DEF_CURSOR = ""
# Mouse cursor shape used while each action is active
MOUSE_CURSOR = {
    ACTION_SELECT : DEF_CURSOR,
    ACTION_SELECT_AREA : "right_ptr",
    ACTION_PAN : "fleur",
    ACTION_ORIGIN : "cross",
#   ACTION_ORBIT : "exchange",
#   ACTION_ZOOM_IN : "sizing",
#   ACTION_ZOOM_OUT : "sizing",
#   ACTION_ZOOM_ON : "sizing",
#   ACTION_VIEW_CENTER : "cross",
#   ACTION_VIEW_MOVE : "fleur",
#   ACTION_VIEW_ROTATE : "exchange",
#   ACTION_ADDTAB : "tcross",
    ACTION_MOVE : "hand1",
    ACTION_ROTATE : "exchange",
    ACTION_GANTRY : "target",
    ACTION_WPOS : "diamond_cross",
    ACTION_RULER : "tcross",
    ACTION_ADDORIENT : "tcross",
#   ACTION_EDIT : "pencil",
}
# ------------------------------------------------------------------------------
def mouseCursor(action):
    """Return the Tk cursor name for *action*, or the default cursor."""
    try:
        return MOUSE_CURSOR[action]
    except KeyError:
        return DEF_CURSOR
#==============================================================================
# Raise an alarm exception
#==============================================================================
class AlarmException(Exception):
    # Raised to abort a long-running operation (e.g. a draw that exceeds
    # DRAW_TIME).
    pass
#==============================================================================
# Drawing canvas
#==============================================================================
class CNCCanvas(Canvas):
def __init__(self, master, app, *kw, **kwargs):
    """Create the g-code drawing canvas.

    master: parent Tk widget.
    app: the main application object; provides .cnc, .gcode and the
    command/selection entry points the mouse handlers call back into.
    """
    Canvas.__init__(self, master, *kw, **kwargs)

    # Global variables
    self.view = 0                       # current projection (VIEW_*)
    self.app = app
    self.cnc = app.cnc
    self.gcode = app.gcode
    self.actionVar = IntVar()           # mirrors self.action for radio menus

    # Canvas bindings: mouse selection/drag, panning, zooming, keys
    self.bind('<Configure>', self.configureEvent)
    self.bind('<Motion>', self.motion)
    self.bind('<Button-1>', self.click)
    self.bind('<B1-Motion>', self.buttonMotion)
    self.bind('<ButtonRelease-1>', self.release)
    self.bind('<Double-1>', self.double)
    self.bind('<B2-Motion>', self.pan)
    self.bind('<ButtonRelease-2>', self.panRelease)
    # Button-4/5 are the X11 wheel events; <MouseWheel> covers Win/mac
    self.bind("<Button-4>", self.mouseZoomIn)
    self.bind("<Button-5>", self.mouseZoomOut)
    self.bind("<MouseWheel>", self.wheel)
    self.bind('<Shift-Button-4>', self.panLeft)
    self.bind('<Shift-Button-5>', self.panRight)
    self.bind('<Control-Button-4>', self.panUp)
    self.bind('<Control-Button-5>', self.panDown)
    self.bind('<Control-Key-Left>', self.panLeft)
    self.bind('<Control-Key-Right>',self.panRight)
    self.bind('<Control-Key-Up>', self.panUp)
    self.bind('<Control-Key-Down>', self.panDown)
    self.bind('<Escape>', self.actionCancel)
    # Single-letter shortcuts are dispatched through handleKey below
#       self.bind('<Key-a>', lambda e,s=self : s.event_generate("<<SelectAll>>"))
#       self.bind('<Key-A>', lambda e,s=self : s.event_generate("<<SelectNone>>"))
#       self.bind('<Key-e>', lambda e,s=self : s.event_generate("<<Expand>>"))
#       self.bind('<Key-f>', self.fit2Screen)
#       self.bind('<Key-g>', self.setActionGantry)
#       self.bind('<Key-l>', lambda e,s=self : s.event_generate("<<EnableToggle>>"))
#       self.bind('<Key-m>', self.setActionMove)
#       self.bind('<Key-n>', lambda e,s=self : s.event_generate("<<ShowInfo>>"))
#       self.bind('<Key-o>', self.setActionOrigin)
#       self.bind('<Key-r>', self.setActionRuler)
#       self.bind('<Key-s>', self.setActionSelect)
##      self.bind('<Key-t>', self.setActionAddTab)
#       self.bind('<Key-x>', self.setActionPan)
    self.bind('<Key>', self.handleKey)
    self.bind('<Control-Key-S>', self.cameraSave)
    self.bind('<Control-Key-t>', self.__test)
    self.bind('<Control-Key-equal>',self.menuZoomIn)
    self.bind('<Control-Key-minus>',self.menuZoomOut)
#       self.bind('<Control-Key-x>', self.cut)
#       self.bind('<Control-Key-c>', self.copy)
#       self.bind('<Control-Key-v>', self.paste)
#       self.bind('<Key-space>', self.commandFocus)
#       self.bind('<Control-Key-space>',self.commandFocus)
#       self.bind('<Control-Key-a>', self.selectAll)

    # Viewport state
    self.x0 = 0.0
    self.y0 = 0.0
    self.zoom = 1.0
    self.__tzoom = 1.0 # delayed zoom (temporary), applied in _zoomCanvas

    # Map canvas item id -> (block id, line id) of the g-code it draws
    self._items = {}

    # Mouse drag state: press position and previous-motion position
    self._x = self._y = 0
    self._xp = self._yp = 0
    self.action = ACTION_SELECT        # active tool (ACTION_*)
    self._mouseAction = None           # action of the drag in progress
    self._inDraw = False # semaphore for parsing

    # Canvas item handles (created lazily by the draw routines)
    self._gantry1 = None
    self._gantry2 = None
    self._select = None                # rubber-band selection rectangle
    self._margin = None
    self._amargin = None
    self._workarea = None
    self._vector = None                # ruler/move arrow line
    self._lastActive = None
    self._lastGantry = None            # last known gantry position

    # Probe-map image overlay
    self._probeImage = None
    self._probeTkImage= None
    self._probe = None

    # Alignment-camera state
    self.camera = Camera.Camera("aligncam")
    self.cameraAnchor = CENTER # Camera anchor location "" for gantry
    self.cameraRotation = 0.0 # camera Z angle
    self.cameraXCenter = 0.0 # camera X center offset
    self.cameraYCenter = 0.0 # camera Y center offset
    self.cameraScale = 10.0 # camera pixels/unit
    self.cameraEdge = False # edge detection
    self.cameraR = 1.5875 # circle radius in units (mm/inches)
    self.cameraDx = 0 # camera shift vs gantry
    self.cameraDy = 0
    self.cameraZ = None # if None it will not make any Z movement for the camera
    self.cameraSwitch = False # Look at spindle(False) or camera(True)
    self._cameraAfter = None # presumably the pending after() callback id — confirm
    self._cameraMaxWidth = 640 # on zoom over this size crop the image
    self._cameraMaxHeight= 480
    self._cameraImage = None
    self._cameraHori = None # cross hair items
    self._cameraVert = None
    self._cameraCircle = None
    self._cameraCircle2 = None

    # In-progress tab drawing (feature currently disabled below)
    self._tab = None
    self._tabRect = None

    # Drawing flags
    self.draw_axes = True
    self.draw_grid = True
    self.draw_margin = True
    self.draw_probe = True
    self.draw_workarea= True
    self.draw_paths = True
    self.draw_rapid = True # draw rapid motions

    self._wx = self._wy = self._wz = 0. # work position
    self._dx = self._dy = self._dz = 0. # work-machine position
    self._vx0 = self._vy0 = self._vz0 = 0 # vector move coordinates
    self._vx1 = self._vy1 = self._vz1 = 0 # vector move coordinates

    self._orientSelected = None

    #self.config(xscrollincrement=1, yscrollincrement=1)
    self.reset()
    self.initPosition()
# ----------------------------------------------------------------------
def reset(self):
    """Reset the viewport zoom back to 1:1."""
    self.zoom = 1.0
# ----------------------------------------------------------------------
# Set status message
# ----------------------------------------------------------------------
def status(self, msg):
    """Post *msg* to the application's status bar via a virtual event."""
    #self.event_generate("<<Status>>", data=msg.encode("utf8"))
    self.event_generate("<<Status>>", data=msg)
# ----------------------------------------------------------------------
def setMouseStatus(self, event):
    """Report the machine coordinates under the mouse via <<Coords>>."""
    cx = self.canvasx(event.x)
    cy = self.canvasy(event.y)
    x, y, z = self.canvas2xyz(cx, cy)
    self.event_generate("<<Coords>>", data="%.4f %.4f %.4f" % (x, y, z))
# ----------------------------------------------------------------------
# Update scrollbars
# ----------------------------------------------------------------------
def _updateScrollBars(self):
    """Update scroll region for new size.

    The region is grown to three times the drawing extents in each
    dimension so zooming in/out never touches the borders.
    """
    box = self.bbox('all')
    if box is None:
        return
    xmin, ymin, xmax, ymax = box
    width = xmax - xmin
    height = ymax - ymin
    self.configure(scrollregion=(xmin - width, ymin - height,
                                 xmax + width, ymax + height))
# ----------------------------------------------------------------------
def handleKey(self, event):
    """Dispatch single-letter keyboard shortcuts.

    Replaces the original if/elif chain with a dispatch table and drops
    the unused local `ctrl = event.state & CONTROL_MASK` (dead code).
    Unknown characters are ignored.
    """
    dispatch = {
        "a": lambda: self.event_generate("<<SelectAll>>"),
        "A": lambda: self.event_generate("<<SelectNone>>"),
        "e": lambda: self.event_generate("<<Expand>>"),
        "f": self.fit2Screen,
        "g": self.setActionGantry,
        "l": lambda: self.event_generate("<<EnableToggle>>"),
        "m": self.setActionMove,
        "n": lambda: self.event_generate("<<ShowInfo>>"),
        "o": self.setActionOrigin,
        "r": self.setActionRuler,
        "s": self.setActionSelect,
        # "t": self.setActionAddTab,  # tab tool currently disabled
        "x": self.setActionPan,
        "z": self.menuZoomIn,
        "Z": self.menuZoomOut,
    }
    handler = dispatch.get(event.char)
    if handler is not None:
        handler()
# ----------------------------------------------------------------------
def setAction(self, action):
    """Make *action* (ACTION_*) the active tool: sync the menu variable,
    clear any drag in progress and switch the mouse cursor."""
    self.action = action
    self.actionVar.set(action)
    self._mouseAction = None
    self.config(cursor=mouseCursor(self.action), background="White")
# ----------------------------------------------------------------------
def actionCancel(self, event=None):
    """Escape handler: drop back to the plain select tool when another
    tool or a drag is active."""
    drag_active = (self._mouseAction is not None and
                   self._mouseAction != ACTION_SELECT)
    if self.action != ACTION_SELECT or drag_active:
        self.setAction(ACTION_SELECT)
    return "break"
    #self.draw()
# ----------------------------------------------------------------------
def setActionSelect(self, event=None):
    """Switch to object-selection mode."""
    self.setAction(ACTION_SELECT)
    self.status(_("Select objects with mouse"))
# ----------------------------------------------------------------------
def setActionPan(self, event=None):
    """Switch to viewport-panning mode."""
    self.setAction(ACTION_PAN)
    self.status(_("Pan viewport"))
# ----------------------------------------------------------------------
def setActionOrigin(self, event=None):
    """Switch to origin-setting mode (next click defines zero)."""
    self.setAction(ACTION_ORIGIN)
    self.status(_("Click to set the origin (zero)"))
# ----------------------------------------------------------------------
def setActionMove(self, event=None):
    """Switch to graphical object-move mode."""
    self.setAction(ACTION_MOVE)
    self.status(_("Move graphically objects"))
# ----------------------------------------------------------------------
def setActionGantry(self, event=None):
    """Switch to gantry-jog mode; background tint signals the dangerous
    (machine-moving) mode."""
    self.setAction(ACTION_GANTRY)
    self.config(background="seashell")
    self.status(_("Move CNC gantry to mouse location"))
# ----------------------------------------------------------------------
def setActionWPOS(self, event=None):
    """Switch to mode where the next click defines the work position."""
    self.setAction(ACTION_WPOS)
    self.config(background="ivory")
    self.status(_("Set mouse location as current machine position (X/Y only)"))
# ----------------------------------------------------------------------
def setActionRuler(self, event=None):
    """Switch to ruler mode for measuring distances."""
    self.setAction(ACTION_RULER)
    self.status(_("Drag a ruler to measure distances"))
# ----------------------------------------------------------------------
def setActionAddMarker(self, event=None):
    """Switch to mode that adds an orientation marker on click."""
    self.setAction(ACTION_ADDORIENT)
    self.status(_("Add an orientation marker"))
# # ----------------------------------------------------------------------
# def setActionAddTab(self, event=None):
# self.setAction(ACTION_ADDTAB)
# self.status(_("Draw a square tab"))
# ----------------------------------------------------------------------
# Convert canvas cx,cy coordinates to machine space
# ----------------------------------------------------------------------
def canvas2Machine(self, cx, cy):
    """Convert canvas coordinates (cx, cy) to machine space.

    Returns an (x, y, z) triple where the component that cannot be
    recovered from the current 2D projection is None.  Canvas y grows
    downward, hence the negations.
    """
    u = cx / self.zoom
    v = cy / self.zoom
    if self.view == VIEW_XY:
        return u, -v, None
    elif self.view == VIEW_XZ:
        return u, None, -v
    elif self.view == VIEW_YZ:
        return None, u, -v
    # Isometric views: invert the 60-degree projection; Z is unknown
    elif self.view == VIEW_ISO1:
        return 0.5*(u/S60+v/C60), 0.5*(u/S60-v/C60), None
    elif self.view == VIEW_ISO2:
        return 0.5*(u/S60-v/C60), -0.5*(u/S60+v/C60), None
    elif self.view == VIEW_ISO3:
        return -0.5*(u/S60+v/C60), -0.5*(u/S60-v/C60), None
# ----------------------------------------------------------------------
# Image (pixel) coordinates to machine
# ----------------------------------------------------------------------
def image2Machine(self, x, y):
    """Convert widget (pixel) coordinates to machine coordinates."""
    return self.canvas2Machine(self.canvasx(x), self.canvasy(y))
# ----------------------------------------------------------------------
# Move gantry to mouse location
# ----------------------------------------------------------------------
def actionGantry(self, x, y):
    """Jog the gantry to the machine position under pixel (x, y), then
    fall back to select mode."""
    u,v,w = self.image2Machine(x,y)
    self.app.goto(u,v,w)
    self.setAction(ACTION_SELECT)
# ----------------------------------------------------------------------
# Set the work coordinates to mouse location
# ----------------------------------------------------------------------
def actionWPOS(self, x, y):
    """Set the work coordinate system so the point under pixel (x, y)
    becomes the current position, then fall back to select mode."""
    u,v,w = self.image2Machine(x,y)
    self.app.dro.wcsSet(u,v,w)
    self.setAction(ACTION_SELECT)
# ----------------------------------------------------------------------
# Add an orientation marker at mouse location
# ----------------------------------------------------------------------
def actionAddOrient(self, x, y):
    """Add an orientation marker pairing the current work position with
    the (snapped) machine point under pixel (x, y)."""
    cx,cy = self.snapPoint(self.canvasx(x), self.canvasy(y))
    u,v,w = self.canvas2Machine(cx,cy)
    # orientation needs both X and Y; impossible in XZ/YZ views
    if u is None or v is None:
        self.status(_("ERROR: Cannot set X-Y marker with the current view"))
        return
    self._orientSelected = len(self.gcode.orient)
    self.gcode.orient.add(CNC.vars["wx"], CNC.vars["wy"], u, v)
    self.event_generate("<<OrientSelect>>", data=self._orientSelected)
    #self.drawOrient()
    self.setAction(ACTION_SELECT)
# ----------------------------------------------------------------------
# Find item selected
# ----------------------------------------------------------------------
def click(self, event):
    """Button-1 press: start the interaction appropriate to self.action.

    BUGFIX: the ruler-endpoint hit test read
    `abs(coords[1]-j<=CLOSE_DISTANCE)` (and likewise for coords[3]) —
    abs() of a boolean — so the Y distance was never actually checked.
    The parenthesis is moved so the absolute distance is compared.
    """
    self.focus_set()
    self._x = self._xp = event.x
    self._y = self._yp = event.y
    # Control+Shift click always jogs the gantry, regardless of tool
    if event.state & CONTROLSHIFT_MASK == CONTROLSHIFT_MASK:
        self.actionGantry(event.x, event.y)
        return
    elif self.action == ACTION_SELECT:
        self._mouseAction = ACTION_SELECT_SINGLE
    elif self.action in (ACTION_MOVE, ACTION_RULER):
        i = self.canvasx(event.x)
        j = self.canvasy(event.y)
        if self.action == ACTION_RULER and self._vector is not None:
            # Check if we hit an endpoint of the existing ruler
            coords = self.coords(self._vector)
            if abs(coords[0]-i)<=CLOSE_DISTANCE and abs(coords[1]-j)<=CLOSE_DISTANCE:
                # grabbed the start point: swap ends so the drag moves it
                coords[0],coords[2] = coords[2], coords[0]
                coords[1],coords[3] = coords[3], coords[1]
                self.coords(self._vector, *coords)
                self._vx0, self._vy0, self._vz0 = self.canvas2xyz(coords[0], coords[1])
                self._mouseAction = self.action
                return
            elif abs(coords[2]-i)<=CLOSE_DISTANCE and abs(coords[3]-j)<=CLOSE_DISTANCE:
                # grabbed the end point: continue dragging it
                self._mouseAction = self.action
                return
        if self._vector: self.delete(self._vector)
        if self.action == ACTION_MOVE:
            # Only start a move if the click landed on a selected item
            try:
                for item in self.find_overlapping(i-CLOSE_DISTANCE, j-CLOSE_DISTANCE,
                        i+CLOSE_DISTANCE, j+CLOSE_DISTANCE):
                    tags = self.gettags(item)
                    if "sel" in tags or "sel2" in tags or \
                       "sel3" in tags or "sel4" in tags:
                        break
                else:
                    # nothing selected under cursor: degrade to selection
                    self._mouseAction = ACTION_SELECT_SINGLE
                    return
                fill = MOVE_COLOR
                arrow = LAST
            except:
                self._mouseAction = ACTION_SELECT_SINGLE
                return
        else:
            fill = RULER_COLOR
            arrow = BOTH
        # start the drag vector at the click point
        self._vector = self.create_line((i,j,i,j), fill=fill, arrow=arrow)
        self._vx0, self._vy0, self._vz0 = self.canvas2xyz(i,j)
        self._mouseAction = self.action
    # Move gantry to position
    elif self.action == ACTION_GANTRY:
        self.actionGantry(event.x,event.y)
    # Set work position
    elif self.action == ACTION_WPOS:
        self.actionWPOS(event.x,event.y)
    # Add orientation marker
    elif self.action == ACTION_ADDORIENT:
        self.actionAddOrient(event.x,event.y)
    # Set coordinate origin
    elif self.action == ACTION_ORIGIN:
        i = self.canvasx(event.x)
        j = self.canvasy(event.y)
        x,y,z = self.canvas2xyz(i,j)
        self.app.insertCommand(_("origin %g %g %g")%(x,y,z),True)
        self.setActionSelect()
    elif self.action == ACTION_PAN:
        self.pan(event)
# ----------------------------------------------------------------------
# Canvas motion button 1
# ----------------------------------------------------------------------
def buttonMotion(self, event):
    """Button-1 drag: grow the selection box, rubber-band the ruler/move
    vector, or pan, depending on the drag started in click()."""
    if self._mouseAction == ACTION_SELECT_AREA:
        # stretch the rubber-band rectangle
        self.coords(self._select,
                self.canvasx(self._x),
                self.canvasy(self._y),
                self.canvasx(event.x),
                self.canvasy(event.y))
    elif self._mouseAction in (ACTION_SELECT_SINGLE, ACTION_SELECT_DOUBLE):
        # promote a click to an area selection after a few pixels of drag
        if abs(event.x-self._x)>4 or abs(event.y-self._y)>4:
            self._mouseAction = ACTION_SELECT_AREA
            self._select = self.create_rectangle(
                    self.canvasx(self._x),
                    self.canvasy(self._y),
                    self.canvasx(event.x),
                    self.canvasy(event.y),
                    outline=BOX_SELECT)
    elif self._mouseAction in (ACTION_MOVE, ACTION_RULER):
        # drag the free end of the vector to the mouse
        coords = self.coords(self._vector)
        i = self.canvasx(event.x)
        j = self.canvasy(event.y)
        coords[-2] = i
        coords[-1] = j
        self.coords(self._vector, *coords)
        if self._mouseAction == ACTION_MOVE:
            # drag all selection layers along with the mouse
            self.move("sel", event.x-self._xp, event.y-self._yp)
            self.move("sel2", event.x-self._xp, event.y-self._yp)
            self.move("sel3", event.x-self._xp, event.y-self._yp)
            self.move("sel4", event.x-self._xp, event.y-self._yp)
            self._xp = event.x
            self._yp = event.y
        self._vx1, self._vy1, self._vz1 = self.canvas2xyz(i,j)
        dx=self._vx1-self._vx0
        dy=self._vy1-self._vy0
        dz=self._vz1-self._vz0
        # live readout of the vector being dragged
        self.status(_("dx=%g dy=%g dz=%g length=%g angle=%g")\
            % (dx,dy,dz,math.sqrt(dx**2+dy**2+dz**2),
               math.degrees(math.atan2(dy,dx))))
    elif self._mouseAction == ACTION_PAN:
        self.pan(event)
    self.setMouseStatus(event)
# ----------------------------------------------------------------------
# Canvas release button1. Select area
# ----------------------------------------------------------------------
def release(self, event):
    """Button-1 release: finish the selection or move started in click().

    Area selections dragged left-to-right select fully enclosed items;
    right-to-left selects overlapping items.  A finished move emits a
    `move dx dy dz` command instead of mutating the g-code directly.
    """
    if self._mouseAction in (ACTION_SELECT_SINGLE,
            ACTION_SELECT_DOUBLE,
            ACTION_SELECT_AREA):
        if self._mouseAction == ACTION_SELECT_AREA:
            #if event.state & SHIFT_MASK == 0:
            if self._x < event.x: # From left->right enclosed
                closest = self.find_enclosed(
                        self.canvasx(self._x),
                        self.canvasy(self._y),
                        self.canvasx(event.x),
                        self.canvasy(event.y))
            else: # From right->left overlapping
                closest = self.find_overlapping(
                        self.canvasx(self._x),
                        self.canvasy(self._y),
                        self.canvasx(event.x),
                        self.canvasy(event.y))
            self.delete(self._select)
            self._select = None
            items = []
            # map canvas ids back to (block, line) pairs; skip decorations
            for i in closest:
                try: items.append(self._items[i])
                except: pass
        elif self._mouseAction in (ACTION_SELECT_SINGLE, ACTION_SELECT_DOUBLE):
            closest = self.find_closest( self.canvasx(event.x),
                         self.canvasy(event.y),
                         CLOSE_DISTANCE)
            items = []
            for i in closest:
                try:
                    items.append(self._items[i])
                    #i = None
                except KeyError:
                    # not a path item; orientation markers select directly
                    tags = self.gettags(i)
                    if "Orient" in tags:
                        self.selectMarker(i)
                        return
                    #i = self.find_below(i)
                    pass
        if not items: return
        self.app.select(items, self._mouseAction==ACTION_SELECT_DOUBLE,
                event.state&CONTROL_MASK==0)
        self._mouseAction = None
    elif self._mouseAction == ACTION_MOVE:
        i = self.canvasx(event.x)
        j = self.canvasy(event.y)
        self._vx1, self._vy1, self._vz1 = self.canvas2xyz(i,j)
        dx=self._vx1-self._vx0
        dy=self._vy1-self._vy0
        dz=self._vz1-self._vz0
        self.status(_("Move by %g, %g, %g")%(dx,dy,dz))
        self.app.insertCommand(("move %g %g %g")%(dx,dy,dz),True)
    elif self._mouseAction == ACTION_PAN:
        self.panRelease(event)
# ----------------------------------------------------------------------
def double(self, event):
    """Double-click: mark the pending selection as a double-select so
    release() selects whole blocks."""
    #self.app.selectBlocks()
    self._mouseAction = ACTION_SELECT_DOUBLE
# ----------------------------------------------------------------------
def motion(self, event):
    """Plain mouse motion: keep the coordinate readout updated."""
    self.setMouseStatus(event)
#-----------------------------------------------------------------------
# Testing routine
#-----------------------------------------------------------------------
def __test(self, event):
    """Developer testing hook bound to Control-t; currently only converts
    the click position and collects the selected blocks."""
    i = self.canvasx(event.x)
    j = self.canvasy(event.y)
    x,y,z = self.canvas2xyz(i,j)
    blocks = self.app.editor.getSelectedBlocks()
    from bmath import Vector
    P = Vector(x,y)
#       for bid in blocks:
#           for path in self.gcode.toPath(bid):
#               print path
#               print path.isInside(P)
# ----------------------------------------------------------------------
# Snap to the closest point if any
# ----------------------------------------------------------------------
def snapPoint(self, cx, cy):
    """Snap canvas point (cx, cy) to the closest endpoint of a nearby
    g-code path item, within CLOSE_DISTANCE screen pixels.

    Returns the snapped (x, y), or the input unchanged when nothing is
    close enough.  Only the first and last coordinate pair of each item
    are tested (cheap, inaccurate approximation — arcs' centers are not
    considered).

    BUGFIX/cleanup: the original computed dmin from CNC.inch and then
    unconditionally overwrote it with (CLOSE_DISTANCE*zoom)**2; the dead
    branch is removed (behavior unchanged).
    """
    xs = ys = None
    dmin = (CLOSE_DISTANCE * self.zoom) ** 2
    for item in self.find_closest(cx, cy, CLOSE_DISTANCE):
        if item not in self._items:
            continue  # decoration item, not a g-code path
        coords = self.coords(item)
        # test first and last point of the item
        for x, y in ((coords[0], coords[1]), (coords[-2], coords[-1])):
            d = (cx - x) ** 2 + (cy - y) ** 2
            if d < dmin:
                dmin = d
                xs, ys = x, y
    if xs is not None:
        return xs, ys
    return cx, cy
#----------------------------------------------------------------------
# Get margins of selected items
#----------------------------------------------------------------------
def getMargins(self):
    """Return the (dx, dy) extents of the current selection in machine
    units, or None when nothing is selected."""
    box = self.bbox("sel")
    if not box:
        return None
    xlow, ylow, xhigh, yhigh = box
    width = (xhigh - xlow - 1) / self.zoom
    height = (yhigh - ylow - 1) / self.zoom
    return width, height
#----------------------------------------------------------------------
def xview(self, *args):
    """Horizontal scroll; also realigns the camera overlay when the view
    actually moved (args non-empty)."""
    ret = Canvas.xview(self, *args)
    if args: self.cameraPosition()
    return ret
#----------------------------------------------------------------------
def yview(self, *args):
    """Vertical scroll; also realigns the camera overlay when the view
    actually moved (args non-empty)."""
    ret = Canvas.yview(self, *args)
    if args: self.cameraPosition()
    return ret
#----------------------------------------------------------------------
def configureEvent(self, event):
    """Widget resize: realign the camera overlay."""
    self.cameraPosition()
# ----------------------------------------------------------------------
def pan(self, event):
    """Middle-button (or pan-tool) drag handler.

    The first event anchors the pan and switches the cursor; subsequent
    events drag the canvas and keep the camera overlay aligned.
    """
    if self._mouseAction != ACTION_PAN:
        self.config(cursor=mouseCursor(ACTION_PAN))
        self.scan_mark(event.x, event.y)
        self._mouseAction = ACTION_PAN
    else:
        self.scan_dragto(event.x, event.y, gain=1)
        self.cameraPosition()
# ----------------------------------------------------------------------
def panRelease(self, event):
    """End of a pan drag: restore the active tool's cursor."""
    self._mouseAction = None
    self.config(cursor=mouseCursor(self.action))
# ----------------------------------------------------------------------
def panLeft(self, event=None):
    """Scroll the view one unit left."""
    self.xview(SCROLL, -1, UNITS)
def panRight(self, event=None):
    """Scroll the view one unit right."""
    self.xview(SCROLL, 1, UNITS)
def panUp(self, event=None):
    """Scroll the view one unit up."""
    self.yview(SCROLL, -1, UNITS)
def panDown(self, event=None):
    """Scroll the view one unit down."""
    self.yview(SCROLL, 1, UNITS)
# ----------------------------------------------------------------------
# Delay zooming to cascade multiple zoom actions
# ----------------------------------------------------------------------
def zoomCanvas(self, x, y, zoom):
    """Request a zoom by *zoom* centered on pixel (x, y).

    Zoom factors are accumulated in __tzoom and applied once in
    _zoomCanvas at idle time, so rapid wheel events cascade into a
    single redraw.
    """
    self._tx = x
    self._ty = y
    self.__tzoom *= zoom
    self.after_idle(self._zoomCanvas)
# ----------------------------------------------------------------------
# Zoom on screen position x,y by a factor zoom
# ----------------------------------------------------------------------
def _zoomCanvas(self, event=None):
    """Apply the accumulated zoom around screen point (_tx, _ty).

    Scales all canvas items, repositions the gantry marker, resizes the
    scroll region, then drags the viewport so the point under the cursor
    stays fixed ("pin zoom").  Also rescales the probe image overlay.
    """
    x = self._tx
    y = self._ty
    zoom = self.__tzoom
    self.__tzoom = 1.0          # consume the pending zoom
    self.zoom *= zoom
    x0 = self.canvasx(0)
    y0 = self.canvasy(0)
    for i in self.find_all():
        self.scale(i, 0, 0, zoom, zoom)
    # Redraw the gantry marker at its (unscaled) real position
    if self._lastGantry:
        self._drawGantry(*self.plotCoords([self._lastGantry])[0])
    else:
        self._drawGantry(0,0)
    self._updateScrollBars()
    # x0/y0 now hold how much the origin shifted from the scroll update
    x0 -= self.canvasx(0)
    y0 -= self.canvasy(0)
    # Perform pin zoom: keep the point under the cursor stationary
    dx = self.canvasx(x) * (1.0-zoom)
    dy = self.canvasy(y) * (1.0-zoom)
    # Drag to new location to center viewport
    self.scan_mark(0,0)
    self.scan_dragto(int(round(dx-x0)), int(round(dy-y0)), 1)
    # Resize probe image if any
    if self._probe:
        self._projectProbeImage()
        self.itemconfig(self._probe, image=self._probeTkImage)
    self.cameraUpdate()
# ----------------------------------------------------------------------
# Return selected objects bounding box
# ----------------------------------------------------------------------
def selBbox(self):
	"""Return the union bounding box (x1, y1, x2, y2) of all four
	selection tags, or the bbox of everything when nothing is
	selected."""
	union = None
	for tag in ("sel", "sel2", "sel3", "sel4"):
		box = self.bbox(tag)
		if box is None:
			continue
		if union is None:
			union = list(box)
		else:
			union[0] = min(union[0], box[0])
			union[1] = min(union[1], box[1])
			union[2] = max(union[2], box[2])
			union[3] = max(union[3], box[3])
	if union is None:
		return self.bbox('all')
	return union[0], union[1], union[2], union[3]
# ----------------------------------------------------------------------
# Zoom to Fit to Screen
# ----------------------------------------------------------------------
def fit2Screen(self, event=None):
	"""Zoom and scroll so that the current selection (or the whole
	drawing when nothing is selected) fills the visible canvas,
	then center the view on it."""
	bb = self.selBbox()
	if bb is None: return
	x1,y1,x2,y2 = bb
	# Zoom factor needed per direction.  A degenerate (zero width or
	# height) bbox cannot be fitted; bail out instead of swallowing
	# every exception with a bare except.
	try:
		zx = float(self.winfo_width()) / (x2-x1)
	except ZeroDivisionError:
		return
	try:
		zy = float(self.winfo_height()) / (y2-y1)
	except ZeroDivisionError:
		return
	# zooming in: use the tighter factor; zooming out: the looser one
	if zx > 1.0:
		self.__tzoom = min(zx,zy)
	else:
		self.__tzoom = max(zx,zy)
	self._tx = self._ty = 0
	self._zoomCanvas()
	# Find position of new selection and scroll its center to the
	# middle of the viewport
	x1,y1,x2,y2 = self.selBbox()
	xm = (x1+x2)//2
	ym = (y1+y2)//2
	sx1,sy1,sx2,sy2 = map(float,self.cget("scrollregion").split())
	midx = float(xm-sx1) / (sx2-sx1)
	midy = float(ym-sy1) / (sy2-sy1)
	a,b = self.xview()
	d = (b-a)/2.0
	self.xview_moveto(midx-d)
	a,b = self.yview()
	d = (b-a)/2.0
	self.yview_moveto(midy-d)
	self.cameraPosition()
# ----------------------------------------------------------------------
def menuZoomIn(self, event=None):
	"""Zoom in (factor 2) around the center of the canvas."""
	cx = int(self.cget("width")) // 2
	cy = int(self.cget("height")) // 2
	self.zoomCanvas(cx, cy, 2.0)
# ----------------------------------------------------------------------
def menuZoomOut(self, event=None):
	"""Zoom out (factor 0.5) around the center of the canvas."""
	cx = int(self.cget("width")) // 2
	cy = int(self.cget("height")) // 2
	self.zoomCanvas(cx, cy, 0.5)
# ----------------------------------------------------------------------
def mouseZoomIn(self, event):
	"""Zoom in one ZOOM step around the mouse position."""
	self.zoomCanvas(event.x, event.y, ZOOM)
# ----------------------------------------------------------------------
def mouseZoomOut(self, event):
	"""Zoom out one ZOOM step around the mouse position."""
	self.zoomCanvas(event.x, event.y, 1.0/ZOOM)
# ----------------------------------------------------------------------
def wheel(self, event):
	"""Mouse-wheel zoom: one ZOOM step per 120 units of delta."""
	self.zoomCanvas(event.x, event.y, pow(ZOOM, (event.delta//120)))
# ----------------------------------------------------------------------
# Change the insert marker location
# ----------------------------------------------------------------------
def activeMarker(self, item):
	"""Move the 'active line' arrow to the canvas path of *item*,
	given as a (block-index, line-index) pair."""
	if item is None:
		return
	bid, lid = item
	if lid is None:
		return
	path = self.gcode[bid].path(lid)
	if path is None or path == self._lastActive:
		return
	if self._lastActive is not None:
		# drop the arrow from the previously active segment
		self.itemconfig(self._lastActive, arrow=NONE)
	self._lastActive = path
	self.itemconfig(path, arrow=LAST)
#----------------------------------------------------------------------
# Display gantry
#----------------------------------------------------------------------
def gantry(self, wx, wy, wz, mx, my, mz):
	"""Update the gantry marker from the current work (wx,wy,wz) and
	machine (mx,my,mz) positions; when the work/machine offset has
	changed, also move the work-area rectangle."""
	self._lastGantry = (wx,wy,wz)
	self._drawGantry(*self.plotCoords([(wx,wy,wz)])[0])
	if self._cameraImage and self.cameraAnchor==NONE:
		self.cameraPosition()
	# work - machine offset per axis
	dx = wx-mx
	dy = wy-my
	dz = wz-mz
	if abs(dx-self._dx) > 0.0001 or \
	   abs(dy-self._dy) > 0.0001 or \
	   abs(dz-self._dz) > 0.0001:
		self._dx = dx
		self._dy = dy
		self._dz = dz
		if not self.draw_workarea: return
		# recompute the work-area rectangle from the new offset
		xmin = self._dx-CNC.travel_x
		ymin = self._dy-CNC.travel_y
		zmin = self._dz-CNC.travel_z
		xmax = self._dx
		ymax = self._dy
		zmax = self._dz
		xyz = [(xmin, ymin, 0.),
		       (xmax, ymin, 0.),
		       (xmax, ymax, 0.),
		       (xmin, ymax, 0.),
		       (xmin, ymin, 0.)]
		coords = []
		for x,y in self.plotCoords(xyz):
			coords.append(x)
			coords.append(y)
		self.coords(self._workarea, *coords)
#----------------------------------------------------------------------
# Clear highlight of selection
#----------------------------------------------------------------------
def clearSelection(self):
	"""Remove the selection highlight: restore item colors/widths,
	drop all selection tags and delete the "info" overlay."""
	if self._lastActive is not None:
		self.itemconfig(self._lastActive, arrow=NONE)
		self._lastActive = None
	# restore each selected item to its block color (or default)
	for i in self.find_withtag("sel"):
		bid,lid = self._items[i]
		if bid:
			try:
				block = self.gcode[bid]
				if block.color:
					fill = block.color
				else:
					fill = ENABLE_COLOR
			except IndexError:
				# stale item referring to a removed block
				fill = ENABLE_COLOR
		else:
			fill = ENABLE_COLOR
		self.itemconfig(i, width=1, fill=fill)
	self.itemconfig("sel2", width=1, fill=DISABLE_COLOR)
	self.itemconfig("sel3", width=1, fill=TAB_COLOR)
	self.itemconfig("sel4", width=1, fill=DISABLE_COLOR)
	for i in SELECTION_TAGS: self.dtag(i)
	self.delete("info")
#----------------------------------------------------------------------
# Highlight selected items
#----------------------------------------------------------------------
def select(self, items):
	"""Highlight *items*, a list of (block, item) pairs where item is
	None for a whole block, an int for a single path segment, or a
	Tab.  Enabled blocks use the "sel"/"sel3" tags, disabled ones
	"sel2"/"sel4"."""
	for b, i in items:
		block = self.gcode[b]
		if i is None:
			# whole block: tag every path segment and every tab
			sel = block.enable and "sel" or "sel2"
			for path in block._path:
				if path is not None:
					self.addtag_withtag(sel, path)
			sel = block.enable and "sel3" or "sel4"
			for tab in block.tabs:
				path = tab.path
				if path is not None:
					self.addtag_withtag(sel, tab.path)
		elif isinstance(i,int):
			# single g-code line
			path = block.path(i)
			if path:
				sel = block.enable and "sel" or "sel2"
				self.addtag_withtag(sel, path)
		elif isinstance(i,Tab):
			# single tab
			path = i.path
			if path:
				sel = block.enable and "sel3" or "sel4"
				self.addtag_withtag(sel, path)
	self.itemconfig("sel", width=2, fill=SELECT_COLOR)
	self.itemconfig("sel2", width=2, fill=SELECT2_COLOR)
	self.itemconfig("sel3", width=2, fill=TAB_COLOR)
	self.itemconfig("sel4", width=2, fill=TABS_COLOR)
	for i in SELECTION_TAGS: self.tag_raise(i)
	self.drawMargin()
#----------------------------------------------------------------------
# Select orientation marker
#----------------------------------------------------------------------
def selectMarker(self, item):
	"""Select the orientation marker whose drawing contains canvas
	item *item* and fire the <<OrientSelect>> virtual event."""
	for idx, paths in enumerate(self.gcode.orient.paths):
		if item not in paths:
			continue
		self._orientSelected = idx
		for p in paths:
			self.itemconfig(p, width=2)
		self.event_generate("<<OrientSelect>>", data=idx)
		return
	# item belongs to no marker
	self._orientSelected = None
#----------------------------------------------------------------------
# Highlight marker that was selected
#----------------------------------------------------------------------
def orientChange(self, marker):
	"""Highlight orientation marker *marker*; a negative index
	clears the selection."""
	self.itemconfig("Orient", width=1)	# thin every marker first
	if marker < 0:
		self._orientSelected = None
		return
	self._orientSelected = marker
	try:
		for path in self.gcode.orient.paths[marker]:
			self.itemconfig(path, width=2)
	except IndexError:
		# marker not drawn yet: rebuild the orientation graphics
		self.drawOrient()
#----------------------------------------------------------------------
# Display graphical information on selected blocks
#----------------------------------------------------------------------
def showInfo(self, blocks):
	"""Overlay graphical information for every block id in *blocks*:
	its bounding box plus a circular arrow showing the cutting
	direction and whether the path is open or closed."""
	self.delete("info")	# clear any previous information
	for bid in blocks:
		block = self.gcode.blocks[bid]
		# bounding box outline
		xyz = [(block.xmin, block.ymin, 0.),
		       (block.xmax, block.ymin, 0.),
		       (block.xmax, block.ymax, 0.),
		       (block.xmin, block.ymax, 0.),
		       (block.xmin, block.ymin, 0.)]
		self.create_line(self.plotCoords(xyz),
				fill=INFO_COLOR,
				tag="info")
		# largest circle that fits inside the bounding box
		xc = (block.xmin + block.xmax)/2.0
		yc = (block.ymin + block.ymax)/2.0
		r = min(block.xmax-xc, block.ymax-yc)
		closed, direction = self.gcode.info(bid)
		if closed==0:	# open path: leave a quarter-turn gap
			if direction==1:
				sf = math.pi/4.0
				ef = 2.0*math.pi - sf
			else:
				ef = math.pi/4.0
				sf = 2.0*math.pi - ef
		elif closed==1:	# closed path: full circle
			if direction==1:
				sf = 0.
				ef = 2.0*math.pi
			else:
				ef = 0.
				sf = 2.0*math.pi
		elif closed is None:
			# no information available for this block
			continue
		# sample the arc into n straight segments
		n = 64
		df = (ef-sf)/float(n)
		xyz = []
		f = sf
		for i in range(n+1):
			xyz.append((xc+r*math.sin(f), yc+r*math.cos(f), 0.))	# towards up
			f += df
		self.create_line(self.plotCoords(xyz),
				fill=INFO_COLOR,
				width=5,
				arrow=LAST,
				arrowshape=(32,40,12),
				tag="info")
#-----------------------------------------------------------------------
def cameraOn(self, event=None):
	"""Start the camera; on success begin the refresh loop."""
	if not self.camera.start():
		return
	self.cameraRefresh()
#-----------------------------------------------------------------------
def cameraOff(self, event=None):
	"""Stop the camera, cancel the refresh timer and remove every
	camera-related canvas item."""
	for item in (self._cameraImage, self._cameraHori, self._cameraVert,
		     self._cameraCircle, self._cameraCircle2):
		self.delete(item)
	self._cameraImage = None
	if self._cameraAfter:
		self.after_cancel(self._cameraAfter)
		self._cameraAfter = None
	self.camera.stop()
#-----------------------------------------------------------------------
def cameraUpdate(self):
	"""Force an immediate camera frame refresh and reposition."""
	if not self.camera.isOn():
		return
	if self._cameraAfter:
		# drop the pending timer; cameraRefresh() re-arms it
		self.after_cancel(self._cameraAfter)
		self._cameraAfter = None
	self.cameraRefresh()
	self.cameraPosition()
#-----------------------------------------------------------------------
def cameraRefresh(self):
	"""Grab a camera frame, display it below the canvas items with a
	cross-hair overlay, and re-arm a 100 ms refresh timer."""
	if not self.camera.read():
		# camera failed: tear down all camera graphics
		self.cameraOff()
		return
	self.camera.rotation = self.cameraRotation
	self.camera.xcenter = self.cameraXCenter
	self.camera.ycenter = self.cameraYCenter
	if self.cameraEdge: self.camera.canny(50,200)
	if self.cameraAnchor==NONE or self.zoom/self.cameraScale>1.0:
		self.camera.resize(self.zoom/self.cameraScale, self._cameraMaxWidth, self._cameraMaxHeight)
	if self._cameraImage is None:
		# first frame: create the image item and the cross hair
		self._cameraImage = self.create_image((0,0), tag="CameraImage")
		self.lower(self._cameraImage)
		# create cross hair at dummy location we will correct later
		self._cameraHori = self.create_line(0,0,1,0, fill=CAMERA_COLOR, tag="CrossHair")
		self._cameraVert = self.create_line(0,0,0,1, fill=CAMERA_COLOR, tag="CrossHair")
		self._cameraCircle = self.create_oval(0,0, 1,1, outline=CAMERA_COLOR, tag="CrossHair")
		self._cameraCircle2 = self.create_oval(0,0, 1,1, outline=CAMERA_COLOR,
					dash=(3,3), tag="CrossHair")
		self.cameraPosition()
	try:
		self.itemconfig(self._cameraImage, image=self.camera.toTk())
	except:
		# toTk() may fail on a partial frame; keep the old image
		pass
	self._cameraAfter = self.after(100, self.cameraRefresh);
#-----------------------------------------------------------------------
def cameraFreeze(self, freeze):
	"""Freeze/unfreeze the live image; a no-op when the camera is off."""
	if self.camera.isOn():
		self.camera.freeze(freeze)
#-----------------------------------------------------------------------
def cameraSave(self, event=None):
	"""Save the current camera frame as cameraNN.png using an
	auto-incrementing counter."""
	try:
		self._count += 1
	except AttributeError:
		# first save: the counter attribute does not exist yet
		# (narrowed from a bare except that hid real errors)
		self._count = 1
	self.camera.save("camera%02d.png"%(self._count))
# ----------------------------------------------------------------------
# Reposition camera and crosshair
# ----------------------------------------------------------------------
def cameraPosition(self):
	"""Place the camera image and its cross-hair: either following
	the gantry position (anchor NONE) or glued to a window
	edge/corner/center."""
	if self._cameraImage is None: return
	w = self.winfo_width()
	h = self.winfo_height()
	# half sizes of the camera frame (image.shape is rows, cols)
	hc,wc = self.camera.image.shape[:2]
	wc //= 2
	hc //= 2
	x = w//2	# everything on center
	y = h//2
	if self.cameraAnchor == NONE:
		# follow the gantry, with the configured camera offset
		if self._lastGantry is not None:
			x,y = self.plotCoords([self._lastGantry])[0]
		else:
			x = y = 0
		if not self.cameraSwitch:
			x += self.cameraDx * self.zoom
			y -= self.cameraDy * self.zoom
		r = self.cameraR * self.zoom
	else:
		# glue to the requested window edge or corner
		if self.cameraAnchor != CENTER:
			if N in self.cameraAnchor:
				y = hc
			elif S in self.cameraAnchor:
				y = h-hc
			if W in self.cameraAnchor:
				x = wc
			elif E in self.cameraAnchor:
				x = w-wc
		x = self.canvasx(x)
		y = self.canvasy(y)
		if self.zoom/self.cameraScale>1.0:
			r = self.cameraR * self.zoom
		else:
			r = self.cameraR * self.cameraScale
	self.coords(self._cameraImage, x, y)
	self.coords(self._cameraHori, x-wc, y, x+wc, y)
	self.coords(self._cameraVert, x, y-hc, x, y+hc)
	self.coords(self._cameraCircle, x-r, y-r, x+r, y+r)
	self.coords(self._cameraCircle2, x-r*2, y-r*2, x+r*2, y+r*2)
# ----------------------------------------------------------------------
# Crop center of camera and search it in subsequent movements
# ----------------------------------------------------------------------
def cameraMakeTemplate(self, r):
	"""Crop a radius-*r* template from the camera center for later
	template matching; requires an active camera image."""
	if self._cameraImage is None:
		return
	self._template = self.camera.getCenterTemplate(r)
# ----------------------------------------------------------------------
def cameraMatchTemplate(self):
	"""Search the stored template in the current camera frame and
	return the match result."""
	return self.camera.matchTemplate(self._template)
#----------------------------------------------------------------------
# Parse and draw the file from the editor to g-code commands
#----------------------------------------------------------------------
def draw(self, view=None): #, lines):
	"""Parse the g-code and redraw everything (paths, grid, margins,
	work area, probe, orientation markers, axes), keeping the world
	point currently at the viewport center fixed.  Re-entrancy is
	blocked via self._inDraw."""
	if self._inDraw : return
	self._inDraw = True
	self.__tzoom = 1.0
	# remember the world coordinates of the viewport center
	xyz = self.canvas2xyz(
			self.canvasx(self.winfo_width()/2),
			self.canvasy(self.winfo_height()/2))
	if view is not None: self.view = view
	self._last = (0.,0.,0.)
	self.initPosition()
	self.drawPaths()
	self.drawGrid()
	self.drawMargin()
	self.drawWorkarea()
	self.drawProbe()
	self.drawOrient()
	self.drawAxes()
	if self._gantry1: self.tag_raise(self._gantry1)
	if self._gantry2: self.tag_raise(self._gantry2)
	self._updateScrollBars()
	# scroll so the remembered world point is centered again
	ij = self.plotCoords([xyz])[0]
	dx = int(round(self.canvasx(self.winfo_width()/2) - ij[0]))
	dy = int(round(self.canvasy(self.winfo_height()/2) - ij[1]))
	self.scan_mark(0,0)
	self.scan_dragto(int(round(dx)), int(round(dy)), 1)
	self._inDraw = False
#----------------------------------------------------------------------
# Initialize gantry position
#----------------------------------------------------------------------
def initPosition(self):
	"""Reset the canvas: wipe every item, rebuild the gantry marker
	appropriate for the current view and clear cached state."""
	self.configure(background=CANVAS_COLOR)
	self.delete(ALL)
	self._cameraImage = None
	# gantry radius: half the tool diameter in pixels, minimum 3
	gr = max(3,int(CNC.vars["diameter"]/2.0*self.zoom))
	if self.view == VIEW_XY:
		# top view: a plain circle
		self._gantry1 = self.create_oval( (-gr,-gr), ( gr, gr),
					width=2,
					outline=GANTRY_COLOR)
		self._gantry2 = None
	else:
		gx = gr
		gy = gr//2
		gh = 3*gr
		if self.view in (VIEW_XZ, VIEW_YZ):
			# side views: a triangle pointing at the position
			self._gantry1 = None
			self._gantry2 = self.create_line((-gx,-gh, 0,0, gx,-gh, -gx,-gh),
						width=2,
						fill=GANTRY_COLOR)
		else:
			# isometric views: ellipse head plus a V body
			self._gantry1 = self.create_oval((-gx,-gh-gy, gx,-gh+gy),
						width=2,
						outline=GANTRY_COLOR)
			self._gantry2 = self.create_line((-gx, -gh, 0, 0, gx, -gh),
						width=2,
						fill=GANTRY_COLOR)
	self._lastInsert = None
	self._lastActive = None
	self._select = None
	self._vector = None
	self._items.clear()
	self.cnc.initPath()
	self.cnc.resetAllMargins()
#----------------------------------------------------------------------
# Draw gantry location
#----------------------------------------------------------------------
def _drawGantry(self, x, y):
	"""Move the gantry marker(s) to canvas position (x, y)."""
	# radius follows the tool diameter and the current zoom
	gr = max(3,int(CNC.vars["diameter"]/2.0*self.zoom))
	if self._gantry2 is None:
		# top view: circle only
		self.coords(self._gantry1, (x-gr, y-gr, x+gr, y+gr))
	else:
		gx = gr
		gy = gr//2
		gh = 3*gr
		if self._gantry1 is None:
			# side view: triangle only
			self.coords(self._gantry2, (x-gx,y-gh, x,y, x+gx,y-gh, x-gx,y-gh))
		else:
			# isometric: ellipse head plus V body
			self.coords(self._gantry1, (x-gx, y-gh-gy, x+gx, y-gh+gy))
			self.coords(self._gantry2, (x-gx, y-gh, x, y, x+gx, y-gh))
#----------------------------------------------------------------------
# Draw system axes
#----------------------------------------------------------------------
def drawAxes(self):
	"""Draw the X (red), Y (green), Z (blue) axis arrows; the arrow
	length is the largest power of 10 below the drawing size."""
	self.delete("Axes")
	if not self.draw_axes: return
	dx = CNC.vars["axmax"] - CNC.vars["axmin"]
	dy = CNC.vars["aymax"] - CNC.vars["aymin"]
	d = min(dx,dy)
	try:
		s = math.pow(10.0, int(math.log10(d)))
	except:
		# log10 fails for d<=0: fall back to a unit-dependent default
		if CNC.inch:
			s = 10.0
		else:
			s = 100.0
	xyz = [(0.,0.,0.), (s, 0., 0.)]
	self.create_line(self.plotCoords(xyz), tag="Axes", fill="Red", dash=(3,1), arrow=LAST)
	xyz = [(0.,0.,0.), (0., s, 0.)]
	self.create_line(self.plotCoords(xyz), tag="Axes", fill="Green", dash=(3,1), arrow=LAST)
	xyz = [(0.,0.,0.), (0., 0., s)]
	self.create_line(self.plotCoords(xyz), tag="Axes", fill="Blue", dash=(3,1), arrow=LAST)
#----------------------------------------------------------------------
# Draw margins of selected blocks
#----------------------------------------------------------------------
def drawMargin(self):
	"""Draw the margin rectangles: solid for the current selection
	margins, dashed for the margins of all blocks."""
	if self._margin: self.delete(self._margin)
	if self._amargin: self.delete(self._amargin)
	self._margin = self._amargin = None
	if not self.draw_margin: return
	if CNC.isMarginValid():
		xyz = [(CNC.vars["xmin"], CNC.vars["ymin"], 0.),
		       (CNC.vars["xmax"], CNC.vars["ymin"], 0.),
		       (CNC.vars["xmax"], CNC.vars["ymax"], 0.),
		       (CNC.vars["xmin"], CNC.vars["ymax"], 0.),
		       (CNC.vars["xmin"], CNC.vars["ymin"], 0.)]
		self._margin = self.create_line(
					self.plotCoords(xyz),
					fill=MARGIN_COLOR)
		self.tag_lower(self._margin)
	if not CNC.isAllMarginValid(): return
	xyz = [(CNC.vars["axmin"], CNC.vars["aymin"], 0.),
	       (CNC.vars["axmax"], CNC.vars["aymin"], 0.),
	       (CNC.vars["axmax"], CNC.vars["aymax"], 0.),
	       (CNC.vars["axmin"], CNC.vars["aymax"], 0.),
	       (CNC.vars["axmin"], CNC.vars["aymin"], 0.)]
	self._amargin = self.create_line(
				self.plotCoords(xyz),
				dash=(3,2),
				fill=MARGIN_COLOR)
	self.tag_lower(self._amargin)
#----------------------------------------------------------------------
# Change rectangle coordinates
#----------------------------------------------------------------------
def _rectCoords(self, rect, xmin, ymin, xmax, ymax, z=0.0):
	"""Move canvas item *rect* onto the projected outline of the
	axis-aligned rectangle (xmin,ymin)-(xmax,ymax) at height *z*."""
	corners = [(xmin, ymin, z),
		   (xmax, ymin, z),
		   (xmax, ymax, z),
		   (xmin, ymax, z),
		   (xmin, ymin, z)]
	self.coords(rect, Tkinter._flatten(self.plotCoords(corners)))
#----------------------------------------------------------------------
# Draw a 3D path
#----------------------------------------------------------------------
def _drawPath(self, path, z=0.0, **kwargs):
	"""Draw a 2D Path (sequence of segments with .A/.B endpoints) at
	height *z* and return the created item.

	NOTE: the trailing comma after create_line(...) makes this
	return a 1-tuple (item,) rather than a bare item id.  Callers
	(e.g. drawPaths) index it with item[0], so the tuple return
	must be preserved."""
	xyz = []
	for segment in path:
		xyz.append((segment.A[0], segment.A[1], z))
		xyz.append((segment.B[0], segment.B[1], z))
	rect = self.create_line(
			self.plotCoords(xyz),
			**kwargs),
	return rect
#----------------------------------------------------------------------
# Draw a 3D rectangle
#----------------------------------------------------------------------
def _drawRect(self, xmin, ymin, xmax, ymax, z=0.0, **kwargs):
	"""Draw the outline of an axis-aligned rectangle at height *z*.

	NOTE: the trailing comma after create_line(...) makes this
	return a 1-tuple (item,); existing callers depend on it, so it
	is intentionally left as-is."""
	xyz = [(xmin, ymin, z),
	       (xmax, ymin, z),
	       (xmax, ymax, z),
	       (xmin, ymax, z),
	       (xmin, ymin, z)]
	rect = self.create_line(
			self.plotCoords(xyz),
			**kwargs),
	return rect
#----------------------------------------------------------------------
# Draw a workspace rectangle
#----------------------------------------------------------------------
def drawWorkarea(self):
	"""(Re)draw the machine work-area rectangle as a dashed outline
	derived from the work/machine offset and the travel limits."""
	if self._workarea:
		self.delete(self._workarea)
	if not self.draw_workarea:
		return
	xmax = self._dx
	ymax = self._dy
	zmax = self._dz
	xmin = xmax - CNC.travel_x
	ymin = ymax - CNC.travel_y
	zmin = zmax - CNC.travel_z
	self._workarea = self._drawRect(xmin, ymin, xmax, ymax, 0.,
				fill=WORK_COLOR, dash=(3,2))
	self.tag_lower(self._workarea)
#----------------------------------------------------------------------
# Draw coordinates grid
#----------------------------------------------------------------------
def drawGrid(self):
	"""Draw a dotted 10-unit grid covering the overall margins, only
	in views that show the XY plane."""
	self.delete("Grid")
	if not self.draw_grid: return
	if self.view in (VIEW_XY, VIEW_ISO1, VIEW_ISO2, VIEW_ISO3):
		# snap the limits outward to multiples of 10
		xmin = (CNC.vars["axmin"]//10) *10
		xmax = (CNC.vars["axmax"]//10+1)*10
		ymin = (CNC.vars["aymin"]//10) *10
		ymax = (CNC.vars["aymax"]//10+1)*10
		# horizontal lines
		for i in range(int(CNC.vars["aymin"]//10), int(CNC.vars["aymax"]//10)+2):
			y = i*10.0
			xyz = [(xmin,y,0), (xmax,y,0)]
			item = self.create_line(self.plotCoords(xyz),
						tag="Grid",
						fill=GRID_COLOR,
						dash=(1,3))
			self.tag_lower(item)
		# vertical lines
		for i in range(int(CNC.vars["axmin"]//10), int(CNC.vars["axmax"]//10)+2):
			x = i*10.0
			xyz = [(x,ymin,0), (x,ymax,0)]
			item = self.create_line(self.plotCoords(xyz),
						fill=GRID_COLOR,
						tag="Grid",
						dash=(1,3))
			self.tag_lower(item)
#----------------------------------------------------------------------
# Display orientation markers
#----------------------------------------------------------------------
def drawOrient(self, event=None):
	"""Draw the orientation markers: for each (machine, gcode) point
	pair a green cross (machine), a red cross (gcode), error circles
	when available, and a dashed blue connecting line."""
	self.delete("Orient")
	if self.view in (VIEW_XZ, VIEW_YZ): return
	# cross half-size in drawing units
	if CNC.inch:
		w = 0.1
	else:
		w = 2.5
	self.gcode.orient.clearPaths()
	for i,(xm,ym,x,y) in enumerate(self.gcode.orient.markers):
		paths = []
		# Machine position (cross)
		item = self.create_line(self.plotCoords([(xm-w,ym,0.),(xm+w,ym,0.)]),
					tag="Orient",
					fill="Green")
		self.tag_lower(item)
		paths.append(item)
		item = self.create_line(self.plotCoords([(xm,ym-w,0.),(xm,ym+w,0.)]),
					tag="Orient",
					fill="Green")
		self.tag_lower(item)
		paths.append(item)
		# GCode position (cross)
		item = self.create_line(self.plotCoords([(x-w,y,0.),(x+w,y,0.)]),
					tag="Orient",
					fill="Red")
		self.tag_lower(item)
		paths.append(item)
		item = self.create_line(self.plotCoords([(x,y-w,0.),(x,y+w,0.)]),
					tag="Orient",
					fill="Red")
		self.tag_lower(item)
		paths.append(item)
		# Draw error circles around both points, if an error exists
		try:
			err = self.gcode.orient.errors[i]
			item = self.create_oval(self.plotCoords([(xm-err,ym-err,0.),(xm+err,ym+err,0.)]),
						tag="Orient",
						outline="Red")
			self.tag_lower(item)
			paths.append(item)
			err = self.gcode.orient.errors[i]
			item = self.create_oval(self.plotCoords([(x-err,y-err,0.),(x+err,y+err,0.)]),
						tag="Orient",
						outline="Red")
			self.tag_lower(item)
			paths.append(item)
		except IndexError:
			pass
		# Connecting line between machine and gcode position
		item = self.create_line(self.plotCoords([(xm,ym,0.),(x,y,0.)]),
					tag="Orient",
					fill="Blue",
					dash=(1,1))
		self.tag_lower(item)
		paths.append(item)
		self.gcode.orient.addPath(paths)
	# re-apply the highlight on the previously selected marker
	if self._orientSelected is not None:
		try:
			for item in self.gcode.orient.paths[self._orientSelected]:
				self.itemconfig(item, width=2)
		except (IndexError, TclError):
			pass
#----------------------------------------------------------------------
# Display probe
#----------------------------------------------------------------------
def drawProbe(self):
	"""Draw the probe grid, the measured z values as text and, when
	numpy is available, a blue/white/red height-map image of the
	autolevel matrix."""
	self.delete("Probe")
	if self._probe:
		self.delete(self._probe)
		self._probe = None
	if not self.draw_probe: return
	if self.view in (VIEW_XZ, VIEW_YZ): return
	# Draw probe grid
	probe = self.gcode.probe
	for x in bmath.frange(probe.xmin, probe.xmax+0.00001, probe.xstep()):
		xyz = [(x,probe.ymin,0.), (x,probe.ymax,0.)]
		item = self.create_line(self.plotCoords(xyz),
					tag="Probe",
					fill='Yellow')
		self.tag_lower(item)
	for y in bmath.frange(probe.ymin, probe.ymax+0.00001, probe.ystep()):
		xyz = [(probe.xmin,y,0.), (probe.xmax,y,0.)]
		item = self.create_line(self.plotCoords(xyz),
					tag="Probe",
					fill='Yellow')
		self.tag_lower(item)
	# Draw probe points: the measured z value as text
	for i,uv in enumerate(self.plotCoords(probe.points)):
		item = self.create_text(uv,
					text="%.*f"%(CNC.digits,probe.points[i][2]),
					tag="Probe",
					justify=CENTER,
					fill=PROBE_TEXT_COLOR)
		self.tag_lower(item)
	# Draw image map if numpy exists
	if numpy is not None and probe.matrix and self.view in (VIEW_XY, VIEW_ISO1, VIEW_ISO2, VIEW_ISO3):
		array = numpy.array(list(reversed(probe.matrix)), numpy.float32)
		lw = array.min()
		hg = array.max()
		mx = max(abs(hg),abs(lw))
		# Build a diverging palette over the symmetric range:
		#    -mx .. 0 .. mx  ->  light-blue .. white .. light-red
		dc = mx/127.	# height step per color level
		if abs(dc)<1e-8: return
		palette = []
		for x in bmath.frange(lw, hg+1e-10, (hg-lw)/255.):
			i = int(math.floor(x / dc))
			# BUGFIX: was "j = i + i>>1" which parses as (i+i)>>1 == i
			# because "+" binds tighter than ">>"; the intended value
			# (per the original comment) is 1.5*i.
			j = i + (i>>1)	# 1.5*i
			if i<0:
				palette.append(0xff+j)
				palette.append(0xff+j)
				palette.append(0xff)
			elif i>0:
				palette.append(0xff)
				palette.append(0xff-j)
				palette.append(0xff-j)
			else:
				palette.append(0xff)
				palette.append(0xff)
				palette.append(0xff)
		# normalize the matrix to 0..255 grey levels, then colorize
		array = numpy.floor((array-lw)/(hg-lw)*255)
		self._probeImage = Image.fromarray(array.astype(numpy.int16)).convert('L')
		self._probeImage.putpalette(palette)
		# Add transparency for a possible composite operation later on ISO*
		self._probeImage = self._probeImage.convert("RGBA")
		x,y = self._projectProbeImage()
		self._probe = self.create_image(x,y, image=self._probeTkImage, anchor='sw')
		self.tag_lower(self._probe)
#----------------------------------------------------------------------
# Create the tkimage for the current projection
#----------------------------------------------------------------------
def _projectProbeImage(self):
	"""Resize (and for ISO views, shear with an affine transform) the
	probe height-map image to match the current zoom/projection.
	Stores the result in self._probeTkImage and returns the canvas
	(x, y) anchor point for the image item."""
	probe = self.gcode.probe
	# full pixel size of the probed area (one extra step of margin)
	size = (int((probe.xmax-probe.xmin + probe._xstep)*self.zoom),
		int((probe.ymax-probe.ymin + probe._ystep)*self.zoom))
	marginx = int(probe._xstep/2. * self.zoom)
	marginy = int(probe._ystep/2. * self.zoom)
	crop = (marginx, marginy, size[0]-marginx, size[1]-marginy)
	image = self._probeImage.resize((size), resample=RESAMPLE).crop(crop)
	if self.view in (VIEW_ISO1, VIEW_ISO2, VIEW_ISO3):
		# shear the image onto the isometric plane
		w, h = image.size
		size2 = (int(S60*(w+h)),
			 int(C60*(w+h)))
		if self.view == VIEW_ISO1:
			transform = ( 0.5/S60, 0.5/C60, -h/2,
				     -0.5/S60, 0.5/C60, h/2)
			xy = self.plotCoords([(probe.xmin, probe.ymin, 0.),
					      (probe.xmax, probe.ymin, 0.)])
			x = xy[0][0]
			y = xy[1][1]
		elif self.view == VIEW_ISO2:
			transform = ( 0.5/S60,-0.5/C60, w/2,
				      0.5/S60, 0.5/C60, -w/2)
			xy = self.plotCoords([(probe.xmin, probe.ymax, 0.),
					      (probe.xmin, probe.ymin, 0.)])
			x = xy[0][0]
			y = xy[1][1]
		else:
			transform = (-0.5/S60,-0.5/C60, w+h/2,
				      0.5/S60,-0.5/C60, h/2)
			xy = self.plotCoords([(probe.xmax, probe.ymax, 0.),
					      (probe.xmin, probe.ymax, 0.)])
			x = xy[0][0]
			y = xy[1][1]
		affine = image.transform(size2, Image.AFFINE,
				transform,
				resample=RESAMPLE)
		# Super impose a white image
		white = Image.new('RGBA', affine.size, (255,)*4)
		# compose the two images affine and white with mask the affine
		image = Image.composite(affine, white, affine)
		del white
	else:
		x,y = self.plotCoords([(probe.xmin, probe.ymin, 0.)])[0]
	self._probeTkImage = ImageTk.PhotoImage(image)
	return x,y
#----------------------------------------------------------------------
# Draw the paths for the whole gcode file
#----------------------------------------------------------------------
def drawPaths(self):
	"""Parse every g-code block and create its canvas paths (tabs
	first, then motion segments).  Rendering is aborted with a
	status message when it exceeds DRAW_TIME seconds."""
	if not self.draw_paths:
		for block in self.gcode.blocks:
			block.resetPath()
		return
	try:
		n = 1
		startTime = before = time.time()
		self.cnc.resetAllMargins()
		drawG = self.draw_rapid or self.draw_paths or self.draw_margin
		bid = self.app.editor.getSelectedBlocks()
		for i,block in enumerate(self.gcode.blocks):
			if i in bid: selected=True
			else: selected = False
			start = True	# start location found
			block.resetPath()
			# Draw block tabs
			if self.draw_paths:
				for tab in block.tabs:
					# Set width and color for tabs
					#FIXME: For some reason the width/color updates only when i manualy click redraw button
					if block.enable:
						width = 2
						if selected:
							color = TABS_COLOR
						else:
							color = TAB_COLOR
					else:
						width = 1
						color = DISABLE_COLOR
					# _drawPath returns a 1-tuple; hence item[0]
					item = self._drawPath(tab.path, 0., fill=color, width=width)
					self._items[item[0]] = i,tab
					self.tag_lower(item)
			# Draw block
			for j,line in enumerate(block):
				n -= 1
				if n==0:
					# every 1000 lines: check the time budget
					if time.time() - startTime > DRAW_TIME:
						raise AlarmException()
					# Force a periodic update since this loop can take time
					if time.time() - before > 1.0:
						self.update()
						before = time.time()
					n = 1000
				try:
					cmd = self.gcode.evaluate(CNC.compileLine(line))
					if isinstance(cmd,tuple):
						cmd = None
					else:
						cmd = CNC.breakLine(cmd)
				except AlarmException:
					raise
				except:
					sys.stderr.write(_(">>> ERROR: %s\n")%(str(sys.exc_info()[1])))
					sys.stderr.write(_(" line: %s\n")%(line))
					cmd = None
				if cmd is None or not drawG:
					block.addPath(None)
				else:
					path = self.drawPath(block, cmd)
					self._items[path] = i,j
					block.addPath(path)
					if start and self.cnc.gcode in (1,2,3):
						# Mark as start the first non-rapid motion
						block.startPath(self.cnc.x, self.cnc.y, self.cnc.z)
						start = False
			block.endPath(self.cnc.x, self.cnc.y, self.cnc.z)
	except AlarmException:
		self.status("Rendering takes TOO Long. Interrupted...")
#----------------------------------------------------------------------
# Create path for one g command
#----------------------------------------------------------------------
def drawPath(self, block, cmds):
	"""Create the canvas line for one motion command and return its
	item id, or None when nothing is drawn (rapids dashed, cutting
	moves solid)."""
	self.cnc.motionStart(cmds)
	xyz = self.cnc.motionPath()
	self.cnc.motionEnd()
	if xyz:
		self.cnc.pathLength(block, xyz)
		if self.cnc.gcode in (1,2,3):
			block.pathMargins(xyz)
			self.cnc.pathMargins(block)
		if block.enable:
			if self.cnc.gcode == 0 and self.draw_rapid:
				# rapids are drawn from the previous end point
				xyz[0] = self._last
			self._last = xyz[-1]
		else:
			# disabled blocks never show rapids
			if self.cnc.gcode == 0:
				return None
		coords = self.plotCoords(xyz)
		if coords:
			if block.enable:
				if block.color:
					fill = block.color
				else:
					fill = ENABLE_COLOR
			else:
				fill = DISABLE_COLOR
			if self.cnc.gcode == 0:
				if self.draw_rapid:
					return self.create_line(coords,
						fill=fill, width=0, dash=(4,3))
			elif self.draw_paths:
				return self.create_line(coords, fill=fill,
						width=0, cap="projecting")
	return None
#----------------------------------------------------------------------
# Return plotting coordinates for a 3d xyz path
#
# NOTE: Use the Tkinter._flatten() to pass to self.coords() function
#----------------------------------------------------------------------
def plotCoords(self, xyz):
	"""Project a list of world (x, y, z) points onto 2D canvas
	coordinates for the current view, clamping each coordinate to
	+/-MAXDIST (Tk cannot handle huge canvas coordinates)."""
	coords = None
	zm = self.zoom
	if self.view == VIEW_XY:
		coords = [(p[0]*zm, -p[1]*zm) for p in xyz]
	elif self.view == VIEW_XZ:
		coords = [(p[0]*zm, -p[2]*zm) for p in xyz]
	elif self.view == VIEW_YZ:
		coords = [(p[1]*zm, -p[2]*zm) for p in xyz]
	elif self.view == VIEW_ISO1:
		coords = [(( p[0]*S60 + p[1]*S60)*zm,
			   (+p[0]*C60 - p[1]*C60 - p[2])*zm)
			  for p in xyz]
	elif self.view == VIEW_ISO2:
		coords = [(( p[0]*S60 - p[1]*S60)*zm,
			   (-p[0]*C60 - p[1]*C60 - p[2])*zm)
			  for p in xyz]
	elif self.view == VIEW_ISO3:
		coords = [((-p[0]*S60 - p[1]*S60)*zm,
			   (-p[0]*C60 + p[1]*C60 - p[2])*zm)
			  for p in xyz]
	# Clamp out-of-range points in place
	for idx, (x, y) in enumerate(coords):
		if abs(x) > MAXDIST or abs(y) > MAXDIST:
			x = max(-MAXDIST, min(MAXDIST, x))
			y = max(-MAXDIST, min(MAXDIST, y))
			coords[idx] = (x, y)
	return coords
#----------------------------------------------------------------------
# Canvas to real coordinates
#----------------------------------------------------------------------
def canvas2xyz(self, i, j):
	"""Inverse of plotCoords(): map a canvas point (i, j) back to a
	world (x, y, z) triple for the current view.  The axis not
	visible in the view (and z in the iso views) is returned as 0."""
	zm = self.zoom
	if self.view == VIEW_XY:
		x, y, z = i/zm, -j/zm, 0
	elif self.view == VIEW_XZ:
		x, y, z = i/zm, 0, -j/zm
	elif self.view == VIEW_YZ:
		x, y, z = 0, i/zm, -j/zm
	elif self.view == VIEW_ISO1:
		x = (i/S60 + j/C60) / zm / 2
		y = (i/S60 - j/C60) / zm / 2
		z = 0
	elif self.view == VIEW_ISO2:
		x = (i/S60 - j/C60) / zm / 2
		y = -(i/S60 + j/C60) / zm / 2
		z = 0
	elif self.view == VIEW_ISO3:
		x = -(i/S60 + j/C60) / zm / 2
		y = -(i/S60 - j/C60) / zm / 2
		z = 0
	return x, y, z
#==============================================================================
# Canvas Frame with toolbar
#==============================================================================
class CanvasFrame(Frame):
def __init__(self, master, app, *kw, **kwargs):
	"""Build the canvas frame: a toolbar on top and a CNCCanvas with
	vertical/horizontal scrollbars below it."""
	Frame.__init__(self, master, *kw, **kwargs)
	self.app = app
	# draw-option flags, persisted via loadConfig/saveConfig
	self.draw_axes = BooleanVar()
	self.draw_grid = BooleanVar()
	self.draw_margin = BooleanVar()
	self.draw_probe = BooleanVar()
	self.draw_paths = BooleanVar()
	self.draw_rapid = BooleanVar()
	self.draw_workarea=BooleanVar()
	self.draw_camera = BooleanVar()
	self.view = StringVar()
	self.loadConfig()
	self.view.trace('w', self.viewChange)
	toolbar = Frame(self, relief=RAISED)
	toolbar.grid(row=0, column=0, columnspan=2, sticky=EW)
	self.canvas = CNCCanvas(self, app, takefocus=True, background="White")
	self.canvas.grid(row=1, column=0, sticky=NSEW)
	sb = Scrollbar(self, orient=VERTICAL, command=self.canvas.yview)
	sb.grid(row=1, column=1, sticky=NS)
	self.canvas.config(yscrollcommand=sb.set)
	sb = Scrollbar(self, orient=HORIZONTAL, command=self.canvas.xview)
	sb.grid(row=2, column=0, sticky=EW)
	self.canvas.config(xscrollcommand=sb.set)
	self.createCanvasToolbar(toolbar)
	# let the canvas cell absorb all extra space
	self.grid_rowconfigure(1, weight=1)
	self.grid_columnconfigure(0, weight=1)
#----------------------------------------------------------------------
def addWidget(self, widget):
	"""Register *widget* with the application's widget list (used
	for global enable/disable handling)."""
	self.app.widgets.append(widget)
#----------------------------------------------------------------------
def loadConfig(self):
	"""Load the draw flags, the default view and all canvas colors
	from the user configuration, rebinding the module-level color
	globals."""
	global INSERT_COLOR, GANTRY_COLOR, MARGIN_COLOR, GRID_COLOR
	global BOX_SELECT, ENABLE_COLOR, DISABLE_COLOR, SELECT_COLOR
	global SELECT2_COLOR, PROCESS_COLOR, MOVE_COLOR, RULER_COLOR
	global CAMERA_COLOR, PROBE_TEXT_COLOR, CANVAS_COLOR
	global DRAW_TIME
	# draw-option flags
	self.draw_axes.set( bool(int(Utils.getBool("Canvas", "axes", True))))
	self.draw_grid.set( bool(int(Utils.getBool("Canvas", "grid", True))))
	self.draw_margin.set( bool(int(Utils.getBool("Canvas", "margin", True))))
	self.draw_paths.set( bool(int(Utils.getBool("Canvas", "paths", True))))
	self.draw_rapid.set( bool(int(Utils.getBool("Canvas", "rapid", True))))
	self.draw_workarea.set(bool(int(Utils.getBool("Canvas", "workarea",True))))
	self.view.set(Utils.getStr("Canvas", "view", VIEWS[0]))
	# rendering time budget and colors
	DRAW_TIME = Utils.getInt("Canvas", "drawtime", DRAW_TIME)
	INSERT_COLOR = Utils.getStr("Color", "canvas.insert", INSERT_COLOR)
	GANTRY_COLOR = Utils.getStr("Color", "canvas.gantry", GANTRY_COLOR)
	MARGIN_COLOR = Utils.getStr("Color", "canvas.margin", MARGIN_COLOR)
	GRID_COLOR = Utils.getStr("Color", "canvas.grid", GRID_COLOR)
	BOX_SELECT = Utils.getStr("Color", "canvas.selectbox",BOX_SELECT)
	ENABLE_COLOR = Utils.getStr("Color", "canvas.enable", ENABLE_COLOR)
	DISABLE_COLOR = Utils.getStr("Color", "canvas.disable",DISABLE_COLOR)
	SELECT_COLOR = Utils.getStr("Color", "canvas.select", SELECT_COLOR)
	SELECT2_COLOR = Utils.getStr("Color", "canvas.select2",SELECT2_COLOR)
	PROCESS_COLOR = Utils.getStr("Color", "canvas.process",PROCESS_COLOR)
	MOVE_COLOR = Utils.getStr("Color", "canvas.move", MOVE_COLOR)
	RULER_COLOR = Utils.getStr("Color", "canvas.ruler", RULER_COLOR)
	CAMERA_COLOR = Utils.getStr("Color", "canvas.camera", CAMERA_COLOR)
	PROBE_TEXT_COLOR = Utils.getStr("Color", "canvas.probetext", PROBE_TEXT_COLOR)
	CANVAS_COLOR = Utils.getStr("Color", "canvas.background", CANVAS_COLOR)
#----------------------------------------------------------------------
def saveConfig(self):
Utils.setInt( "Canvas", "drawtime",DRAW_TIME)
Utils.setStr( "Canvas", "view", self.view.get())
Utils.setBool("Canvas", "axes", self.draw_axes.get())
Utils.setBool("Canvas", "grid", self.draw_grid.get())
Utils.setBool("Canvas", "margin", self.draw_margin.get())
Utils.setBool("Canvas", "probe", self.draw_probe.get())
Utils.setBool("Canvas", "paths", self.draw_paths.get())
Utils.setBool("Canvas", "rapid", self.draw_rapid.get())
Utils.setBool("Canvas", "workarea",self.draw_workarea.get())
#Utils.setBool("Canvas", "camera", self.draw_camera.get())
#----------------------------------------------------------------------
# Canvas toolbar FIXME XXX should be moved to CNCCanvas
#----------------------------------------------------------------------
def createCanvasToolbar(self, toolbar):
b = OptionMenu(toolbar, self.view, *VIEWS)
b.config(padx=0, pady=1)
b.unbind("F10")
b.pack(side=LEFT)
tkExtra.Balloon.set(b, _("Change viewing angle"))
b = Button(toolbar, image=Utils.icons["zoom_in"],
command=self.canvas.menuZoomIn)
tkExtra.Balloon.set(b, _("Zoom In [Ctrl-=]"))
b.pack(side=LEFT)
b = Button(toolbar, image=Utils.icons["zoom_out"],
command=self.canvas.menuZoomOut)
tkExtra.Balloon.set(b, _("Zoom Out [Ctrl--]"))
b.pack(side=LEFT)
b = Button(toolbar, image=Utils.icons["zoom_on"],
command=self.canvas.fit2Screen)
tkExtra.Balloon.set(b, _("Fit to screen [F]"))
b.pack(side=LEFT)
Label(toolbar, text=_("Tool:"),
image=Utils.icons["sep"],
compound=LEFT).pack(side=LEFT, padx=2)
# -----
# Tools
# -----
b = Radiobutton(toolbar, image=Utils.icons["select"],
indicatoron=FALSE,
variable=self.canvas.actionVar,
value=ACTION_SELECT,
command=self.canvas.setActionSelect)
tkExtra.Balloon.set(b, _("Select tool [S]"))
self.addWidget(b)
b.pack(side=LEFT)
b = Radiobutton(toolbar, image=Utils.icons["pan"],
indicatoron=FALSE,
variable=self.canvas.actionVar,
value=ACTION_PAN,
command=self.canvas.setActionPan)
tkExtra.Balloon.set(b, _("Pan viewport [X]"))
b.pack(side=LEFT)
# b = Radiobutton(toolbar, image=Utils.icons["gantry"],
# indicatoron=FALSE,
# variable=self.canvas.actionVar,
# value=ACTION_GANTRY,
# command=self.canvas.setActionGantry)
# tkExtra.Balloon.set(b, _("Move gantry [g]"))
# self.addWidget(b)
# b.pack(side=LEFT)
#
# b = Radiobutton(toolbar, image=Utils.icons["origin"],
# indicatoron=FALSE,
# variable=self.canvas.actionVar,
# value=ACTION_WPOS,
# command=self.canvas.setActionWPOS)
# tkExtra.Balloon.set(b, _("Set WPOS to mouse location"))
# self.addWidget(b)
# b.pack(side=LEFT)
b = Radiobutton(toolbar, image=Utils.icons["ruler"],
indicatoron=FALSE,
variable=self.canvas.actionVar,
value=ACTION_RULER,
command=self.canvas.setActionRuler)
tkExtra.Balloon.set(b, _("Ruler [R]"))
b.pack(side=LEFT)
# -----------
# Draw flags
# -----------
Label(toolbar, text=_("Draw:"),
image=Utils.icons["sep"],
compound=LEFT).pack(side=LEFT, padx=2)
b = Checkbutton(toolbar,
image=Utils.icons["axes"],
indicatoron=False,
variable=self.draw_axes,
command=self.drawAxes)
tkExtra.Balloon.set(b, _("Toggle display of axes"))
b.pack(side=LEFT)
b = Checkbutton(toolbar,
image=Utils.icons["grid"],
indicatoron=False,
variable=self.draw_grid,
command=self.drawGrid)
tkExtra.Balloon.set(b, _("Toggle display of grid lines"))
b.pack(side=LEFT)
b = Checkbutton(toolbar,
image=Utils.icons["margins"],
indicatoron=False,
variable=self.draw_margin,
command=self.drawMargin)
tkExtra.Balloon.set(b, _("Toggle display of margins"))
b.pack(side=LEFT)
b = Checkbutton(toolbar,
text="P",
image=Utils.icons["measure"],
indicatoron=False,
variable=self.draw_probe,
command=self.drawProbe)
tkExtra.Balloon.set(b, _("Toggle display of probe"))
b.pack(side=LEFT)
b = Checkbutton(toolbar,
image=Utils.icons["endmill"],
indicatoron=False,
variable=self.draw_paths,
command=self.toggleDrawFlag)
tkExtra.Balloon.set(b, _("Toggle display of paths (G1,G2,G3)"))
b.pack(side=LEFT)
b = Checkbutton(toolbar,
image=Utils.icons["rapid"],
indicatoron=False,
variable=self.draw_rapid,
command=self.toggleDrawFlag)
tkExtra.Balloon.set(b, _("Toggle display of rapid motion (G0)"))
b.pack(side=LEFT)
b = Checkbutton(toolbar,
image=Utils.icons["workspace"],
indicatoron=False,
variable=self.draw_workarea,
command=self.drawWorkarea)
tkExtra.Balloon.set(b, _("Toggle display of workarea"))
b.pack(side=LEFT)
b = Checkbutton(toolbar,
image=Utils.icons["camera"],
indicatoron=False,
variable=self.draw_camera,
command=self.drawCamera)
tkExtra.Balloon.set(b, _("Toggle display of camera"))
b.pack(side=LEFT)
if Camera.cv is None: b.config(state=DISABLED)
b = Button(toolbar,
image=Utils.icons["refresh"],
command=self.viewChange)
tkExtra.Balloon.set(b, _("Redraw display [Ctrl-R]"))
b.pack(side=LEFT)
# -----------
self.drawTime = tkExtra.Combobox(toolbar,
width=3,
background="White",
command=self.drawTimeChange)
tkExtra.Balloon.set(self.drawTime, _("Draw timeout in seconds"))
self.drawTime.fill(["inf", "1", "2", "3", "5", "10", "20", "30", "60", "120"])
self.drawTime.set(DRAW_TIME)
self.drawTime.pack(side=RIGHT)
Label(toolbar, text=_("Timeout:")).pack(side=RIGHT)
#----------------------------------------------------------------------
def redraw(self, event=None):
self.canvas.reset()
self.event_generate("<<ViewChange>>")
#----------------------------------------------------------------------
def viewChange(self, a=None, b=None, c=None):
self.event_generate("<<ViewChange>>")
# ----------------------------------------------------------------------
def viewXY(self, event=None):
self.view.set(VIEWS[VIEW_XY])
# ----------------------------------------------------------------------
def viewXZ(self, event=None):
self.view.set(VIEWS[VIEW_XZ])
# ----------------------------------------------------------------------
def viewYZ(self, event=None):
self.view.set(VIEWS[VIEW_YZ])
# ----------------------------------------------------------------------
def viewISO1(self, event=None):
self.view.set(VIEWS[VIEW_ISO1])
# ----------------------------------------------------------------------
def viewISO2(self, event=None):
self.view.set(VIEWS[VIEW_ISO2])
# ----------------------------------------------------------------------
def viewISO3(self, event=None):
self.view.set(VIEWS[VIEW_ISO3])
#----------------------------------------------------------------------
def toggleDrawFlag(self):
self.canvas.draw_axes = self.draw_axes.get()
self.canvas.draw_grid = self.draw_grid.get()
self.canvas.draw_margin = self.draw_margin.get()
self.canvas.draw_probe = self.draw_probe.get()
self.canvas.draw_paths = self.draw_paths.get()
self.canvas.draw_rapid = self.draw_rapid.get()
self.canvas.draw_workarea = self.draw_workarea.get()
self.event_generate("<<ViewChange>>")
#----------------------------------------------------------------------
def drawAxes(self, value=None):
if value is not None: self.draw_axes.set(value)
self.canvas.draw_axes = self.draw_axes.get()
self.canvas.drawAxes()
#----------------------------------------------------------------------
def drawGrid(self, value=None):
if value is not None: self.draw_grid.set(value)
self.canvas.draw_grid = self.draw_grid.get()
self.canvas.drawGrid()
#----------------------------------------------------------------------
def drawMargin(self, value=None):
if value is not None: self.draw_margin.set(value)
self.canvas.draw_margin = self.draw_margin.get()
self.canvas.drawMargin()
#----------------------------------------------------------------------
def drawProbe(self, value=None):
if value is not None: self.draw_probe.set(value)
self.canvas.draw_probe = self.draw_probe.get()
self.canvas.drawProbe()
#----------------------------------------------------------------------
def drawWorkarea(self, value=None):
if value is not None: self.draw_workarea.set(value)
self.canvas.draw_workarea = self.draw_workarea.get()
self.canvas.drawWorkarea()
#----------------------------------------------------------------------
def drawCamera(self, value=None):
if value is not None: self.draw_camera.set(value)
if self.draw_camera.get():
self.canvas.cameraOn()
else:
self.canvas.cameraOff()
#----------------------------------------------------------------------
def drawTimeChange(self):
global DRAW_TIME
try:
DRAW_TIME = int(self.drawTime.get())
except ValueError:
DRAW_TIME = 5*60
self.viewChange()
| [
"math.floor",
"PIL.Image.new",
"math.sqrt",
"Utils.setInt",
"math.cos",
"CNC.CNC.isMarginValid",
"math.log10",
"Utils.getStr",
"Camera.Camera",
"CNC.CNC.isAllMarginValid",
"CNC.CNC.compileLine",
"PIL.ImageTk.PhotoImage",
"tkExtra.Combobox",
"bmath.frange",
"numpy.floor",
"math.radians"... | [((2049, 2065), 'math.radians', 'math.radians', (['(60)'], {}), '(60)\n', (2061, 2065), False, 'import math\n'), ((2082, 2098), 'math.radians', 'math.radians', (['(60)'], {}), '(60)\n', (2094, 2098), False, 'import math\n'), ((6467, 6492), 'Camera.Camera', 'Camera.Camera', (['"""aligncam"""'], {}), "('aligncam')\n", (6480, 6492), False, 'import Camera\n'), ((23162, 23174), 'bmath.Vector', 'Vector', (['x', 'y'], {}), '(x, y)\n', (23168, 23174), False, 'from bmath import Vector\n'), ((43980, 43999), 'CNC.CNC.isMarginValid', 'CNC.isMarginValid', ([], {}), '()\n', (43997, 43999), False, 'from CNC import Tab, CNC\n'), ((54287, 54312), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['image'], {}), '(image)\n', (54305, 54312), False, 'from PIL import Image, ImageTk\n'), ((62512, 62557), 'Utils.getInt', 'Utils.getInt', (['"""Canvas"""', '"""drawtime"""', 'DRAW_TIME'], {}), "('Canvas', 'drawtime', DRAW_TIME)\n", (62524, 62557), False, 'import Utils\n'), ((62581, 62633), 'Utils.getStr', 'Utils.getStr', (['"""Color"""', '"""canvas.insert"""', 'INSERT_COLOR'], {}), "('Color', 'canvas.insert', INSERT_COLOR)\n", (62593, 62633), False, 'import Utils\n'), ((62652, 62704), 'Utils.getStr', 'Utils.getStr', (['"""Color"""', '"""canvas.gantry"""', 'GANTRY_COLOR'], {}), "('Color', 'canvas.gantry', GANTRY_COLOR)\n", (62664, 62704), False, 'import Utils\n'), ((62723, 62775), 'Utils.getStr', 'Utils.getStr', (['"""Color"""', '"""canvas.margin"""', 'MARGIN_COLOR'], {}), "('Color', 'canvas.margin', MARGIN_COLOR)\n", (62735, 62775), False, 'import Utils\n'), ((62794, 62842), 'Utils.getStr', 'Utils.getStr', (['"""Color"""', '"""canvas.grid"""', 'GRID_COLOR'], {}), "('Color', 'canvas.grid', GRID_COLOR)\n", (62806, 62842), False, 'import Utils\n'), ((62863, 62916), 'Utils.getStr', 'Utils.getStr', (['"""Color"""', '"""canvas.selectbox"""', 'BOX_SELECT'], {}), "('Color', 'canvas.selectbox', BOX_SELECT)\n", (62875, 62916), False, 'import Utils\n'), ((62934, 62986), 
'Utils.getStr', 'Utils.getStr', (['"""Color"""', '"""canvas.enable"""', 'ENABLE_COLOR'], {}), "('Color', 'canvas.enable', ENABLE_COLOR)\n", (62946, 62986), False, 'import Utils\n'), ((63005, 63059), 'Utils.getStr', 'Utils.getStr', (['"""Color"""', '"""canvas.disable"""', 'DISABLE_COLOR'], {}), "('Color', 'canvas.disable', DISABLE_COLOR)\n", (63017, 63059), False, 'import Utils\n'), ((63077, 63129), 'Utils.getStr', 'Utils.getStr', (['"""Color"""', '"""canvas.select"""', 'SELECT_COLOR'], {}), "('Color', 'canvas.select', SELECT_COLOR)\n", (63089, 63129), False, 'import Utils\n'), ((63148, 63202), 'Utils.getStr', 'Utils.getStr', (['"""Color"""', '"""canvas.select2"""', 'SELECT2_COLOR'], {}), "('Color', 'canvas.select2', SELECT2_COLOR)\n", (63160, 63202), False, 'import Utils\n'), ((63220, 63274), 'Utils.getStr', 'Utils.getStr', (['"""Color"""', '"""canvas.process"""', 'PROCESS_COLOR'], {}), "('Color', 'canvas.process', PROCESS_COLOR)\n", (63232, 63274), False, 'import Utils\n'), ((63292, 63340), 'Utils.getStr', 'Utils.getStr', (['"""Color"""', '"""canvas.move"""', 'MOVE_COLOR'], {}), "('Color', 'canvas.move', MOVE_COLOR)\n", (63304, 63340), False, 'import Utils\n'), ((63361, 63411), 'Utils.getStr', 'Utils.getStr', (['"""Color"""', '"""canvas.ruler"""', 'RULER_COLOR'], {}), "('Color', 'canvas.ruler', RULER_COLOR)\n", (63373, 63411), False, 'import Utils\n'), ((63431, 63483), 'Utils.getStr', 'Utils.getStr', (['"""Color"""', '"""canvas.camera"""', 'CAMERA_COLOR'], {}), "('Color', 'canvas.camera', CAMERA_COLOR)\n", (63443, 63483), False, 'import Utils\n'), ((63505, 63564), 'Utils.getStr', 'Utils.getStr', (['"""Color"""', '"""canvas.probetext"""', 'PROBE_TEXT_COLOR'], {}), "('Color', 'canvas.probetext', PROBE_TEXT_COLOR)\n", (63517, 63564), False, 'import Utils\n'), ((63584, 63640), 'Utils.getStr', 'Utils.getStr', (['"""Color"""', '"""canvas.background"""', 'CANVAS_COLOR'], {}), "('Color', 'canvas.background', CANVAS_COLOR)\n", (63596, 63640), False, 'import Utils\n'), 
((63740, 63785), 'Utils.setInt', 'Utils.setInt', (['"""Canvas"""', '"""drawtime"""', 'DRAW_TIME'], {}), "('Canvas', 'drawtime', DRAW_TIME)\n", (63752, 63785), False, 'import Utils\n'), ((68875, 68963), 'tkExtra.Combobox', 'tkExtra.Combobox', (['toolbar'], {'width': '(3)', 'background': '"""White"""', 'command': 'self.drawTimeChange'}), "(toolbar, width=3, background='White', command=self.\n drawTimeChange)\n", (68891, 68963), False, 'import tkExtra\n'), ((44392, 44414), 'CNC.CNC.isAllMarginValid', 'CNC.isAllMarginValid', ([], {}), '()\n', (44412, 44414), False, 'from CNC import Tab, CNC\n'), ((51542, 51589), 'bmath.frange', 'bmath.frange', (['lw', '(hg + 1e-10)', '((hg - lw) / 255.0)'], {}), '(lw, hg + 1e-10, (hg - lw) / 255.0)\n', (51554, 51589), False, 'import bmath\n'), ((52030, 52073), 'numpy.floor', 'numpy.floor', (['((array - lw) / (hg - lw) * 255)'], {}), '((array - lw) / (hg - lw) * 255)\n', (52041, 52073), False, 'import numpy\n'), ((54024, 54066), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'affine.size', '((255,) * 4)'], {}), "('RGBA', affine.size, (255,) * 4)\n", (54033, 54066), False, 'from PIL import Image, ImageTk\n'), ((54142, 54180), 'PIL.Image.composite', 'Image.composite', (['affine', 'white', 'affine'], {}), '(affine, white, affine)\n', (54157, 54180), False, 'from PIL import Image, ImageTk\n'), ((54672, 54683), 'time.time', 'time.time', ([], {}), '()\n', (54681, 54683), False, 'import time\n'), ((62451, 62491), 'Utils.getStr', 'Utils.getStr', (['"""Canvas"""', '"""view"""', 'VIEWS[0]'], {}), "('Canvas', 'view', VIEWS[0])\n", (62463, 62491), False, 'import Utils\n'), ((43151, 43164), 'math.log10', 'math.log10', (['d'], {}), '(d)\n', (43161, 43164), False, 'import math\n'), ((51596, 51614), 'math.floor', 'math.floor', (['(x / dc)'], {}), '(x / dc)\n', (51606, 51614), False, 'import math\n'), ((61840, 61877), 'Utils.getBool', 'Utils.getBool', (['"""Canvas"""', '"""axes"""', '(True)'], {}), "('Canvas', 'axes', True)\n", (61853, 61877), False, 
'import Utils\n'), ((61918, 61955), 'Utils.getBool', 'Utils.getBool', (['"""Canvas"""', '"""grid"""', '(True)'], {}), "('Canvas', 'grid', True)\n", (61931, 61955), False, 'import Utils\n'), ((61996, 62035), 'Utils.getBool', 'Utils.getBool', (['"""Canvas"""', '"""margin"""', '(True)'], {}), "('Canvas', 'margin', True)\n", (62009, 62035), False, 'import Utils\n'), ((62154, 62192), 'Utils.getBool', 'Utils.getBool', (['"""Canvas"""', '"""paths"""', '(True)'], {}), "('Canvas', 'paths', True)\n", (62167, 62192), False, 'import Utils\n'), ((62232, 62270), 'Utils.getBool', 'Utils.getBool', (['"""Canvas"""', '"""rapid"""', '(True)'], {}), "('Canvas', 'rapid', True)\n", (62245, 62270), False, 'import Utils\n'), ((62310, 62351), 'Utils.getBool', 'Utils.getBool', (['"""Canvas"""', '"""workarea"""', '(True)'], {}), "('Canvas', 'workarea', True)\n", (62323, 62351), False, 'import Utils\n'), ((55953, 55964), 'time.time', 'time.time', ([], {}), '()\n', (55962, 55964), False, 'import time\n'), ((56022, 56043), 'CNC.CNC.compileLine', 'CNC.compileLine', (['line'], {}), '(line)\n', (56037, 56043), False, 'from CNC import Tab, CNC\n'), ((56120, 56138), 'CNC.CNC.breakLine', 'CNC.breakLine', (['cmd'], {}), '(cmd)\n', (56133, 56138), False, 'from CNC import Tab, CNC\n'), ((19698, 19736), 'math.sqrt', 'math.sqrt', (['(dx ** 2 + dy ** 2 + dz ** 2)'], {}), '(dx ** 2 + dy ** 2 + dz ** 2)\n', (19707, 19736), False, 'import math\n'), ((35456, 35467), 'math.sin', 'math.sin', (['f'], {}), '(f)\n', (35464, 35467), False, 'import math\n'), ((35474, 35485), 'math.cos', 'math.cos', (['f'], {}), '(f)\n', (35482, 35485), False, 'import math\n'), ((55750, 55761), 'time.time', 'time.time', ([], {}), '()\n', (55759, 55761), False, 'import time\n'), ((55888, 55899), 'time.time', 'time.time', ([], {}), '()\n', (55897, 55899), False, 'import time\n'), ((19746, 19764), 'math.atan2', 'math.atan2', (['dy', 'dx'], {}), '(dy, dx)\n', (19756, 19764), False, 'import math\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 4 17:44:53 2021

@author: mlampert

Compute radial gradients of NSTX Thomson-scattering profiles through FLAP.
"""
import os
import copy
#FLAP imports and settings
import flap
import flap_nstx
import flap_mdsplus
# Register the NSTX data sources so flap.get_data() can resolve them by name.
flap_nstx.register('NSTX_GPI')
flap_nstx.register('NSTX_THOMSON')
flap_mdsplus.register('NSTX_MDSPlus')
# Load the package configuration file relative to this module's location.
thisdir = os.path.dirname(os.path.realpath(__file__))
fn = os.path.join(thisdir,"../flap_nstx.cfg")
flap.config.read(file_name=fn)
#Scientific imports
import numpy as np
def get_nstx_thomson_gradient(exp_id=None,
                              pressure=False,
                              temperature=False,
                              density=False,
                              r_pos=None,
                              spline_data=False,
                              output_name=None,
                              device_coordinates=True,
                              flux_coordinates=False):
    """Calculate the radial gradient of an NSTX Thomson-scattering profile.

    Exactly one of ``pressure``/``temperature``/``density`` must be True and
    exactly one of ``device_coordinates``/``flux_coordinates``.

    Parameters
    ----------
    exp_id : int, optional
        NSTX shot number forwarded to ``flap.get_data``.
    pressure, temperature, density : bool
        Select the quantity whose radial gradient is calculated.
    r_pos : float, optional
        If given, the gradient is interpolated to this radial position and
        a time trace is returned instead of the full RADIUS x TIME matrix.
    spline_data : bool
        Return the gradient of the spline-fitted profile (no error estimate)
        instead of the raw measured profile.
    output_name : str, optional
        If set, the resulting DataObject is also registered in flap storage.
    device_coordinates, flux_coordinates : bool
        Radial coordinate to differentiate against: 'Device R' [m] or
        'Flux r' [psi].

    Returns
    -------
    flap.DataObject
        Gradient data object; dimensions are TIME when ``r_pos`` is set,
        RADIUS x TIME otherwise.
    """
    #Data is RADIUS x TIME
    if pressure+density+temperature != 1:
        raise ValueError('Only one of the inputs should be set (pressure, temperature, density)!')
    if device_coordinates+flux_coordinates !=1:
        raise ValueError('Either device_coordinates or flux_coordinates can be set, not both.')
#    thomson=flap_nstx_thomson_data(exp_id,
#                                   pressure=pressure,
#                                   temperature=temperature,
#                                   density=density,
#                                   output_name='THOMSON_FOR_GRADIENT')
    # Raw measured profile (RADIUS x TIME)
    thomson=flap.get_data('NSTX_THOMSON',
                          exp_id=exp_id,
                          object_name='THOMSON_FOR_GRADIENT',
                          options={'pressure':pressure,
                                   'temperature':temperature,
                                   'density':density,
                                   'force_mdsplus':True})
#    thomson_spline=flap_nstx_thomson_data(exp_id,
#                                          pressure=pressure,
#                                          temperature=temperature,
#                                          density=density,
#                                          spline_data=True,
#                                          output_name=None)
    # Spline-fitted profile on a (possibly finer) radial grid
    thomson_spline=flap.get_data('NSTX_THOMSON',
                                 exp_id=exp_id,
                                 object_name=None,
                                 options={'pressure':pressure,
                                          'temperature':temperature,
                                          'density':density,
                                          'spline_data':True,
                                          'force_mdsplus':True})
    # Radial axes of the raw and the spline data (first column; radius
    # is assumed constant in time)
    if device_coordinates:
        radial_coordinate=thomson.coordinate('Device R')[0][:,0]
        spline_radial_coordinate=thomson_spline.coordinate('Device R')[0][:,0]
    if flux_coordinates:
        radial_coordinate=thomson.coordinate('Flux r')[0][:,0]
        spline_radial_coordinate=thomson_spline.coordinate('Flux r')[0][:,0]
    time_vector=thomson.coordinate('Time')[0][0,:]
    data=thomson.data
    error=thomson.error
    interp_data=thomson_spline.data
    #Calculation of the numerical gradient and interpolating the values for the given r_pos
    # Central difference over the radial axis for every time slice;
    # the list comprehension runs over time, hence the final transpose
    # back to RADIUS x TIME.
    data_gradient=np.asarray([(data[2:,i]-data[:-2,i])/(2*(radial_coordinate[2:]-radial_coordinate[:-2])) for i in range(len(time_vector))]).T
    # Worst-case (absolute-sum) error of the central difference
    data_gradient_error=np.asarray([(np.abs(error[2:,i])+np.abs(error[:-2,i]))/(2*(radial_coordinate[2:]-radial_coordinate[:-2])) for i in range(len(time_vector))]).T
    interp_data_gradient=np.asarray([(interp_data[2:,i]-interp_data[:-2,i])/(2*(spline_radial_coordinate[2:]-spline_radial_coordinate[:-2])) for i in range(len(time_vector))]).T
    #Interpolation for the r_pos
    if r_pos is not None:
        # Gradient time traces interpolated to r_pos (raw and spline)
        r_pos_gradient=np.asarray([np.interp(r_pos, radial_coordinate[1:-1], data_gradient[:,i]) for i in range(len(time_vector))])
        r_pos_gradient_spline=np.asarray([np.interp(r_pos, spline_radial_coordinate[1:-1], interp_data_gradient[:,i]) for i in range(len(time_vector))])
        # Find the two radii bracketing r_pos for the error interpolation
        ind_r=np.argmin(np.abs(radial_coordinate[1:-1]-r_pos))
        if radial_coordinate[ind_r] < r_pos:
            R1=radial_coordinate[1:-1][ind_r]
            R2=radial_coordinate[1:-1][ind_r+1]
            ind_R1=ind_r
            ind_R2=ind_r+1
        else:
            R1=radial_coordinate[1:-1][ind_r-1]
            R2=radial_coordinate[1:-1][ind_r]
            ind_R1=ind_r-1
            ind_R2=ind_r
        #Result of error propagation (basically average biased error between the two neighboring radii)
        r_pos_gradient_error=np.abs((r_pos-R1)/(R2-R1))*data_gradient_error[ind_R2,:]+\
                             np.abs((r_pos-R2)/(R2-R1))*data_gradient_error[ind_R1,:]
        coord = []
        # NOTE(review): the time step is taken from the first two samples,
        # i.e. an equidistant Thomson time base is assumed — confirm.
        coord.append(copy.deepcopy(flap.Coordinate(name='Time',
                                                  unit='s',
                                                  mode=flap.CoordinateMode(equidistant=True),
                                                  start=time_vector[0],
                                                  step=time_vector[1]-time_vector[0],
                                                  #shape=time_arr.shape,
                                                  dimension_list=[0]
                                                  )))
        coord.append(copy.deepcopy(flap.Coordinate(name='Sample',
                                                  unit='n.a.',
                                                  mode=flap.CoordinateMode(equidistant=True),
                                                  start=0,
                                                  step=1,
                                                  dimension_list=[0]
                                                  )))
        if device_coordinates:
            grad_unit='/m'
        if flux_coordinates:
            grad_unit='/psi'
        if pressure:
            data_unit = flap.Unit(name='Pressure gradient',unit='kPa'+grad_unit)
        elif temperature:
            data_unit = flap.Unit(name='Temperature gradient',unit='keV'+grad_unit)
        elif density:
            data_unit = flap.Unit(name='Density gradient',unit='m-3'+grad_unit)
        if not spline_data:
            d = flap.DataObject(exp_id=exp_id,
                                data_array=r_pos_gradient,
                                error=r_pos_gradient_error,
                                data_unit=data_unit,
                                coordinates=coord,
                                data_title='NSTX Thomson gradient')
        else:
            # Spline gradient carries no error estimate
            d = flap.DataObject(exp_id=exp_id,
                                data_array=r_pos_gradient_spline,
                                data_unit=data_unit,
                                coordinates=coord,
                                data_title='NSTX Thomson gradient spline')
    else:
        # Full RADIUS x TIME result: time varies along dimension 1,
        # radius along dimension 0.
        coord = []
        coord.append(copy.deepcopy(flap.Coordinate(name='Time',
                                                  unit='s',
                                                  mode=flap.CoordinateMode(equidistant=True),
                                                  start=time_vector[0],
                                                  step=time_vector[1]-time_vector[0],
                                                  #shape=time_arr.shape,
                                                  dimension_list=[1]
                                                  )))
        coord.append(copy.deepcopy(flap.Coordinate(name='Sample',
                                                  unit='n.a.',
                                                  mode=flap.CoordinateMode(equidistant=True),
                                                  start=0,
                                                  step=1,
                                                  dimension_list=[1]
                                                  )))
        if pressure:
            data_unit = flap.Unit(name='Pressure gradient',unit='kPa/m')
        elif temperature:
            data_unit = flap.Unit(name='Temperature gradient',unit='keV/m')
        elif density:
            data_unit = flap.Unit(name='Density gradient',unit='m-3/m')
        if device_coordinates:
            radial_coordinate_name='Device R'
            radial_unit='m'
        if flux_coordinates:
            radial_coordinate_name='Flux r'
            radial_unit=''
        # The edge points are dropped by the central difference, hence [1:-1]
        if not spline_data:
            coord.append(copy.deepcopy(flap.Coordinate(name=radial_coordinate_name,
                                                      unit=radial_unit,
                                                      mode=flap.CoordinateMode(equidistant=False),
                                                      values=radial_coordinate[1:-1],
                                                      shape=radial_coordinate[1:-1].shape,
                                                      dimension_list=[0]
                                                      )))
            d = flap.DataObject(exp_id=exp_id,
                                data_array=data_gradient,
                                error=data_gradient_error,
                                data_unit=data_unit,
                                coordinates=coord,
                                data_title='NSTX Thomson gradient')
        else:
            coord.append(copy.deepcopy(flap.Coordinate(name=radial_coordinate_name,
                                                      unit=radial_unit,
                                                      mode=flap.CoordinateMode(equidistant=False),
                                                      values=spline_radial_coordinate[1:-1],
                                                      shape=spline_radial_coordinate[1:-1].shape,
                                                      dimension_list=[0]
                                                      )))
            d = flap.DataObject(exp_id=exp_id,
                                data_array=interp_data_gradient,
                                data_unit=data_unit,
                                coordinates=coord,
                                data_title='NSTX Thomson gradient spline')
    # Optionally register the result in flap's object storage
    if output_name is not None:
        flap.add_data_object(d,output_name)
    return d
| [
"flap_mdsplus.register",
"numpy.abs",
"flap.config.read",
"flap_nstx.register",
"os.path.join",
"flap.Unit",
"os.path.realpath",
"flap.DataObject",
"flap.add_data_object",
"flap.get_data",
"numpy.interp",
"flap.CoordinateMode"
] | [((211, 241), 'flap_nstx.register', 'flap_nstx.register', (['"""NSTX_GPI"""'], {}), "('NSTX_GPI')\n", (229, 241), False, 'import flap_nstx\n'), ((242, 276), 'flap_nstx.register', 'flap_nstx.register', (['"""NSTX_THOMSON"""'], {}), "('NSTX_THOMSON')\n", (260, 276), False, 'import flap_nstx\n'), ((278, 315), 'flap_mdsplus.register', 'flap_mdsplus.register', (['"""NSTX_MDSPlus"""'], {}), "('NSTX_MDSPlus')\n", (299, 315), False, 'import flap_mdsplus\n'), ((376, 417), 'os.path.join', 'os.path.join', (['thisdir', '"""../flap_nstx.cfg"""'], {}), "(thisdir, '../flap_nstx.cfg')\n", (388, 417), False, 'import os\n'), ((417, 447), 'flap.config.read', 'flap.config.read', ([], {'file_name': 'fn'}), '(file_name=fn)\n', (433, 447), False, 'import flap\n'), ((343, 369), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (359, 369), False, 'import os\n'), ((1550, 1742), 'flap.get_data', 'flap.get_data', (['"""NSTX_THOMSON"""'], {'exp_id': 'exp_id', 'object_name': '"""THOMSON_FOR_GRADIENT"""', 'options': "{'pressure': pressure, 'temperature': temperature, 'density': density,\n 'force_mdsplus': True}"}), "('NSTX_THOMSON', exp_id=exp_id, object_name=\n 'THOMSON_FOR_GRADIENT', options={'pressure': pressure, 'temperature':\n temperature, 'density': density, 'force_mdsplus': True})\n", (1563, 1742), False, 'import flap\n'), ((2309, 2504), 'flap.get_data', 'flap.get_data', (['"""NSTX_THOMSON"""'], {'exp_id': 'exp_id', 'object_name': 'None', 'options': "{'pressure': pressure, 'temperature': temperature, 'density': density,\n 'spline_data': True, 'force_mdsplus': True}"}), "('NSTX_THOMSON', exp_id=exp_id, object_name=None, options={\n 'pressure': pressure, 'temperature': temperature, 'density': density,\n 'spline_data': True, 'force_mdsplus': True})\n", (2322, 2504), False, 'import flap\n'), ((9873, 9909), 'flap.add_data_object', 'flap.add_data_object', (['d', 'output_name'], {}), '(d, output_name)\n', (9893, 9909), False, 'import flap\n'), ((4211, 4250), 
'numpy.abs', 'np.abs', (['(radial_coordinate[1:-1] - r_pos)'], {}), '(radial_coordinate[1:-1] - r_pos)\n', (4217, 4250), True, 'import numpy as np\n'), ((5926, 5985), 'flap.Unit', 'flap.Unit', ([], {'name': '"""Pressure gradient"""', 'unit': "('kPa' + grad_unit)"}), "(name='Pressure gradient', unit='kPa' + grad_unit)\n", (5935, 5985), False, 'import flap\n'), ((6252, 6422), 'flap.DataObject', 'flap.DataObject', ([], {'exp_id': 'exp_id', 'data_array': 'r_pos_gradient', 'error': 'r_pos_gradient_error', 'data_unit': 'data_unit', 'coordinates': 'coord', 'data_title': '"""NSTX Thomson gradient"""'}), "(exp_id=exp_id, data_array=r_pos_gradient, error=\n r_pos_gradient_error, data_unit=data_unit, coordinates=coord,\n data_title='NSTX Thomson gradient')\n", (6267, 6422), False, 'import flap\n'), ((6605, 6757), 'flap.DataObject', 'flap.DataObject', ([], {'exp_id': 'exp_id', 'data_array': 'r_pos_gradient_spline', 'data_unit': 'data_unit', 'coordinates': 'coord', 'data_title': '"""NSTX Thomson gradient spline"""'}), "(exp_id=exp_id, data_array=r_pos_gradient_spline, data_unit=\n data_unit, coordinates=coord, data_title='NSTX Thomson gradient spline')\n", (6620, 6757), False, 'import flap\n'), ((7810, 7859), 'flap.Unit', 'flap.Unit', ([], {'name': '"""Pressure gradient"""', 'unit': '"""kPa/m"""'}), "(name='Pressure gradient', unit='kPa/m')\n", (7819, 7859), False, 'import flap\n'), ((8697, 8866), 'flap.DataObject', 'flap.DataObject', ([], {'exp_id': 'exp_id', 'data_array': 'data_gradient', 'error': 'data_gradient_error', 'data_unit': 'data_unit', 'coordinates': 'coord', 'data_title': '"""NSTX Thomson gradient"""'}), "(exp_id=exp_id, data_array=data_gradient, error=\n data_gradient_error, data_unit=data_unit, coordinates=coord, data_title\n ='NSTX Thomson gradient')\n", (8712, 8866), False, 'import flap\n'), ((9548, 9699), 'flap.DataObject', 'flap.DataObject', ([], {'exp_id': 'exp_id', 'data_array': 'interp_data_gradient', 'data_unit': 'data_unit', 'coordinates': 'coord', 
'data_title': '"""NSTX Thomson gradient spline"""'}), "(exp_id=exp_id, data_array=interp_data_gradient, data_unit=\n data_unit, coordinates=coord, data_title='NSTX Thomson gradient spline')\n", (9563, 9699), False, 'import flap\n'), ((3932, 3994), 'numpy.interp', 'np.interp', (['r_pos', 'radial_coordinate[1:-1]', 'data_gradient[:, i]'], {}), '(r_pos, radial_coordinate[1:-1], data_gradient[:, i])\n', (3941, 3994), True, 'import numpy as np\n'), ((4071, 4147), 'numpy.interp', 'np.interp', (['r_pos', 'spline_radial_coordinate[1:-1]', 'interp_data_gradient[:, i]'], {}), '(r_pos, spline_radial_coordinate[1:-1], interp_data_gradient[:, i])\n', (4080, 4147), True, 'import numpy as np\n'), ((4734, 4766), 'numpy.abs', 'np.abs', (['((r_pos - R1) / (R2 - R1))'], {}), '((r_pos - R1) / (R2 - R1))\n', (4740, 4766), True, 'import numpy as np\n'), ((4822, 4854), 'numpy.abs', 'np.abs', (['((r_pos - R2) / (R2 - R1))'], {}), '((r_pos - R2) / (R2 - R1))\n', (4828, 4854), True, 'import numpy as np\n'), ((6033, 6095), 'flap.Unit', 'flap.Unit', ([], {'name': '"""Temperature gradient"""', 'unit': "('keV' + grad_unit)"}), "(name='Temperature gradient', unit='keV' + grad_unit)\n", (6042, 6095), False, 'import flap\n'), ((7909, 7961), 'flap.Unit', 'flap.Unit', ([], {'name': '"""Temperature gradient"""', 'unit': '"""keV/m"""'}), "(name='Temperature gradient', unit='keV/m')\n", (7918, 7961), False, 'import flap\n'), ((6139, 6197), 'flap.Unit', 'flap.Unit', ([], {'name': '"""Density gradient"""', 'unit': "('m-3' + grad_unit)"}), "(name='Density gradient', unit='m-3' + grad_unit)\n", (6148, 6197), False, 'import flap\n'), ((8007, 8055), 'flap.Unit', 'flap.Unit', ([], {'name': '"""Density gradient"""', 'unit': '"""m-3/m"""'}), "(name='Density gradient', unit='m-3/m')\n", (8016, 8055), False, 'import flap\n'), ((3525, 3545), 'numpy.abs', 'np.abs', (['error[2:, i]'], {}), '(error[2:, i])\n', (3531, 3545), True, 'import numpy as np\n'), ((3545, 3566), 'numpy.abs', 'np.abs', (['error[:-2, i]'], {}), 
'(error[:-2, i])\n', (3551, 3566), True, 'import numpy as np\n'), ((5065, 5102), 'flap.CoordinateMode', 'flap.CoordinateMode', ([], {'equidistant': '(True)'}), '(equidistant=True)\n', (5084, 5102), False, 'import flap\n'), ((5546, 5583), 'flap.CoordinateMode', 'flap.CoordinateMode', ([], {'equidistant': '(True)'}), '(equidistant=True)\n', (5565, 5583), False, 'import flap\n'), ((7065, 7102), 'flap.CoordinateMode', 'flap.CoordinateMode', ([], {'equidistant': '(True)'}), '(equidistant=True)\n', (7084, 7102), False, 'import flap\n'), ((7546, 7583), 'flap.CoordinateMode', 'flap.CoordinateMode', ([], {'equidistant': '(True)'}), '(equidistant=True)\n', (7565, 7583), False, 'import flap\n'), ((8449, 8487), 'flap.CoordinateMode', 'flap.CoordinateMode', ([], {'equidistant': '(False)'}), '(equidistant=False)\n', (8468, 8487), False, 'import flap\n'), ((9217, 9255), 'flap.CoordinateMode', 'flap.CoordinateMode', ([], {'equidistant': '(False)'}), '(equidistant=False)\n', (9236, 9255), False, 'import flap\n')] |
import os
import mlflow
import numpy as np
import pandas as pd
import pytest
from matplotlib import pyplot as plt
from plotly import graph_objects as go
from mlflow_extend import logging as lg
from mlflow_extend.testing.utils import (
_get_default_args,
_read_data,
assert_file_exists_in_artifacts,
assert_not_conflict_with_fluent_apis,
)
def test_new_apis_do_not_conflict_native_apis() -> None:
    """Names exported by the extended logging module must not shadow MLflow fluent APIs."""
    exported_names = lg.__all__
    assert_not_conflict_with_fluent_apis(exported_names)
@pytest.mark.parametrize("path", ["test.txt", "dir/test.txt", "dir/dir/test.txt"])
def test_artifact_context(path: str) -> None:
    """_artifact_context yields a temp path whose file ends up logged at *path*."""
    with mlflow.start_run() as active_run:
        with lg._artifact_context(path) as tmp_path:
            # Touch an empty file at the temporary location.
            with open(tmp_path, "w"):
                pass
        assert_file_exists_in_artifacts(active_run, path)
def test_log_params_flatten() -> None:
    """Nested params are flattened with default, custom-prefix, and custom-sep keys."""
    nested = {"a": {"b": 0}}
    with mlflow.start_run() as run:
        lg.log_params_flatten(nested)
        lg.log_params_flatten(nested, parent_key="d")
        lg.log_params_flatten(nested, sep="_")
    logged = mlflow.get_run(run.info.run_id)
    expected = {"a.b": "0", "a_b": "0", "d.a.b": "0"}
    assert logged.data.params == expected
def test_log_metrics_flatten() -> None:
    """Nested metrics are flattened with default, custom-prefix, and custom-sep keys."""
    nested = {"a": {"b": 0.0}}
    with mlflow.start_run() as run:
        lg.log_metrics_flatten(nested)
        lg.log_metrics_flatten(nested, parent_key="d")
        lg.log_metrics_flatten(nested, sep="_")
    logged = mlflow.get_run(run.info.run_id)
    expected = {"a.b": 0.0, "a_b": 0.0, "d.a.b": 0.0}
    assert logged.data.metrics == expected
def test_log_plt_figure() -> None:
    """A matplotlib figure is saved under the requested artifact path."""
    figure, axis = plt.subplots()
    axis.plot([0, 1], [0, 1])
    dest = "test.png"
    with mlflow.start_run() as run:
        lg.log_figure(figure, dest)
        assert_file_exists_in_artifacts(run, dest)
def test_log_plotly_figure() -> None:
    """A plotly figure logs as HTML; a non-HTML destination is rejected."""
    figure = go.Figure(data=[go.Bar(x=[1, 2, 3], y=[1, 3, 2])])
    with mlflow.start_run() as run:
        dest = "test.html"
        lg.log_figure(figure, dest)
        assert_file_exists_in_artifacts(run, dest)

        # Plotly figures cannot be written as PNG by log_figure.
        dest = "test.png"
        expected_msg = '"{}" is not an HTML file.'.format(dest)
        with pytest.raises(ValueError, match=expected_msg):
            lg.log_figure(figure, dest)
def test_log_figure() -> None:
    """``log_figure`` dispatches on figure type: matplotlib -> any path, plotly -> HTML only."""
    # matplotlib figure: PNG destination works.
    mpl_fig, axis = plt.subplots()
    axis.plot([0, 1], [0, 1])
    with mlflow.start_run() as run:
        dest = "test.png"
        lg.log_figure(mpl_fig, dest)
        assert_file_exists_in_artifacts(run, dest)

    # plotly figure: HTML works, PNG raises.
    plotly_fig = go.Figure(data=[go.Bar(x=[1, 2, 3], y=[1, 3, 2])])
    with mlflow.start_run() as run:
        dest = "test.html"
        lg.log_figure(plotly_fig, dest)
        assert_file_exists_in_artifacts(run, dest)

        dest = "test.png"
        expected_msg = '"{}" is not an HTML file.'.format(dest)
        with pytest.raises(ValueError, match=expected_msg):
            lg.log_figure(plotly_fig, dest)
def test_log_figure_invalid_fig_type() -> None:
    """Passing a non-figure object raises TypeError."""
    with mlflow.start_run(), pytest.raises(TypeError, match="Invalid figure type"):
        lg.log_figure("figure", "test.png")
@pytest.mark.parametrize("path", ["test.json", "test.yaml", "test.yml"])
def test_log_dict(path: str) -> None:
    """Dicts serialize by file extension and round-trip through the artifact store."""
    payload = {"a": 0}
    with mlflow.start_run() as run:
        lg.log_dict(payload, path)
        assert_file_exists_in_artifacts(run, path)
        artifacts_dir = run.info.artifact_uri.replace("file://", "")
        assert _read_data(os.path.join(artifacts_dir, path)) == payload
def test_log_pickle() -> None:
    """An arbitrary object can be pickled to an artifact file."""
    dest = "test.pkl"
    with mlflow.start_run() as run:
        lg.log_pickle({"a": 0}, dest)
        assert_file_exists_in_artifacts(run, dest)
@pytest.mark.parametrize("fmt", ["json", ".json", "yaml", ".yaml", "yml", ".yml"])
def test_log_dict_with_fmt(fmt: str) -> None:
    """An explicit ``fmt`` (with or without leading dot) controls serialization."""
    payload = {"a": 0}
    dest = "test.{}".format(fmt.lstrip("."))
    with mlflow.start_run() as run:
        lg.log_dict(payload, dest, fmt)
        assert_file_exists_in_artifacts(run, dest)
        artifacts_dir = run.info.artifact_uri.replace("file://", "")
        assert _read_data(os.path.join(artifacts_dir, dest)) == payload
def test_log_dict_with_invalid_format() -> None:
    """An unsupported serialization format raises ValueError."""
    fmt = "abc"
    dest = "test.{}".format(fmt)
    expected_msg = "Invalid file format: {}.".format(fmt)
    with mlflow.start_run(), pytest.raises(ValueError, match=expected_msg):
        lg.log_dict({"a": 0}, dest, fmt)
@pytest.mark.parametrize("fmt", ["csv", "feather"])
def test_log_df(fmt: str) -> None:
    """DataFrames round-trip through csv/feather artifacts."""
    original = pd.DataFrame({"a": [0]})
    dest = "test.{}".format(fmt)
    with mlflow.start_run() as run:
        lg.log_df(original, dest, fmt)
        assert_file_exists_in_artifacts(run, dest)
        artifacts_dir = run.info.artifact_uri.replace("file://", "")
        # Pick the matching pandas reader for the serialization format.
        reader = pd.read_csv if fmt == "csv" else pd.read_feather
        loaded = reader(os.path.join(artifacts_dir, dest))
        pd.testing.assert_frame_equal(loaded, original)
def test_log_df_with_invalid_format() -> None:
    """An unsupported DataFrame format raises ValueError."""
    frame = pd.DataFrame({"a": [0]})
    fmt = "abc"
    dest = "test.{}".format(fmt)
    expected_msg = "Invalid file format: {}.".format(fmt)
    with mlflow.start_run(), pytest.raises(ValueError, match=expected_msg):
        lg.log_df(frame, dest, fmt)
@pytest.mark.parametrize("text", ["test", ""])
def test_log_text(text: str) -> None:
    """Text (including the empty string) is written verbatim to the artifact."""
    dest = "test.txt"
    with mlflow.start_run() as run:
        lg.log_text(text, dest)
        assert_file_exists_in_artifacts(run, dest)
        artifacts_dir = run.info.artifact_uri.replace("file://", "")
        with open(os.path.join(artifacts_dir, dest), "r") as f:
            assert f.read() == text
def test_log_numpy() -> None:
    """NumPy arrays round-trip through .npy artifacts."""
    original = np.array([0])
    dest = "test.npy"
    with mlflow.start_run() as run:
        lg.log_numpy(original, dest)
        assert_file_exists_in_artifacts(run, dest)
        artifacts_dir = run.info.artifact_uri.replace("file://", "")
        loaded = np.load(os.path.join(artifacts_dir, dest))
        np.testing.assert_array_equal(loaded, original)
def test_log_confusion_matrix() -> None:
    """Confusion-matrix plots land at the default path or a caller-supplied one."""
    matrix = [[1, 2], [3, 4]]
    with mlflow.start_run() as run:
        default_path = _get_default_args(lg.log_confusion_matrix)["path"]
        lg.log_confusion_matrix(matrix)
        assert_file_exists_in_artifacts(run, default_path)
    with mlflow.start_run() as run:
        custom_path = "cm.png"
        lg.log_confusion_matrix(matrix, custom_path)
        assert_file_exists_in_artifacts(run, custom_path)
def test_log_feature_importance() -> None:
    """Feature-importance plots honor the default path, ``normalize``, and ``path``."""
    features = ["a", "b", "c"]
    importances = [1, 2, 3]
    default_path = _get_default_args(lg.log_feature_importance)["path"]
    with mlflow.start_run() as run:
        lg.log_feature_importance(features, importances, "gain")
        assert_file_exists_in_artifacts(run, default_path)
    with mlflow.start_run() as run:
        lg.log_feature_importance(features, importances, "gain", normalize=True)
        assert_file_exists_in_artifacts(run, default_path)
    with mlflow.start_run() as run:
        custom_path = "feat_imp.png"
        lg.log_feature_importance(features, importances, "gain", path=custom_path)
        assert_file_exists_in_artifacts(run, custom_path)
@pytest.mark.parametrize("limit", [2, 3, 4])
def test_log_feature_importance_with_limit(limit: int) -> None:
    """``limit`` values at, below, and above the feature count all log a plot."""
    with mlflow.start_run() as run:
        default_path = _get_default_args(lg.log_feature_importance)["path"]
        lg.log_feature_importance(["a", "b", "c"], [1, 2, 3], "gain", limit=limit)
        assert_file_exists_in_artifacts(run, default_path)
def test_log_roc_curve() -> None:
    """ROC-curve plots log with the default path, an AUC value, or a custom path."""
    default_path = _get_default_args(lg.log_roc_curve)["path"]
    fpr, tpr = [0, 1], [0, 1]
    with mlflow.start_run() as run:
        lg.log_roc_curve(fpr, tpr)
        assert_file_exists_in_artifacts(run, default_path)
    with mlflow.start_run() as run:
        lg.log_roc_curve(fpr, tpr, 0.5)
        assert_file_exists_in_artifacts(run, default_path)
    with mlflow.start_run() as run:
        custom_path = "roc.png"
        lg.log_roc_curve(fpr, tpr, path=custom_path)
        assert_file_exists_in_artifacts(run, custom_path)
def test_log_pr_curve() -> None:
    """PR-curve plots log with the default path, an AP value, or a custom path."""
    default_path = _get_default_args(lg.log_pr_curve)["path"]
    recall, precision = [1, 0], [1, 0]
    with mlflow.start_run() as run:
        lg.log_pr_curve(recall, precision)
        assert_file_exists_in_artifacts(run, default_path)
    with mlflow.start_run() as run:
        lg.log_pr_curve(recall, precision, 0.5)
        assert_file_exists_in_artifacts(run, default_path)
    with mlflow.start_run() as run:
        custom_path = "pr.png"
        lg.log_pr_curve(recall, precision, path=custom_path)
        assert_file_exists_in_artifacts(run, custom_path)
| [
"mlflow_extend.logging.log_metrics_flatten",
"numpy.array",
"pandas.testing.assert_frame_equal",
"mlflow_extend.testing.utils._get_default_args",
"mlflow_extend.logging.log_pr_curve",
"plotly.graph_objects.Bar",
"mlflow_extend.logging.log_feature_importance",
"mlflow_extend.logging.log_roc_curve",
"... | [((472, 557), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""path"""', "['test.txt', 'dir/test.txt', 'dir/dir/test.txt']"], {}), "('path', ['test.txt', 'dir/test.txt',\n 'dir/dir/test.txt'])\n", (495, 557), False, 'import pytest\n'), ((3012, 3083), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""path"""', "['test.json', 'test.yaml', 'test.yml']"], {}), "('path', ['test.json', 'test.yaml', 'test.yml'])\n", (3035, 3083), False, 'import pytest\n'), ((3609, 3694), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fmt"""', "['json', '.json', 'yaml', '.yaml', 'yml', '.yml']"], {}), "('fmt', ['json', '.json', 'yaml', '.yaml', 'yml',\n '.yml'])\n", (3632, 3694), False, 'import pytest\n'), ((4368, 4418), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fmt"""', "['csv', 'feather']"], {}), "('fmt', ['csv', 'feather'])\n", (4391, 4418), False, 'import pytest\n'), ((5172, 5217), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text"""', "['test', '']"], {}), "('text', ['test', ''])\n", (5195, 5217), False, 'import pytest\n'), ((7037, 7080), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""limit"""', '[2, 3, 4]'], {}), "('limit', [2, 3, 4])\n", (7060, 7080), False, 'import pytest\n'), ((420, 468), 'mlflow_extend.testing.utils.assert_not_conflict_with_fluent_apis', 'assert_not_conflict_with_fluent_apis', (['lg.__all__'], {}), '(lg.__all__)\n', (456, 468), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((1047, 1078), 'mlflow.get_run', 'mlflow.get_run', (['run.info.run_id'], {}), '(run.info.run_id)\n', (1061, 1078), False, 'import mlflow\n'), ((1432, 1463), 'mlflow.get_run', 'mlflow.get_run', (['run.info.run_id'], {}), '(run.info.run_id)\n', (1446, 1463), False, 'import mlflow\n'), ((1592, 1606), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1604, 1606), True, 'from matplotlib import 
pyplot as plt\n'), ((2246, 2260), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2258, 2260), True, 'from matplotlib import pyplot as plt\n'), ((4463, 4487), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [0]}"], {}), "({'a': [0]})\n", (4475, 4487), True, 'import pandas as pd\n'), ((4839, 4883), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['loaded_df', 'df'], {}), '(loaded_df, df)\n', (4868, 4883), True, 'import pandas as pd\n'), ((4942, 4966), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [0]}"], {}), "({'a': [0]})\n", (4954, 4966), True, 'import pandas as pd\n'), ((5600, 5613), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (5608, 5613), True, 'import numpy as np\n'), ((5890, 5940), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['loaded_array', 'array'], {}), '(loaded_array, array)\n', (5919, 5940), True, 'import numpy as np\n'), ((609, 627), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (625, 627), False, 'import mlflow\n'), ((737, 779), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'path'], {}), '(run, path)\n', (768, 779), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((830, 848), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (846, 848), False, 'import mlflow\n'), ((898, 927), 'mlflow_extend.logging.log_params_flatten', 'lg.log_params_flatten', (['params'], {}), '(params)\n', (919, 927), True, 'from mlflow_extend import logging as lg\n'), ((936, 981), 'mlflow_extend.logging.log_params_flatten', 'lg.log_params_flatten', (['params'], {'parent_key': '"""d"""'}), "(params, parent_key='d')\n", (957, 981), True, 'from mlflow_extend import logging as lg\n'), ((990, 1028), 'mlflow_extend.logging.log_params_flatten', 'lg.log_params_flatten', (['params'], {'sep': '"""_"""'}), "(params, sep='_')\n", (1011, 
1028), True, 'from mlflow_extend import logging as lg\n'), ((1206, 1224), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (1222, 1224), False, 'import mlflow\n'), ((1277, 1308), 'mlflow_extend.logging.log_metrics_flatten', 'lg.log_metrics_flatten', (['metrics'], {}), '(metrics)\n', (1299, 1308), True, 'from mlflow_extend import logging as lg\n'), ((1317, 1364), 'mlflow_extend.logging.log_metrics_flatten', 'lg.log_metrics_flatten', (['metrics'], {'parent_key': '"""d"""'}), "(metrics, parent_key='d')\n", (1339, 1364), True, 'from mlflow_extend import logging as lg\n'), ((1373, 1413), 'mlflow_extend.logging.log_metrics_flatten', 'lg.log_metrics_flatten', (['metrics'], {'sep': '"""_"""'}), "(metrics, sep='_')\n", (1395, 1413), True, 'from mlflow_extend import logging as lg\n'), ((1644, 1662), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (1660, 1662), False, 'import mlflow\n'), ((1705, 1729), 'mlflow_extend.logging.log_figure', 'lg.log_figure', (['fig', 'path'], {}), '(fig, path)\n', (1718, 1729), True, 'from mlflow_extend import logging as lg\n'), ((1738, 1780), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'path'], {}), '(run, path)\n', (1769, 1780), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((1891, 1909), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (1907, 1909), False, 'import mlflow\n'), ((1953, 1977), 'mlflow_extend.logging.log_figure', 'lg.log_figure', (['fig', 'path'], {}), '(fig, path)\n', (1966, 1977), True, 'from mlflow_extend import logging as lg\n'), ((1986, 2028), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'path'], {}), '(run, path)\n', (2017, 2028), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, 
assert_not_conflict_with_fluent_apis\n'), ((2298, 2316), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (2314, 2316), False, 'import mlflow\n'), ((2359, 2383), 'mlflow_extend.logging.log_figure', 'lg.log_figure', (['fig', 'path'], {}), '(fig, path)\n', (2372, 2383), True, 'from mlflow_extend import logging as lg\n'), ((2392, 2434), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'path'], {}), '(run, path)\n', (2423, 2434), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((2506, 2524), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (2522, 2524), False, 'import mlflow\n'), ((2568, 2592), 'mlflow_extend.logging.log_figure', 'lg.log_figure', (['fig', 'path'], {}), '(fig, path)\n', (2581, 2592), True, 'from mlflow_extend import logging as lg\n'), ((2601, 2643), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'path'], {}), '(run, path)\n', (2632, 2643), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((2873, 2891), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (2889, 2891), False, 'import mlflow\n'), ((3151, 3169), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (3167, 3169), False, 'import mlflow\n'), ((3186, 3209), 'mlflow_extend.logging.log_dict', 'lg.log_dict', (['data', 'path'], {}), '(data, path)\n', (3197, 3209), True, 'from mlflow_extend import logging as lg\n'), ((3218, 3260), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'path'], {}), '(run, path)\n', (3249, 3260), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((3356, 3389), 
'os.path.join', 'os.path.join', (['artifacts_dir', 'path'], {}), '(artifacts_dir, path)\n', (3368, 3389), False, 'import os\n'), ((3464, 3482), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (3480, 3482), False, 'import mlflow\n'), ((3525, 3554), 'mlflow_extend.logging.log_pickle', 'lg.log_pickle', (["{'a': 0}", 'path'], {}), "({'a': 0}, path)\n", (3538, 3554), True, 'from mlflow_extend import logging as lg\n'), ((3563, 3605), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'path'], {}), '(run, path)\n', (3594, 3605), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((3812, 3830), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (3828, 3830), False, 'import mlflow\n'), ((3847, 3875), 'mlflow_extend.logging.log_dict', 'lg.log_dict', (['data', 'path', 'fmt'], {}), '(data, path, fmt)\n', (3858, 3875), True, 'from mlflow_extend import logging as lg\n'), ((3884, 3926), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'path'], {}), '(run, path)\n', (3915, 3926), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((4022, 4055), 'os.path.join', 'os.path.join', (['artifacts_dir', 'path'], {}), '(artifacts_dir, path)\n', (4034, 4055), False, 'import os\n'), ((4218, 4236), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (4234, 4236), False, 'import mlflow\n'), ((4531, 4549), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (4547, 4549), False, 'import mlflow\n'), ((4566, 4590), 'mlflow_extend.logging.log_df', 'lg.log_df', (['df', 'path', 'fmt'], {}), '(df, path, fmt)\n', (4575, 4590), True, 'from mlflow_extend import logging as lg\n'), ((4599, 4641), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 
'assert_file_exists_in_artifacts', (['run', 'path'], {}), '(run, path)\n', (4630, 4641), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((4800, 4833), 'os.path.join', 'os.path.join', (['artifacts_dir', 'path'], {}), '(artifacts_dir, path)\n', (4812, 4833), False, 'import os\n'), ((5026, 5044), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (5042, 5044), False, 'import mlflow\n'), ((5288, 5306), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (5304, 5306), False, 'import mlflow\n'), ((5323, 5346), 'mlflow_extend.logging.log_text', 'lg.log_text', (['text', 'path'], {}), '(text, path)\n', (5334, 5346), True, 'from mlflow_extend import logging as lg\n'), ((5355, 5397), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'path'], {}), '(run, path)\n', (5386, 5397), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((5646, 5664), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (5662, 5664), False, 'import mlflow\n'), ((5681, 5706), 'mlflow_extend.logging.log_numpy', 'lg.log_numpy', (['array', 'path'], {}), '(array, path)\n', (5693, 5706), True, 'from mlflow_extend import logging as lg\n'), ((5715, 5757), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'path'], {}), '(run, path)\n', (5746, 5757), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((5851, 5884), 'os.path.join', 'os.path.join', (['artifacts_dir', 'path'], {}), '(artifacts_dir, path)\n', (5863, 5884), False, 'import os\n'), ((5993, 6011), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (6009, 6011), False, 'import mlflow\n'), ((6102, 6143), 
'mlflow_extend.logging.log_confusion_matrix', 'lg.log_confusion_matrix', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (6125, 6143), True, 'from mlflow_extend import logging as lg\n'), ((6152, 6202), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'default_path'], {}), '(run, default_path)\n', (6183, 6202), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((6213, 6231), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (6229, 6231), False, 'import mlflow\n'), ((6272, 6319), 'mlflow_extend.logging.log_confusion_matrix', 'lg.log_confusion_matrix', (['[[1, 2], [3, 4]]', 'path'], {}), '([[1, 2], [3, 4]], path)\n', (6295, 6319), True, 'from mlflow_extend import logging as lg\n'), ((6328, 6370), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'path'], {}), '(run, path)\n', (6359, 6370), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((6435, 6479), 'mlflow_extend.testing.utils._get_default_args', '_get_default_args', (['lg.log_feature_importance'], {}), '(lg.log_feature_importance)\n', (6452, 6479), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((6497, 6515), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (6513, 6515), False, 'import mlflow\n'), ((6532, 6593), 'mlflow_extend.logging.log_feature_importance', 'lg.log_feature_importance', (["['a', 'b', 'c']", '[1, 2, 3]', '"""gain"""'], {}), "(['a', 'b', 'c'], [1, 2, 3], 'gain')\n", (6557, 6593), True, 'from mlflow_extend import logging as lg\n'), ((6602, 6652), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 
'default_path'], {}), '(run, default_path)\n', (6633, 6652), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((6663, 6681), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (6679, 6681), False, 'import mlflow\n'), ((6698, 6775), 'mlflow_extend.logging.log_feature_importance', 'lg.log_feature_importance', (["['a', 'b', 'c']", '[1, 2, 3]', '"""gain"""'], {'normalize': '(True)'}), "(['a', 'b', 'c'], [1, 2, 3], 'gain', normalize=True)\n", (6723, 6775), True, 'from mlflow_extend import logging as lg\n'), ((6784, 6834), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'default_path'], {}), '(run, default_path)\n', (6815, 6834), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((6845, 6863), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (6861, 6863), False, 'import mlflow\n'), ((6910, 6982), 'mlflow_extend.logging.log_feature_importance', 'lg.log_feature_importance', (["['a', 'b', 'c']", '[1, 2, 3]', '"""gain"""'], {'path': 'path'}), "(['a', 'b', 'c'], [1, 2, 3], 'gain', path=path)\n", (6935, 6982), True, 'from mlflow_extend import logging as lg\n'), ((6991, 7033), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'path'], {}), '(run, path)\n', (7022, 7033), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((7154, 7172), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (7170, 7172), False, 'import mlflow\n'), ((7265, 7339), 'mlflow_extend.logging.log_feature_importance', 'lg.log_feature_importance', (["['a', 'b', 'c']", '[1, 2, 3]', '"""gain"""'], {'limit': 'limit'}), "(['a', 'b', 'c'], [1, 2, 3], 'gain', 
limit=limit)\n", (7290, 7339), True, 'from mlflow_extend import logging as lg\n'), ((7348, 7398), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'default_path'], {}), '(run, default_path)\n', (7379, 7398), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((7454, 7489), 'mlflow_extend.testing.utils._get_default_args', '_get_default_args', (['lg.log_roc_curve'], {}), '(lg.log_roc_curve)\n', (7471, 7489), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((7507, 7525), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (7523, 7525), False, 'import mlflow\n'), ((7542, 7574), 'mlflow_extend.logging.log_roc_curve', 'lg.log_roc_curve', (['[0, 1]', '[0, 1]'], {}), '([0, 1], [0, 1])\n', (7558, 7574), True, 'from mlflow_extend import logging as lg\n'), ((7583, 7633), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'default_path'], {}), '(run, default_path)\n', (7614, 7633), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((7644, 7662), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (7660, 7662), False, 'import mlflow\n'), ((7679, 7716), 'mlflow_extend.logging.log_roc_curve', 'lg.log_roc_curve', (['[0, 1]', '[0, 1]', '(0.5)'], {}), '([0, 1], [0, 1], 0.5)\n', (7695, 7716), True, 'from mlflow_extend import logging as lg\n'), ((7725, 7775), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'default_path'], {}), '(run, default_path)\n', (7756, 7775), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, 
assert_not_conflict_with_fluent_apis\n'), ((7786, 7804), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (7802, 7804), False, 'import mlflow\n'), ((7846, 7889), 'mlflow_extend.logging.log_roc_curve', 'lg.log_roc_curve', (['[0, 1]', '[0, 1]'], {'path': 'path'}), '([0, 1], [0, 1], path=path)\n', (7862, 7889), True, 'from mlflow_extend import logging as lg\n'), ((7898, 7940), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'path'], {}), '(run, path)\n', (7929, 7940), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((7995, 8029), 'mlflow_extend.testing.utils._get_default_args', '_get_default_args', (['lg.log_pr_curve'], {}), '(lg.log_pr_curve)\n', (8012, 8029), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((8047, 8065), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (8063, 8065), False, 'import mlflow\n'), ((8082, 8113), 'mlflow_extend.logging.log_pr_curve', 'lg.log_pr_curve', (['[1, 0]', '[1, 0]'], {}), '([1, 0], [1, 0])\n', (8097, 8113), True, 'from mlflow_extend import logging as lg\n'), ((8122, 8172), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'default_path'], {}), '(run, default_path)\n', (8153, 8172), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((8183, 8201), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (8199, 8201), False, 'import mlflow\n'), ((8218, 8254), 'mlflow_extend.logging.log_pr_curve', 'lg.log_pr_curve', (['[1, 0]', '[1, 0]', '(0.5)'], {}), '([1, 0], [1, 0], 0.5)\n', (8233, 8254), True, 'from mlflow_extend import logging as lg\n'), ((8263, 8313), 
'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'default_path'], {}), '(run, default_path)\n', (8294, 8313), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((8324, 8342), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (8340, 8342), False, 'import mlflow\n'), ((8383, 8425), 'mlflow_extend.logging.log_pr_curve', 'lg.log_pr_curve', (['[1, 0]', '[1, 0]'], {'path': 'path'}), '([1, 0], [1, 0], path=path)\n', (8398, 8425), True, 'from mlflow_extend import logging as lg\n'), ((8434, 8476), 'mlflow_extend.testing.utils.assert_file_exists_in_artifacts', 'assert_file_exists_in_artifacts', (['run', 'path'], {}), '(run, path)\n', (8465, 8476), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((649, 675), 'mlflow_extend.logging._artifact_context', 'lg._artifact_context', (['path'], {}), '(path)\n', (669, 675), True, 'from mlflow_extend import logging as lg\n'), ((2124, 2160), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (2137, 2160), False, 'import pytest\n'), ((2174, 2198), 'mlflow_extend.logging.log_figure', 'lg.log_figure', (['fig', 'path'], {}), '(fig, path)\n', (2187, 2198), True, 'from mlflow_extend import logging as lg\n'), ((2739, 2775), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (2752, 2775), False, 'import pytest\n'), ((2789, 2813), 'mlflow_extend.logging.log_figure', 'lg.log_figure', (['fig', 'path'], {}), '(fig, path)\n', (2802, 2813), True, 'from mlflow_extend import logging as lg\n'), ((2906, 2959), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Invalid figure type"""'}), "(TypeError, match='Invalid figure type')\n", (2919, 2959), False, 'import pytest\n'), 
((2973, 3008), 'mlflow_extend.logging.log_figure', 'lg.log_figure', (['"""figure"""', '"""test.png"""'], {}), "('figure', 'test.png')\n", (2986, 3008), True, 'from mlflow_extend import logging as lg\n'), ((4336, 4364), 'mlflow_extend.logging.log_dict', 'lg.log_dict', (['data', 'path', 'fmt'], {}), '(data, path, fmt)\n', (4347, 4364), True, 'from mlflow_extend import logging as lg\n'), ((5144, 5168), 'mlflow_extend.logging.log_df', 'lg.log_df', (['df', 'path', 'fmt'], {}), '(df, path, fmt)\n', (5153, 5168), True, 'from mlflow_extend import logging as lg\n'), ((5478, 5511), 'os.path.join', 'os.path.join', (['artifacts_dir', 'path'], {}), '(artifacts_dir, path)\n', (5490, 5511), False, 'import os\n'), ((6043, 6085), 'mlflow_extend.testing.utils._get_default_args', '_get_default_args', (['lg.log_confusion_matrix'], {}), '(lg.log_confusion_matrix)\n', (6060, 6085), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((7204, 7248), 'mlflow_extend.testing.utils._get_default_args', '_get_default_args', (['lg.log_feature_importance'], {}), '(lg.log_feature_importance)\n', (7221, 7248), False, 'from mlflow_extend.testing.utils import _get_default_args, _read_data, assert_file_exists_in_artifacts, assert_not_conflict_with_fluent_apis\n'), ((1847, 1879), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': '[1, 2, 3]', 'y': '[1, 3, 2]'}), '(x=[1, 2, 3], y=[1, 3, 2])\n', (1853, 1879), True, 'from plotly import graph_objects as go\n'), ((2462, 2494), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': '[1, 2, 3]', 'y': '[1, 3, 2]'}), '(x=[1, 2, 3], y=[1, 3, 2])\n', (2468, 2494), True, 'from plotly import graph_objects as go\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 13:20:48 2019
@author: asabater
"""
from PIL import Image
import numpy as np
from yolo3.model import preprocess_true_boxes
def rand(a=0, b=1):
    """Draw one uniform random float from the half-open interval [a, b)."""
    sample = np.random.rand()
    return a + (b - a) * sample
import cv2
# Perform data augmentation over an annotation line
# An annotation line can contain more than one frame that will be processed
# with the same augmentation parameters
def get_random_data_cv2(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):
    '''Load the frame(s) of one annotation line and (optionally) augment them.

    An annotation line is ``"path1,path2,... x1,y1,x2,y2,cls ..."``: a
    comma-separated list of frame paths followed by whitespace-separated
    boxes.  All frames of the same line receive identical augmentation
    parameters so a short clip stays consistent.

    :param annotation_line: one line of the annotation file (see above)
    :param input_shape: (h, w) target network input size in pixels
    :param random: if False only letterbox-resize; if True apply random
        scale/aspect jitter, placement, horizontal flip and HSV distortion
    :param max_boxes: maximum number of boxes kept per line
    :param jitter: aspect-ratio jitter range
    :param hue: hue shift range, as a fraction of OpenCV's 0-179 hue scale
    :param sat: saturation scaling range
    :param val: value (brightness) scaling range
    :param proc_img: NOTE(review): if False on the non-random branch,
        ``images_data`` is never assigned and the return below raises
        NameError -- confirm this path is unused by callers
    :return: (images_data, box_data); images_data is one (h, w, 3) RGB
        array in [0, 1] (or an (n, h, w, 3) stack for multi-frame lines),
        box_data is a (max_boxes, 5) array zero-padded after the real boxes
    '''
    line = annotation_line.split()
    # numpy array: BGR, 0-255
    images = [ cv2.imread(i) for i in line[0].split(',') ]
    # height, width, channel
    ih, iw, _ = images[0].shape
    h, w = input_shape
    box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])
    if not random:
        # deterministic letterbox: fit inside (w, h) preserving aspect ratio
        scale = min(w/iw, h/ih)
        nw = int(iw*scale)
        nh = int(ih*scale)
        dx = (w-nw)//2
        dy = (h-nh)//2
        if proc_img:
            # resize
            new_images, images_data = [None]*len(images), [None]*len(images)
            for i in range(len(images)):
                images[i] = cv2.resize(images[i], (nw, nh), interpolation=cv2.INTER_AREA)
                # convert into PIL Image object
                images[i] = Image.fromarray(images[i][:, :, ::-1])
                new_images[i] = Image.new('RGB', (w,h), (128,128,128))
                new_images[i].paste(images[i], (dx, dy))
                # convert into numpy array: RGB, 0-1
                images_data[i] = np.array(new_images[i])/255.
        # correct boxes: same scale + offset as the letterboxed image
        box_data = np.zeros((max_boxes,5))
        if len(box)>0:
            np.random.shuffle(box)
            if len(box)>max_boxes: box = box[:max_boxes]
            box[:, [0,2]] = box[:, [0,2]]*scale + dx
            box[:, [1,3]] = box[:, [1,3]]*scale + dy
            box_data[:len(box)] = box
        images_data = images_data[0] if len(images_data) == 1 else np.stack(images_data)
        return images_data, box_data
    # random scale and aspect-ratio jitter
    new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)
    scale = rand(.25, 2)
    if new_ar < 1:
        nh = int(scale*h)
        nw = int(nh*new_ar)
    else:
        nw = int(scale*w)
        nh = int(nw/new_ar)
    # resize
    images = [ cv2.resize(image, (nw, nh), interpolation=cv2.INTER_AREA) for image in images ]
    images = [ Image.fromarray(image[:, :, ::-1]) for image in images ]
    # place image at a random offset on the grey canvas
    dx = int(rand(0, w-nw))
    dy = int(rand(0, h-nh))
    new_images = [ Image.new('RGB', (w,h), (128,128,128)) for i in range(len(images)) ]
    for i in range(len(images)): new_images[i].paste(images[i], (dx, dy))
    # convert into numpy array: BGR, 0-255
    images = [ np.asarray(new_image)[:, :, ::-1] for new_image in new_images ]
    # horizontal flip (faster than cv2.flip())
    h_flip = rand() < 0.5
    if h_flip:
        images = [ image[:, ::-1] for image in images ]
    # distort image: random hue shift and multiplicative sat/val scaling
    hue = rand(-hue, hue) * 179
    sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)
    val = rand(1, val) if rand()<.5 else 1/rand(1, val)
    images_data = []
    for i in range(len(images)):
        # OpenCV HSV hue lives in [0, 179], hence the 179 clip below
        img_hsv = cv2.cvtColor(images[i], cv2.COLOR_BGR2HSV)
        H = img_hsv[:, :, 0].astype(np.float32)
        S = img_hsv[:, :, 1].astype(np.float32)
        V = img_hsv[:, :, 2].astype(np.float32)
        H += hue
        np.clip(H, a_min=0, a_max=179, out=H)
        S *= sat
        np.clip(S, a_min=0, a_max=255, out=S)
        V *= val
        np.clip(V, a_min=0, a_max=255, out=V)
        img_hsv[:, :, 0] = H.astype(np.uint8)
        img_hsv[:, :, 1] = S.astype(np.uint8)
        img_hsv[:, :, 2] = V.astype(np.uint8)
        # convert into numpy array: RGB, 0-1
        images_data.append(cv2.cvtColor(img_hsv, cv2.COLOR_HSV2RGB) / 255.0)
    # correct boxes: map into the resized/placed (and maybe flipped) frame
    box_data = np.zeros((max_boxes,5))
    if len(box)>0:
        np.random.shuffle(box)
        box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
        box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
        if h_flip:
            box[:, [0,2]] = w - box[:, [2,0]]
        box[:, 0:2][box[:, 0:2]<0] = 0
        box[:, 2][box[:, 2]>w] = w
        box[:, 3][box[:, 3]>h] = h
        box_w = box[:, 2] - box[:, 0]
        box_h = box[:, 3] - box[:, 1]
        box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box
        if len(box)>max_boxes: box = box[:max_boxes]
        box_data[:len(box)] = box
    images_data = images_data[0] if len(images_data) == 1 else np.stack(images_data)
    return images_data, box_data
def data_generator_custom(annotation_lines, batch_size, input_shape, anchors,
                          num_classes, random, multi_scale):
    '''Endless batch generator for Keras ``fit_generator``.

    Yields ``([image_batch, *y_true], dummy_targets)`` forever, reshuffling
    ``annotation_lines`` in place at the start of every pass over the data.
    With ``multi_scale`` a fresh square input size (320..608, step 32) is
    drawn for each batch.
    '''
    n_lines = len(annotation_lines)
    cursor = 0
    valid_img_sizes = np.arange(320, 608 + 1, 32)
    while True:
        if multi_scale:
            # pick this batch's network input resolution
            side = np.random.choice(valid_img_sizes)
            input_shape = [side, side]
        batch_images = []
        batch_boxes = []
        while len(batch_images) < batch_size:
            if cursor == 0:
                # start of a new epoch: reshuffle the sample order
                np.random.shuffle(annotation_lines)
            images, boxes = get_random_data_cv2(
                annotation_lines[cursor], input_shape, random=random)
            batch_images.append(images)
            batch_boxes.append(boxes)
            cursor = (cursor + 1) % n_lines
        image_data = np.array(batch_images)
        box_data = np.array(batch_boxes)
        y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
        yield [image_data, *y_true], np.zeros(batch_size)
def data_generator_wrapper_custom(annotation_lines, batch_size, input_shape,
                                  anchors, num_classes, random, multi_scale=False):
    """Create the training generator, or return None for degenerate arguments."""
    if len(annotation_lines) == 0 or batch_size <= 0:
        return None
    return data_generator_custom(annotation_lines, batch_size, input_shape,
                                 anchors, num_classes, random, multi_scale)
| [
"numpy.clip",
"PIL.Image.fromarray",
"numpy.random.rand",
"numpy.logical_and",
"numpy.random.choice",
"PIL.Image.new",
"yolo3.model.preprocess_true_boxes",
"numpy.asarray",
"numpy.stack",
"numpy.zeros",
"numpy.array",
"cv2.cvtColor",
"cv2.resize",
"cv2.imread",
"numpy.arange",
"numpy.r... | [((3564, 3588), 'numpy.zeros', 'np.zeros', (['(max_boxes, 5)'], {}), '((max_boxes, 5))\n', (3572, 3588), True, 'import numpy as np\n'), ((4394, 4421), 'numpy.arange', 'np.arange', (['(320)', '(608 + 1)', '(32)'], {}), '(320, 608 + 1, 32)\n', (4403, 4421), True, 'import numpy as np\n'), ((705, 718), 'cv2.imread', 'cv2.imread', (['i'], {}), '(i)\n', (715, 718), False, 'import cv2\n'), ((1562, 1586), 'numpy.zeros', 'np.zeros', (['(max_boxes, 5)'], {}), '((max_boxes, 5))\n', (1570, 1586), True, 'import numpy as np\n'), ((2142, 2199), 'cv2.resize', 'cv2.resize', (['image', '(nw, nh)'], {'interpolation': 'cv2.INTER_AREA'}), '(image, (nw, nh), interpolation=cv2.INTER_AREA)\n', (2152, 2199), False, 'import cv2\n'), ((2234, 2268), 'PIL.Image.fromarray', 'Image.fromarray', (['image[:, :, ::-1]'], {}), '(image[:, :, ::-1])\n', (2249, 2268), False, 'from PIL import Image\n'), ((2373, 2414), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(w, h)', '(128, 128, 128)'], {}), "('RGB', (w, h), (128, 128, 128))\n", (2382, 2414), False, 'from PIL import Image\n'), ((2973, 3015), 'cv2.cvtColor', 'cv2.cvtColor', (['images[i]', 'cv2.COLOR_BGR2HSV'], {}), '(images[i], cv2.COLOR_BGR2HSV)\n', (2985, 3015), False, 'import cv2\n'), ((3157, 3194), 'numpy.clip', 'np.clip', (['H'], {'a_min': '(0)', 'a_max': '(179)', 'out': 'H'}), '(H, a_min=0, a_max=179, out=H)\n', (3164, 3194), True, 'import numpy as np\n'), ((3210, 3247), 'numpy.clip', 'np.clip', (['S'], {'a_min': '(0)', 'a_max': '(255)', 'out': 'S'}), '(S, a_min=0, a_max=255, out=S)\n', (3217, 3247), True, 'import numpy as np\n'), ((3263, 3300), 'numpy.clip', 'np.clip', (['V'], {'a_min': '(0)', 'a_max': '(255)', 'out': 'V'}), '(V, a_min=0, a_max=255, out=V)\n', (3270, 3300), True, 'import numpy as np\n'), ((3606, 3628), 'numpy.random.shuffle', 'np.random.shuffle', (['box'], {}), '(box)\n', (3623, 3628), True, 'import numpy as np\n'), ((4125, 4146), 'numpy.stack', 'np.stack', (['images_data'], {}), '(images_data)\n', (4133, 4146), 
True, 'import numpy as np\n'), ((4816, 4836), 'numpy.array', 'np.array', (['image_data'], {}), '(image_data)\n', (4824, 4836), True, 'import numpy as np\n'), ((4850, 4868), 'numpy.array', 'np.array', (['box_data'], {}), '(box_data)\n', (4858, 4868), True, 'import numpy as np\n'), ((4880, 4946), 'yolo3.model.preprocess_true_boxes', 'preprocess_true_boxes', (['box_data', 'input_shape', 'anchors', 'num_classes'], {}), '(box_data, input_shape, anchors, num_classes)\n', (4901, 4946), False, 'from yolo3.model import preprocess_true_boxes\n'), ((229, 245), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (243, 245), True, 'import numpy as np\n'), ((1606, 1628), 'numpy.random.shuffle', 'np.random.shuffle', (['box'], {}), '(box)\n', (1623, 1628), True, 'import numpy as np\n'), ((1856, 1877), 'numpy.stack', 'np.stack', (['images_data'], {}), '(images_data)\n', (1864, 1877), True, 'import numpy as np\n'), ((2565, 2586), 'numpy.asarray', 'np.asarray', (['new_image'], {}), '(new_image)\n', (2575, 2586), True, 'import numpy as np\n'), ((3933, 3969), 'numpy.logical_and', 'np.logical_and', (['(box_w > 1)', '(box_h > 1)'], {}), '(box_w > 1, box_h > 1)\n', (3947, 3969), True, 'import numpy as np\n'), ((4495, 4528), 'numpy.random.choice', 'np.random.choice', (['valid_img_sizes'], {}), '(valid_img_sizes)\n', (4511, 4528), True, 'import numpy as np\n'), ((1182, 1243), 'cv2.resize', 'cv2.resize', (['images[i]', '(nw, nh)'], {'interpolation': 'cv2.INTER_AREA'}), '(images[i], (nw, nh), interpolation=cv2.INTER_AREA)\n', (1192, 1243), False, 'import cv2\n'), ((1296, 1334), 'PIL.Image.fromarray', 'Image.fromarray', (['images[i][:, :, ::-1]'], {}), '(images[i][:, :, ::-1])\n', (1311, 1334), False, 'from PIL import Image\n'), ((1355, 1396), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(w, h)', '(128, 128, 128)'], {}), "('RGB', (w, h), (128, 128, 128))\n", (1364, 1396), False, 'from PIL import Image\n'), ((3484, 3524), 'cv2.cvtColor', 'cv2.cvtColor', (['img_hsv', 'cv2.COLOR_HSV2RGB'], 
{}), '(img_hsv, cv2.COLOR_HSV2RGB)\n', (3496, 3524), False, 'import cv2\n'), ((4609, 4644), 'numpy.random.shuffle', 'np.random.shuffle', (['annotation_lines'], {}), '(annotation_lines)\n', (4626, 4644), True, 'import numpy as np\n'), ((4978, 4998), 'numpy.zeros', 'np.zeros', (['batch_size'], {}), '(batch_size)\n', (4986, 4998), True, 'import numpy as np\n'), ((1501, 1524), 'numpy.array', 'np.array', (['new_images[i]'], {}), '(new_images[i])\n', (1509, 1524), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.calibration import calibration_curve
import pickle
def reliability(y, y_prob):
    """Plot reliability diagrams (calibration curves) for the four classes.

    :param y: 1-D array of integer class labels in {0, 1, 2, 3}
    :param y_prob: (n_samples, 4) array of per-class predicted probabilities
    """
    nbin = 10
    labels = ['Good', 'EM1_Contact', 'EM3_Radius', 'EM4_Hole']
    x = np.linspace(0, 1, 101)
    fig = plt.figure()
    plt.plot(x, x, label='Identity', color='black')
    for cls, label in enumerate(labels):
        # Binary indicator for this class. The previous per-class copies left
        # the positive label at its raw value (e.g. {0, 2} for class 2),
        # which is not a valid binary target for calibration_curve;
        # (y == cls) produces a proper {0, 1} indicator for every class.
        y_true = (np.asarray(y) == cls).astype(int)
        select = y_prob[:, cls]
        if cls == 0:
            # keep the original debug print for the first class
            print(select)
        # calibration_curve returns (prob_true, prob_pred); plot predicted
        # probability on x and empirical frequency on y, as before.
        prob_true, prob_pred = calibration_curve(y_true, select, n_bins=nbin)
        plt.plot(prob_pred, prob_true, label=label)
    plt.legend()
    plt.show()
def main():
    """Draw reliability diagrams for raw and calibrated classifier outputs."""
    # Raw (uncalibrated) network logits: columns 3+ are the 4 class logits,
    # column 2 holds the true label.
    file = 'Input_logit/exp143_test_logit.txt'
    logit_table = pd.read_csv(file, sep='\t', header=0, index_col=False)
    logits = np.array(logit_table.iloc[:, 3:])
    # Softmax over the logit columns; 149 is the number of test samples.
    exp_logits = np.exp(logits)
    probs = exp_logits / np.sum(exp_logits, axis=1).reshape(149, 1)
    labels = np.array(logit_table.iloc[:, 2])
    reliability(labels, probs)
    # Calibrated result: the probability rows start at index 149.
    file = 'result/exp143_DIR-ODIR_test_0.0025_0.01.txt'
    calib_table = pd.read_csv(file, header=None, sep=' ')
    probs = np.array(calib_table.iloc[149:])
    print(probs)
    reliability(labels, probs)
if __name__ == '__main__':
main()
| [
"pandas.read_csv",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.figure",
"sklearn.calibration.calibration_curve",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((355, 401), 'sklearn.calibration.calibration_curve', 'calibration_curve', (['y_true', 'select'], {'n_bins': 'nbin'}), '(y_true, select, n_bins=nbin)\n', (372, 401), False, 'from sklearn.calibration import calibration_curve\n'), ((505, 551), 'sklearn.calibration.calibration_curve', 'calibration_curve', (['y_true', 'select'], {'n_bins': 'nbin'}), '(y_true, select, n_bins=nbin)\n', (522, 551), False, 'from sklearn.calibration import calibration_curve\n'), ((655, 701), 'sklearn.calibration.calibration_curve', 'calibration_curve', (['y_true', 'select'], {'n_bins': 'nbin'}), '(y_true, select, n_bins=nbin)\n', (672, 701), False, 'from sklearn.calibration import calibration_curve\n'), ((805, 851), 'sklearn.calibration.calibration_curve', 'calibration_curve', (['y_true', 'select'], {'n_bins': 'nbin'}), '(y_true, select, n_bins=nbin)\n', (822, 851), False, 'from sklearn.calibration import calibration_curve\n'), ((860, 882), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(101)'], {}), '(0, 1, 101)\n', (871, 882), True, 'import numpy as np\n'), ((894, 906), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (904, 906), True, 'import matplotlib.pyplot as plt\n'), ((911, 958), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'x'], {'label': '"""Identity"""', 'color': '"""black"""'}), "(x, x, label='Identity', color='black')\n", (919, 958), True, 'import matplotlib.pyplot as plt\n'), ((963, 993), 'matplotlib.pyplot.plot', 'plt.plot', (['y0', 'x0'], {'label': '"""Good"""'}), "(y0, x0, label='Good')\n", (971, 993), True, 'import matplotlib.pyplot as plt\n'), ((998, 1035), 'matplotlib.pyplot.plot', 'plt.plot', (['y1', 'x1'], {'label': '"""EM1_Contact"""'}), "(y1, x1, label='EM1_Contact')\n", (1006, 1035), True, 'import matplotlib.pyplot as plt\n'), ((1040, 1076), 'matplotlib.pyplot.plot', 'plt.plot', (['y2', 'x2'], {'label': '"""EM3_Radius"""'}), "(y2, x2, label='EM3_Radius')\n", (1048, 1076), True, 'import matplotlib.pyplot as plt\n'), ((1081, 1115), 
'matplotlib.pyplot.plot', 'plt.plot', (['y3', 'x3'], {'label': '"""EM4_Hole"""'}), "(y3, x3, label='EM4_Hole')\n", (1089, 1115), True, 'import matplotlib.pyplot as plt\n'), ((1120, 1132), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1130, 1132), True, 'import matplotlib.pyplot as plt\n'), ((1137, 1147), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1145, 1147), True, 'import matplotlib.pyplot as plt\n'), ((1221, 1275), 'pandas.read_csv', 'pd.read_csv', (['file'], {'sep': '"""\t"""', 'header': '(0)', 'index_col': '(False)'}), "(file, sep='\\t', header=0, index_col=False)\n", (1232, 1275), True, 'import pandas as pd\n'), ((1289, 1315), 'numpy.array', 'np.array', (['data.iloc[:, 3:]'], {}), '(data.iloc[:, 3:])\n', (1297, 1315), True, 'import numpy as np\n'), ((1406, 1431), 'numpy.array', 'np.array', (['data.iloc[:, 2]'], {}), '(data.iloc[:, 2])\n', (1414, 1431), True, 'import numpy as np\n'), ((1564, 1603), 'pandas.read_csv', 'pd.read_csv', (['file'], {'header': 'None', 'sep': '""" """'}), "(file, header=None, sep=' ')\n", (1575, 1603), True, 'import pandas as pd\n'), ((1617, 1642), 'numpy.array', 'np.array', (['data.iloc[149:]'], {}), '(data.iloc[149:])\n', (1625, 1642), True, 'import numpy as np\n'), ((1329, 1343), 'numpy.exp', 'np.exp', (['y_pred'], {}), '(y_pred)\n', (1335, 1343), True, 'import numpy as np\n'), ((1353, 1367), 'numpy.exp', 'np.exp', (['y_pred'], {}), '(y_pred)\n', (1359, 1367), True, 'import numpy as np\n')] |
import numpy as np
import ac_core
import matplotlib.pyplot as plt
def primaris_smite(bonus=0, wc=5):
    """Roll one primaris psyker smite: 2d6 + bonus against warp charge ``wc``.

    Returns d6 mortal wounds on a super-smite (test > 10), d3 on a normal
    success (test > wc), otherwise 0.
    """
    test = ac_core.d6() + ac_core.d6() + bonus
    if test > 10:
        return ac_core.d6()
    if test > wc:
        return ac_core.d3()
    return 0
def wyrdvane_smite(bonus=0, wc=5, wyrd_bonus=0):
    """Roll one wyrdvane smite: 1d6 + bonus + wyrd_bonus against ``wc``.

    Returns d6 mortal wounds on a super-smite (test > 10), d3 on a normal
    success (test > wc), otherwise 0.
    """
    test = ac_core.d6() + bonus + wyrd_bonus
    if test > 10:
        return ac_core.d6()
    if test > wc:
        return ac_core.d3()
    return 0
if __name__ == '__main__' :
    # Monte-Carlo comparison of smite casting orders.
    # Key encoding: e.g. "P_3W" = Primaris casts first, then the Wyrdvanes
    # (wyrd_bonus=1 for a squad of 3); "_str" variants add +2 to the
    # psychic test. The first caster tests against warp charge 5, the
    # follow-up against 6.
    N = 100000
    # scenario -> (primaris args, wyrdvane args)
    scenarios = {
        "P_3W": ((0, 5), (0, 6, 1)),
        "3W_P": ((0, 6), (0, 5, 1)),
        "P_3W_str": ((2, 5), (2, 6, 1)),
        "3W_P_str": ((2, 6), (2, 5, 1)),
        "P_2W": ((0, 5), (0, 6)),
        "2W_P": ((0, 6), (0, 5)),
        "P_2W_str": ((2, 5), (2, 6)),
        "2W_P_str": ((2, 6), (2, 5)),
    }
    all_exps = {
        name: [primaris_smite(*p_args) + wyrdvane_smite(*w_args)
               for _ in range(N)]
        for name, (p_args, w_args) in scenarios.items()
    }
    # order the box plots by mean inflicted damage
    all_exps = {k: v for k, v in sorted(all_exps.items(), key=lambda item: np.mean(item[1]))}
    fig1, ax1 = plt.subplots()
    ac_core.boxplot(list(all_exps.values()), ax1, list(all_exps.keys()))
    plt.grid()
    plt.title("Smites in Imperial Guard")
    # fixed typo in the axis label ("Infilicted" -> "Inflicted")
    plt.ylabel("Inflicted mortals")
    plt.xlabel("Sequence")
    fig1.autofmt_xdate()
    plt.show()
| [
"ac_core.d6",
"numpy.mean",
"matplotlib.pyplot.grid",
"ac_core.d3",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((1621, 1635), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1633, 1635), True, 'import matplotlib.pyplot as plt\n'), ((1721, 1731), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1729, 1731), True, 'import matplotlib.pyplot as plt\n'), ((1736, 1773), 'matplotlib.pyplot.title', 'plt.title', (['"""Smites in Imperial Guard"""'], {}), "('Smites in Imperial Guard')\n", (1745, 1773), True, 'import matplotlib.pyplot as plt\n'), ((1778, 1810), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Infilicted mortals"""'], {}), "('Infilicted mortals')\n", (1788, 1810), True, 'import matplotlib.pyplot as plt\n'), ((1815, 1837), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sequence"""'], {}), "('Sequence')\n", (1825, 1837), True, 'import matplotlib.pyplot as plt\n'), ((1867, 1877), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1875, 1877), True, 'import matplotlib.pyplot as plt\n'), ((203, 215), 'ac_core.d6', 'ac_core.d6', ([], {}), '()\n', (213, 215), False, 'import ac_core\n'), ((443, 455), 'ac_core.d6', 'ac_core.d6', ([], {}), '()\n', (453, 455), False, 'import ac_core\n'), ((132, 144), 'ac_core.d6', 'ac_core.d6', ([], {}), '()\n', (142, 144), False, 'import ac_core\n'), ((147, 159), 'ac_core.d6', 'ac_core.d6', ([], {}), '()\n', (157, 159), False, 'import ac_core\n'), ((253, 265), 'ac_core.d3', 'ac_core.d3', ([], {}), '()\n', (263, 265), False, 'import ac_core\n'), ((374, 386), 'ac_core.d6', 'ac_core.d6', ([], {}), '()\n', (384, 386), False, 'import ac_core\n'), ((493, 505), 'ac_core.d3', 'ac_core.d3', ([], {}), '()\n', (503, 505), False, 'import ac_core\n'), ((1577, 1593), 'numpy.mean', 'np.mean', (['item[1]'], {}), '(item[1])\n', (1584, 1593), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Creating ``tf.data.Dataset`` instance for image window sampler.
"""
from __future__ import absolute_import, division, print_function
import inspect
import numpy as np
import tensorflow as tf
# pylint: disable=no-name-in-module
from tensorflow.python.data.util import nest
from tensorflow.python.keras.utils import GeneratorEnqueuer
from niftynet.engine.image_window import ImageWindow, N_SPATIAL, \
LOCATION_FORMAT, BUFFER_POSITION_NP_TYPE
from niftynet.io.misc_io import squeeze_spatial_temporal_dim
from niftynet.layer.base_layer import Layer
from niftynet.utilities.util_common import look_up_operations
# when total number of window samples are not divisible by batch_size
# the class supports different modes for the final batch
# 'drop': drop the remainder batch
# 'pad': padding the final smaller batch with -1s
# 'dynamic': output the remainder directly (in this case the batch_size
# is undetermined at 'compile time')
SMALLER_FINAL_BATCH_MODE = {'drop', 'pad', 'dynamic'}
# pylint: disable=too-many-instance-attributes
class ImageWindowDataset(Layer):
    """
    This class creates a ``tf.data.Dataset`` instance from
    a sampler's layer_op function or generator.

    If ``from_generator``, ``Dataset.from_generator`` interface will be used,
    ``Dataset.map`` interface will be used otherwise::

        if the windows are from a image reader,
            the total number of windows produced
            will be: `epoch x n_subjects x windows_per_image`
        if the windows are from a generator,
            the total number of windows produced
            will be: "iterations from the generator" x num_threads
    """

    # pylint: disable=too-many-arguments
    def __init__(self,
                 reader=None,
                 window_sizes=None,
                 batch_size=1,
                 windows_per_image=1,
                 queue_length=10,
                 shuffle=True,
                 epoch=-1,
                 smaller_final_batch_mode='pad',
                 seed=None,
                 name='image_dataset'):
        Layer.__init__(self, name=name)
        self._num_threads = 1
        self._enqueuer = None
        self._seed = seed

        self.dataset = None
        self.iterator = None
        self.reader = reader

        self.batch_size = batch_size
        # enforce a minimum shuffle/prefetch buffer of five batches
        self.queue_length = int(max(queue_length, round(batch_size * 5.0)))
        if self.queue_length > queue_length:
            tf.logging.warning(
                'sampler queue_length should be larger than batch_size, '
                'defaulting to batch_size * 5.0 (%s).', self.queue_length)

        # a generator-style layer_op has an unknown total length,
        # so the dataset is restricted to a single epoch
        self.from_generator = inspect.isgeneratorfunction(self.layer_op)
        self.shuffle = shuffle
        self.epoch = 1 if self.from_generator else epoch

        # one of 'drop', 'pad', 'dynamic' (see SMALLER_FINAL_BATCH_MODE)
        self.smaller_final_batch_mode = look_up_operations(
            smaller_final_batch_mode.lower(), SMALLER_FINAL_BATCH_MODE)

        self.n_subjects = 1
        self.window = None
        if reader is not None:
            self.window = ImageWindow.from_data_reader_properties(
                reader.input_sources,
                reader.shapes,
                reader.tf_dtypes,
                window_sizes or (-1, -1, -1))
            self.n_subjects = reader.num_subjects
            self.window.n_samples = windows_per_image

    @property
    def shapes(self):
        """
        the sampler output (value of ``layer_op``) is::

            [windows_per_image, x, y, z, 1, channels]

        returns a dictionary of sampler output shapes
        """
        assert self.window, 'Unknown output shapes: self.window not initialised'
        return self.window.shapes

    @property
    def tf_shapes(self):
        """
        returns a dictionary of sampler output tensor shapes
        """
        assert self.window, 'Unknown output shapes: self.window not initialised'
        return self.window.tf_shapes

    @property
    def tf_dtypes(self):
        """
        returns a dictionary of sampler output tensorflow dtypes
        """
        assert self.window, 'Unknown output dtypes: self.window not initialised'
        return self.window.tf_dtypes

    def set_num_threads(self, num_threads):
        """
        Set number windows to generate in parallel.
        """
        self._num_threads = int(num_threads)

    def layer_op(self, idx=None):
        """
        Generating each image as a window.
        Overriding this function to create new image sampling strategies.

        This function should either yield or return a dictionary
        (of multiple windows per image)::

            return a dictionary:
            {
             'image_name': a numpy array [n_samples, h, w, d, chn],
             'image_name_location': [n_samples, 7]
            }

        where the 7-element location vector encode the image_id,
        starting and ending coordinates of the image window.

        Following the same notation, the dictionary can be extended
        to multiple modalities; the keys will be::

            {'image_name_1', 'image_name_1_location',
             'image_name_2', 'image_name_2_location', ...}

        :param idx: image_id used to load the image at the i-th row of
            the input
        :return: a image data dictionary
        """
        image_id, image_data, _ = self.reader(idx=idx)
        for mod in list(image_data):
            spatial_shape = image_data[mod].shape[:N_SPATIAL]
            # the whole image is one window: location covers 0..spatial_shape
            coords = self.dummy_coordinates(image_id, spatial_shape, 1)
            image_data[LOCATION_FORMAT.format(mod)] = coords
            # add the leading n_samples axis expected downstream
            image_data[mod] = image_data[mod][np.newaxis, ...]
        return image_data
        # # The following is a demo of generator as the layer_op
        # # Often we don't know the total number of elements that
        # # will be generated, epoch is always 1.
        # for idx in range(100):
        #     image_id, image_data, _ = self.reader()
        #     for mod in list(image_data):
        #         spatial_shape = image_data[mod].shape[:N_SPATIAL]
        #         coords = self.dummy_coordinates(image_id, spatial_shape, 1)
        #         image_data[LOCATION_FORMAT.format(mod)] = coords
        #         image_data[mod] = image_data[mod][np.newaxis, ...]
        #     yield image_data

    def pop_batch_op(self):
        """
        This function is used when connecting a sampler output
        to a network. e.g.::

            data_dict = self.get_sampler()[0].pop_batch_op(device_id)
            net_output = net_model(data_dict['image'], is_training)

        .. caution::

            Note it squeezes the output tensor of 6 dims
            ``[batch, x, y, z, time, modality]``
            by removing all dims along which length is one.

        :return: a dictionary of image window tensors.
        """
        if self.dataset is None or self.iterator is None:
            # in case `run_threads` is not called,
            # here we initialise the dataset and iterator
            self.init_dataset()
            self.iterator = self.dataset.make_one_shot_iterator()
            # self.iterator = tf.data.Iterator.from_structure(
            #     self.dataset.output_types, self.dataset.output_shapes)

        window_output = self.iterator.get_next()
        for name in window_output:
            window_output[name] = squeeze_spatial_temporal_dim(
                window_output[name])
        return window_output

    def init_dataset(self):
        """
        Make a window samples dataset from the reader and layer_op.
        This function sets ``self.dataset``.

        :return:
        """
        if not self.from_generator:
            dataset = self._dataset_from_range()
        else:
            dataset = self._dataset_from_generator()
        self.dataset = self.dataset_preprocessing(dataset)

    def dataset_preprocessing(self, dataset):
        """
        dataset: batch and shuffle

        :param dataset: a `tf.data.Dataset` instance
        :return: a `tf.data.Dataset` instance
        """
        dataset = dataset.repeat(self.epoch)
        dataset = dataset.prefetch(buffer_size=self.queue_length)
        if self.shuffle:
            # locally shuffle the buffer of image windows
            dataset = dataset.shuffle(
                buffer_size=self.queue_length, seed=self._seed)

        if self.smaller_final_batch_mode == 'drop':
            # drop the remainder if there's not enough windows to
            # form a batch, so that we have a fixed batch size.
            # dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(
            #     batch_size=self.batch_size))

            # new API since TF 1.10
            dataset = dataset.batch(batch_size=self.batch_size,
                                    drop_remainder=True)
            return dataset

        dataset = dataset.batch(batch_size=self.batch_size)

        if self.smaller_final_batch_mode == 'dynamic' and self.batch_size > 1:
            return dataset

        # self.smaller_final_batch_mode is 'pad'
        # if self.batch_size == 1 no actual padding
        # but this function will set the output shapes properly.
        def _pad_batch(batch_size):
            def _pad_batch_func(input_tensor_dict):
                """
                function to pad the batch dim to `batch_size`.
                (assuming the input dataset is a dictionary-based one)
                """
                out_dict = {}
                for in_name in list(input_tensor_dict):
                    in_var = input_tensor_dict[in_name]
                    var_shape = in_var.shape.as_list()
                    if batch_size > 1:
                        # pad only the batch dim (paddings[0]) with -1s
                        paddings = [[0, 0] for _ in in_var.shape]
                        paddings[0][1] = batch_size - tf.shape(in_var)[0]
                        in_var = tf.pad(
                            in_var, paddings, "CONSTANT", constant_values=-1)
                    var_shape[0] = batch_size
                    in_var.set_shape(var_shape)
                    out_dict[in_name] = in_var
                return out_dict

            return _pad_batch_func

        dataset = dataset.map(_pad_batch(self.batch_size))
        return dataset

    # pylint: disable=redefined-variable-type
    def _dataset_from_range(self):
        """
        This function maps a dataset of integers to a dataset of images.

        :return: a `tf.data.Dataset`
        """
        # dataset: a list of integers
        tf.logging.info(
            'Initialising Dataset from %s subjects...', self.n_subjects)
        dataset = tf.data.Dataset.range(self.n_subjects)
        if self.shuffle:
            # global shuffle of the entire set of subjects
            dataset = dataset.shuffle(
                buffer_size=self.n_subjects, seed=self._seed)

        # dataset: map each integer i to n windows sampled from subject i
        def _tf_wrapper(idx):
            flattened_types = nest.flatten(self.tf_dtypes)
            flattened_shapes = nest.flatten(self.tf_shapes)
            # self(subject_id) invokes layer_op via Layer.__call__
            flat_values = tf.py_func(
                func=lambda subject_id: nest.flatten(self(subject_id)),
                inp=[idx],
                Tout=flattened_types)
            for ret_t, shape in zip(flat_values, flattened_shapes):
                # the actual returned numpy array shapes are not checked
                ret_t.set_shape(shape)
            return nest.pack_sequence_as(self.tf_dtypes, flat_values)

        dataset = dataset.map(_tf_wrapper, num_parallel_calls=self._num_threads)

        # dataset: slice the n-element window into n single windows
        dataset = dataset.flat_map(map_func=tf.data.Dataset.from_tensor_slices)
        return dataset

    def _dataset_from_generator(self):
        """
        Create a `tf.data.Dataset` from a layer_op (as a generator).

        :return: a `tf.data.Dataset`
        """
        tf.logging.info('Initialising dataset from generator...')

        if self._num_threads < 2 or not self.shuffle:
            window_generator = self
        else:
            # multi-threaded generation: windows are produced by worker
            # processes and consumed through the enqueuer's queue
            self._enqueuer = GeneratorEnqueuer(
                self(),
                use_multiprocessing=True,
                wait_time=0.01,
                seed=self._seed)
            self._enqueuer.start(
                workers=self._num_threads, max_queue_size=self.queue_length)
            window_generator = self._enqueuer.get

        # dataset from generator
        dataset = tf.data.Dataset.from_generator(
            generator=window_generator,
            output_types=self.tf_dtypes,
            output_shapes=self.tf_shapes)

        # dataset: slice the n-element window into n single windows
        dataset = dataset.flat_map(map_func=tf.data.Dataset.from_tensor_slices)
        return dataset

    def run_threads(self, *_args, **_kwargs):
        """
        This function is created for compatibility purposes

        (Deprecating)

        :param _args:
        :param _kwargs:
        :return:
        """
        pass
        # if self.dataset is None or self.iterator is None:
        #     self.init_dataset()
        #     self.iterator = self.dataset.make_one_shot_iterator()
        # self.iterator = tf.data.Iterator.from_structure(
        #     self.dataset.output_types, self.dataset.output_shapes)
        # sess = session or tf.get_default_session()
        # if sess is not None:
        #     sess.run(self.iterator.make_initializer(self.dataset))

    def close_all(self):
        """
        For compatibility with the queue-based sampler.
        """
        if self._enqueuer is not None:
            self._enqueuer.stop()

    @classmethod
    def dummy_coordinates(cls, image_id, image_sizes, n_samples):
        """
        This function returns a set of image window coordinates
        which are just spatially from 0 to `image_sizes`.

        :return: a numpy array of `n_samples` spatial coordinates
        """
        starting_coordinates = [0, 0, 0]
        image_spatial_shape = list(image_sizes[:N_SPATIAL])
        # [image_id, x0, y0, z0, x1, y1, z1], replicated n_samples times
        coords = [image_id] + starting_coordinates + image_spatial_shape
        coords = np.tile(np.asarray(coords), [n_samples, 1])
        return coords.astype(BUFFER_POSITION_NP_TYPE)
| [
"tensorflow.shape",
"tensorflow.pad",
"tensorflow.data.Dataset.range",
"tensorflow.logging.warning",
"tensorflow.logging.info",
"tensorflow.python.data.util.nest.flatten",
"numpy.asarray",
"tensorflow.data.Dataset.from_generator",
"niftynet.io.misc_io.squeeze_spatial_temporal_dim",
"niftynet.engin... | [((2100, 2131), 'niftynet.layer.base_layer.Layer.__init__', 'Layer.__init__', (['self'], {'name': 'name'}), '(self, name=name)\n', (2114, 2131), False, 'from niftynet.layer.base_layer import Layer\n'), ((2677, 2719), 'inspect.isgeneratorfunction', 'inspect.isgeneratorfunction', (['self.layer_op'], {}), '(self.layer_op)\n', (2704, 2719), False, 'import inspect\n'), ((10492, 10568), 'tensorflow.logging.info', 'tf.logging.info', (['"""Initialising Dataset from %s subjects..."""', 'self.n_subjects'], {}), "('Initialising Dataset from %s subjects...', self.n_subjects)\n", (10507, 10568), True, 'import tensorflow as tf\n'), ((10600, 10638), 'tensorflow.data.Dataset.range', 'tf.data.Dataset.range', (['self.n_subjects'], {}), '(self.n_subjects)\n', (10621, 10638), True, 'import tensorflow as tf\n'), ((11906, 11963), 'tensorflow.logging.info', 'tf.logging.info', (['"""Initialising dataset from generator..."""'], {}), "('Initialising dataset from generator...')\n", (11921, 11963), True, 'import tensorflow as tf\n'), ((12461, 12583), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', ([], {'generator': 'window_generator', 'output_types': 'self.tf_dtypes', 'output_shapes': 'self.tf_shapes'}), '(generator=window_generator, output_types=\n self.tf_dtypes, output_shapes=self.tf_shapes)\n', (12491, 12583), True, 'import tensorflow as tf\n'), ((2477, 2619), 'tensorflow.logging.warning', 'tf.logging.warning', (['"""sampler queue_length should be larger than batch_size, defaulting to batch_size * 5.0 (%s)."""', 'self.queue_length'], {}), "(\n 'sampler queue_length should be larger than batch_size, defaulting to batch_size * 5.0 (%s).'\n , self.queue_length)\n", (2495, 2619), True, 'import tensorflow as tf\n'), ((3053, 3181), 'niftynet.engine.image_window.ImageWindow.from_data_reader_properties', 'ImageWindow.from_data_reader_properties', (['reader.input_sources', 'reader.shapes', 'reader.tf_dtypes', '(window_sizes or (-1, -1, -1))'], {}), 
'(reader.input_sources, reader.shapes,\n reader.tf_dtypes, window_sizes or (-1, -1, -1))\n', (3092, 3181), False, 'from niftynet.engine.image_window import ImageWindow, N_SPATIAL, LOCATION_FORMAT, BUFFER_POSITION_NP_TYPE\n'), ((7357, 7406), 'niftynet.io.misc_io.squeeze_spatial_temporal_dim', 'squeeze_spatial_temporal_dim', (['window_output[name]'], {}), '(window_output[name])\n', (7385, 7406), False, 'from niftynet.io.misc_io import squeeze_spatial_temporal_dim\n'), ((10959, 10987), 'tensorflow.python.data.util.nest.flatten', 'nest.flatten', (['self.tf_dtypes'], {}), '(self.tf_dtypes)\n', (10971, 10987), False, 'from tensorflow.python.data.util import nest\n'), ((11019, 11047), 'tensorflow.python.data.util.nest.flatten', 'nest.flatten', (['self.tf_shapes'], {}), '(self.tf_shapes)\n', (11031, 11047), False, 'from tensorflow.python.data.util import nest\n'), ((11422, 11472), 'tensorflow.python.data.util.nest.pack_sequence_as', 'nest.pack_sequence_as', (['self.tf_dtypes', 'flat_values'], {}), '(self.tf_dtypes, flat_values)\n', (11443, 11472), False, 'from tensorflow.python.data.util import nest\n'), ((14147, 14165), 'numpy.asarray', 'np.asarray', (['coords'], {}), '(coords)\n', (14157, 14165), True, 'import numpy as np\n'), ((5555, 5582), 'niftynet.engine.image_window.LOCATION_FORMAT.format', 'LOCATION_FORMAT.format', (['mod'], {}), '(mod)\n', (5577, 5582), False, 'from niftynet.engine.image_window import ImageWindow, N_SPATIAL, LOCATION_FORMAT, BUFFER_POSITION_NP_TYPE\n'), ((9851, 9907), 'tensorflow.pad', 'tf.pad', (['in_var', 'paddings', '"""CONSTANT"""'], {'constant_values': '(-1)'}), "(in_var, paddings, 'CONSTANT', constant_values=-1)\n", (9857, 9907), True, 'import tensorflow as tf\n'), ((9798, 9814), 'tensorflow.shape', 'tf.shape', (['in_var'], {}), '(in_var)\n', (9806, 9814), True, 'import tensorflow as tf\n')] |
import numpy as np
# Module-level shortcuts for the numpy FFT routines used by ft2/ift2 below.
fft2 = np.fft.fft2
ifft2 = np.fft.ifft2
fftshift = np.fft.fftshift
ifftshift = np.fft.ifftshift
def cart2pol(x, y):
r = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return (r, phi)
def pol2cart(r, phi):
x = r * np.cos(phi)
y = r * np.sin(phi)
return (x, y)
def ft2(g, dx):
    """Discrete approximation of the continuous 2-D Fourier transform.

    Implements the Schmidt-style DFT2: the input is fftshift-ed so the
    array-centered origin moves to index ``[0, 0]``, transformed with
    ``fft2``, and shifted again so the returned spectrum is centered.
    The result is scaled by ``dx**2`` — the area element of the sampling
    grid — so the sum approximates the continuous transform integral.
    This is why a bare ``np.fft.fft2`` is not used directly: both domains
    here are zero-centered and the output carries physical scaling.

    Parameters
    ----------
    g : array_like
        2-D field sampled on a uniform grid with the origin at the
        center of the array.
    dx : float
        Grid spacing in the spatial domain.

    Returns
    -------
    ndarray
        Centered, ``dx**2``-scaled spectrum of ``g``.
    """
    return np.fft.fftshift(np.fft.fft2(np.fft.fftshift(g))) * dx ** 2
def ift2(G, df):
    """Discrete approximation of the continuous 2-D inverse Fourier transform.

    Inverse companion of :func:`ft2`: ifftshift moves the centered
    spectrum's origin to index ``[0, 0]``, ``ifft2`` transforms it, and a
    final ifftshift re-centers the spatial result.  The ``(N * df)**2``
    factor undoes the ``dx**2`` scaling of :func:`ft2` when the grids are
    related by ``df = 1 / (N * dx)``, so ``ift2(ft2(g, dx), df) == g``.

    NOTE: only ``G.shape[0]`` is used for ``N``, so a square input grid
    is assumed.

    Parameters
    ----------
    G : array_like
        2-D centered spectrum (square grid).
    df : float
        Grid spacing in the frequency domain.

    Returns
    -------
    ndarray
        Centered spatial-domain field.
    """
    N = G.shape[0]
    return np.fft.ifftshift(np.fft.ifft2(np.fft.ifftshift(G))) * (N * df) ** 2
| [
"numpy.cos",
"numpy.sin",
"numpy.sqrt",
"numpy.arctan2"
] | [((147, 171), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (154, 171), True, 'import numpy as np\n'), ((178, 194), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (188, 194), True, 'import numpy as np\n'), ((252, 263), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (258, 263), True, 'import numpy as np\n'), ((276, 287), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (282, 287), True, 'import numpy as np\n')] |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # How to use custom data and implement custom models and metrics
# %% [markdown]
# ## Building a simple, first model
# %% [markdown]
# For demonstration purposes we will choose a simple fully connected model. It takes a timeseries of size `input_size` as input and outputs a new timeseries of size `output_size`. You can think of this `input_size` encoding steps and `output_size` decoding/prediction steps.
# %%
import os
import warnings
# warnings.filterwarnings("ignore")
# This notebook lives three directory levels deep in the repository;
# change to the repository root so relative paths resolve.
os.chdir("../../..")
# %%
import torch
from torch import nn
class FullyConnectedModule(nn.Module):
    """Simple multi-layer perceptron.

    Maps an input timeseries of ``input_size`` steps to an output
    timeseries of ``output_size`` steps through ``n_hidden_layers``
    ReLU-activated hidden layers of width ``hidden_size``.
    """

    def __init__(self, input_size: int, output_size: int, hidden_size: int, n_hidden_layers: int):
        super().__init__()
        # input projection followed by the hidden stack and a linear head
        layers = [nn.Linear(input_size, hidden_size), nn.ReLU()]
        for _ in range(n_hidden_layers):
            layers += [nn.Linear(hidden_size, hidden_size), nn.ReLU()]
        layers.append(nn.Linear(hidden_size, output_size))
        self.sequential = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map ``x`` of shape (batch_size, n_timesteps_in) to
        (batch_size, n_timesteps_out)."""
        return self.sequential(x)
# test that network works as intended
network = FullyConnectedModule(input_size=5, output_size=2, hidden_size=10, n_hidden_layers=2)
x = torch.rand(20, 5)
network(x).shape  # expected: torch.Size([20, 2]) = (batch_size, output_size)
# %%
from typing import Dict
from pytorch_forecasting.models import BaseModel
class FullyConnectedModel(BaseModel):
    """First wrapper of :class:`FullyConnectedModule` in the
    pytorch-forecasting ``BaseModel`` interface.

    NOTE(review): ``forward`` here returns the raw network output of
    shape (batch_size, output_size); the revised version of this class
    later in the tutorial adds ``.unsqueeze(-1)`` for an explicit target
    dimension.
    """

    def __init__(self, input_size: int, output_size: int, hidden_size: int, n_hidden_layers: int, **kwargs):
        # saves arguments in signature to `.hparams` attribute, mandatory call - do not skip this
        self.save_hyperparameters()
        # pass additional arguments to BaseModel.__init__, mandatory call - do not skip this
        super().__init__(**kwargs)
        # underlying network is parameterized via hparams so that loading from a checkpoint works
        self.network = FullyConnectedModule(
            input_size=self.hparams.input_size,
            output_size=self.hparams.output_size,
            hidden_size=self.hparams.hidden_size,
            n_hidden_layers=self.hparams.n_hidden_layers,
        )

    def forward(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        # x is a batch generated based on the TimeSeriesDataset
        network_input = x["encoder_cont"].squeeze(-1)
        prediction = self.network(network_input)
        # We need to return a dictionary that at least contains the prediction and the target_scale.
        # The parameter can be directly forwarded from the input.
        return dict(prediction=prediction, target_scale=x["target_scale"])
# instantiate directly - hyperparameters are given explicitly here
model = FullyConnectedModel(input_size=5, output_size=2, hidden_size=10, n_hidden_layers=2)
# %% [markdown]
# This is a very basic implementation that could be readily used for training. But before we add additional features, let's first have a look how we pass data to this model.
# %% [markdown]
# ### Passing data to a model
# %%
import numpy as np
import pandas as pd
# toy dataset: 3 groups with 10 time steps each of random values in [-0.5, 0.5)
test_data = pd.DataFrame(
    dict(
        value=np.random.rand(30) - 0.5,
        #value=np.arange(30),
        group=np.repeat(np.arange(3), 10),
        time_idx=np.tile(np.arange(10), 3),
    )
)
test_data
# %%
from pytorch_forecasting import TimeSeriesDataSet
# create the dataset from the pandas dataframe
# create the dataset from the pandas dataframe:
# fixed-length windows of 5 encoder + 2 prediction steps per group
dataset = TimeSeriesDataSet(
    test_data,
    group_ids=["group"],
    target="value",
    time_idx="time_idx",
    min_encoder_length=5,
    max_encoder_length=5,
    min_prediction_length=2,
    max_prediction_length=2,
    time_varying_unknown_reals=["value"],
)
# %%
# show the parameters the dataset was constructed with
dataset.get_parameters()
# %% [markdown]
# Now, we take a look at the output of the dataloader. It's `x` will be fed to the model's forward method, that is why it is so important to understand it.
# %%
# convert the dataset to a dataloader
dataloader = dataset.to_dataloader(batch_size=4)
# and load the first batch
x, y = next(iter(dataloader))
print("x =", x)
print("\ny =", y)
# print the tensor sizes of each entry of the batch dictionary
print("\nsizes of x =")
for key, value in x.items():
    print(f"\t{key} = {value.size()}")
# %% [markdown]
# This explains why we had to first extract the correct input in our simple `FullyConnectedModel` above before passing it to our `FullyConnectedModule`.
# As a reminder:
#
# %%
# NOTE: this cell only re-displays FullyConnectedModel.forward for illustration;
# it is not meant to be called as a standalone function.
def forward(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    # x is a batch generated based on the TimeSeriesDataset
    network_input = x["encoder_cont"].squeeze(-1)
    prediction = self.network(network_input)
    # We need to return a dictionary that at least contains the prediction and the target_scale.
    # The parameter can be directly forwarded from the input.
    return dict(prediction=prediction, target_scale=x["target_scale"])
# %% [markdown]
# For such a simple architecture, we can ignore most of the inputs in ``x``. You do not have to worry about moving tensors to specifc GPUs, [PyTorch Lightning](https://pytorch-lightning.readthedocs.io) will take care of this for you.
#
# Now, let's check if our model works:
# %%
# pass one batch through the model
x, y = next(iter(dataloader))
model(x)
# %%
# map the batch back to the dataframe rows it was sampled from
dataset.x_to_index(x)
# %% [markdown]
# ### Coupling datasets and models
# %%
class FullyConnectedModel(BaseModel):
    """Revised version of the model that can be instantiated directly
    from a :class:`TimeSeriesDataSet` via :meth:`from_dataset`."""

    def __init__(self, input_size: int, output_size: int, hidden_size: int, n_hidden_layers: int, **kwargs):
        # saves arguments in signature to `.hparams` attribute, mandatory call - do not skip this
        self.save_hyperparameters()
        # pass additional arguments to BaseModel.__init__, mandatory call - do not skip this
        super().__init__(**kwargs)
        self.network = FullyConnectedModule(
            input_size=self.hparams.input_size,
            output_size=self.hparams.output_size,
            hidden_size=self.hparams.hidden_size,
            n_hidden_layers=self.hparams.n_hidden_layers,
        )

    def forward(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        # x is a batch generated based on the TimeSeriesDataset
        network_input = x["encoder_cont"].squeeze(-1)
        # unsqueeze adds the trailing target dimension expected downstream
        prediction = self.network(network_input).unsqueeze(-1)
        # We need to return a dictionary that at least contains the prediction and the target_scale.
        # The parameter can be directly forwarded from the input.
        return dict(prediction=prediction, target_scale=x["target_scale"])

    @classmethod
    def from_dataset(cls, dataset: TimeSeriesDataSet, **kwargs):
        # derive network sizes from the dataset's window lengths
        new_kwargs = {
            "output_size": dataset.max_prediction_length,
            "input_size": dataset.max_encoder_length,
        }
        new_kwargs.update(kwargs)  # use to pass real hyperparameters and override defaults set by dataset
        # example for dataset validation
        assert dataset.max_prediction_length == dataset.min_prediction_length, "Decoder only supports a fixed length"
        assert dataset.min_encoder_length == dataset.max_encoder_length, "Encoder only supports a fixed length"
        assert (
            len(dataset.time_varying_known_categoricals) == 0
            and len(dataset.time_varying_known_reals) == 0
            and len(dataset.time_varying_unknown_categoricals) == 0
            and len(dataset.static_categoricals) == 0
            and len(dataset.static_reals) == 0
            and len(dataset.time_varying_unknown_reals) == 1
            and dataset.time_varying_unknown_reals[0] == dataset.target
        ), "Only covariate should be the target in 'time_varying_unknown_reals'"
        return super().from_dataset(dataset, **new_kwargs)
# %% [markdown]
# Now, let's initialize from our dataset:
# %%
# instantiate from the dataset - input_size/output_size are derived automatically
model = FullyConnectedModel.from_dataset(dataset, hidden_size=10, n_hidden_layers=2)
model.summarize("full")  # print model summary
model.hparams
# %% [markdown]
# ### Defining additional hyperparameters
# %%
model.hparams
# %%
# additional hyperparameters (learning_rate, loss, ...) are documented on BaseModel
print(BaseModel.__init__.__doc__)
# %% [markdown]
# ## Classification
# %%
# same toy layout as before, but with a categorical target column
classification_test_data = pd.DataFrame(
    dict(
        target=np.random.choice(["A", "B", "C"], size=30),  # CHANGING values to predict to a categorical
        value=np.random.rand(30),  # INPUT values - see next section on covariates how to use categorical inputs
        group=np.repeat(np.arange(3), 10),
        time_idx=np.tile(np.arange(10), 3),
    )
)
classification_test_data
# %%
from pytorch_forecasting.data.encoders import NaNLabelEncoder
# create the dataset from the pandas dataframe
# create the dataset from the pandas dataframe
classification_dataset = TimeSeriesDataSet(
    classification_test_data,
    group_ids=["group"],
    target="target",  # SWITCHING to categorical target
    time_idx="time_idx",
    min_encoder_length=5,
    max_encoder_length=5,
    min_prediction_length=2,
    max_prediction_length=2,
    time_varying_unknown_reals=["value"],
    target_normalizer=NaNLabelEncoder(),  # Use the NaNLabelEncoder to encode categorical target
)
x, y = next(iter(classification_dataset.to_dataloader(batch_size=4)))
y[0]  # target values are encoded categories
# The keyword argument ``target_normalizer`` is here redundant because the would have detected that a categorical target is used and therefore a :py:class:`~pytorch_forecasting.data.encoders.NaNLabelEncoder` is required.
# %%
from pytorch_forecasting.metrics import CrossEntropy
class FullyConnectedClassificationModel(BaseModel):
    """Classification variant: the network emits ``output_size * n_classes``
    values that are reshaped to class logits per decoder time step."""

    def __init__(
        self,
        input_size: int,
        output_size: int,
        hidden_size: int,
        n_hidden_layers: int,
        n_classes: int,
        # NOTE(review): default argument is evaluated once at class-definition
        # time, so all instances using the default share one CrossEntropy
        # object - confirm this is intended before reusing the pattern.
        loss=CrossEntropy(),
        **kwargs,
    ):
        # saves arguments in signature to `.hparams` attribute, mandatory call - do not skip this
        self.save_hyperparameters()
        # pass additional arguments to BaseModel.__init__, mandatory call - do not skip this
        super().__init__(**kwargs)
        self.network = FullyConnectedModule(
            input_size=self.hparams.input_size,
            output_size=self.hparams.output_size * self.hparams.n_classes,
            hidden_size=self.hparams.hidden_size,
            n_hidden_layers=self.hparams.n_hidden_layers,
        )

    def forward(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        # x is a batch generated based on the TimeSeriesDataset
        batch_size = x["encoder_cont"].size(0)
        network_input = x["encoder_cont"].squeeze(-1)
        prediction = self.network(network_input)
        # RESHAPE output to batch_size x n_decoder_timesteps x n_classes
        prediction = prediction.unsqueeze(-1).view(batch_size, -1, self.hparams.n_classes)
        # We need to return a dictionary that at least contains the prediction and the target_scale.
        # The parameter can be directly forwarded from the input.
        return dict(prediction=prediction, target_scale=x["target_scale"])

    @classmethod
    def from_dataset(cls, dataset: TimeSeriesDataSet, **kwargs):
        assert isinstance(dataset.target_normalizer, NaNLabelEncoder), "target normalizer has to encode categories"
        new_kwargs = {
            "n_classes": len(
                dataset.target_normalizer.classes_
            ),  # ADD number of classes as encoded by the target normalizer
            "output_size": dataset.max_prediction_length,
            "input_size": dataset.max_encoder_length,
        }
        new_kwargs.update(kwargs)  # use to pass real hyperparameters and override defaults set by dataset
        # example for dataset validation
        assert dataset.max_prediction_length == dataset.min_prediction_length, "Decoder only supports a fixed length"
        assert dataset.min_encoder_length == dataset.max_encoder_length, "Encoder only supports a fixed length"
        assert (
            len(dataset.time_varying_known_categoricals) == 0
            and len(dataset.time_varying_known_reals) == 0
            and len(dataset.time_varying_unknown_categoricals) == 0
            and len(dataset.static_categoricals) == 0
            and len(dataset.static_reals) == 0
            and len(dataset.time_varying_unknown_reals) == 1
        ), "Only covariate should be in 'time_varying_unknown_reals'"
        return super().from_dataset(dataset, **new_kwargs)
# build the classification model from the dataset (n_classes derived from the normalizer)
model = FullyConnectedClassificationModel.from_dataset(classification_dataset, hidden_size=10, n_hidden_layers=2)
model.summarize("full")
model.hparams
# %%
# passing x through model
model(x)["prediction"].shape
# %% [markdown]
# ## Predicting multiple targets at the same time
# %% [markdown]
# Training a model to predict multiple targets simultaneously is not difficult to implement. We can even employ mixed targets, i.e. a mix of categorical and continuous targets. The first step is to define a dataframe with multiple targets:
# %%
# toy dataset with two continuous target columns
multi_target_test_data = pd.DataFrame(
    dict(
        target1=np.random.rand(30),
        target2=np.random.rand(30),
        group=np.repeat(np.arange(3), 10),
        time_idx=np.tile(np.arange(10), 3),
    )
)
multi_target_test_data
# %%
from pytorch_forecasting.data.encoders import EncoderNormalizer, MultiNormalizer, TorchNormalizer
# create the dataset from the pandas dataframe
# create the dataset from the pandas dataframe
multi_target_dataset = TimeSeriesDataSet(
    multi_target_test_data,
    group_ids=["group"],
    target=["target1", "target2"],  # USING two targets
    time_idx="time_idx",
    min_encoder_length=5,
    max_encoder_length=5,
    min_prediction_length=2,
    max_prediction_length=2,
    time_varying_unknown_reals=["target1", "target2"],
    target_normalizer=MultiNormalizer(
        [EncoderNormalizer(), TorchNormalizer()]
    ),  # Use a MultiNormalizer to normalize each of the two targets separately
)
x, y = next(iter(multi_target_dataset.to_dataloader(batch_size=4)))
y[0]  # target values are a list of targets
# %%
from typing import List, Union
from pytorch_forecasting.metrics import MAE, SMAPE, MultiLoss
from pytorch_forecasting.utils import to_list
class FullyConnectedMultiTargetModel(BaseModel):
    """Multi-target variant: one network predicts all targets jointly and
    the flat output is split back into one tensor per target."""

    def __init__(
        self,
        input_size: int,
        output_size: int,
        hidden_size: int,
        n_hidden_layers: int,
        # NOTE(review): mutable default argument - harmless here since it is
        # never mutated, but an immutable default (e.g. ()) would be safer.
        target_sizes: Union[int, List[int]] = [],
        **kwargs,
    ):
        # saves arguments in signature to `.hparams` attribute, mandatory call - do not skip this
        self.save_hyperparameters()
        # pass additional arguments to BaseModel.__init__, mandatory call - do not skip this
        super().__init__(**kwargs)
        # input/output widths scale with the number/size of the targets
        self.network = FullyConnectedModule(
            input_size=self.hparams.input_size * len(to_list(self.hparams.target_sizes)),
            output_size=self.hparams.output_size * sum(to_list(self.hparams.target_sizes)),
            hidden_size=self.hparams.hidden_size,
            n_hidden_layers=self.hparams.n_hidden_layers,
        )

    def forward(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        # x is a batch generated based on the TimeSeriesDataset
        batch_size = x["encoder_cont"].size(0)
        network_input = x["encoder_cont"].view(batch_size, -1)
        prediction = self.network(network_input)
        # RESHAPE output to batch_size x n_decoder_timesteps x sum_of_target_sizes
        # NOTE(review): sum()/np.cumsum() below assume target_sizes is a list;
        # an int target_sizes would raise before the isinstance check - verify.
        prediction = prediction.unsqueeze(-1).view(batch_size, self.hparams.output_size, sum(self.hparams.target_sizes))
        # RESHAPE into list of batch_size x n_decoder_timesteps x target_sizes[i] where i=1..len(target_sizes)
        stops = np.cumsum(self.hparams.target_sizes)
        starts = stops - self.hparams.target_sizes
        prediction = [prediction[..., start:stop] for start, stop in zip(starts, stops)]
        if isinstance(self.hparams.target_sizes, int):  # only one target
            prediction = prediction[0]
        # We need to return a dictionary that at least contains the prediction and the target_scale.
        # The parameter can be directly forwarded from the input.
        return dict(prediction=prediction, target_scale=x["target_scale"])

    @classmethod
    def from_dataset(cls, dataset: TimeSeriesDataSet, **kwargs):
        # By default only handle targets of size one here, categorical targets would be of larger size
        new_kwargs = {
            "target_sizes": [1] * len(to_list(dataset.target)),
            "output_size": dataset.max_prediction_length,
            "input_size": dataset.max_encoder_length,
        }
        new_kwargs.update(kwargs)  # use to pass real hyperparameters and override defaults set by dataset
        # example for dataset validation
        assert dataset.max_prediction_length == dataset.min_prediction_length, "Decoder only supports a fixed length"
        assert dataset.min_encoder_length == dataset.max_encoder_length, "Encoder only supports a fixed length"
        assert (
            len(dataset.time_varying_known_categoricals) == 0
            and len(dataset.time_varying_known_reals) == 0
            and len(dataset.time_varying_unknown_categoricals) == 0
            and len(dataset.static_categoricals) == 0
            and len(dataset.static_reals) == 0
            and len(dataset.time_varying_unknown_reals)
            == len(dataset.target_names)  # Expect as many unknown reals as targets
        ), "Only covariate should be in 'time_varying_unknown_reals'"
        return super().from_dataset(dataset, **new_kwargs)
# build the multi-target model; MultiLoss weights the per-target losses
model = FullyConnectedMultiTargetModel.from_dataset(
    multi_target_dataset,
    hidden_size=10,
    n_hidden_layers=2,
    loss=MultiLoss(metrics=[MAE(), SMAPE()], weights=[2.0, 1.0]),
)
model.summarize("full")
model.hparams
# %% [markdown]
# Now, let's pass some data through our model and calculate the loss.
# %%
out = model(x)
out
# %%
y_hat = model.transform_output(
    out
)  # the model's transform_output method re-scales/de-normalizes the predictions into the real target space
model.loss(y_hat, y)
# %% [markdown]
# ## Using covariates
# %%
from pytorch_forecasting.models.base_model import BaseModelWithCovariates
print(BaseModelWithCovariates.__doc__)
# %%
from typing import Dict, List, Tuple
from pytorch_forecasting.models.nn import MultiEmbedding
class FullyConnectedModelWithCovariates(BaseModelWithCovariates):
    """Covariate-aware variant: categorical covariates are embedded and
    concatenated with continuous inputs before the fully connected network.

    The long argument list mirrors what ``BaseModelWithCovariates.from_dataset``
    passes in; all arguments are captured via ``save_hyperparameters``.
    """

    def __init__(
        self,
        input_size: int,
        output_size: int,
        hidden_size: int,
        n_hidden_layers: int,
        x_reals: List[str],
        x_categoricals: List[str],
        embedding_sizes: Dict[str, Tuple[int, int]],
        embedding_labels: Dict[str, List[str]],
        static_categoricals: List[str],
        static_reals: List[str],
        time_varying_categoricals_encoder: List[str],
        time_varying_categoricals_decoder: List[str],
        time_varying_reals_encoder: List[str],
        time_varying_reals_decoder: List[str],
        embedding_paddings: List[str],
        categorical_groups: Dict[str, List[str]],
        **kwargs,
    ):
        # saves arguments in signature to `.hparams` attribute, mandatory call - do not skip this
        self.save_hyperparameters()
        # pass additional arguments to BaseModel.__init__, mandatory call - do not skip this
        super().__init__(**kwargs)
        # create embedder - can be fed with x["encoder_cat"] or x["decoder_cat"] and will return
        # dictionary of category names mapped to embeddings
        self.input_embeddings = MultiEmbedding(
            embedding_sizes=self.hparams.embedding_sizes,
            categorical_groups=self.hparams.categorical_groups,
            embedding_paddings=self.hparams.embedding_paddings,
            x_categoricals=self.hparams.x_categoricals,
            max_embedding_size=self.hparams.hidden_size,
        )
        # calculate the size of all concatenated embeddings + continuous variables
        n_features = sum(
            embedding_size for classes_size, embedding_size in self.hparams.embedding_sizes.values()
        ) + len(self.reals)
        # create network that will be fed with continuous variables and embeddings
        self.network = FullyConnectedModule(
            input_size=self.hparams.input_size * n_features,
            output_size=self.hparams.output_size,
            hidden_size=self.hparams.hidden_size,
            n_hidden_layers=self.hparams.n_hidden_layers,
        )

    def forward(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        # x is a batch generated based on the TimeSeriesDataset
        batch_size = x["encoder_lengths"].size(0)
        embeddings = self.input_embeddings(x["encoder_cat"])  # returns dictionary with embedding tensors
        # concatenate continuous inputs with the encoder/static embeddings along the feature axis
        network_input = torch.cat(
            [x["encoder_cont"]]
            + [
                emb
                for name, emb in embeddings.items()
                if name in self.encoder_variables or name in self.static_variables
            ],
            dim=-1,
        )
        prediction = self.network(network_input.view(batch_size, -1))
        # We need to return a dictionary that at least contains the prediction and the target_scale.
        # The parameter can be directly forwarded from the input.
        return dict(prediction=prediction, target_scale=x["target_scale"])

    @classmethod
    def from_dataset(cls, dataset: TimeSeriesDataSet, **kwargs):
        new_kwargs = {
            "output_size": dataset.max_prediction_length,
            "input_size": dataset.max_encoder_length,
        }
        new_kwargs.update(kwargs)  # use to pass real hyperparameters and override defaults set by dataset
        # example for dataset validation
        assert dataset.max_prediction_length == dataset.min_prediction_length, "Decoder only supports a fixed length"
        assert dataset.min_encoder_length == dataset.max_encoder_length, "Encoder only supports a fixed length"
        return super().from_dataset(dataset, **new_kwargs)
# %% [markdown]
# Note that the model does not make use of the known covariates in the decoder - this is obviously suboptimal but not scope of this tutorial. Anyways, let us create a new dataset with categorical variables and see how the model can be instantiated from it.
# %%
import numpy as np
import pandas as pd
from pytorch_forecasting import TimeSeriesDataSet
# toy dataset extended with one categorical and one real covariate
test_data_with_covariates = pd.DataFrame(
    dict(
        # as before
        value=np.random.rand(30),
        group=np.repeat(np.arange(3), 10),
        time_idx=np.tile(np.arange(10), 3),
        # now adding covariates
        categorical_covariate=np.random.choice(["a", "b"], size=30),
        real_covariate=np.random.rand(30),
    )
).astype(
    dict(group=str)
)  # categorical covariates have to be of string type
test_data_with_covariates
# %%
# create the dataset from the pandas dataframe
dataset_with_covariates = TimeSeriesDataSet(
    test_data_with_covariates,
    group_ids=["group"],
    target="value",
    time_idx="time_idx",
    min_encoder_length=5,
    max_encoder_length=5,
    min_prediction_length=2,
    max_prediction_length=2,
    time_varying_unknown_reals=["value"],
    time_varying_known_reals=["real_covariate"],
    time_varying_known_categoricals=["categorical_covariate"],
    static_categoricals=["group"],
)
model = FullyConnectedModelWithCovariates.from_dataset(dataset_with_covariates, hidden_size=10, n_hidden_layers=2)
model.summarize("full")  # print model summary
model.hparams
# %% [markdown]
# To test that the model could be trained, pass a sample batch.
# %%
x, y = next(iter(dataset_with_covariates.to_dataloader(batch_size=4)))  # generate batch
model(x)  # pass batch through model
# %% [markdown]
# ## Implementing an autoregressive / recurrent model
# %%
from torch.nn.utils import rnn
from pytorch_forecasting.models.base_model import AutoRegressiveBaseModel
from pytorch_forecasting.models.nn import LSTM
class LSTMModel(AutoRegressiveBaseModel):
    """Autoregressive LSTM: teacher forcing with lagged targets in training
    mode, step-by-step autoregressive decoding in evaluation mode."""

    def __init__(
        self,
        target: str,
        target_lags: Dict[str, Dict[str, int]],
        n_layers: int,
        hidden_size: int,
        dropout: float = 0.1,
        **kwargs,
    ):
        # arguments target and target_lags are required for autoregressive models
        # even though target_lags cannot be used without covariates
        # saves arguments in signature to `.hparams` attribute, mandatory call - do not skip this
        self.save_hyperparameters()
        # pass additional arguments to BaseModel.__init__, mandatory call - do not skip this
        super().__init__(**kwargs)
        # use version of LSTM that can handle zero-length sequences
        self.lstm = LSTM(
            hidden_size=self.hparams.hidden_size,
            input_size=1,
            num_layers=self.hparams.n_layers,
            dropout=self.hparams.dropout,
            batch_first=True,
        )
        # maps the LSTM hidden state to a single target value per time step
        self.output_layer = nn.Linear(self.hparams.hidden_size, 1)

    def encode(self, x: Dict[str, torch.Tensor]):
        # we need at least one encoding step because the target needs to be lagged by one time step
        # because we use the custom LSTM, we do not have to require encoder lengths of > 1
        # but can handle lengths of >= 1
        assert x["encoder_lengths"].min() >= 1
        input_vector = x["encoder_cont"].clone()
        # lag target by one
        input_vector[..., self.target_positions] = torch.roll(
            input_vector[..., self.target_positions], shifts=1, dims=1
        )
        input_vector = input_vector[:, 1:]  # first time step cannot be used because of lagging
        # determine effective encoder length
        effective_encoder_lengths = x["encoder_lengths"] - 1
        # run through LSTM network
        _, hidden_state = self.lstm(
            input_vector, lengths=effective_encoder_lengths, enforce_sorted=False  # passing the lengths directly
        )  # first output (the per-step outputs) is not needed, only the hidden state
        return hidden_state

    def decode(self, x: Dict[str, torch.Tensor], hidden_state):
        # again lag target by one
        input_vector = x["decoder_cont"].clone()
        input_vector[..., self.target_positions] = torch.roll(
            input_vector[..., self.target_positions], shifts=1, dims=1
        )
        # but this time fill in missing target from encoder_cont at the first time step instead of throwing it away
        last_encoder_target = x["encoder_cont"][
            torch.arange(x["encoder_cont"].size(0), device=x["encoder_cont"].device),
            x["encoder_lengths"] - 1,
            self.target_positions.unsqueeze(-1),
        ].T
        input_vector[:, 0, self.target_positions] = last_encoder_target
        if self.training:  # training mode: teacher forcing over the whole decoder window
            lstm_output, _ = self.lstm(input_vector, hidden_state, lengths=x["decoder_lengths"], enforce_sorted=False)
            # transform into right shape
            prediction = self.output_layer(lstm_output)
            # predictions are not yet rescaled
            return dict(prediction=prediction, target_scale=x["target_scale"])
        else:  # prediction mode: decode one step at a time, feeding predictions back in
            target_pos = self.target_positions

            def decode_one(idx, lagged_targets, hidden_state):
                # single decoding step: run one time step through the LSTM
                x = input_vector[:, [idx]]
                # overwrite at target positions
                x[:, 0, target_pos] = lagged_targets[-1]  # take most recent target (i.e. lag=1)
                lstm_output, hidden_state = self.lstm(x, hidden_state)
                # transform into right shape
                prediction = self.output_layer(lstm_output)[:, 0]  # take first timestep
                return prediction, hidden_state

            # make predictions which are fed into next step
            output = self.decode_autoregressive(
                decode_one,
                first_target=input_vector[:, 0, target_pos],
                first_hidden_state=hidden_state,
                target_scale=x["target_scale"],
                n_decoder_steps=input_vector.size(1),
            )
            # predictions are already rescaled
            return dict(prediction=output, output_transformation=None, target_scale=x["target_scale"])

    def forward(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        hidden_state = self.encode(x)  # encode to hidden state
        output = self.decode(x, hidden_state)  # decode leveraging hidden state
        return output
# build the LSTM model from the simple dataset
model = LSTMModel.from_dataset(dataset, n_layers=2, hidden_size=10)
model.summarize("full")
model.hparams
# %%
x, y = next(iter(dataloader))
print(
    "prediction shape in training:", model(x)["prediction"].size()
)  # batch_size x decoder time steps x 1 (1 for one target dimension)
model.eval()  # set model into eval mode to use autoregressive prediction
print("prediction shape in inference:", model(x)["prediction"].size())  # should be the same as in training
# %% [markdown]
# ## Using and defining a custom/non-trivial metric
# %% [markdown]
# To use a different metric, simply pass it to the model when initializing it (preferably via the `from_dataset()` method). For example, to use mean absolute error with our `FullyConnectedModel` from the beginning of this tutorial, type
# %%
from pytorch_forecasting.metrics import MAE
# swap the default loss for mean absolute error via the loss keyword
model = FullyConnectedModel.from_dataset(dataset, hidden_size=10, n_hidden_layers=2, loss=MAE())
model.hparams
# %% [markdown]
# Note that some metrics might require a certain form of model prediction, e.g. quantile prediction assumes an output of shape `batch_size x n_decoder_timesteps x n_quantiles` instead of `batch_size x n_decoder_timesteps`. For the `FullyConnectedModel`, this means that we need to use a modified `FullyConnectedModule` network. Here `n_outputs` corresponds to the number of quantiles.
# %%
import torch
from torch import nn
class FullyConnectedMultiOutputModule(nn.Module):
    """MLP that emits ``n_outputs`` values (e.g. quantiles) per output step.

    Identical to :class:`FullyConnectedModule` except that the final layer
    produces ``output_size * n_outputs`` values, which ``forward`` reshapes
    to (batch_size, output_size, n_outputs).
    """

    def __init__(self, input_size: int, output_size: int, hidden_size: int, n_hidden_layers: int, n_outputs: int):
        super().__init__()
        self.n_outputs = n_outputs
        layers = [nn.Linear(input_size, hidden_size), nn.ReLU()]
        for _ in range(n_hidden_layers):
            layers += [nn.Linear(hidden_size, hidden_size), nn.ReLU()]
        # final layer is widened by n_outputs; forward() folds that factor
        # into a trailing dimension
        layers.append(nn.Linear(hidden_size, output_size * n_outputs))
        self.sequential = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map ``x`` of shape (batch_size, n_timesteps_in) to
        (batch_size, n_timesteps_out, n_outputs)."""
        flat = self.sequential(x)
        return flat.reshape(x.size(0), -1, self.n_outputs)
# test that network works as intended
network = FullyConnectedMultiOutputModule(input_size=5, output_size=2, hidden_size=10, n_hidden_layers=2, n_outputs=7)
network(torch.rand(20, 5)).shape  # <<<<<<<<<< instead of shape (20, 2), returning additional dimension for quantiles
# %% [markdown]
# ### Implement a new metric
# %%
from pytorch_forecasting.metrics import MultiHorizonMetric
class MAE(MultiHorizonMetric):
    """Mean absolute error: elementwise ``|prediction - target|`` per horizon."""

    def loss(self, y_pred, target):
        # reduce the (possibly multi-output) prediction to a point forecast,
        # then take the absolute deviation from the target
        point_forecast = self.to_prediction(y_pred)
        return (point_forecast - target).abs()
# %% [markdown]
# ### Model output cannot be readily converted to prediction
# %%
from copy import copy
from pytorch_forecasting.metrics import NormalDistributionLoss
class FullyConnectedForDistributionLossModel(BaseModel): # we inherit the `from_dataset` method
def __init__(self, input_size: int, output_size: int, hidden_size: int, n_hidden_layers: int, **kwargs):
# saves arguments in signature to `.hparams` attribute, mandatory call - do not skip this
self.save_hyperparameters()
# pass additional arguments to BaseModel.__init__, mandatory call - do not skip this
super().__init__(**kwargs)
self.network = FullyConnectedMultiOutputModule(
input_size=self.hparams.input_size,
output_size=self.hparams.output_size,
hidden_size=self.hparams.hidden_size,
n_hidden_layers=self.hparams.n_hidden_layers,
n_outputs=2, # <<<<<<<< we predict two outputs for mean and scale of the normal distribution
)
self.loss = NormalDistributionLoss()
@classmethod
def from_dataset(cls, dataset: TimeSeriesDataSet, **kwargs):
new_kwargs = {
"output_size": dataset.max_prediction_length,
"input_size": dataset.max_encoder_length,
}
new_kwargs.update(kwargs) # use to pass real hyperparameters and override defaults set by dataset
# example for dataset validation
assert dataset.max_prediction_length == dataset.min_prediction_length, "Decoder only supports a fixed length"
assert dataset.min_encoder_length == dataset.max_encoder_length, "Encoder only supports a fixed length"
assert (
len(dataset.time_varying_known_categoricals) == 0
and len(dataset.time_varying_known_reals) == 0
and len(dataset.time_varying_unknown_categoricals) == 0
and len(dataset.static_categoricals) == 0
and len(dataset.static_reals) == 0
and len(dataset.time_varying_unknown_reals) == 1
and dataset.time_varying_unknown_reals[0] == dataset.target
), "Only covariate should be the target in 'time_varying_unknown_reals'"
return super().from_dataset(dataset, **new_kwargs)
def forward(self, x: Dict[str, torch.Tensor], n_samples: int = None) -> Dict[str, torch.Tensor]:
# x is a batch generated based on the TimeSeriesDataset
network_input = x["encoder_cont"].squeeze(-1)
prediction = self.network(network_input) # shape batch_size x n_decoder_steps x 2
if (
self.training or n_samples is None
): # training is a PyTorch variable indicating if a module is being trained (tracing gradients) or evaluated
assert n_samples is None, "We need to predict parameters when training"
output_transformation = True
else:
# let's sample from our distribution - first we need to scale the parameters to real space
scaled_parameters = self.transform_output(
dict(
prediction=prediction,
target_scale=x["target_scale"],
)
)
# and then sample from distribution
prediction = self.loss.sample(scaled_parameters, n_samples)
output_transformation = None # predictions are already re-scaled
return dict(prediction=prediction, target_scale=x["target_scale"], output_transformation=output_transformation)
def transform_output(self, out: Dict[str, torch.Tensor]) -> torch.Tensor:
    """Rescale the network output (``forward``'s result) to real space.

    Samples were already drawn in real space and are returned unchanged;
    distribution parameters are rescaled through the loss.  (This is already
    implemented in pytorch-forecasting; the code demonstrates the point.)
    """
    if out.get("output_transformation", True) is not None:
        # parameters still need rescaling through the loss
        return self.loss.rescale_parameters(
            out["prediction"], target_scale=out["target_scale"], encoder=self.output_transformer
        )
    # samples are already rescaled
    return out["prediction"]
# Build the distribution-loss model from the dataset and inspect it.
model = FullyConnectedForDistributionLossModel.from_dataset(dataset, hidden_size=10, n_hidden_layers=2)
model.summarize("full")
model.hparams
# %%
x["decoder_lengths"]
# %%
# One batch: in train mode the model returns distribution parameters ...
x, y = next(iter(dataloader))
print("parameter predition shape: ", model(x)["prediction"].size())
# ... and in eval mode it can draw samples from the predicted distribution.
model.eval()  # set model into eval mode for sampling
print("sample prediction shape: ", model(x, n_samples=200)["prediction"].size())
# %%
model.predict(dataloader, mode="quantiles", n_samples=100).shape
# %% [markdown]
# The returned quantiles are here determined by the quantiles defined in the loss function and can be modified by passing a list of quantiles to at initialization.
# %%
model.loss.quantiles
# %%
NormalDistributionLoss(quantiles=[0.2, 0.8]).quantiles
# %% [markdown]
# ## Adding custom plotting and interpretation
# %% [markdown]
# ### Log often whenever an example prediction vs actuals plot is created
# %%
import matplotlib.pyplot as plt
def plot_prediction(
    self,
    x: Dict[str, torch.Tensor],
    out: Dict[str, torch.Tensor],
    idx: int,
    plot_attention: bool = True,
    add_loss_to_title: bool = False,
    show_future_observed: bool = True,
    ax=None,
) -> plt.Figure:
    """
    Plot actuals vs prediction, optionally overlaying attention.

    Args:
        x (Dict[str, torch.Tensor]): network input
        out (Dict[str, torch.Tensor]): network output
        idx (int): sample index
        plot_attention: whether to plot attention on a secondary axis
        add_loss_to_title: whether to add the loss to the title. Defaults to False.
        show_future_observed: whether to show actuals for the future. Defaults to True.
        ax: matplotlib axes to plot on

    Returns:
        plt.Figure: matplotlib figure
    """
    # the base class draws actuals vs prediction as usual
    fig = super().plot_prediction(
        x, out, idx=idx, add_loss_to_title=add_loss_to_title, show_future_observed=show_future_observed, ax=ax
    )
    if not plot_attention:
        return fig

    # overlay the attention over the encoder steps on a twin y-axis
    interpretation = self.interpret_output(out)
    primary_ax = fig.axes[0]
    attention_ax = primary_ax.twinx()
    attention_ax.set_ylabel("Attention")
    encoder_length = x["encoder_lengths"][idx]
    attention = interpretation["attention"][idx, :encoder_length].detach().cpu()
    attention_ax.plot(
        torch.arange(-encoder_length, 0),
        attention,
        alpha=0.2,
        color="k",
    )
    fig.tight_layout()
    return fig
# %% [markdown]
# ### Log at the end of an epoch
# %%
def step(
    self, x: Dict[str, torch.Tensor], y: torch.Tensor, batch_idx: int, **kwargs
) -> Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]:
    """
    Run for each train/val step.

    Args:
        x (Dict[str, torch.Tensor]): x as passed to the network by the dataloader
        y (torch.Tensor): y as passed to the loss function by the dataloader
        batch_idx (int): batch number
        **kwargs: additional arguments to pass to the network apart from ``x``

    Returns:
        Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]: tuple where the first
            entry is a dictionary to which additional logging results can be added for
            consumption in the ``epoch_end`` hook and the second entry is the model's output.
    """
    # extract data and run model
    log, out = super().step(x, y, batch_idx)

    # attach interpretations for later consumption in ``epoch_end``
    if self.log_interval > 0:
        detached_output = {}
        for name, tensor in out.items():
            detached_output[name] = tensor.detach()
        log["interpretation"] = self.interpret_output(
            detached_output,
            reduction="sum",
            attention_prediction_horizon=0,  # attention only for first prediction horizon
        )
    return log, out
def epoch_end(self, outputs):
    """
    Run at epoch end for training or validation.
    """
    if self.log_interval <= 0:
        return
    self.log_interpretation(outputs)
# %% [markdown]
# ### Log at the end of training
# %%
def on_fit_end(self):
    """
    Run at the end of training.
    """
    if self.log_interval <= 0:
        return
    # log one embedding matrix per categorical variable to tensorboard
    for name, emb in self.input_embeddings.items():
        labels = self.hparams.embedding_labels[name]
        self.logger.experiment.add_embedding(
            emb.weight.data.cpu(), metadata=labels, tag=name, global_step=self.global_step
        )
# %% [markdown]
# ## Minimal testing of models
# %% [markdown]
# Testing models is essential to quickly detect problems and iterate quickly. Some issues can be only identified after lengthy training but many problems show up after one or two batches. PyTorch Lightning, on which PyTorch Forecasting is built, makes it easy to set up such tests.
# %%
from pytorch_lightning import Trainer
model = FullyConnectedForDistributionLossModel.from_dataset(dataset, hidden_size=10, n_hidden_layers=2, log_interval=1)
# fast_dev_run executes a single batch of training and validation as a smoke test
trainer = Trainer(fast_dev_run=True)
trainer.fit(model, train_dataloader=dataloader, val_dataloaders=dataloader)
| [
"torch.nn.ReLU",
"numpy.random.rand",
"torch.nn.Sequential",
"pytorch_lightning.Trainer",
"pytorch_forecasting.metrics.NormalDistributionLoss",
"numpy.arange",
"torch.arange",
"pytorch_forecasting.metrics.SMAPE",
"torch.roll",
"pytorch_forecasting.models.nn.MultiEmbedding",
"numpy.random.choice"... | [((582, 602), 'os.chdir', 'os.chdir', (['"""../../.."""'], {}), "('../../..')\n", (590, 602), False, 'import os\n'), ((1527, 1544), 'torch.rand', 'torch.rand', (['(20)', '(5)'], {}), '(20, 5)\n', (1537, 1544), False, 'import torch\n'), ((3507, 3738), 'pytorch_forecasting.TimeSeriesDataSet', 'TimeSeriesDataSet', (['test_data'], {'group_ids': "['group']", 'target': '"""value"""', 'time_idx': '"""time_idx"""', 'min_encoder_length': '(5)', 'max_encoder_length': '(5)', 'min_prediction_length': '(2)', 'max_prediction_length': '(2)', 'time_varying_unknown_reals': "['value']"}), "(test_data, group_ids=['group'], target='value', time_idx=\n 'time_idx', min_encoder_length=5, max_encoder_length=5,\n min_prediction_length=2, max_prediction_length=2,\n time_varying_unknown_reals=['value'])\n", (3524, 3738), False, 'from pytorch_forecasting import TimeSeriesDataSet\n'), ((22765, 23158), 'pytorch_forecasting.TimeSeriesDataSet', 'TimeSeriesDataSet', (['test_data_with_covariates'], {'group_ids': "['group']", 'target': '"""value"""', 'time_idx': '"""time_idx"""', 'min_encoder_length': '(5)', 'max_encoder_length': '(5)', 'min_prediction_length': '(2)', 'max_prediction_length': '(2)', 'time_varying_unknown_reals': "['value']", 'time_varying_known_reals': "['real_covariate']", 'time_varying_known_categoricals': "['categorical_covariate']", 'static_categoricals': "['group']"}), "(test_data_with_covariates, group_ids=['group'], target=\n 'value', time_idx='time_idx', min_encoder_length=5, max_encoder_length=\n 5, min_prediction_length=2, max_prediction_length=2,\n time_varying_unknown_reals=['value'], time_varying_known_reals=[\n 'real_covariate'], time_varying_known_categoricals=[\n 'categorical_covariate'], static_categoricals=['group'])\n", (22782, 23158), False, 'from pytorch_forecasting import TimeSeriesDataSet\n'), ((40202, 40228), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (40209, 40228), False, 
'from pytorch_lightning import Trainer\n'), ((36060, 36104), 'pytorch_forecasting.metrics.NormalDistributionLoss', 'NormalDistributionLoss', ([], {'quantiles': '[0.2, 0.8]'}), '(quantiles=[0.2, 0.8])\n', (36082, 36104), False, 'from pytorch_forecasting.metrics import NormalDistributionLoss\n'), ((1164, 1191), 'torch.nn.Sequential', 'nn.Sequential', (['*module_list'], {}), '(*module_list)\n', (1177, 1191), False, 'from torch import nn\n'), ((8930, 8947), 'pytorch_forecasting.data.encoders.NaNLabelEncoder', 'NaNLabelEncoder', ([], {}), '()\n', (8945, 8947), False, 'from pytorch_forecasting.data.encoders import NaNLabelEncoder\n'), ((9633, 9647), 'pytorch_forecasting.metrics.CrossEntropy', 'CrossEntropy', ([], {}), '()\n', (9645, 9647), False, 'from pytorch_forecasting.metrics import CrossEntropy\n'), ((15499, 15535), 'numpy.cumsum', 'np.cumsum', (['self.hparams.target_sizes'], {}), '(self.hparams.target_sizes)\n', (15508, 15535), True, 'import numpy as np\n'), ((19379, 19646), 'pytorch_forecasting.models.nn.MultiEmbedding', 'MultiEmbedding', ([], {'embedding_sizes': 'self.hparams.embedding_sizes', 'categorical_groups': 'self.hparams.categorical_groups', 'embedding_paddings': 'self.hparams.embedding_paddings', 'x_categoricals': 'self.hparams.x_categoricals', 'max_embedding_size': 'self.hparams.hidden_size'}), '(embedding_sizes=self.hparams.embedding_sizes,\n categorical_groups=self.hparams.categorical_groups, embedding_paddings=\n self.hparams.embedding_paddings, x_categoricals=self.hparams.\n x_categoricals, max_embedding_size=self.hparams.hidden_size)\n', (19393, 19646), False, 'from pytorch_forecasting.models.nn import MultiEmbedding\n'), ((24556, 24699), 'pytorch_forecasting.models.nn.LSTM', 'LSTM', ([], {'hidden_size': 'self.hparams.hidden_size', 'input_size': '(1)', 'num_layers': 'self.hparams.n_layers', 'dropout': 'self.hparams.dropout', 'batch_first': '(True)'}), '(hidden_size=self.hparams.hidden_size, input_size=1, num_layers=self.\n hparams.n_layers, 
dropout=self.hparams.dropout, batch_first=True)\n', (24560, 24699), False, 'from pytorch_forecasting.models.nn import LSTM\n'), ((24794, 24832), 'torch.nn.Linear', 'nn.Linear', (['self.hparams.hidden_size', '(1)'], {}), '(self.hparams.hidden_size, 1)\n', (24803, 24832), False, 'from torch import nn\n'), ((25294, 25364), 'torch.roll', 'torch.roll', (['input_vector[..., self.target_positions]'], {'shifts': '(1)', 'dims': '(1)'}), '(input_vector[..., self.target_positions], shifts=1, dims=1)\n', (25304, 25364), False, 'import torch\n'), ((26065, 26135), 'torch.roll', 'torch.roll', (['input_vector[..., self.target_positions]'], {'shifts': '(1)', 'dims': '(1)'}), '(input_vector[..., self.target_positions], shifts=1, dims=1)\n', (26075, 26135), False, 'import torch\n'), ((29249, 29254), 'pytorch_forecasting.metrics.MAE', 'MAE', ([], {}), '()\n', (29252, 29254), False, 'from pytorch_forecasting.metrics import MAE\n'), ((30399, 30426), 'torch.nn.Sequential', 'nn.Sequential', (['*module_list'], {}), '(*module_list)\n', (30412, 30426), False, 'from torch import nn\n'), ((30865, 30882), 'torch.rand', 'torch.rand', (['(20)', '(5)'], {}), '(20, 5)\n', (30875, 30882), False, 'import torch\n'), ((32272, 32296), 'pytorch_forecasting.metrics.NormalDistributionLoss', 'NormalDistributionLoss', ([], {}), '()\n', (32294, 32296), False, 'from pytorch_forecasting.metrics import NormalDistributionLoss\n'), ((857, 891), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (866, 891), False, 'from torch import nn\n'), ((893, 902), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (900, 902), False, 'from torch import nn\n'), ((1100, 1135), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (1109, 1135), False, 'from torch import nn\n'), ((8135, 8177), 'numpy.random.choice', 'np.random.choice', (["['A', 'B', 'C']"], {'size': '(30)'}), "(['A', 'B', 'C'], size=30)\n", (8151, 8177), True, 'import numpy 
as np\n'), ((8240, 8258), 'numpy.random.rand', 'np.random.rand', (['(30)'], {}), '(30)\n', (8254, 8258), True, 'import numpy as np\n'), ((12913, 12931), 'numpy.random.rand', 'np.random.rand', (['(30)'], {}), '(30)\n', (12927, 12931), True, 'import numpy as np\n'), ((12949, 12967), 'numpy.random.rand', 'np.random.rand', (['(30)'], {}), '(30)\n', (12963, 12967), True, 'import numpy as np\n'), ((29951, 29985), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (29960, 29985), False, 'from torch import nn\n'), ((29987, 29996), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (29994, 29996), False, 'from torch import nn\n'), ((30242, 30289), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(output_size * n_outputs)'], {}), '(hidden_size, output_size * n_outputs)\n', (30251, 30289), False, 'from torch import nn\n'), ((37536, 37568), 'torch.arange', 'torch.arange', (['(-encoder_length)', '(0)'], {}), '(-encoder_length, 0)\n', (37548, 37568), False, 'import torch\n'), ((3231, 3249), 'numpy.random.rand', 'np.random.rand', (['(30)'], {}), '(30)\n', (3245, 3249), True, 'import numpy as np\n'), ((3311, 3323), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (3320, 3323), True, 'import numpy as np\n'), ((3355, 3368), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (3364, 3368), True, 'import numpy as np\n'), ((8363, 8375), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (8372, 8375), True, 'import numpy as np\n'), ((8407, 8420), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (8416, 8420), True, 'import numpy as np\n'), ((12993, 13005), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (13002, 13005), True, 'import numpy as np\n'), ((13037, 13050), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (13046, 13050), True, 'import numpy as np\n'), ((13629, 13648), 'pytorch_forecasting.data.encoders.EncoderNormalizer', 'EncoderNormalizer', ([], {}), '()\n', (13646, 13648), False, 'from 
pytorch_forecasting.data.encoders import EncoderNormalizer, MultiNormalizer, TorchNormalizer\n'), ((13650, 13667), 'pytorch_forecasting.data.encoders.TorchNormalizer', 'TorchNormalizer', ([], {}), '()\n', (13665, 13667), False, 'from pytorch_forecasting.data.encoders import EncoderNormalizer, MultiNormalizer, TorchNormalizer\n'), ((1001, 1036), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (1010, 1036), False, 'from torch import nn\n'), ((1038, 1047), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1045, 1047), False, 'from torch import nn\n'), ((16279, 16302), 'pytorch_forecasting.utils.to_list', 'to_list', (['dataset.target'], {}), '(dataset.target)\n', (16286, 16302), False, 'from pytorch_forecasting.utils import to_list\n'), ((17537, 17542), 'pytorch_forecasting.metrics.MAE', 'MAE', ([], {}), '()\n', (17540, 17542), False, 'from pytorch_forecasting.metrics import MAE\n'), ((17544, 17551), 'pytorch_forecasting.metrics.SMAPE', 'SMAPE', ([], {}), '()\n', (17549, 17551), False, 'from pytorch_forecasting.metrics import MAE, SMAPE, MultiLoss\n'), ((22318, 22336), 'numpy.random.rand', 'np.random.rand', (['(30)'], {}), '(30)\n', (22332, 22336), True, 'import numpy as np\n'), ((22487, 22524), 'numpy.random.choice', 'np.random.choice', (["['a', 'b']"], {'size': '(30)'}), "(['a', 'b'], size=30)\n", (22503, 22524), True, 'import numpy as np\n'), ((22549, 22567), 'numpy.random.rand', 'np.random.rand', (['(30)'], {}), '(30)\n', (22563, 22567), True, 'import numpy as np\n'), ((30095, 30130), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (30104, 30130), False, 'from torch import nn\n'), ((30132, 30141), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (30139, 30141), False, 'from torch import nn\n'), ((14619, 14653), 'pytorch_forecasting.utils.to_list', 'to_list', (['self.hparams.target_sizes'], {}), '(self.hparams.target_sizes)\n', (14626, 14653), False, 'from 
pytorch_forecasting.utils import to_list\n'), ((14711, 14745), 'pytorch_forecasting.utils.to_list', 'to_list', (['self.hparams.target_sizes'], {}), '(self.hparams.target_sizes)\n', (14718, 14745), False, 'from pytorch_forecasting.utils import to_list\n'), ((22362, 22374), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (22371, 22374), True, 'import numpy as np\n'), ((22406, 22419), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (22415, 22419), True, 'import numpy as np\n')] |
# encoding: utf-8
"""
Training implementation for AG-CNN (global/local/fusion branches)
on the ChestX-ray14 dataset.
Author: <NAME>
Update time: 08/11/2020
"""
import re
import sys
import os
import cv2
import time
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.optim import lr_scheduler
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from read_data import ChestXrayDataSet
from sklearn.metrics import roc_auc_score
from skimage.measure import label
from model import Densenet121_AG, Fusion_Branch
from PIL import Image
#np.set_printoptions(threshold = np.nan)

# Checkpoint paths: CKPT_PATH is an optional baseline checkpoint shared by the
# global and local branches; the *_G/_L/_F paths restore each branch separately.
CKPT_PATH = ''
CKPT_PATH_G = '/best_model/AG_CNN_Global_epoch_1.pkl'
CKPT_PATH_L = '/best_model/AG_CNN_Local_epoch_2.pkl'
CKPT_PATH_F = '/best_model/AG_CNN_Fusion_epoch_23.pkl'

# the 14 ChestX-ray14 disease labels
N_CLASSES = 14
CLASS_NAMES = [ 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass', 'Nodule', 'Pneumonia',
                'Pneumothorax', 'Consolidation', 'Edema', 'Emphysema', 'Fibrosis', 'Pleural_Thickening', 'Hernia']

# load with your own dataset path
DATA_DIR = '/media/xxxx/data/xxxx/images'
TRAIN_IMAGE_LIST = '/labels/train_list.txt'
VAL_IMAGE_LIST = '/labels/val_list.txt'
save_model_path = '/model-AG-CNN/'
save_model_name = 'AG_CNN'

# learning rate per branch and training schedule
LR_G = 1e-8
LR_L = 1e-8
LR_F = 1e-3
num_epochs = 50
BATCH_SIZE = 32

# ImageNet channel statistics used to normalize inputs
normalize = transforms.Normalize(
   mean=[0.485, 0.456, 0.406],
   std=[0.229, 0.224, 0.225]
)
# preprocessing applied to the attention-cropped patches in Attention_gen_patchs
preprocess = transforms.Compose([
   transforms.Resize((256,256)),
   transforms.CenterCrop(224),
   transforms.ToTensor(),
   normalize,
])
def Attention_gen_patchs(ori_image, fm_cuda):
    """Crop one attention-guided patch per batch sample.

    For each image: sum the feature channels into a heat map, binarize it
    (Otsu via ``binImage``), keep the largest connected component, take the
    bounding box of that mask, crop it from the original image and run it
    through ``preprocess`` into a 224x224 tensor.

    Args:
        ori_image: batch of normalized input images on the CPU
            (reshaped here as 224x224x3 per sample).
        fm_cuda: feature maps from the global branch, shape (bz, nc, h, w).

    Returns:
        CUDA tensor of the stacked cropped patches, shape (bz, 3, 224, 224).
    """
    # feature map -> feature mask (using feature map to crop on the original image) -> crop -> patchs
    feature_conv = fm_cuda.data.cpu().numpy()
    size_upsample = (224, 224)
    bz, nc, h, w = feature_conv.shape
    patchs_cuda = torch.FloatTensor().cuda()
    for i in range(0, bz):
        # class-activation-style map: sum channels, rescale to [0, 255]
        feature = feature_conv[i]
        cam = feature.reshape((nc, h*w))
        cam = cam.sum(axis=0)
        cam = cam.reshape(h,w)
        cam = cam - np.min(cam)
        cam_img = cam / np.max(cam)
        cam_img = np.uint8(255 * cam_img)
        # binarize and keep only the largest connected region
        heatmap_bin = binImage(cv2.resize(cam_img, size_upsample))
        heatmap_maxconn = selectMaxConnect(heatmap_bin)
        heatmap_mask = heatmap_bin * heatmap_maxconn
        # bounding box of the attended region
        ind = np.argwhere(heatmap_mask != 0)
        minh = min(ind[:,0])
        minw = min(ind[:,1])
        maxh = max(ind[:,0])
        maxw = max(ind[:,1])
        # to ori image
        image = ori_image[i].numpy().reshape(224,224,3)
        # NOTE(review): takes the central third of the image before resizing
        # back to 224x224 -- confirm this matches the intended crop
        image = image[int(224*0.334):int(224*0.667),int(224*0.334):int(224*0.667),:]
        image = cv2.resize(image, size_upsample)
        image_crop = image[minh:maxh,minw:maxw,:] * 256 # because image was normalized before
        image_crop = preprocess(Image.fromarray(image_crop.astype('uint8')).convert('RGB'))
        # accumulate patches into one CUDA batch tensor
        img_variable = torch.autograd.Variable(image_crop.reshape(3,224,224).unsqueeze(0).cuda())
        patchs_cuda = torch.cat((patchs_cuda,img_variable),0)
    return patchs_cuda
def binImage(heatmap):
    """Binarize a heat map using Otsu's automatic threshold.

    A fixed threshold (the ``t`` from the paper) would instead be:
    ``cv2.threshold(heatmap, 178, 255, cv2.THRESH_BINARY)``.
    """
    threshold_flags = cv2.THRESH_BINARY + cv2.THRESH_OTSU
    _, heatmap_bin = cv2.threshold(heatmap, 0, 255, threshold_flags)
    return heatmap_bin
def selectMaxConnect(heatmap):
    """Return a 0/1 mask of the largest connected foreground component.

    Args:
        heatmap: binary heat map; non-zero pixels are foreground.

    Returns:
        Integer array of ``heatmap``'s shape: 1 inside the largest connected
        component (ties broken by smallest label, as before) and 0 elsewhere;
        all zeros when there is no foreground at all.
    """
    labeled_img, num = label(heatmap, connectivity=2, background=0, return_num=True)
    if num == 0:
        # no foreground component at all
        return np.zeros_like(labeled_img)
    # Count pixels per label in a single pass; the original scanned the whole
    # image twice per label (np.sum in the condition and again in the body).
    counts = np.bincount(labeled_img.ravel())
    counts[0] = 0  # ignore the background label
    max_label = int(counts.argmax())  # argmax keeps the first (smallest) label on ties
    return (labeled_img == max_label).astype(labeled_img.dtype)
def main():
    """Train the AG-CNN global, local and fusion branches jointly.

    Builds the ChestX-ray14 train/val loaders, optionally restores
    checkpoints for each branch, then optimizes the three branches with a
    weighted BCE loss (0.8 * global + 0.1 * local + 0.1 * fusion).  After
    every epoch the validation AUROCs are printed and all three branches'
    weights are saved.
    """
    print('********************load data********************')
    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
    train_dataset = ChestXrayDataSet(data_dir=DATA_DIR,
                                     image_list_file=TRAIN_IMAGE_LIST,
                                     transform=transforms.Compose([
                                         transforms.Resize(224),
                                         transforms.CenterCrop(224),
                                         transforms.ToTensor(),
                                         normalize,
                                     ]))
    train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE,
                              shuffle=True, num_workers=4, pin_memory=True)
    test_dataset = ChestXrayDataSet(data_dir=DATA_DIR,
                                    image_list_file=VAL_IMAGE_LIST,
                                    transform=transforms.Compose([
                                        transforms.Resize(256),
                                        transforms.CenterCrop(224),
                                        transforms.ToTensor(),
                                        normalize,
                                    ]))
    test_loader = DataLoader(dataset=test_dataset, batch_size=128,
                             shuffle=False, num_workers=4, pin_memory=True)
    print('********************load data succeed!********************')

    print('********************load model********************')
    # initialize and load the model
    Global_Branch_model = Densenet121_AG(pretrained=False, num_classes=N_CLASSES).cuda()
    Local_Branch_model = Densenet121_AG(pretrained=False, num_classes=N_CLASSES).cuda()
    Fusion_Branch_model = Fusion_Branch(input_size=2048, output_size=N_CLASSES).cuda()

    if os.path.isfile(CKPT_PATH):
        print("=> loading checkpoint")
        checkpoint = torch.load(CKPT_PATH)
        # Code modified from torchvision densenet source for loading from
        # pre-0.4 densenet weights: keys such as "norm.1" are rewritten to "norm1".
        state_dict = checkpoint['state_dict']
        remove_data_parallel = True  # Change if you don't want to use nn.DataParallel(model)
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
        for key in list(state_dict.keys()):
            ori_key = key
            key = key.replace('densenet121.', '')
            match = pattern.match(key)
            new_key = match.group(1) + match.group(2) if match else key
            # strip the "module." prefix added by nn.DataParallel
            new_key = new_key[7:] if remove_data_parallel else new_key
            if '.0.' in new_key:
                new_key = new_key.replace('0.', '')
            state_dict[new_key] = state_dict[ori_key]
            # Delete old key only if modified.
            if match or remove_data_parallel:
                del state_dict[ori_key]
        Global_Branch_model.load_state_dict(state_dict)
        Local_Branch_model.load_state_dict(state_dict)
        print("=> loaded baseline checkpoint")
    else:
        print("=> no checkpoint found")

    if os.path.isfile(CKPT_PATH_G):
        checkpoint = torch.load(CKPT_PATH_G)
        Global_Branch_model.load_state_dict(checkpoint)
        print("=> loaded Global_Branch_model checkpoint")

    if os.path.isfile(CKPT_PATH_L):
        checkpoint = torch.load(CKPT_PATH_L)
        Local_Branch_model.load_state_dict(checkpoint)
        print("=> loaded Local_Branch_model checkpoint")

    if os.path.isfile(CKPT_PATH_F):
        checkpoint = torch.load(CKPT_PATH_F)
        Fusion_Branch_model.load_state_dict(checkpoint)
        print("=> loaded Fusion_Branch_model checkpoint")

    cudnn.benchmark = True
    criterion = nn.BCELoss()
    optimizer_global = optim.Adam(Global_Branch_model.parameters(), lr=LR_G, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
    lr_scheduler_global = lr_scheduler.StepLR(optimizer_global, step_size=10, gamma=1)

    optimizer_local = optim.Adam(Local_Branch_model.parameters(), lr=LR_L, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
    lr_scheduler_local = lr_scheduler.StepLR(optimizer_local, step_size=10, gamma=1)

    optimizer_fusion = optim.Adam(Fusion_Branch_model.parameters(), lr=LR_F, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
    lr_scheduler_fusion = lr_scheduler.StepLR(optimizer_fusion, step_size=15, gamma=0.1)
    print('********************load model succeed!********************')

    print('********************begin training!********************')
    for epoch in range(num_epochs):
        since = time.time()
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # set models to training mode
        Global_Branch_model.train()
        Local_Branch_model.train()
        Fusion_Branch_model.train()

        running_loss = 0.0
        num_batches = 0
        # Iterate over data
        for i, (images, target) in enumerate(train_loader):
            input_var = torch.autograd.Variable(images.cuda())
            target_var = torch.autograd.Variable(target.cuda())
            optimizer_global.zero_grad()
            optimizer_local.zero_grad()
            optimizer_fusion.zero_grad()

            # compute output of all three branches; the local branch sees
            # the attention-cropped patches from the global feature map
            output_global, fm_global, pool_global = Global_Branch_model(input_var)
            patchs_var = Attention_gen_patchs(images, fm_global)
            output_local, _, pool_local = Local_Branch_model(patchs_var)
            output_fusion = Fusion_Branch_model(pool_global, pool_local)

            # weighted joint loss
            loss1 = criterion(output_global, target_var)
            loss2 = criterion(output_local, target_var)
            loss3 = criterion(output_fusion, target_var)
            loss = loss1 * 0.8 + loss2 * 0.1 + loss3 * 0.1

            if (i % 500) == 0:
                print('step: {} totalloss: {loss:.3f} loss1: {loss1:.3f} loss2: {loss2:.3f} loss3: {loss3:.3f}'.format(i, loss = loss, loss1 = loss1, loss2 = loss2, loss3 = loss3))

            loss.backward()
            optimizer_global.step()
            optimizer_local.step()
            optimizer_fusion.step()

            running_loss += loss.data.item()
            num_batches += 1

        # step the schedulers once per epoch, AFTER the optimizer updates
        # (required ordering since PyTorch 1.1; the original stepped them
        # at the start of the epoch)
        lr_scheduler_global.step()
        lr_scheduler_local.step()
        lr_scheduler_fusion.step()

        # BUG FIX: average over the number of batches; the original divided
        # by the last batch index i, which is off by one and raises
        # ZeroDivisionError when the loader yields a single batch
        epoch_loss = running_loss / max(num_batches, 1)
        print(' Epoch over Loss: {:.5f}'.format(epoch_loss))

        print('*******testing!*********')
        test(Global_Branch_model, Local_Branch_model, Fusion_Branch_model, test_loader)

        # save all three branches every epoch
        if epoch % 1 == 0:
            save_path = save_model_path
            torch.save(Global_Branch_model.state_dict(), save_path + save_model_name + '_Global' + '_epoch_' + str(epoch) + '.pkl')
            print('Global_Branch_model already save!')
            torch.save(Local_Branch_model.state_dict(), save_path + save_model_name + '_Local' + '_epoch_' + str(epoch) + '.pkl')
            print('Local_Branch_model already save!')
            torch.save(Fusion_Branch_model.state_dict(), save_path + save_model_name + '_Fusion' + '_epoch_' + str(epoch) + '.pkl')
            print('Fusion_Branch_model already save!')

        time_elapsed = time.time() - since
        print('Training one epoch complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
def test(model_global, model_local, model_fusion, test_loader):
    """Evaluate the three branches and print mean/per-class AUROCs.

    Runs the full pipeline (global branch, attention-cropped patches into
    the local branch, fusion of the pooled features) over ``test_loader``
    without gradients, then reports AUROCs per branch.
    """
    # accumulate ground truth and per-branch predictions on the GPU
    gt = torch.FloatTensor().cuda()
    pred_global = torch.FloatTensor().cuda()
    pred_local = torch.FloatTensor().cuda()
    pred_fusion = torch.FloatTensor().cuda()

    # switch to evaluate mode
    model_global.eval()
    model_local.eval()
    model_fusion.eval()
    cudnn.benchmark = True

    for i, (inp, target) in enumerate(test_loader):
        with torch.no_grad():
            if i % 2000 == 0:
                print('testing process:', i)
            target = target.cuda()
            gt = torch.cat((gt, target), 0)
            input_var = torch.autograd.Variable(inp.cuda())
            output_global, fm_global, pool_global = model_global(input_var)
            patchs_var = Attention_gen_patchs(inp, fm_global)
            output_local, _, pool_local = model_local(patchs_var)
            output_fusion = model_fusion(pool_global, pool_local)
            pred_global = torch.cat((pred_global, output_global.data), 0)
            pred_local = torch.cat((pred_local, output_local.data), 0)
            pred_fusion = torch.cat((pred_fusion, output_fusion.data), 0)

    # the original repeated this reporting block three times verbatim
    _report_aurocs('Global branch', gt, pred_global, newline_before=False)
    _report_aurocs('Local branch', gt, pred_local, newline_before=True)
    _report_aurocs('Fusion branch', gt, pred_fusion, newline_before=True)


def _report_aurocs(tag, gt, pred, newline_before):
    """Print the mean and per-class AUROC for one branch's predictions."""
    aurocs = compute_AUCs(gt, pred)
    auroc_avg = np.array(aurocs).mean()
    if newline_before:
        print('\n')
    print('{}: The average AUROC is {AUROC_avg:.3f}'.format(tag, AUROC_avg=auroc_avg))
    for i in range(N_CLASSES):
        print('The AUROC of {} is {}'.format(CLASS_NAMES[i], aurocs[i]))
def compute_AUCs(gt, pred):
    """Computes Area Under the Curve (AUC) from prediction scores.

    Args:
        gt: Pytorch tensor on GPU, shape = [n_samples, n_classes]
          true binary labels.
        pred: Pytorch tensor on GPU, shape = [n_samples, n_classes]
          can either be probability estimates of the positive class,
          confidence values, or binary decisions.

    Returns:
        List of AUROCs of all classes.
    """
    gt_np = gt.cpu().numpy()
    pred_np = pred.cpu().numpy()
    # one AUROC per disease class, computed column-wise
    return [roc_auc_score(gt_np[:, i], pred_np[:, i]) for i in range(N_CLASSES)]
# Run training when executed as a script.
if __name__ == '__main__':
    main()
| [
"numpy.uint8",
"re.compile",
"sklearn.metrics.roc_auc_score",
"numpy.array",
"cv2.threshold",
"model.Fusion_Branch",
"numpy.max",
"numpy.min",
"torchvision.transforms.ToTensor",
"os.path.isfile",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"cv2.resize",
"time.time"... | [((1369, 1444), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1389, 1444), True, 'import torchvision.transforms as transforms\n'), ((3162, 3229), 'cv2.threshold', 'cv2.threshold', (['heatmap', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(heatmap, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (3175, 3229), False, 'import cv2\n'), ((3408, 3469), 'skimage.measure.label', 'label', (['heatmap'], {'connectivity': '(2)', 'background': '(0)', 'return_num': '(True)'}), '(heatmap, connectivity=2, background=0, return_num=True)\n', (3413, 3469), False, 'from skimage.measure import label\n'), ((3876, 3942), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (3896, 3942), True, 'import torchvision.transforms as transforms\n'), ((4479, 4585), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'BATCH_SIZE', 'shuffle': '(True)', 'num_workers': '(4)', 'pin_memory': '(True)'}), '(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True,\n num_workers=4, pin_memory=True)\n', (4489, 4585), False, 'from torch.utils.data import DataLoader\n'), ((5110, 5210), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': '(128)', 'shuffle': '(False)', 'num_workers': '(4)', 'pin_memory': '(True)'}), '(dataset=test_dataset, batch_size=128, shuffle=False, num_workers\n =4, pin_memory=True)\n', (5120, 5210), False, 'from torch.utils.data import DataLoader\n'), ((5693, 5718), 'os.path.isfile', 'os.path.isfile', (['CKPT_PATH'], {}), '(CKPT_PATH)\n', (5707, 5718), False, 'import os\n'), ((7081, 7108), 'os.path.isfile', 'os.path.isfile', (['CKPT_PATH_G'], {}), '(CKPT_PATH_G)\n', (7095, 7108), False, 'import os\n'), 
((7277, 7304), 'os.path.isfile', 'os.path.isfile', (['CKPT_PATH_L'], {}), '(CKPT_PATH_L)\n', (7291, 7304), False, 'import os\n'), ((7471, 7498), 'os.path.isfile', 'os.path.isfile', (['CKPT_PATH_F'], {}), '(CKPT_PATH_F)\n', (7485, 7498), False, 'import os\n'), ((7703, 7715), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (7713, 7715), True, 'import torch.nn as nn\n'), ((7869, 7929), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizer_global'], {'step_size': '(10)', 'gamma': '(1)'}), '(optimizer_global, step_size=10, gamma=1)\n', (7888, 7929), False, 'from torch.optim import lr_scheduler\n'), ((8090, 8149), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizer_local'], {'step_size': '(10)', 'gamma': '(1)'}), '(optimizer_local, step_size=10, gamma=1)\n', (8109, 8149), False, 'from torch.optim import lr_scheduler\n'), ((8313, 8375), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizer_fusion'], {'step_size': '(15)', 'gamma': '(0.1)'}), '(optimizer_fusion, step_size=15, gamma=0.1)\n', (8332, 8375), False, 'from torch.optim import lr_scheduler\n'), ((1490, 1519), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256, 256)'], {}), '((256, 256))\n', (1507, 1519), True, 'import torchvision.transforms as transforms\n'), ((1523, 1549), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (1544, 1549), True, 'import torchvision.transforms as transforms\n'), ((1554, 1575), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1573, 1575), True, 'import torchvision.transforms as transforms\n'), ((2156, 2179), 'numpy.uint8', 'np.uint8', (['(255 * cam_img)'], {}), '(255 * cam_img)\n', (2164, 2179), True, 'import numpy as np\n'), ((2372, 2402), 'numpy.argwhere', 'np.argwhere', (['(heatmap_mask != 0)'], {}), '(heatmap_mask != 0)\n', (2383, 2402), True, 'import numpy as np\n'), ((2710, 2742), 'cv2.resize', 'cv2.resize', (['image', 'size_upsample'], {}), 
'(image, size_upsample)\n', (2720, 2742), False, 'import cv2\n'), ((3052, 3093), 'torch.cat', 'torch.cat', (['(patchs_cuda, img_variable)', '(0)'], {}), '((patchs_cuda, img_variable), 0)\n', (3061, 3093), False, 'import torch\n'), ((5780, 5801), 'torch.load', 'torch.load', (['CKPT_PATH'], {}), '(CKPT_PATH)\n', (5790, 5801), False, 'import torch\n'), ((6083, 6204), 're.compile', 're.compile', (['"""^(.*denselayer\\\\d+\\\\.(?:norm|relu|conv))\\\\.((?:[12])\\\\.(?:weight|bias|running_mean|running_var))$"""'], {}), "(\n '^(.*denselayer\\\\d+\\\\.(?:norm|relu|conv))\\\\.((?:[12])\\\\.(?:weight|bias|running_mean|running_var))$'\n )\n", (6093, 6204), False, 'import re\n'), ((7131, 7154), 'torch.load', 'torch.load', (['CKPT_PATH_G'], {}), '(CKPT_PATH_G)\n', (7141, 7154), False, 'import torch\n'), ((7327, 7350), 'torch.load', 'torch.load', (['CKPT_PATH_L'], {}), '(CKPT_PATH_L)\n', (7337, 7350), False, 'import torch\n'), ((7521, 7544), 'torch.load', 'torch.load', (['CKPT_PATH_F'], {}), '(CKPT_PATH_F)\n', (7531, 7544), False, 'import torch\n'), ((8576, 8587), 'time.time', 'time.time', ([], {}), '()\n', (8585, 8587), False, 'import time\n'), ((1879, 1898), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (1896, 1898), False, 'import torch\n'), ((2090, 2101), 'numpy.min', 'np.min', (['cam'], {}), '(cam)\n', (2096, 2101), True, 'import numpy as np\n'), ((2126, 2137), 'numpy.max', 'np.max', (['cam'], {}), '(cam)\n', (2132, 2137), True, 'import numpy as np\n'), ((2212, 2246), 'cv2.resize', 'cv2.resize', (['cam_img', 'size_upsample'], {}), '(cam_img, size_upsample)\n', (2222, 2246), False, 'import cv2\n'), ((3549, 3573), 'numpy.sum', 'np.sum', (['(labeled_img == i)'], {}), '(labeled_img == i)\n', (3555, 3573), True, 'import numpy as np\n'), ((3607, 3631), 'numpy.sum', 'np.sum', (['(labeled_img == i)'], {}), '(labeled_img == i)\n', (3613, 3631), True, 'import numpy as np\n'), ((5435, 5490), 'model.Densenet121_AG', 'Densenet121_AG', ([], {'pretrained': '(False)', 
'num_classes': 'N_CLASSES'}), '(pretrained=False, num_classes=N_CLASSES)\n', (5449, 5490), False, 'from model import Densenet121_AG, Fusion_Branch\n'), ((5527, 5582), 'model.Densenet121_AG', 'Densenet121_AG', ([], {'pretrained': '(False)', 'num_classes': 'N_CLASSES'}), '(pretrained=False, num_classes=N_CLASSES)\n', (5541, 5582), False, 'from model import Densenet121_AG, Fusion_Branch\n'), ((5620, 5673), 'model.Fusion_Branch', 'Fusion_Branch', ([], {'input_size': '(2048)', 'output_size': 'N_CLASSES'}), '(input_size=2048, output_size=N_CLASSES)\n', (5633, 5673), False, 'from model import Densenet121_AG, Fusion_Branch\n'), ((11442, 11453), 'time.time', 'time.time', ([], {}), '()\n', (11451, 11453), False, 'import time\n'), ((11705, 11724), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (11722, 11724), False, 'import torch\n'), ((11750, 11769), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (11767, 11769), False, 'import torch\n'), ((11794, 11813), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (11811, 11813), False, 'import torch\n'), ((11839, 11858), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (11856, 11858), False, 'import torch\n'), ((12061, 12076), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12074, 12076), False, 'import torch\n'), ((12204, 12230), 'torch.cat', 'torch.cat', (['(gt, target)', '(0)'], {}), '((gt, target), 0)\n', (12213, 12230), False, 'import torch\n'), ((12648, 12695), 'torch.cat', 'torch.cat', (['(pred_global, output_global.data)', '(0)'], {}), '((pred_global, output_global.data), 0)\n', (12657, 12695), False, 'import torch\n'), ((12721, 12766), 'torch.cat', 'torch.cat', (['(pred_local, output_local.data)', '(0)'], {}), '((pred_local, output_local.data), 0)\n', (12730, 12766), False, 'import torch\n'), ((12793, 12840), 'torch.cat', 'torch.cat', (['(pred_fusion, output_fusion.data)', '(0)'], {}), '((pred_fusion, output_fusion.data), 0)\n', (12802, 12840), False, 'import torch\n'), 
((12915, 12933), 'numpy.array', 'np.array', (['AUROCs_g'], {}), '(AUROCs_g)\n', (12923, 12933), True, 'import numpy as np\n'), ((13201, 13219), 'numpy.array', 'np.array', (['AUROCs_l'], {}), '(AUROCs_l)\n', (13209, 13219), True, 'import numpy as np\n'), ((13503, 13521), 'numpy.array', 'np.array', (['AUROCs_f'], {}), '(AUROCs_f)\n', (13511, 13521), True, 'import numpy as np\n'), ((14327, 14368), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['gt_np[:, i]', 'pred_np[:, i]'], {}), '(gt_np[:, i], pred_np[:, i])\n', (14340, 14368), False, 'from sklearn.metrics import roc_auc_score\n'), ((4214, 4236), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224)'], {}), '(224)\n', (4231, 4236), True, 'import torchvision.transforms as transforms\n'), ((4278, 4304), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (4299, 4304), True, 'import torchvision.transforms as transforms\n'), ((4346, 4367), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4365, 4367), True, 'import torchvision.transforms as transforms\n'), ((4846, 4868), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (4863, 4868), True, 'import torchvision.transforms as transforms\n'), ((4910, 4936), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (4931, 4936), True, 'import torchvision.transforms as transforms\n'), ((4978, 4999), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4997, 4999), True, 'import torchvision.transforms as transforms\n')] |
import typing
import matplotlib.pyplot as plt
import numpy as np
from . import plot_ticks
# Informal schema for the mappings accepted by plot() / plot_subplots().
# The values are type-name placeholders ('Text', 'Series', 'Map', ...),
# not defaults; this dict is documentation, it is never consumed by code.
plot_specs = {
    'PlotData': {
        'common': {
            'merge': {'key_name': 'key_content'},
            # ... PlotDatum keys and values
        },
        'plots': {'PlotID': 'PlotDatum'},
        'subplot_height': 'Number',
        'subplots': {
            'n_columns': 'Integer',
        },
        'figure': 'Map',
    },
    'PlotDatum': {
        'name': 'Text',
        'name_position': ['ylabel', 'title', None],
        'x': 'Series',
        'y': 'Series',
        'y_kwargs': 'Map',
        'ys': [{'y': 'Series', 'y_kwargs': 'Map'}],
        'stacks': ['Series'],
        'stacks_kwargs': 'Map',
        'hist': 'Series',
        'hist_kwargs': 'Map',
        'tickgrid': 'Boolean',
        'xtick_format': typing.Mapping,
        'ytick_format': typing.Mapping,
        'xlabel': 'Text',
        'ylabel': 'Text',
        'xlim': ['Number', 'Number'],
        'ylim': ['Number', 'Number'],
        'title': 'Text',
        'legend_kwargs': 'Map',
    }
}
def plot(plot_datum):
    """Render a single subplot described by a PlotDatum mapping onto the
    current matplotlib axes.

    All keys are optional; see the module-level ``plot_specs`` for the
    informal schema. A legend is drawn automatically whenever any plotted
    element carries a label, or when 'legend_kwargs' is supplied.
    """
    legend = False
    # extract args
    x = plot_datum.get('x')
    # primary line
    if plot_datum.get('y') is not None:
        y = plot_datum['y']
        y_kwargs = plot_datum.get('y_kwargs', {})
        # plot points
        if x is not None:
            plt.plot(x, y, **y_kwargs)
        else:
            plt.plot(y, **y_kwargs)
        # FIX: a labeled primary line now triggers the legend, matching the
        # handling of 'ys', 'stacks' and 'hist' below.
        if y_kwargs.get('label') is not None:
            legend = True
    # additional lines
    if plot_datum.get('ys'):
        for subplot in plot_datum['ys']:
            sub_x = subplot.get('x')
            if sub_x is None:
                sub_x = x  # fall back to the shared x series
            y = subplot.get('y')
            y_kwargs = subplot.get('y_kwargs', {})
            # plot points
            if sub_x is not None:
                plt.plot(sub_x, y, **y_kwargs)
            else:
                plt.plot(y, **y_kwargs)
            if y_kwargs.get('label') is not None:
                legend = True
    # stacked areas
    if plot_datum.get('stacks') is not None:
        stacks_kwargs = plot_datum.get('stacks_kwargs', {})
        plt.stackplot(x, *plot_datum['stacks'], **stacks_kwargs)
        if stacks_kwargs.get('labels') is not None:
            legend = True
    # histogram
    if plot_datum.get('hist') is not None:
        hist_kwargs = plot_datum.get('hist_kwargs', {})
        plt.hist(plot_datum['hist'], **hist_kwargs)
        if hist_kwargs.get('label') is not None:
            legend = True
    # horizontal / vertical reference lines
    for hline in plot_datum.get('hlines', []):
        plt.axhline(**hline)
    for vline in plot_datum.get('vlines', []):
        # BUGFIX: was plt.axvline(**hline) — it re-used the last hline (or
        # raised NameError when no hlines were given) instead of the vline
        # being iterated.
        plt.axvline(**vline)
    # subplot name, placed either as a right-hand ylabel or as the title
    name = plot_datum.get('name')
    name_position = plot_datum.get('name_position')
    if name is not None and name_position is not None:
        if name_position == 'ylabel':
            plt.ylabel(name)
            plt.gca().yaxis.tick_right()
        elif name_position == 'title':
            plt.title(name)
        else:
            raise Exception('unknown position for name: ' + str(name_position))
    # tick formatting delegated to the sibling plot_ticks module
    if plot_datum.get('xtick_format') is not None:
        plot_ticks.format_xticks(**plot_datum.get('xtick_format'))
    if plot_datum.get('ytick_format') is not None:
        plot_ticks.format_yticks(**plot_datum.get('ytick_format'))
    title = plot_datum.get('title')
    if title is not None:
        plt.title(title)
    # TODO: add capability for legend outside of plot:
    # https://www.statology.org/matplotlib-legend-outside-plot/
    if legend or plot_datum.get('legend_kwargs') is not None:
        legend_kwargs = plot_datum.get('legend_kwargs', {})
        plt.legend(**legend_kwargs)
    if plot_datum.get('xlim') is not None:
        plt.xlim(plot_datum['xlim'])
    if plot_datum.get('ylim') is not None:
        plt.ylim(plot_datum['ylim'])
    if plot_datum.get('tickgrid'):
        plot_ticks.add_tick_grid()
    if plot_datum.get('xlabel') is not None:
        plt.xlabel(plot_datum['xlabel'])
    if plot_datum.get('ylabel') is not None:
        plt.ylabel(plot_datum['ylabel'])
def plot_subplots(plot_data):
    """Lay out every PlotDatum in ``plot_data['plots']`` as a grid of
    subplots on a fresh matplotlib figure.

    Entries under 'common' are folded into every datum: the special 'merge'
    key dict-merges its sub-mappings into each datum, all other keys act as
    per-datum defaults. Returns None.
    """
    common = plot_data.get('common', {})
    merge = common.get('merge')
    n_subplots = len(plot_data['plots'])
    if n_subplots == 0:
        print('[no plots specified, skipping plotting]')
        # BUGFIX: previously fell through and still created an empty figure
        # despite the "skipping plotting" message.
        return
    n_columns = plot_data.get('subplots', {}).get('n_columns', 1)
    n_rows = int(np.ceil(n_subplots / n_columns))
    figure = plot_data.get('figure', {})
    subplot_height = plot_data.get('subplot_height', 3)
    # NOTE: setdefault mutates the caller-supplied 'figure' mapping in place.
    figure.setdefault('figsize', [10, subplot_height * n_rows])
    plt.figure(**figure)
    for sp, (plot_id, plot_datum) in enumerate(plot_data['plots'].items()):
        plot_datum = dict(plot_datum)  # shallow copy; don't mutate the input
        # fold shared settings into this datum
        for key, value in common.items():
            if key == 'merge':
                for subkey, subvalue in merge.items():
                    plot_datum.setdefault(subkey, {})
                    # datum-level values win over the merged defaults
                    plot_datum[subkey] = dict(subvalue, **plot_datum[subkey])
            elif key not in plot_datum:
                plot_datum[key] = value
        # create plot
        plt.subplot(n_rows, n_columns, sp + 1)
        # figure-level title goes on the first subplot only
        if sp == 0 and plot_data.get('title') is not None:
            plt.title(plot_data['title'])
        plot(plot_datum=plot_datum)
| [
"numpy.ceil",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplot... | [((4492, 4512), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '(**figure)\n', (4502, 4512), True, 'import matplotlib.pyplot as plt\n'), ((2031, 2087), 'matplotlib.pyplot.stackplot', 'plt.stackplot', (['x', "*plot_datum['stacks']"], {}), "(x, *plot_datum['stacks'], **stacks_kwargs)\n", (2044, 2087), True, 'import matplotlib.pyplot as plt\n'), ((2274, 2317), 'matplotlib.pyplot.hist', 'plt.hist', (["plot_datum['hist']"], {}), "(plot_datum['hist'], **hist_kwargs)\n", (2282, 2317), True, 'import matplotlib.pyplot as plt\n'), ((2461, 2481), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {}), '(**hline)\n', (2472, 2481), True, 'import matplotlib.pyplot as plt\n'), ((2537, 2557), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {}), '(**hline)\n', (2548, 2557), True, 'import matplotlib.pyplot as plt\n'), ((3277, 3293), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3286, 3293), True, 'import matplotlib.pyplot as plt\n'), ((3548, 3575), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '(**legend_kwargs)\n', (3558, 3575), True, 'import matplotlib.pyplot as plt\n'), ((3628, 3656), 'matplotlib.pyplot.xlim', 'plt.xlim', (["plot_datum['xlim']"], {}), "(plot_datum['xlim'])\n", (3636, 3656), True, 'import matplotlib.pyplot as plt\n'), ((3708, 3736), 'matplotlib.pyplot.ylim', 'plt.ylim', (["plot_datum['ylim']"], {}), "(plot_datum['ylim'])\n", (3716, 3736), True, 'import matplotlib.pyplot as plt\n'), ((3861, 3893), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["plot_datum['xlabel']"], {}), "(plot_datum['xlabel'])\n", (3871, 3893), True, 'import matplotlib.pyplot as plt\n'), ((3947, 3979), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["plot_datum['ylabel']"], {}), "(plot_datum['ylabel'])\n", (3957, 3979), True, 'import matplotlib.pyplot as plt\n'), ((4293, 4324), 'numpy.ceil', 'np.ceil', (['(n_subplots / n_columns)'], {}), '(n_subplots / n_columns)\n', (4300, 4324), True, 'import numpy as np\n'), ((4999, 5037), 'matplotlib.pyplot.subplot', 
'plt.subplot', (['n_rows', 'n_columns', '(sp + 1)'], {}), '(n_rows, n_columns, sp + 1)\n', (5010, 5037), True, 'import matplotlib.pyplot as plt\n'), ((1345, 1371), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y, **y_kwargs)\n', (1353, 1371), True, 'import matplotlib.pyplot as plt\n'), ((1398, 1421), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {}), '(y, **y_kwargs)\n', (1406, 1421), True, 'import matplotlib.pyplot as plt\n'), ((2750, 2766), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['name'], {}), '(name)\n', (2760, 2766), True, 'import matplotlib.pyplot as plt\n'), ((5109, 5138), 'matplotlib.pyplot.title', 'plt.title', (["plot_data['title']"], {}), "(plot_data['title'])\n", (5118, 5138), True, 'import matplotlib.pyplot as plt\n'), ((1747, 1777), 'matplotlib.pyplot.plot', 'plt.plot', (['sub_x', 'y'], {}), '(sub_x, y, **y_kwargs)\n', (1755, 1777), True, 'import matplotlib.pyplot as plt\n'), ((1812, 1835), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {}), '(y, **y_kwargs)\n', (1820, 1835), True, 'import matplotlib.pyplot as plt\n'), ((2859, 2874), 'matplotlib.pyplot.title', 'plt.title', (['name'], {}), '(name)\n', (2868, 2874), True, 'import matplotlib.pyplot as plt\n'), ((2779, 2788), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2786, 2788), True, 'import matplotlib.pyplot as plt\n')] |
"""
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(
cov=2.0, n_samples=200, n_features=2, n_classes=2, random_state=1
)
X2, y2 = make_gaussian_quantiles(
mean=(3, 3), cov=1.5, n_samples=300, n_features=2, n_classes=2, random_state=1
)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, -y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=1), algorithm="SAMME", n_estimators=200
)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(
np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)
)
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(
X[idx, 0],
X[idx, 1],
c=c,
cmap=plt.cm.Paired,
s=20,
edgecolor="k",
label="Class %s" % n,
)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc="upper right")
plt.xlabel("x")
plt.ylabel("y")
plt.title("Decision Boundary")
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(
twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label="Class %s" % n,
alpha=0.5,
edgecolor="k",
)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc="upper right")
plt.ylabel("Samples")
plt.xlabel("Score")
plt.title("Decision Scores")
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| [
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.contourf",
"numpy.where",
"matplotlib.pyplot.xlabel",
"sklearn.tree.DecisionTreeClassifier",
"numpy.concatenate",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.ylim",
"matplotli... | [((1171, 1265), 'sklearn.datasets.make_gaussian_quantiles', 'make_gaussian_quantiles', ([], {'cov': '(2.0)', 'n_samples': '(200)', 'n_features': '(2)', 'n_classes': '(2)', 'random_state': '(1)'}), '(cov=2.0, n_samples=200, n_features=2, n_classes=2,\n random_state=1)\n', (1194, 1265), False, 'from sklearn.datasets import make_gaussian_quantiles\n'), ((1277, 1384), 'sklearn.datasets.make_gaussian_quantiles', 'make_gaussian_quantiles', ([], {'mean': '(3, 3)', 'cov': '(1.5)', 'n_samples': '(300)', 'n_features': '(2)', 'n_classes': '(2)', 'random_state': '(1)'}), '(mean=(3, 3), cov=1.5, n_samples=300, n_features=2,\n n_classes=2, random_state=1)\n', (1300, 1384), False, 'from sklearn.datasets import make_gaussian_quantiles\n'), ((1391, 1415), 'numpy.concatenate', 'np.concatenate', (['(X1, X2)'], {}), '((X1, X2))\n', (1405, 1415), True, 'import numpy as np\n'), ((1420, 1449), 'numpy.concatenate', 'np.concatenate', (['(y1, -y2 + 1)'], {}), '((y1, -y2 + 1))\n', (1434, 1449), True, 'import numpy as np\n'), ((1673, 1700), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (1683, 1700), True, 'import matplotlib.pyplot as plt\n'), ((1733, 1749), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (1744, 1749), True, 'import matplotlib.pyplot as plt\n'), ((2030, 2073), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx', 'yy', 'Z'], {'cmap': 'plt.cm.Paired'}), '(xx, yy, Z, cmap=plt.cm.Paired)\n', (2042, 2073), True, 'import matplotlib.pyplot as plt\n'), ((2074, 2091), 'matplotlib.pyplot.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (2082, 2091), True, 'import matplotlib.pyplot as plt\n'), ((2372, 2394), 'matplotlib.pyplot.xlim', 'plt.xlim', (['x_min', 'x_max'], {}), '(x_min, x_max)\n', (2380, 2394), True, 'import matplotlib.pyplot as plt\n'), ((2395, 2417), 'matplotlib.pyplot.ylim', 'plt.ylim', (['y_min', 'y_max'], {}), '(y_min, y_max)\n', (2403, 2417), True, 'import matplotlib.pyplot 
as plt\n'), ((2418, 2447), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (2428, 2447), True, 'import matplotlib.pyplot as plt\n'), ((2448, 2463), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2458, 2463), True, 'import matplotlib.pyplot as plt\n'), ((2464, 2479), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (2474, 2479), True, 'import matplotlib.pyplot as plt\n'), ((2480, 2510), 'matplotlib.pyplot.title', 'plt.title', (['"""Decision Boundary"""'], {}), "('Decision Boundary')\n", (2489, 2510), True, 'import matplotlib.pyplot as plt\n'), ((2652, 2668), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (2663, 2668), True, 'import matplotlib.pyplot as plt\n'), ((2931, 2941), 'matplotlib.pyplot.axis', 'plt.axis', ([], {}), '()\n', (2939, 2941), True, 'import matplotlib.pyplot as plt\n'), ((2942, 2974), 'matplotlib.pyplot.axis', 'plt.axis', (['(x1, x2, y1, y2 * 1.2)'], {}), '((x1, x2, y1, y2 * 1.2))\n', (2950, 2974), True, 'import matplotlib.pyplot as plt\n'), ((2975, 3004), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (2985, 3004), True, 'import matplotlib.pyplot as plt\n'), ((3005, 3026), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Samples"""'], {}), "('Samples')\n", (3015, 3026), True, 'import matplotlib.pyplot as plt\n'), ((3027, 3046), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Score"""'], {}), "('Score')\n", (3037, 3046), True, 'import matplotlib.pyplot as plt\n'), ((3047, 3075), 'matplotlib.pyplot.title', 'plt.title', (['"""Decision Scores"""'], {}), "('Decision Scores')\n", (3056, 3075), True, 'import matplotlib.pyplot as plt\n'), ((3077, 3095), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3093, 3095), True, 'import matplotlib.pyplot as plt\n'), ((3096, 3128), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': 
'(0.35)'}), '(wspace=0.35)\n', (3115, 3128), True, 'import matplotlib.pyplot as plt\n'), ((3129, 3139), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3137, 3139), True, 'import matplotlib.pyplot as plt\n'), ((1526, 1561), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': '(1)'}), '(max_depth=1)\n', (1548, 1561), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1880, 1914), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'plot_step'], {}), '(x_min, x_max, plot_step)\n', (1889, 1914), True, 'import numpy as np\n'), ((1916, 1950), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'plot_step'], {}), '(y_min, y_max, plot_step)\n', (1925, 1950), True, 'import numpy as np\n'), ((2186, 2202), 'numpy.where', 'np.where', (['(y == i)'], {}), '(y == i)\n', (2194, 2202), True, 'import numpy as np\n'), ((2207, 2313), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[idx, 0]', 'X[idx, 1]'], {'c': 'c', 'cmap': 'plt.cm.Paired', 's': '(20)', 'edgecolor': '"""k"""', 'label': "('Class %s' % n)"}), "(X[idx, 0], X[idx, 1], c=c, cmap=plt.cm.Paired, s=20, edgecolor=\n 'k', label='Class %s' % n)\n", (2218, 2313), True, 'import matplotlib.pyplot as plt\n'), ((2729, 2854), 'matplotlib.pyplot.hist', 'plt.hist', (['twoclass_output[y == i]'], {'bins': '(10)', 'range': 'plot_range', 'facecolor': 'c', 'label': "('Class %s' % n)", 'alpha': '(0.5)', 'edgecolor': '"""k"""'}), "(twoclass_output[y == i], bins=10, range=plot_range, facecolor=c,\n label='Class %s' % n, alpha=0.5, edgecolor='k')\n", (2737, 2854), True, 'import matplotlib.pyplot as plt\n')] |
import param
import numpy as np
import panel as pn
import holoviews as hv
from alerce.core import Alerce
hv.extension("bokeh")
hv.opts.defaults(
hv.opts.ErrorBars(
width=300,
height=200,
lower_head=None,
upper_head=None,
xrotation=35,
invert_yaxis=True,
xlim=(0, 1),
)
)
client = Alerce()
oids = client.query_objects(
class_name="RRL", classifier="lc_classifier", probability=0.9, page_size=27
)["oid"]
lcs = {oid: client.query_detections(oid, format="pandas") for oid in oids}
periods = {
oid: client.query_feature(oid, "Multiband_period")[0]["value"] for oid in oids
}
def plot_lc(oid):
    """Return a phase-folded light-curve overlay for one object.

    Folds the module-level detections (``lcs``) by the object's multiband
    period (``periods``); band 1 is drawn green, band 2 red.
    """
    lc = lcs[oid]
    period = periods[oid]
    curves = []
    for band_id, color in ((1, "green"), (2, "red")):
        # keep only rows of this band with a valid corrected magnitude
        band = lc[lc["fid"] == band_id][["mjd", "magpsf_corr", "sigmapsf"]].dropna()
        mjd, mag, err = band.values.T
        phase = np.mod(mjd, period) / period
        bars = hv.ErrorBars(
            (phase, mag, err),
            label=oid,
            kdims="Phase",
            vdims=["Magnitude", "Delta mag"],
        )
        curves.append(bars.opts(line_color=color))
    return hv.Overlay(curves).opts(shared_axes=False, framewise=True)
class LightCurveExplorer(param.Parameterized):
    """Paginated grid of phase-folded light curves for the fetched objects."""
    # Current page index; bounds give 3 pages of 9 curves over the 27 oids.
    page = param.Integer(default=0, bounds=(0, 2))
    lc_per_page = 9  # curves shown per page (also the default arg below)
    @param.depends("page")
    def create_grid(self, lc_per_page=9):
        # Slice the module-level oid list for this page and lay the plots
        # out in three columns.
        selection = oids[self.page * lc_per_page : (self.page + 1) * lc_per_page]
        return (
            hv.Layout([plot_lc(oid) for oid in selection])
            .cols(lc_per_page // 3)
            .opts(framewise=True, shared_axes=False)
        )
    def view(self):
        # DynamicMap re-renders the grid whenever `page` changes
        # (via the @param.depends declaration on create_grid).
        return hv.DynamicMap(self.create_grid).opts(shared_axes=False, framewise=True)
# Assemble the dashboard: plot grid on top, auto-generated param widgets below.
explorer = LightCurveExplorer()
app = pn.Column(explorer.view, explorer.param)
# If on jupyter you can run app to display the dashboard
app.save("docs/index.html", embed=True)
| [
"alerce.core.Alerce",
"holoviews.extension",
"holoviews.Overlay",
"param.Integer",
"holoviews.opts.ErrorBars",
"holoviews.DynamicMap",
"param.depends",
"numpy.mod",
"holoviews.ErrorBars",
"panel.Column"
] | [((106, 127), 'holoviews.extension', 'hv.extension', (['"""bokeh"""'], {}), "('bokeh')\n", (118, 127), True, 'import holoviews as hv\n'), ((346, 354), 'alerce.core.Alerce', 'Alerce', ([], {}), '()\n', (352, 354), False, 'from alerce.core import Alerce\n'), ((1880, 1920), 'panel.Column', 'pn.Column', (['explorer.view', 'explorer.param'], {}), '(explorer.view, explorer.param)\n', (1889, 1920), True, 'import panel as pn\n'), ((150, 274), 'holoviews.opts.ErrorBars', 'hv.opts.ErrorBars', ([], {'width': '(300)', 'height': '(200)', 'lower_head': 'None', 'upper_head': 'None', 'xrotation': '(35)', 'invert_yaxis': '(True)', 'xlim': '(0, 1)'}), '(width=300, height=200, lower_head=None, upper_head=None,\n xrotation=35, invert_yaxis=True, xlim=(0, 1))\n', (167, 274), True, 'import holoviews as hv\n'), ((1345, 1384), 'param.Integer', 'param.Integer', ([], {'default': '(0)', 'bounds': '(0, 2)'}), '(default=0, bounds=(0, 2))\n', (1358, 1384), False, 'import param\n'), ((1411, 1432), 'param.depends', 'param.depends', (['"""page"""'], {}), "('page')\n", (1424, 1432), False, 'import param\n'), ((943, 962), 'numpy.mod', 'np.mod', (['mjd', 'period'], {}), '(mjd, period)\n', (949, 962), True, 'import numpy as np\n'), ((1222, 1244), 'holoviews.Overlay', 'hv.Overlay', (['plot_bands'], {}), '(plot_bands)\n', (1232, 1244), True, 'import holoviews as hv\n'), ((1768, 1799), 'holoviews.DynamicMap', 'hv.DynamicMap', (['self.create_grid'], {}), '(self.create_grid)\n', (1781, 1799), True, 'import holoviews as hv\n'), ((1011, 1107), 'holoviews.ErrorBars', 'hv.ErrorBars', (['(phase, mag, err)'], {'label': 'oid', 'kdims': '"""Phase"""', 'vdims': "['Magnitude', 'Delta mag']"}), "((phase, mag, err), label=oid, kdims='Phase', vdims=[\n 'Magnitude', 'Delta mag'])\n", (1023, 1107), True, 'import holoviews as hv\n')] |
import os
import matplotlib.patches as mpatches
import numpy as np
import pkg_resources
from PyQt5 import QtGui, QtWidgets, QtCore, uic
from PyQt5.Qt import QSplashScreen, QObject
from PyQt5.QtCore import QSettings, QThread, pyqtSignal, QTimer, QDateTime
from PyQt5.QtWidgets import QMenu
from PyQt5.QtGui import QPixmap
from PyQt5.Qt import Qt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, \
NavigationToolbar2QT as NavigationToolbar
from sys import platform
from pathlib import Path
from isstools.dialogs.BasicDialogs import message_box
from matplotlib.figure import Figure
from isstools.elements.figure_update import update_figure
from xas.xray import k2e, e2k
from xas.file_io import load_binned_df_from_file
from isstools.xasproject.xasproject import XASDataSet
# macOS ships a separate Qt Designer layout; all other platforms share one.
if platform == 'darwin':
    ui_path = pkg_resources.resource_filename('isstools', 'ui/ui_xview_project-mac.ui')
else:
    ui_path = pkg_resources.resource_filename('isstools', 'ui/ui_xview_project.ui')
class UIXviewProject(*uic.loadUiType(ui_path)):
    def __init__(self, parent=None,*args, **kwargs):
        """Build the XAS-project tab: set up the Designer UI, the matplotlib
        canvas, and wire every widget signal to its handler.

        Parameters
        ----------
        parent : QWidget, optional
            Owning window; assumed to expose .project (the dataset
            collection) and a statusBar().
        """
        super().__init__(*args, **kwargs)
        self.setupUi(self)
        self.parent = parent
        self.parent.project.datasets_changed.connect(self.update_project_list)
        self.addCanvas()
        self.label_E0.setText("E<sub>0</sub>")
        # project list: selection, context menu, double-click rename
        self.list_project.itemSelectionChanged.connect(self.show_ds_params)
        self.list_project.setContextMenuPolicy(Qt.CustomContextMenu)
        self.list_project.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
        self.list_project.customContextMenuRequested.connect(self.xas_project_context_menu)
        self.list_project.doubleClicked.connect(self.xas_project_double_clicked)
        # plotting buttons (energy / k / R space)
        self.push_plot_project_in_E.clicked.connect(self.plot_project_in_E)
        self.push_plot_project_in_K.clicked.connect(self.plot_project_in_K)
        self.push_plot_project_in_R.clicked.connect(self.plot_project_in_R)
        # typing in any parameter field updates the selected dataset
        self.lineEdit_e0.textEdited.connect(self.update_ds_params)
        self.lineEdit_preedge_lo.textEdited.connect(self.update_ds_params)
        self.lineEdit_preedge_hi.textEdited.connect(self.update_ds_params)
        self.lineEdit_postedge_lo.textEdited.connect(self.update_ds_params)
        self.lineEdit_postedge_hi.textEdited.connect(self.update_ds_params)
        self.lineEdit_spline_lo.textEdited.connect(self.update_ds_params)
        self.lineEdit_spline_hi.textEdited.connect(self.update_ds_params)
        self.lineEdit_clamp_lo.textEdited.connect(self.update_ds_params)
        self.lineEdit_clamp_hi.textEdited.connect(self.update_ds_params)
        self.lineEdit_k_ft_lo.textEdited.connect(self.update_ds_params)
        self.lineEdit_k_ft_hi.textEdited.connect(self.update_ds_params)
        # "set" buttons arm a click-on-plot handler (see set_ds_params_from_plot)
        self.pushButton_e0_set.clicked.connect(self.set_ds_params_from_plot)
        self.pushButton_preedge_lo_set.clicked.connect(self.set_ds_params_from_plot)
        self.pushButton_preedge_hi_set.clicked.connect(self.set_ds_params_from_plot)
        self.pushButton_postedge_lo_set.clicked.connect(self.set_ds_params_from_plot)
        self.pushButton_postedge_hi_set.clicked.connect(self.set_ds_params_from_plot)
        self.pushButton_spline_lo_set.clicked.connect(self.set_ds_params_from_plot)
        self.pushButton_spline_hi_set.clicked.connect(self.set_ds_params_from_plot)
        self.pushButton_k_ft_lo_set.clicked.connect(self.set_ds_params_from_plot)
        self.pushButton_k_ft_hi_set.clicked.connect(self.set_ds_params_from_plot)
        self.pushButton_truncate_at_set.clicked.connect(self.set_ds_params_from_plot)
        # Push to selected/all buttons defs
        self.pushButton_push_norm_param_to_selected.clicked.connect(self.push_param)
        self.pushButton_push_norm_param_to_all.clicked.connect(self.push_param)
        self.pushButton_push_bkg_param_to_selected.clicked.connect(self.push_param)
        self.pushButton_push_bkg_param_to_all.clicked.connect(self.push_param)
        self.pushButton_truncate_below.clicked.connect(self.truncate)
        self.pushButton_truncate_above.clicked.connect(self.truncate)
        # Menu defs
        # self.action_exit.triggered.connect(self.close_app)
        # self.action_save_project.triggered.connect(self.save_xas_project)
        # self.action_open_project.triggered.connect(self.open_xas_project)
        # self.action_save_datasets_as_text.triggered.connect(self.save_xas_datasets_as_text)
        # self.action_combine_and_save_as_text.triggered.connect(self.combine_and_save_datasets_as_text)
        # self.action_merge.triggered.connect(self.merge_datasets)
        # self.action_rename.triggered.connect(self.rename_dataset)
        # self.action_remove.triggered.connect(self.remove_from_xas_project)
        # widget objectName -> XASDataSet attribute written by update_ds_params
        self.lineEdit_to_ds_parameter_dict = {
            'lineEdit_preedge_lo': 'pre1',
            'lineEdit_preedge_hi': 'pre2',
            'lineEdit_postedge_lo': 'norm1',
            'lineEdit_postedge_hi': 'norm2',
            'lineEdit_e0': 'e0',
            'lineEdit_spline_lo': 'kmin',
            'lineEdit_spline_hi': 'kmax',
            'lineEdit_clamp_lo': 'clamp_lo',
            'lineEdit_clamp_hi': 'clamp_hi',
            'lineEdit_truncate_at': 'truncate',
            'lineEdit_k_ft_lo': 'kmin_ft',
            'lineEdit_k_ft_hi': 'kmax_ft'
        }
        # "set" button objectName -> the lineEdit it fills from a plot click
        self.pushButton_set_to_lineEdit_dict = {
            'pushButton_e0_set': 'lineEdit_e0',
            'pushButton_preedge_lo_set': 'lineEdit_preedge_lo',
            'pushButton_preedge_hi_set': 'lineEdit_preedge_hi',
            'pushButton_postedge_lo_set': 'lineEdit_postedge_lo',
            'pushButton_postedge_hi_set': 'lineEdit_postedge_hi',
            'pushButton_spline_lo_set': 'lineEdit_spline_lo',
            'pushButton_spline_hi_set': 'lineEdit_spline_hi',
            'pushButton_k_ft_lo_set': 'lineEdit_k_ft_lo',
            'pushButton_k_ft_hi_set': 'lineEdit_k_ft_hi',
            'pushButton_truncate_at_set': 'lineEdit_truncate_at'
        }
        # FT apodization window choices
        self.windows_list = [
            'hanning',
            'kaiser',
            'gaussian',
            'sine'
        ]
def xas_project_context_menu(self, QPos):
menu = QMenu()
rename_action = menu.addAction("&Rename")
merge_action = menu.addAction("&Merge")
remove_action = menu.addAction("&Remove")
save_datasets_as_text_action = menu.addAction("&Save datasets as text")
combine_and_save_datasets_as_text_action = menu.addAction("&Combine and save datasets as text")
parentPosition = self.list_project.mapToGlobal(QtCore.QPoint(0, 0))
menu.move(parentPosition + QPos)
action = menu.exec_()
if action == rename_action:
self.rename_dataset()
elif action == merge_action:
self.merge_datasets()
elif action == remove_action:
self.remove_from_xas_project()
elif action == combine_and_save_datasets_as_text_action:
self.combine_and_save_datasets_as_text()
elif action == save_datasets_as_text_action:
self.save_datasets_as_text()
def xas_project_double_clicked(self):
selection = self.list_project.selectedIndexes()
if selection != []:
self.rename_dataset()
    def addCanvas(self):
        """Create the matplotlib figure/canvas/toolbar for the project plot
        and embed them into the Qt layout."""
        # XASProject Plot:
        self.figure_project = Figure()
        #self.figure_project.set_facecolor(color='#E2E2E2')
        self.figure_project.ax = self.figure_project.add_subplot(111)
        self.figure_project.ax.grid(alpha=0.4)
        self.canvas_project = FigureCanvas(self.figure_project)
        # toolbar provides pan/zoom so users can inspect the spectra
        self.toolbar_project = NavigationToolbar(self.canvas_project, self)
        self.layout_plot_project.addWidget(self.canvas_project)
        self.layout_plot_project.addWidget(self.toolbar_project)
        self.figure_project.tight_layout()
        self.canvas_project.draw()
# layout_plot_xasproject
def push_param(self):
self.norm_param_list = [
'e0',
'pre1',
'pre2',
'norm1',
'norm2',
]
self.bkg_param_list = [
'kmin',
'kmax',
'clamp_lo',
'clamp_hi'
]
self.ft_param_list = [
]
selection = self.list_project.selectedIndexes()
if selection != []:
sender = QObject()
sender_object = sender.sender().objectName()
index = selection[0].row()
ds_master = self.parent.project[index]
if sender_object == 'pushButton_push_norm_param_to_selected':
for indx, obj in enumerate(selection):
ds = self.parent.project[selection[indx].row()]
for param in self.norm_param_list:
setattr(ds, param, getattr(ds_master, param))
if sender_object == 'pushButton_push_norm_param_to_all':
for indx, obj in enumerate(self.parent.project):
for param in self.norm_param_list:
setattr(self.parent.project[indx], param, getattr(ds_master, param))
if sender_object == 'pushButton_push_bkg_param_to_selected':
for indx, obj in enumerate(selection):
ds = self.parent.project[selection[indx].row()]
for param in self.bkg_param_list:
setattr(ds, param, getattr(ds_master, param))
if sender_object == 'pushButton_push_bkg_param_to_all':
for indx, obj in enumerate(self.parent.project):
for param in self.bkg_param_list:
setattr(self.parent.project[indx], param, getattr(ds_master, param))
# here we begin to work on the second pre-processing tab
def update_ds_params(self):
sender = QObject()
sender_object = sender.sender().objectName()
print(sender_object)
selection = self.list_project.selectedIndexes()
if selection != []:
index = selection[0].row()
ds = self.parent.xasproject[index]
try:
self.parent.statusBar().showMessage(sender_object)
print(getattr(self, sender_object).text())
setattr(ds, self.lineEdit_to_ds_parameter_dict[sender_object],
float(getattr(self, sender_object).text()))
except:
self.parent.statusBar().showMessage('Use numbers only')
    def set_ds_params_from_plot(self):
        """Arm click-to-set mode: the next click on the plot fills the sender's parameter.

        Remembers which '...set' button triggered the slot in
        ``self.sender_object``, then connects :meth:`mouse_press_event` to the
        canvas; the connection id is kept in ``self.cid`` so the handler can
        be detached again (Esc key or after one click).
        """
        sender = QObject()
        self.sender_object = sender.sender().objectName()
        self.parent.statusBar().showMessage('Click on graph or press Esc')
        self.cid = self.canvas_project.mpl_connect('button_press_event', self.mouse_press_event)
def _disconnect_cid(self):
if hasattr(self, 'cid'):
self.canvas_project.mpl_disconnect(self.cid)
delattr(self, 'cid')
    def keyPressEvent(self, event):
        """Qt key handler: Esc cancels a pending click-to-set operation."""
        if event.key() == QtCore.Qt.Key_Escape:
            self._disconnect_cid()
    def mouse_press_event(self, event):
        """Handle the one-shot plot click armed by :meth:`set_ds_params_from_plot`.

        Converts the clicked x coordinate into the parameter domain expected
        by the button that armed the click (energy, k, or energy-relative-to-e0,
        translating between e and k space when the current plot is in the
        other domain), writes it into the mapped QLineEdit, stores it on the
        selected dataset, and finally disconnects the handler.
        """
        # Buttons whose parameter lives in k space (spline / FT ranges).
        e_vs_k_discriminate_list = ['pushButton_spline_lo_set',
                                    'pushButton_spline_hi_set',
                                    'pushButton_k_ft_lo_set',
                                    'pushButton_k_ft_hi_set'
                                    ]
        lineEdit = getattr(self, self.pushButton_set_to_lineEdit_dict[self.sender_object])
        e0 = float(self.lineEdit_e0.text())
        if self.sender_object == 'pushButton_e0_set':
            new_value = event.xdata
        elif self.sender_object == 'pushButton_truncate_at_set':
            # Truncation point is kept in energy units.
            if self.current_plot_in == 'e':
                new_value = event.xdata
            elif self.current_plot_in == 'k':
                new_value = k2e(event.xdata, e0)
        elif self.sender_object in e_vs_k_discriminate_list:
            # These parameters are kept in k units.
            if self.current_plot_in == 'k':
                new_value = event.xdata
            elif self.current_plot_in == 'e':
                new_value = e2k(event.xdata, e0)
        else:
            # Pre/post-edge limits are stored relative to e0.
            new_value = event.xdata - e0
        lineEdit.setText('{:.1f}'.format(new_value))
        sender_object = lineEdit
        print(sender_object)
        selection = self.list_project.selectedIndexes()
        if selection != []:
            index = selection[0].row()
            ds = self.parent.project[index]
            try:
                float(sender_object.text())
                setattr(ds, self.lineEdit_to_ds_parameter_dict[sender_object.objectName()],
                        float(sender_object.text()))
            except:
                # NOTE(review): adjacent string literals — this prints
                # "whats going wrong" (no apostrophe); likely meant "what's".
                print('what''s going wrong')
        # One-shot handler: always detach after the click.
        self._disconnect_cid()
def update_project_list(self, datasets):
self.list_project.clear()
for ds in datasets:
self.list_project.addItem(ds.name)
    def show_ds_params(self):
        """Display the first selected dataset's processing parameters in the line edits
        and highlight that dataset in the list (bold font)."""
        if self.list_project.selectedIndexes():
            index = self.list_project.selectedIndexes()[0]
            ds = self.parent.project[index.row()]
            # Mirror every dataset parameter into its UI field, 1 decimal place.
            self.lineEdit_e0.setText('{:.1f}'.format(ds.e0))
            self.lineEdit_preedge_lo.setText('{:.1f}'.format(ds.pre1))
            self.lineEdit_preedge_hi.setText('{:.1f}'.format(ds.pre2))
            self.lineEdit_postedge_lo.setText('{:.1f}'.format(ds.norm1))
            self.lineEdit_postedge_hi.setText('{:.1f}'.format(ds.norm2))
            self.lineEdit_spline_lo.setText('{:.1f}'.format(ds.kmin))
            self.lineEdit_spline_hi.setText('{:.1f}'.format(ds.kmax))
            self.lineEdit_clamp_lo.setText('{:.1f}'.format(ds.clamp_lo))
            self.lineEdit_clamp_hi.setText('{:.1f}'.format(ds.clamp_hi))
            self.lineEdit_k_ft_lo.setText('{:.1f}'.format(ds.kmin_ft))
            self.lineEdit_k_ft_hi.setText('{:.1f}'.format(ds.kmax_ft))
            # Make the first selected line bold, and reset bold font for other selections
            font = QtGui.QFont()
            font.setBold(False)
            for i in range(self.list_project.count()):
                self.list_project.item(i).setFont(font)
            font.setBold(True)
            self.list_project.item(index.row()).setFont(font)
def remove_from_xas_project(self):
for index in self.list_project.selectedIndexes()[
::-1]: # [::-1] to remove using indexes from last to first
self.parent.project.removeDatasetIndex(index.row())
self.parent.statusBar().showMessage('Datasets deleted')
    def plot_project_in_E(self):
        """Plot the selected datasets in energy space.

        Depending on the radio/check boxes, plots raw mu, normalized/flattened
        mu, or the derivative; optionally overlays pre-edge, post-edge and
        background curves; optionally forces the x range around e0.
        Sets ``self.current_plot_in = 'e'`` so click-to-set knows the domain.
        """
        if self.list_project.selectedIndexes():
            update_figure([self.figure_project.ax], self.toolbar_project, self.canvas_project)
            for index in self.list_project.selectedIndexes():
                ds = self.parent.project[index.row()]
                # Recompute normalization / chi / FT before plotting.
                ds.normalize_force()
                ds.extract_chi_force()
                ds.extract_ft()
                energy = ds.energy
                if self.radioButton_mu_xasproject.isChecked():
                    data = ds.mu
                elif self.radioButton_norm_xasproject.isChecked():
                    if self.checkBox_norm_flat_xasproject.checkState():
                        data = ds.flat
                    else:
                        data = ds.norm
                if self.checkBox_deriv.isChecked():
                    # Derivative overrides the selection above.
                    data = ds.mu_deriv
                    energy = ds.energy_deriv
                self.figure_project.ax.plot(energy, data, label=ds.name)
                if self.radioButton_mu_xasproject.isChecked() and not self.checkBox_deriv.isChecked():
                    if self.checkBox_preedge_show.checkState():
                        self.figure_project.ax.plot(ds.energy, ds.pre_edge, label='Preedge', linewidth=0.75)
                    if self.checkBox_postedge_show.checkState():
                        self.figure_project.ax.plot(ds.energy, ds.post_edge, label='Postedge', linewidth=0.75)
                    if self.checkBox_background_show.checkState():
                        self.figure_project.ax.plot(ds.energy, ds.bkg, label='Background', linewidth=0.75)
            # NOTE(review): trailing comma below makes this statement a 1-tuple
            # expression — harmless but probably unintended.
            self.parent.set_figure(self.figure_project.ax, self.canvas_project, label_x='Energy /eV',
                                   label_y=r'$\chi \mu$' + '(E)'),
            if self.checkBox_force_range_E.checkState():
                self.figure_project.ax.set_xlim(
                    (float(self.lineEdit_e0.text()) + float(self.lineEdit_range_E_lo.text())),
                    (float(self.lineEdit_e0.text()) + float(self.lineEdit_range_E_hi.text())))
        self.current_plot_in = 'e'
    def plot_project_in_K(self):
        """Plot the selected datasets' k-weighted chi(k) in k space.

        Applies the FT window from the UI, weights chi by k^(spinBox value),
        optionally overlays the window function scaled to half the data
        maximum, and optionally forces the x range.  Sets
        ``self.current_plot_in = 'k'``.
        """
        if self.list_project.selectedIndexes():
            update_figure([self.figure_project.ax], self.toolbar_project, self.canvas_project)
            window = self.set_ft_window()
            for index in self.list_project.selectedIndexes():
                ds = self.parent.project[index.row()]
                ds.extract_chi_force()
                ds.extract_ft_force(window=window)
                # k-weighting: chi(k) * k^w
                data = ds.chi * np.power(ds.k, self.spinBox_k_weight.value())
                self.figure_project.ax.plot(ds.k, data, label=ds.name)
                data_max = data.max()
                if self.checkBox_show_window.isChecked():
                    self.figure_project.ax.plot(ds.k, ds.kwin * data_max / 2, label='Windows')
            self.parent.set_figure(self.figure_project.ax, self.canvas_project,
                                   label_x='k (' + r'$\AA$' + '$^1$' + ')',
                                   label_y=r'$\chi \mu$' + '(k)')
            if self.checkBox_force_range_k.checkState():
                self.figure_project.ax.set_xlim(float(self.lineEdit_range_k_lo.text()),
                                                float(self.lineEdit_range_k_hi.text()))
        self.current_plot_in = 'k'
    def plot_project_in_R(self):
        """Plot the selected datasets' Fourier-transformed chi in R space.

        Shows the magnitude and/or the imaginary/real parts of chi(R)
        according to the check boxes; optionally forces the x range.
        Sets ``self.current_plot_in = 'R'``.
        """
        if self.list_project.selectedIndexes():
            update_figure([self.figure_project.ax], self.toolbar_project, self.canvas_project)
            window = self.set_ft_window()
            for index in self.list_project.selectedIndexes():
                ds = self.parent.project[index.row()]
                ds.extract_ft_force(window=window)
                if self.checkBox_show_chir_mag.checkState():
                    self.figure_project.ax.plot(ds.r, ds.chir_mag, label=ds.name)
                if self.checkBox_show_chir_im.checkState():
                    self.figure_project.ax.plot(ds.r, ds.chir_im, label=(ds.name + ' Im'))
                if self.checkBox_show_chir_re.checkState():
                    self.figure_project.ax.plot(ds.r, ds.chir_re, label=(ds.name + ' Re'))
                # if self.checkBox_show_chir_pha.checked:
                #     self.figure_project.ax.plot(ds.r, ds.chir_pha, label=(ds.name + ' Ph'))
            self.parent.set_figure(self.figure_project.ax, self.canvas_project, label_y=r'$\chi \mu$' + '(k)',
                                   label_x='R (' + r'$\AA$' + ')')
            if self.checkBox_force_range_R.checkState():
                self.figure_project.ax.set_xlim(float(self.lineEdit_range_R_lo.text()),
                                                float(self.lineEdit_range_R_hi.text()))
        self.current_plot_in = 'R'
def save_xas_project(self):
options = QtWidgets.QFileDialog.DontUseNativeDialog
filename, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save XAS project as', self.parent.widget_data.working_folder,
'XAS project files (*.xas)', options=options)
if filename:
if Path(filename).suffix != '.xas':
filename = filename + '.xas'
print(filename)
self.parent.project.save(filename=filename)
def open_xas_project(self):
options = QtWidgets.QFileDialog.DontUseNativeDialog
filename, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Load XAS project', self.parent.widget_data.working_folder,
'XAS project files (*.xas)', options=options)
if filename:
self.parent.project_loaded_from_file = xasproject.XASProject()
self.parent.project_loaded_from_file.load(filename=filename)
if ret == 0:
self.parent.project = self.parent.xasproject_loaded_from_file
self.update_project_list(self.parent.project._datasets)
if ret == 1:
for i in self.parent.project_loaded_from_file._datasets:
self.parent.project.append(i)
def save_datasets_as_text(self):
# options = QtWidgets.QFileDialog.DontUseNativeDialog
# filename, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save XAS project as', self.parent.widget_data.working_folder,
# 'XAS project files (*.xas)', options=options)
selection = self.list_project.selectedIndexes()
if selection != []:
ret = self.message_box_save_datasets_as()
options = QtWidgets.QFileDialog.DontUseNativeDialog
pathname = QtWidgets.QFileDialog.getExistingDirectory(self, 'Choose folder...',
self.parent.widget_data.working_folder,
options=options)
separator = '#______________________________________________________\n'
if pathname is not '':
for indx, obj in enumerate(selection):
ds = self.parent.project._datasets[selection[indx].row()]
filename = ds.name
if ret == 0:
xx = ds.energy
yy = np.array(ds.mu.mu)
keys = '# energy(eV), mu(E)\n'
elif ret == 1:
xx = ds.energy
yy = ds.norm
keys = '# energy(eV), normalized mu(E)\n'
elif ret == 2:
xx = ds.energy
yy = ds.flat
keys = '# energy(eV), flattened normalized mu(E)\n'
table = np.stack((xx, yy)).T
filename_new = '{}/{}.{}'.format(pathname, filename, 'mu')
fid = open(filename_new, 'w')
header_wo_cols_names = ds.header[0:ds.header.rfind('#')]
fid.write(header_wo_cols_names)
fid.write(separator)
fid.write(keys)
fid.close()
fid = open(filename_new, 'a')
np.savetxt(fid, table)
fid.close()
    def merge_datasets(self):
        """Average the selected datasets onto a common energy grid and append
        the result to the project as a new 'merge' dataset.

        The first selected dataset's energy grid is used as the master grid;
        every other mu is interpolated onto it before averaging.  The merged
        dataset's header records the source filenames.
        """
        selection = self.list_project.selectedIndexes()
        if selection != []:
            mu = self.parent.project._datasets[selection[0].row()].mu
            energy_master = self.parent.project._datasets[selection[0].row()].energy
            mu_array = np.zeros([len(selection), len(mu)])
            energy = self.parent.project._datasets[selection[0].row()].energy
            md = ['# merged \n']
            for indx, obj in enumerate(selection):
                energy = self.parent.project._datasets[selection[indx].row()].energy
                mu = self.parent.project._datasets[selection[indx].row()].mu.mu
                # Interpolate onto the master grid so rows are commensurate.
                mu = np.interp(energy_master, energy, mu)
                mu_array[indx, :] = mu
                md.append('# ' + self.parent.project._datasets[selection[indx].row()].filename + '\n')
            mu_merged = np.average(mu_array, axis=0)
            merged = XASDataSet(name='merge', md=md, energy=energy, mu=mu_merged, filename='',
                                datatype='processed')
            merged.header = "".join(merged.md)
            self.parent.project.append(merged)
            self.parent.project.project_changed()
    def combine_and_save_datasets_as_text(self):
        """Export the selected datasets as one multi-column text file.

        Sorts the selection by dataset name, interpolates each chosen column
        (raw mu / normalized / flattened, per user dialog) onto the first
        dataset's energy grid, and writes energy plus one column per dataset
        with a header listing the dataset names.
        """
        selection = self.list_project.selectedIndexes()
        if selection != []:
            ds_list = []
            md = []
            for indx, obj in enumerate(selection):
                ds_list.append(self.parent.project._datasets[selection[indx].row()])
            ds_list.sort(key=lambda x: x.name)
            mu = ds_list[0].mu
            # Row 0 holds the master energy grid; rows 1..N the datasets.
            mu_array = np.zeros([len(selection) + 1, len(mu)])
            energy_master = ds_list[0].energy
            mu_array[0, :] = energy_master
            ret = self.message_box_save_datasets_as()
            for indx, obj in enumerate(selection):
                ds = ds_list[indx]
                energy = ds.energy
                if ret == 0:
                    yy = np.array(ds.mu.mu)
                    keys = '# energy(eV), mu(E)\n'
                elif ret == 1:
                    yy = ds.norm
                    keys = '# energy(eV), normalized mu(E)\n'
                elif ret == 2:
                    yy = ds.flat
                    keys = '# energy(eV), flattened normalized mu(E)\n'
                yy = np.interp(energy_master, energy, yy)
                mu_array[indx + 1, :] = yy
                md.append(ds.name)
            self.mu_array = mu_array
            options = QtWidgets.QFileDialog.DontUseNativeDialog
            filename, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save XAS project', self.parent.widget_data.working_folder,
                                                          'XAS dataset (*.dat)', options=options)
            if filename:
                # NOTE(review): the dialog filter is *.dat but the suffix check
                # below enforces '.xas' — confirm which extension is intended.
                if Path(filename).suffix != '.xas':
                    filename = filename + '.xas'
                print(filename)
                filelist = "{}".format("\n".join(md[0:]))
                separator = '\n #______________________________________________________\n'
                header = '{} {} {}'.format(filelist, separator, keys)
                fid = open(filename, 'w')
                np.savetxt(fid, np.transpose(mu_array), header=header)
                fid.close()
def rename_dataset(self):
selection = self.list_project.selectedIndexes()
if selection != []:
name = self.parent.project._datasets[selection[0].row()].name
new_name, ok = QtWidgets.QInputDialog.getText(self, 'Rename dataset', 'Enter new name:',
QtWidgets.QLineEdit.Normal, name)
if ok:
self.parent.project._datasets[selection[0].row()].name = new_name
self.parent.project.project_changed()
    def truncate(self):
        """Truncate the selected datasets at the energy typed into lineEdit_truncate_at.

        Depending on which button triggered the slot, keeps the part of the
        spectrum above ('truncate_below') or below ('truncate_above') the
        energy sample closest to the requested value, then refreshes the
        dataset's larch group.
        """
        sender = QObject()
        sender_object = sender.sender().objectName()
        print(sender_object)
        selection = self.list_project.selectedIndexes()
        if selection != []:
            for indx, obj in enumerate(selection):
                print(indx)
                ds = self.parent.project._datasets[selection[indx].row()]
                print(ds.name)
                energy = ds.energy
                mu = ds.mu
                # Index of the sample closest to the requested energy.
                indx_energy_to_truncate_at = (np.abs(energy - float(self.lineEdit_truncate_at.text()))).argmin()
                if sender_object == 'pushButton_truncate_below':
                    ds.energy = energy[indx_energy_to_truncate_at:]
                    ds.mu = mu[indx_energy_to_truncate_at:]
                elif sender_object == 'pushButton_truncate_above':
                    ds.energy = energy[0:indx_energy_to_truncate_at]
                    # trailing ':' in the slice below is redundant but harmless
                    ds.mu = mu[0:indx_energy_to_truncate_at:]
                ds.update_larch()
                self.parent.project._datasets[selection[indx].row()] = ds
'''
Service routines
'''
def message_box_save_datasets_as(self):
messageBox = QtWidgets.QMessageBox()
messageBox.setText('Save datasets as..')
messageBox.addButton(QtWidgets.QPushButton('mu(E)'), QtWidgets.QMessageBox.YesRole)
messageBox.addButton(QtWidgets.QPushButton('normalized mu(E)'), QtWidgets.QMessageBox.NoRole)
messageBox.addButton(QtWidgets.QPushButton('flattened mu(E)'), QtWidgets.QMessageBox.NoRole)
ret = messageBox.exec_()
return ret
def message_box_warning(self, line1='Warning', line2=''):
messageBox = QtWidgets.QMessageBox()
messageBox.setText(line1)
if line2:
messageBox.setInformativeText(line2)
messageBox.setWindowTitle("Warning")
messageBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
messageBox.exec_()
def set_ft_window(self):
window = dict()
window['window_type'] = self.windows_list[self.comboBox_window.currentIndex()]
window['r_weight'] = self.spinBox_r_weight.value()
try:
window['tapering'] = float(self.lineEdit_window_tapering.text())
except:
window['tapering'] = 1
return window
| [
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"PyQt5.QtWidgets.QMessageBox",
"PyQt5.uic.loadUiType",
"numpy.array",
"PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"xas.xray.k2e",
"xas.xray.e2k",
"pathlib.Path",
"numpy.stack",
"isstools.elements.figure_update.update_figure",
"PyQt5.Q... | [((848, 921), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""isstools"""', '"""ui/ui_xview_project-mac.ui"""'], {}), "('isstools', 'ui/ui_xview_project-mac.ui')\n", (879, 921), False, 'import pkg_resources\n'), ((942, 1011), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""isstools"""', '"""ui/ui_xview_project.ui"""'], {}), "('isstools', 'ui/ui_xview_project.ui')\n", (973, 1011), False, 'import pkg_resources\n'), ((1036, 1059), 'PyQt5.uic.loadUiType', 'uic.loadUiType', (['ui_path'], {}), '(ui_path)\n', (1050, 1059), False, 'from PyQt5 import QtGui, QtWidgets, QtCore, uic\n'), ((6652, 6659), 'PyQt5.QtWidgets.QMenu', 'QMenu', ([], {}), '()\n', (6657, 6659), False, 'from PyQt5.QtWidgets import QMenu\n'), ((7918, 7926), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (7924, 7926), False, 'from matplotlib.figure import Figure\n'), ((8150, 8183), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['self.figure_project'], {}), '(self.figure_project)\n', (8162, 8183), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((8220, 8264), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['self.canvas_project', 'self'], {}), '(self.canvas_project, self)\n', (8237, 8264), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((10617, 10626), 'PyQt5.Qt.QObject', 'QObject', ([], {}), '()\n', (10624, 10626), False, 'from PyQt5.Qt import QSplashScreen, QObject\n'), ((11378, 11387), 'PyQt5.Qt.QObject', 'QObject', ([], {}), '()\n', (11385, 11387), False, 'from PyQt5.Qt import QSplashScreen, QObject\n'), ((20899, 21061), 'PyQt5.QtWidgets.QFileDialog.getSaveFileName', 'QtWidgets.QFileDialog.getSaveFileName', (['self', '"""Save XAS project as"""', 'self.parent.widget_data.working_folder', '"""XAS 
project files (*.xas)"""'], {'options': 'options'}), "(self, 'Save XAS project as', self.\n parent.widget_data.working_folder, 'XAS project files (*.xas)', options\n =options)\n", (20936, 21061), False, 'from PyQt5 import QtGui, QtWidgets, QtCore, uic\n'), ((21461, 21615), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', (['self', '"""Load XAS project"""', 'self.parent.widget_data.working_folder', '"""XAS project files (*.xas)"""'], {'options': 'options'}), "(self, 'Load XAS project', self.parent\n .widget_data.working_folder, 'XAS project files (*.xas)', options=options)\n", (21498, 21615), False, 'from PyQt5 import QtGui, QtWidgets, QtCore, uic\n'), ((28730, 28739), 'PyQt5.Qt.QObject', 'QObject', ([], {}), '()\n', (28737, 28739), False, 'from PyQt5.Qt import QSplashScreen, QObject\n'), ((29968, 29991), 'PyQt5.QtWidgets.QMessageBox', 'QtWidgets.QMessageBox', ([], {}), '()\n', (29989, 29991), False, 'from PyQt5 import QtGui, QtWidgets, QtCore, uic\n'), ((30505, 30528), 'PyQt5.QtWidgets.QMessageBox', 'QtWidgets.QMessageBox', ([], {}), '()\n', (30526, 30528), False, 'from PyQt5 import QtGui, QtWidgets, QtCore, uic\n'), ((7071, 7090), 'PyQt5.QtCore.QPoint', 'QtCore.QPoint', (['(0)', '(0)'], {}), '(0, 0)\n', (7084, 7090), False, 'from PyQt5 import QtGui, QtWidgets, QtCore, uic\n'), ((9049, 9058), 'PyQt5.Qt.QObject', 'QObject', ([], {}), '()\n', (9056, 9058), False, 'from PyQt5.Qt import QSplashScreen, QObject\n'), ((15077, 15090), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (15088, 15090), False, 'from PyQt5 import QtGui, QtWidgets, QtCore, uic\n'), ((15785, 15872), 'isstools.elements.figure_update.update_figure', 'update_figure', (['[self.figure_project.ax]', 'self.toolbar_project', 'self.canvas_project'], {}), '([self.figure_project.ax], self.toolbar_project, self.\n canvas_project)\n', (15798, 15872), False, 'from isstools.elements.figure_update import update_figure\n'), ((18033, 18120), 
'isstools.elements.figure_update.update_figure', 'update_figure', (['[self.figure_project.ax]', 'self.toolbar_project', 'self.canvas_project'], {}), '([self.figure_project.ax], self.toolbar_project, self.\n canvas_project)\n', (18046, 18120), False, 'from isstools.elements.figure_update import update_figure\n'), ((19365, 19452), 'isstools.elements.figure_update.update_figure', 'update_figure', (['[self.figure_project.ax]', 'self.toolbar_project', 'self.canvas_project'], {}), '([self.figure_project.ax], self.toolbar_project, self.\n canvas_project)\n', (19378, 19452), False, 'from isstools.elements.figure_update import update_figure\n'), ((22793, 22923), 'PyQt5.QtWidgets.QFileDialog.getExistingDirectory', 'QtWidgets.QFileDialog.getExistingDirectory', (['self', '"""Choose folder..."""', 'self.parent.widget_data.working_folder'], {'options': 'options'}), "(self, 'Choose folder...', self.\n parent.widget_data.working_folder, options=options)\n", (22835, 22923), False, 'from PyQt5 import QtGui, QtWidgets, QtCore, uic\n'), ((25486, 25514), 'numpy.average', 'np.average', (['mu_array'], {'axis': '(0)'}), '(mu_array, axis=0)\n', (25496, 25514), True, 'import numpy as np\n'), ((25540, 25639), 'isstools.xasproject.xasproject.XASDataSet', 'XASDataSet', ([], {'name': '"""merge"""', 'md': 'md', 'energy': 'energy', 'mu': 'mu_merged', 'filename': '""""""', 'datatype': '"""processed"""'}), "(name='merge', md=md, energy=energy, mu=mu_merged, filename='',\n datatype='processed')\n", (25550, 25639), False, 'from isstools.xasproject.xasproject import XASDataSet\n'), ((27336, 27484), 'PyQt5.QtWidgets.QFileDialog.getSaveFileName', 'QtWidgets.QFileDialog.getSaveFileName', (['self', '"""Save XAS project"""', 'self.parent.widget_data.working_folder', '"""XAS dataset (*.dat)"""'], {'options': 'options'}), "(self, 'Save XAS project', self.parent\n .widget_data.working_folder, 'XAS dataset (*.dat)', options=options)\n", (27373, 27484), False, 'from PyQt5 import QtGui, QtWidgets, QtCore, 
uic\n'), ((28343, 28454), 'PyQt5.QtWidgets.QInputDialog.getText', 'QtWidgets.QInputDialog.getText', (['self', '"""Rename dataset"""', '"""Enter new name:"""', 'QtWidgets.QLineEdit.Normal', 'name'], {}), "(self, 'Rename dataset', 'Enter new name:',\n QtWidgets.QLineEdit.Normal, name)\n", (28373, 28454), False, 'from PyQt5 import QtGui, QtWidgets, QtCore, uic\n'), ((30078, 30108), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""mu(E)"""'], {}), "('mu(E)')\n", (30099, 30108), False, 'from PyQt5 import QtGui, QtWidgets, QtCore, uic\n'), ((30174, 30215), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""normalized mu(E)"""'], {}), "('normalized mu(E)')\n", (30195, 30215), False, 'from PyQt5 import QtGui, QtWidgets, QtCore, uic\n'), ((30280, 30320), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""flattened mu(E)"""'], {}), "('flattened mu(E)')\n", (30301, 30320), False, 'from PyQt5 import QtGui, QtWidgets, QtCore, uic\n'), ((25270, 25306), 'numpy.interp', 'np.interp', (['energy_master', 'energy', 'mu'], {}), '(energy_master, energy, mu)\n', (25279, 25306), True, 'import numpy as np\n'), ((27073, 27109), 'numpy.interp', 'np.interp', (['energy_master', 'energy', 'yy'], {}), '(energy_master, energy, yy)\n', (27082, 27109), True, 'import numpy as np\n'), ((21160, 21174), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (21164, 21174), False, 'from pathlib import Path\n'), ((24485, 24507), 'numpy.savetxt', 'np.savetxt', (['fid', 'table'], {}), '(fid, table)\n', (24495, 24507), True, 'import numpy as np\n'), ((26687, 26705), 'numpy.array', 'np.array', (['ds.mu.mu'], {}), '(ds.mu.mu)\n', (26695, 26705), True, 'import numpy as np\n'), ((28036, 28058), 'numpy.transpose', 'np.transpose', (['mu_array'], {}), '(mu_array)\n', (28048, 28058), True, 'import numpy as np\n'), ((12774, 12794), 'xas.xray.k2e', 'k2e', (['event.xdata', 'e0'], {}), '(event.xdata, e0)\n', (12777, 12794), False, 'from xas.xray import k2e, e2k\n'), ((23483, 
23501), 'numpy.array', 'np.array', (['ds.mu.mu'], {}), '(ds.mu.mu)\n', (23491, 23501), True, 'import numpy as np\n'), ((23989, 24007), 'numpy.stack', 'np.stack', (['(xx, yy)'], {}), '((xx, yy))\n', (23997, 24007), True, 'import numpy as np\n'), ((27600, 27614), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (27604, 27614), False, 'from pathlib import Path\n'), ((13035, 13055), 'xas.xray.e2k', 'e2k', (['event.xdata', 'e0'], {}), '(event.xdata, e0)\n', (13038, 13055), False, 'from xas.xray import k2e, e2k\n')] |
import numpy as np
import pytest
from sklego.common import flatten
from sklego.dummy import RandomRegressor
from tests.conftest import nonmeta_checks, regressor_checks, general_checks, select_tests
@pytest.mark.parametrize(
    "test_fn",
    select_tests(
        flatten([general_checks, nonmeta_checks, regressor_checks]),
        exclude=[
            "check_sample_weights_invariance",
            "check_methods_subset_invariance",
            "check_regressors_train",
            "check_sample_weights_list",
            "check_sample_weights_pandas_series"
        ]
    )
)
def test_estimator_checks(test_fn):
    """Run the scikit-learn estimator checks against both RandomRegressor strategies."""
    # Tests that are skipped:
    # 'check_methods_subset_invariance': Since we add noise, the method is not invariant on a subset
    # 'check_regressors_train': score is not always greater than 0.5 due to randomness
    regr_normal = RandomRegressor(strategy="normal")
    test_fn(RandomRegressor.__name__ + "_normal", regr_normal)
    regr_uniform = RandomRegressor(strategy="uniform")
    test_fn(RandomRegressor.__name__ + "_uniform", regr_uniform)
def test_values_uniform(random_xy_dataset_regr):
    """Uniform strategy: predictions stay within [y.min(), y.max()] and the
    fitted bounds match the training targets."""
    X, y = random_xy_dataset_regr
    mod = RandomRegressor(strategy="uniform")
    predictions = mod.fit(X, y).predict(X)
    assert (predictions >= y.min()).all()
    assert (predictions <= y.max()).all()
    assert mod.min_ == pytest.approx(y.min(), abs=0.0001)
    assert mod.max_ == pytest.approx(y.max(), abs=0.0001)
def test_values_normal(random_xy_dataset_regr):
    """Normal strategy: the fitted mu_/sigma_ equal the mean/std of the targets."""
    X, y = random_xy_dataset_regr
    mod = RandomRegressor(strategy="normal").fit(X, y)
    assert mod.mu_ == pytest.approx(np.mean(y), abs=0.001)
    assert mod.sigma_ == pytest.approx(np.std(y), abs=0.001)
def test_bad_values():
    """An unknown strategy name must raise ValueError at fit time."""
    np.random.seed(42)
    X = np.random.normal(0, 1, (10, 2))
    y = np.random.normal(0, 1, (10, 1))
    with pytest.raises(ValueError):
        RandomRegressor(strategy="foobar").fit(X, y)
| [
"numpy.random.normal",
"numpy.mean",
"sklego.dummy.RandomRegressor",
"sklego.common.flatten",
"pytest.raises",
"numpy.random.seed",
"numpy.std"
] | [((859, 893), 'sklego.dummy.RandomRegressor', 'RandomRegressor', ([], {'strategy': '"""normal"""'}), "(strategy='normal')\n", (874, 893), False, 'from sklego.dummy import RandomRegressor\n'), ((977, 1012), 'sklego.dummy.RandomRegressor', 'RandomRegressor', ([], {'strategy': '"""uniform"""'}), "(strategy='uniform')\n", (992, 1012), False, 'from sklego.dummy import RandomRegressor\n'), ((1173, 1208), 'sklego.dummy.RandomRegressor', 'RandomRegressor', ([], {'strategy': '"""uniform"""'}), "(strategy='uniform')\n", (1188, 1208), False, 'from sklego.dummy import RandomRegressor\n'), ((1740, 1758), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1754, 1758), True, 'import numpy as np\n'), ((1767, 1798), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(10, 2)'], {}), '(0, 1, (10, 2))\n', (1783, 1798), True, 'import numpy as np\n'), ((1807, 1838), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(10, 1)'], {}), '(0, 1, (10, 1))\n', (1823, 1838), True, 'import numpy as np\n'), ((268, 327), 'sklego.common.flatten', 'flatten', (['[general_checks, nonmeta_checks, regressor_checks]'], {}), '([general_checks, nonmeta_checks, regressor_checks])\n', (275, 327), False, 'from sklego.common import flatten\n'), ((1848, 1873), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1861, 1873), False, 'import pytest\n'), ((1546, 1580), 'sklego.dummy.RandomRegressor', 'RandomRegressor', ([], {'strategy': '"""normal"""'}), "(strategy='normal')\n", (1561, 1580), False, 'from sklego.dummy import RandomRegressor\n'), ((1627, 1637), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1634, 1637), True, 'import numpy as np\n'), ((1689, 1698), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (1695, 1698), True, 'import numpy as np\n'), ((1883, 1917), 'sklego.dummy.RandomRegressor', 'RandomRegressor', ([], {'strategy': '"""foobar"""'}), "(strategy='foobar')\n", (1898, 1917), False, 'from sklego.dummy import RandomRegressor\n')] |
import numpy as np
NSAMPLE=64
NSPEC=4
for spec in range(NSPEC):
list = []
for i in range(NSAMPLE):
infile = "RUN%d/res.mass_spec%d_final_vert" % (i+1,spec+1)
a = np.loadtxt(infile)
list.append(a)
list = np.transpose(np.array(list))
mean = np.mean(list,axis=1)
std = np.std(list,axis=1,ddof=1)
res_stat = np.transpose([mean,std])
outfile = "res.mass_spec%d_final_vert_stat" % (spec+1)
np.savetxt(outfile,res_stat)
| [
"numpy.mean",
"numpy.array",
"numpy.savetxt",
"numpy.std",
"numpy.loadtxt",
"numpy.transpose"
] | [((293, 314), 'numpy.mean', 'np.mean', (['list'], {'axis': '(1)'}), '(list, axis=1)\n', (300, 314), True, 'import numpy as np\n'), ((324, 352), 'numpy.std', 'np.std', (['list'], {'axis': '(1)', 'ddof': '(1)'}), '(list, axis=1, ddof=1)\n', (330, 352), True, 'import numpy as np\n'), ((371, 396), 'numpy.transpose', 'np.transpose', (['[mean, std]'], {}), '([mean, std])\n', (383, 396), True, 'import numpy as np\n'), ((465, 494), 'numpy.savetxt', 'np.savetxt', (['outfile', 'res_stat'], {}), '(outfile, res_stat)\n', (475, 494), True, 'import numpy as np\n'), ((190, 208), 'numpy.loadtxt', 'np.loadtxt', (['infile'], {}), '(infile)\n', (200, 208), True, 'import numpy as np\n'), ((261, 275), 'numpy.array', 'np.array', (['list'], {}), '(list)\n', (269, 275), True, 'import numpy as np\n')] |
import numpy as np
import data
import matplotlib.pyplot as plt
def sanityCheck(objType, phiType):
    """ draw and save the figure of the mean, range for different methods and N

    Loads precomputed results from 'data/<objType>_<phiType>_final.npz' and
    plots robust vs nonrobust (SAA) mean returns with shaded min/max ranges
    as a function of sample size N; saves the figure next to the data.

    :param objType: objective type - worst/sum
    :param phiType: phi-divergence type - cre/chi/m-chi
    """
    loaded = np.load('data/' + objType + '_' + phiType + '_final.npz')
    robustReturns = loaded['robust']
    detReturns = loaded['SAA']
    # Transpose so rows become [min, max, mean] series over N.
    robArray = np.transpose(np.array(robustReturns))
    detArray = np.transpose(np.array(detReturns))
    plt.figure(figsize=(7, 5))
    plt.plot(list(range(10, 1001, 10)), robArray[2], color='b', label='Mean robust')
    plt.fill_between(list(range(10, 1001, 10)), robArray[0], robArray[1], color='b', alpha=0.2, label='Range robust')
    plt.plot(list(range(10, 1001, 10)), detArray[2], color='r', label='Mean nonrobust')
    plt.fill_between(list(range(10, 1001, 10)), detArray[0], detArray[1], color='r', alpha=0.2, label='Range nonrobust')
    plt.legend(loc="lower right", prop={'size': 12})
    plt.xticks(list(range(0, 1001, 100)))
    plt.xlim(xmin=0, xmax=1000)
    plt.xlabel('N')
    # Axis limits differ per objective because return scales differ.
    if objType == 'sum':
        plt.yticks(list(range(100, 170, 10)))
        plt.ylim(ymin=100, ymax=161)
    elif objType == 'worst':
        plt.yticks(list(range(-13, 10, 2)))
        plt.ylim(ymin=-10, ymax=7)
    else:
        raise Exception("Wrong model type")
    plt.ylabel('Return')
    plt.savefig('data/' + objType + '_' + phiType + '_final.png')
    plt.show()
def outSample(objType, phiType, N):
    """ draw and save the figure of the mean, range, reliability for robust model with different alpha

    Loads results from 'data/<objType>_<phiType>_alpha_<N>.npz' and plots the
    robust return (mean + min/max band, left axis) and reliability (right
    axis) against alpha on a log x scale; saves the figure next to the data.

    :param objType: objective type - worst/sum
    :param phiType: phi-divergence type - cre/chi/m-chi
    :param N: sample size used in the experiment (selects the data file)
    """
    loaded = np.load('data/' + objType + '_' + phiType + '_alpha_' + str(N) + '.npz')
    robustReturns = loaded['robust']
    # Rows after transpose: [min, max, mean, reliability] per alpha value.
    robArray = np.transpose(np.array(robustReturns))
    fig, ax1 = plt.subplots(figsize=(7, 5))
    alpha = [0.0001, 0.001, 0.01, 0.1]
    alphaTest = data.alphaSet(alpha)
    ax1.plot(alphaTest, robArray[2], color='b', label='Mean robust')
    ax1.fill_between(alphaTest, robArray[0], robArray[1], color='b', alpha=0.2, label='Range robust')
    ax1.set_xscale("log")
    if objType == 'sum':
        ax1.set_yticks(list(range(30, 181, 10)))
        ax1.set_ylim(ymin=30, ymax=181)
    elif objType == 'worst':
        ax1.set_yticks(list(range(-13, 8, 2)))
        ax1.set_ylim(ymin=-5, ymax=7)
    else:
        raise Exception("Wrong model type")
    ax1.set_xlim(xmin=0.0001, xmax=0.4)
    ax1.set_xlabel('alpha')
    ax1.set_ylabel('Return')
    # Dummy series so 'Reliability' appears in ax1's legend.
    ax1.plot(np.nan, 'r', label='Reliability')
    ax2 = ax1.twinx()
    ax2.plot(alphaTest, robArray[3], color='r', label='Reliability')
    ax2.set_ylim(ymin=0, ymax=1.1)
    ax2.set_yticks(list(np.arange(0, 1.1, 0.1)))
    ax2.set_ylabel('Reliability')
    ax1.legend(loc="lower right", prop={'size': 12})
    plt.title("N = " + str(N))
    plt.savefig('data/' + objType + '_' + phiType + '_alpha_' + str(N) + '.png')
    plt.show()
"matplotlib.pyplot.savefig",
"data.alphaSet",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"numpy.load",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"matplo... | [((305, 362), 'numpy.load', 'np.load', (["('data/' + objType + '_' + phiType + '_final.npz')"], {}), "('data/' + objType + '_' + phiType + '_final.npz')\n", (312, 362), True, 'import numpy as np\n'), ((540, 566), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 5)'}), '(figsize=(7, 5))\n', (550, 566), True, 'import matplotlib.pyplot as plt\n'), ((985, 1033), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'prop': "{'size': 12}"}), "(loc='lower right', prop={'size': 12})\n", (995, 1033), True, 'import matplotlib.pyplot as plt\n'), ((1080, 1107), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'xmin': '(0)', 'xmax': '(1000)'}), '(xmin=0, xmax=1000)\n', (1088, 1107), True, 'import matplotlib.pyplot as plt\n'), ((1112, 1127), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""N"""'], {}), "('N')\n", (1122, 1127), True, 'import matplotlib.pyplot as plt\n'), ((1403, 1423), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Return"""'], {}), "('Return')\n", (1413, 1423), True, 'import matplotlib.pyplot as plt\n'), ((1429, 1490), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('data/' + objType + '_' + phiType + '_final.png')"], {}), "('data/' + objType + '_' + phiType + '_final.png')\n", (1440, 1490), True, 'import matplotlib.pyplot as plt\n'), ((1495, 1505), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1503, 1505), True, 'import matplotlib.pyplot as plt\n'), ((1952, 1980), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7, 5)'}), '(figsize=(7, 5))\n', (1964, 1980), True, 'import matplotlib.pyplot as plt\n'), ((2036, 2056), 'data.alphaSet', 'data.alphaSet', (['alpha'], {}), '(alpha)\n', (2049, 2056), False, 'import data\n'), ((3067, 3077), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3075, 3077), True, 'import matplotlib.pyplot as plt\n'), ((460, 483), 'numpy.array', 'np.array', (['robustReturns'], {}), '(robustReturns)\n', (468, 483), True, 'import numpy as np\n'), ((513, 533), 
'numpy.array', 'np.array', (['detReturns'], {}), '(detReturns)\n', (521, 533), True, 'import numpy as np\n'), ((1208, 1236), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(100)', 'ymax': '(161)'}), '(ymin=100, ymax=161)\n', (1216, 1236), True, 'import matplotlib.pyplot as plt\n'), ((1911, 1934), 'numpy.array', 'np.array', (['robustReturns'], {}), '(robustReturns)\n', (1919, 1934), True, 'import numpy as np\n'), ((1318, 1344), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(-10)', 'ymax': '(7)'}), '(ymin=-10, ymax=7)\n', (1326, 1344), True, 'import matplotlib.pyplot as plt\n'), ((2837, 2859), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.1)'], {}), '(0, 1.1, 0.1)\n', (2846, 2859), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import pprint
import fiolib.shared_chart as shared
from matplotlib import cm
# The module required for the 3D graph.
from mpl_toolkits.mplot3d import axes3d
from datetime import datetime
import matplotlib as mpl
import fiolib.supporting as supporting
def plot_3d(settings, dataset):
    """Render a 3D bar chart of iops or latency over (iodepth, numjobs).

    The chart is saved to '3d-iops-jobs<rw>-<timestamp>.png' in the
    current directory and also closed afterwards; nothing is returned.

    Parameters
    ----------
    settings : dict
        Parsed options; this function reads 'type' (list, first item is
        the metric: 'iops' or 'lat'), 'rw', and 'source'. Other keys are
        consumed by supporting.create_title_and_sub.
    dataset : parsed fio data accepted by fiolib.shared_chart helpers
        -- assumed, confirm against caller.
    """
    if not settings['type']:
        print("The type of data must be specified with -t (iops/lat).")
        # NOTE(review): exit() relies on site builtins; sys.exit would be safer.
        exit(1)
    dataset_types = shared.get_dataset_types(dataset)
    metric = settings['type'][0]
    rw = settings['rw']
    iodepth = dataset_types['iodepth']
    numjobs = dataset_types['numjobs']
    data = shared.get_record_set_3d(settings, dataset, dataset_types,
                                    rw, metric)
    # pprint.pprint(data)
    fig = plt.figure()
    ax1 = fig.add_subplot(111, projection='3d')
    fig.set_size_inches(15, 10)
    # Grid dimensions: one bar per (iodepth, numjobs) combination.
    lx = len(dataset_types['iodepth'])
    ly = len(dataset_types['numjobs'])
    # Latency values are rescaled to a common unit (ns/us/ms) so all bars
    # share one axis; the chosen unit ends up in the z-axis label.
    if metric == 'lat':
        scale_factors = []
        for row in data['values']:
            scale_factor = supporting.get_scale_factor(row)
            scale_factors.append(scale_factor)
        largest_scale_factor = supporting.get_largest_scale_factor(
            scale_factors)
        # pprint.pprint(largest_scale_factor)
        scaled_values = []
        for row in data['values']:
            result = supporting.scale_yaxis_latency(
                row, largest_scale_factor)
            scaled_values.append(result['data'])
        z_axis_label = largest_scale_factor['label']
    else:
        scaled_values = data['values']
        z_axis_label = metric
    n = np.array(scaled_values, dtype=float)
    size = lx * 0.05  # thickness of the bar
    xpos_orig = np.arange(0, lx, 1)
    ypos_orig = np.arange(0, ly, 1)
    xpos = np.arange(0, lx, 1)
    ypos = np.arange(0, ly, 1)
    # Shift the grid slightly so bars are centered on their tick marks.
    xpos, ypos = np.meshgrid(xpos-(size/lx), ypos-(size))
    xpos_f = xpos.flatten()   # Convert positions to 1D array
    ypos_f = ypos.flatten()
    zpos = np.zeros(lx*ly)
    # Positioning and sizing of the bars
    dx = size * np.ones_like(zpos)
    dy = dx.copy()
    dz = n.flatten()
    # Normalize bar heights to [0, 1] for the colormap lookup.
    values = dz / (dz.max()/1)
    # Create the 3D chart with positioning and colors
    cmap = plt.get_cmap('rainbow', xpos.ravel().shape[0])
    colors = cm.rainbow(values)
    ax1.bar3d(xpos_f, ypos_f, zpos, dx, dy, dz, color=colors)
    # Create the color bar to the right
    norm = mpl.colors.Normalize(vmin=0, vmax=dz.max())
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
    sm.set_array([])
    res = fig.colorbar(sm, fraction=0.046, pad=0.04)
    res.ax.set_title(z_axis_label)
    # Set tics for x/y axis
    float_x = [float(x) for x in (xpos_orig)]
    ax1.w_xaxis.set_ticks(float_x)
    ax1.w_yaxis.set_ticks(ypos_orig)
    ax1.w_xaxis.set_ticklabels(iodepth)
    ax1.w_yaxis.set_ticklabels(numjobs)
    # axis labels
    fontsize = 16
    ax1.set_xlabel('iodepth', fontsize=fontsize)
    ax1.set_ylabel('numjobs', fontsize=fontsize)
    ax1.set_zlabel(z_axis_label, fontsize=fontsize)
    # List comprehensions used for their side effect on each tick label.
    [t.set_verticalalignment('center_baseline') for t in ax1.get_yticklabels()]
    [t.set_verticalalignment('center_baseline') for t in ax1.get_xticklabels()]
    ax1.zaxis.labelpad = 25
    tick_label_font_size = 12
    for t in ax1.xaxis.get_major_ticks():
        t.label.set_fontsize(tick_label_font_size)
    for t in ax1.yaxis.get_major_ticks():
        t.label.set_fontsize(tick_label_font_size)
    ax1.zaxis.set_tick_params(pad=10)
    for t in ax1.zaxis.get_major_ticks():
        t.label.set_fontsize(tick_label_font_size)
    # title
    supporting.create_title_and_sub(
        settings, plt, skip_keys=['iodepth', 'numjobs'], sub_x_offset=0.57, sub_y_offset=1.05)
    fig.text(0.75, 0.03, settings['source'])
    plt.tight_layout()
    now = datetime.now().strftime('%Y-%m-%d_%H%M%S')
    plt.savefig('3d-iops-jobs' +
                str(settings['rw']) + "-" + str(now) + '.png')
    plt.close('all')
| [
"numpy.ones_like",
"matplotlib.pyplot.cm.ScalarMappable",
"fiolib.shared_chart.get_dataset_types",
"fiolib.supporting.get_largest_scale_factor",
"fiolib.supporting.get_scale_factor",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"datetime.datetime.now",
"ma... | [((575, 608), 'fiolib.shared_chart.get_dataset_types', 'shared.get_dataset_types', (['dataset'], {}), '(dataset)\n', (599, 608), True, 'import fiolib.shared_chart as shared\n'), ((755, 825), 'fiolib.shared_chart.get_record_set_3d', 'shared.get_record_set_3d', (['settings', 'dataset', 'dataset_types', 'rw', 'metric'], {}), '(settings, dataset, dataset_types, rw, metric)\n', (779, 825), True, 'import fiolib.shared_chart as shared\n'), ((899, 911), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (909, 911), True, 'import matplotlib.pyplot as plt\n'), ((1790, 1826), 'numpy.array', 'np.array', (['scaled_values'], {'dtype': 'float'}), '(scaled_values, dtype=float)\n', (1798, 1826), True, 'import numpy as np\n'), ((1889, 1908), 'numpy.arange', 'np.arange', (['(0)', 'lx', '(1)'], {}), '(0, lx, 1)\n', (1898, 1908), True, 'import numpy as np\n'), ((1925, 1944), 'numpy.arange', 'np.arange', (['(0)', 'ly', '(1)'], {}), '(0, ly, 1)\n', (1934, 1944), True, 'import numpy as np\n'), ((1957, 1976), 'numpy.arange', 'np.arange', (['(0)', 'lx', '(1)'], {}), '(0, lx, 1)\n', (1966, 1976), True, 'import numpy as np\n'), ((1988, 2007), 'numpy.arange', 'np.arange', (['(0)', 'ly', '(1)'], {}), '(0, ly, 1)\n', (1997, 2007), True, 'import numpy as np\n'), ((2025, 2067), 'numpy.meshgrid', 'np.meshgrid', (['(xpos - size / lx)', '(ypos - size)'], {}), '(xpos - size / lx, ypos - size)\n', (2036, 2067), True, 'import numpy as np\n'), ((2168, 2185), 'numpy.zeros', 'np.zeros', (['(lx * ly)'], {}), '(lx * ly)\n', (2176, 2185), True, 'import numpy as np\n'), ((2458, 2476), 'matplotlib.cm.rainbow', 'cm.rainbow', (['values'], {}), '(values)\n', (2468, 2476), False, 'from matplotlib import cm\n'), ((2644, 2687), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (2665, 2687), True, 'import matplotlib.pyplot as plt\n'), ((3770, 3892), 'fiolib.supporting.create_title_and_sub', 
'supporting.create_title_and_sub', (['settings', 'plt'], {'skip_keys': "['iodepth', 'numjobs']", 'sub_x_offset': '(0.57)', 'sub_y_offset': '(1.05)'}), "(settings, plt, skip_keys=['iodepth',\n 'numjobs'], sub_x_offset=0.57, sub_y_offset=1.05)\n", (3801, 3892), True, 'import fiolib.supporting as supporting\n'), ((3950, 3968), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3966, 3968), True, 'import matplotlib.pyplot as plt\n'), ((4122, 4138), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4131, 4138), True, 'import matplotlib.pyplot as plt\n'), ((1331, 1381), 'fiolib.supporting.get_largest_scale_factor', 'supporting.get_largest_scale_factor', (['scale_factors'], {}), '(scale_factors)\n', (1366, 1381), True, 'import fiolib.supporting as supporting\n'), ((2242, 2260), 'numpy.ones_like', 'np.ones_like', (['zpos'], {}), '(zpos)\n', (2254, 2260), True, 'import numpy as np\n'), ((1220, 1252), 'fiolib.supporting.get_scale_factor', 'supporting.get_scale_factor', (['row'], {}), '(row)\n', (1247, 1252), True, 'import fiolib.supporting as supporting\n'), ((1525, 1582), 'fiolib.supporting.scale_yaxis_latency', 'supporting.scale_yaxis_latency', (['row', 'largest_scale_factor'], {}), '(row, largest_scale_factor)\n', (1555, 1582), True, 'import fiolib.supporting as supporting\n'), ((3979, 3993), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3991, 3993), False, 'from datetime import datetime\n')] |
import copy
import numpy as np
import pytest
import tensorflow as tf
from tfsnippet.layers import as_gated
def safe_sigmoid(x):
    """Numerically stable sigmoid: 1 / (1 + exp(-x)).

    ``np.where`` evaluates both branches eagerly, so each branch must be
    safe on its own.  Computing ``exp(-|x|)`` guarantees the exponent is
    never positive, avoiding the overflow RuntimeWarnings the original
    two-branch form emitted for large-magnitude inputs, while producing
    bit-identical results for both signs of ``x``.
    """
    z = np.exp(-np.abs(x))
    # For x < 0: z = exp(x),  sigmoid = z / (1 + z)
    # For x >= 0: z = exp(-x), sigmoid = 1 / (1 + z)
    return np.where(x < 0, z / (1. + z), 1. / (1. + z))
class AsGatedHelper(object):
    """Callable stub that records how it is invoked.

    The first call with ``scope='main'`` (resp. ``scope='gate'``) stores
    its ``(args, kwargs)`` and returns ``main_ret`` (resp. ``gate_ret``).
    Any other scope raises ``RuntimeError``.
    """

    def __init__(self, main_ret, gate_ret):
        # (args, kwargs) captured from the first call of each scope
        self.main_args = None
        self.gate_args = None
        self.main_ret = main_ret
        self.gate_ret = gate_ret

    def __call__(self, *args, **kwargs):
        scope = kwargs['scope']
        if scope not in ('main', 'gate'):
            raise RuntimeError()
        # Snapshot kwargs so later mutation by the caller cannot leak in.
        recorded = (args, copy.copy(kwargs))
        if scope == 'main':
            assert(self.main_args is None)
            self.main_args = recorded
            return self.main_ret
        assert(self.gate_args is None)
        self.gate_args = recorded
        return self.gate_ret
class TestAsGated(tf.test.TestCase):

    def test_as_gated(self):
        main_ret = np.random.normal(size=[2, 3, 4]).astype(np.float32)
        gate_ret = np.random.normal(size=[2, 3, 4]).astype(np.float32)
        activation_fn = object()

        def check_recorded_args(helper):
            # `main` receives the activation_fn, `gate` does not.
            self.assertEqual(
                helper.main_args,
                (
                    (1,),
                    {'xyz': 2, 'activation_fn': activation_fn, 'scope': 'main'}
                )
            )
            self.assertEqual(
                helper.gate_args,
                (
                    (1,),
                    {'xyz': 2, 'scope': 'gate'}
                )
            )

        # default_name cannot be inferred from a callable without __name__
        with pytest.raises(ValueError,
                           match='`default_name` cannot be inferred'):
            g = as_gated(AsGatedHelper(main_ret, gate_ret))

        with self.test_session() as sess:
            # default name inferred from f.__name__
            helper = AsGatedHelper(main_ret, gate_ret)
            helper.__name__ = 'f'
            gated = as_gated(helper)
            out = gated(1, xyz=2, activation_fn=activation_fn)
            np.testing.assert_allclose(
                sess.run(out), main_ret * safe_sigmoid(gate_ret + 2.))
            self.assertTrue(out.name, 'gated_f/')
            check_recorded_args(helper)

            # explicit default name and sigmoid bias
            helper = AsGatedHelper(main_ret, gate_ret)
            gated = as_gated(helper, sigmoid_bias=1., default_name='ff')
            out = gated(1, xyz=2, activation_fn=activation_fn)
            np.testing.assert_allclose(
                sess.run(out), main_ret * safe_sigmoid(gate_ret + 1.))
            self.assertTrue(out.name, 'gated_ff/')
            check_recorded_args(helper)

            # `name` argument overrides the default name
            helper = AsGatedHelper(main_ret, gate_ret)
            gated = as_gated(helper, default_name='f')
            out = gated(1, xyz=2, activation_fn=activation_fn, name='name')
            np.testing.assert_allclose(
                sess.run(out), main_ret * safe_sigmoid(gate_ret + 2.))
            self.assertTrue(out.name, 'name/')

            # `scope` argument overrides the default name
            helper = AsGatedHelper(main_ret, gate_ret)
            gated = as_gated(helper, default_name='f')
            out = gated(1, xyz=2, activation_fn=activation_fn, scope='scope')
            np.testing.assert_allclose(
                sess.run(out), main_ret * safe_sigmoid(gate_ret + 2.))
            self.assertTrue(out.name, 'scope/')
| [
"numpy.random.normal",
"tfsnippet.layers.as_gated",
"numpy.exp",
"pytest.raises",
"copy.copy"
] | [((159, 168), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (165, 168), True, 'import numpy as np\n'), ((1149, 1217), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""`default_name` cannot be inferred"""'}), "(ValueError, match='`default_name` cannot be inferred')\n", (1162, 1217), False, 'import pytest\n'), ((1482, 1493), 'tfsnippet.layers.as_gated', 'as_gated', (['f'], {}), '(f)\n', (1490, 1493), False, 'from tfsnippet.layers import as_gated\n'), ((2226, 2274), 'tfsnippet.layers.as_gated', 'as_gated', (['f'], {'sigmoid_bias': '(1.0)', 'default_name': '"""ff"""'}), "(f, sigmoid_bias=1.0, default_name='ff')\n", (2234, 2274), False, 'from tfsnippet.layers import as_gated\n'), ((2999, 3028), 'tfsnippet.layers.as_gated', 'as_gated', (['f'], {'default_name': '"""f"""'}), "(f, default_name='f')\n", (3007, 3028), False, 'from tfsnippet.layers import as_gated\n'), ((3365, 3394), 'tfsnippet.layers.as_gated', 'as_gated', (['f'], {'default_name': '"""f"""'}), "(f, default_name='f')\n", (3373, 3394), False, 'from tfsnippet.layers import as_gated\n'), ((177, 186), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (183, 186), True, 'import numpy as np\n'), ((200, 210), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (206, 210), True, 'import numpy as np\n'), ((595, 612), 'copy.copy', 'copy.copy', (['kwargs'], {}), '(kwargs)\n', (604, 612), False, 'import copy\n'), ((943, 975), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[2, 3, 4]'}), '(size=[2, 3, 4])\n', (959, 975), True, 'import numpy as np\n'), ((1014, 1046), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[2, 3, 4]'}), '(size=[2, 3, 4])\n', (1030, 1046), True, 'import numpy as np\n'), ((756, 773), 'copy.copy', 'copy.copy', (['kwargs'], {}), '(kwargs)\n', (765, 773), False, 'import copy\n')] |
"""
GenBank format (:mod:`skbio.io.format.genbank`)
===============================================
.. currentmodule:: skbio.io.format.genbank
GenBank format (GenBank Flat File Format) stores sequence and its annotation
together. The start of the annotation section is marked by a line beginning
with the word "LOCUS". The start of sequence section is marked by a line
beginning with the word "ORIGIN" and the end of the section is marked by a line
with only "//".
The GenBank file usually ends with .gb or sometimes .gbk. The GenBank format
for protein has been renamed to GenPept. The GenBank (for nucleotide) and
Genpept are essentially the same format.
An example of a GenBank file can be seen here:
<http://www.ncbi.nlm.nih.gov/Sitemap/samplerecord.html>
Format Support
--------------
**Has Sniffer: Yes**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |Yes |:mod:`skbio.sequence.Sequence` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.sequence.DNA` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.sequence.RNA` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.sequence.Protein` |
+------+------+---------------------------------------------------------------+
|Yes | Yes | generator of :mod:`skbio.sequence.Sequence` objects |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
**State: Experimental as of 0.4.0-dev.**
The International Nucleotide Sequence Database Collaboration (INSDC)
is a foundational initiative between the DDBJ, EMBL, and GenBank
(http://www.insdc.org/). These organisations all use the
same "Feature Table" layout in their plain text flat file formats.
However, the header and sequence sections of an EMBL file are very
different in layout to those produced by GenBank/DDBJ.
Feature Table Documentation:
http://www.insdc.org/files/feature_table.html
ftp://ftp.ncbi.nih.gov/genbank/docs/FTv10_3.html
The sequence in the ``'ORIGIN'`` section is always in lowercase for the
GenBank files downloaded from NCBI. For the RNA molecules, ``'t'`` (thymine),
instead of ``'u'`` (uracil) is used in the sequence. All GenBank writers
follow these conventions while writing GenBank files.
All the sections before ``'FEATURES'`` will be read into ``metadata`` of
``Sequence`` or its sub-class. The header and its content of a section
is stored as a pair of key and value in ``metadata``. For the ``'REFERENCE'``
section, its value is stored as a list, as there are often multiple
reference sections in one GenBank record.
The information of the ``'FEATURES'`` is stored in both ``metadata`` and
``positional_metadata`` of ``Sequence`` or its sub-class. For each feature,
its location is stored as boolean column in ``positional_metadata``; other
qualifiers are stored as a ``dict`` in the ``list`` of
``metadata['FEATURES']``. In the ``dict`` of qualifiers, there are a few
extra keys, which end with ``'_'``, including:
1. ``'index_'``: the column index to the ``positional_metadata``,
where the location of the current feature is stored.
2. ``'left_partial_'``: whether the exact lower boundary point of the
feature is unknown.
3. ``'right_partial_'``: whether the exact upper boundary point of the
feature is unknown.
4. ``'type_'``: the molecular type of the feature. Its value is from the
header of the feature.
Format Parameters
-----------------
Reader-specific Parameters
^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``constructor`` parameter can be used with the ``Sequence`` generator
to specify the in-memory type of each GenBank record that is parsed.
``constructor`` should be ``Sequence`` or a sub-class of ``Sequence``.
It is also detected by the unit label on the LOCUS line. For example, if it
is ``'bp'``, it will be read into ``DNA``; if it is ``'aa'``, it will be read
into ``Protein``. Otherwise, it will be read into ``Sequence``. This default
behavior is overridden by setting ``constructor``.
``lowercase`` is another parameter available for all GenBank readers.
By default, it is set to ``True`` to read in the ``'ORIGIN'`` sequence
as lowercase letters. This parameter is passed to ``Sequence`` or
its sub-class constructor.
``seq_num`` is a parameter used with the ``Sequence``, ``DNA``, ``RNA``, and
``Protein`` GenBank readers. It specifies which GenBank record to read from
a GenBank file with multiple records in it.
Examples
--------
Reading and Writing GenBank Files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Suppose we have the following GenBank file (example modified from [1_])::
LOCUS 3K1V_A 34 bp RNA linear SYN 10-OCT-2012
DEFINITION Chain A, Structure Of A Mutant Class-I Preq1.
ACCESSION 3K1V_A
VERSION 3K1V_A GI:260656459
KEYWORDS .
SOURCE synthetic construct
ORGANISM synthetic construct
other sequences; artificial sequences.
REFERENCE 1 (bases 1 to 34)
AUTHORS Klein,D.J., Edwards,T.E. and Ferre-D'Amare,A.R.
TITLE Cocrystal structure of a class I preQ1 riboswitch
JOURNAL Nat. Struct. Mol. Biol. 16 (3), 343-344 (2009)
PUBMED 19234468
COMMENT SEQRES.
FEATURES Location/Qualifiers
source 1..34
/organism="synthetic construct"
/mol_type="other RNA"
/db_xref="taxon:32630"
ORIGIN
1 agaggttcta gcacatccct ctataaaaaa ctaa
//
>>> gb = ['LOCUS 3K1V_A 34 bp RNA linear SYN 10-OCT-2012\\n',
... 'DEFINITION Chain A, Structure Of A Mutant Class-I Preq1.\\n',
... 'ACCESSION 3K1V_A\\n',
... 'VERSION 3K1V_A GI:260656459\\n',
... 'KEYWORDS .\\n',
... 'SOURCE synthetic construct\\n',
... ' ORGANISM synthetic construct\\n',
... ' other sequences; artificial sequences.\\n',
... 'REFERENCE 1 (bases 1 to 34)\\n',
... " AUTHORS Klein,D.J., Edwards,T.E. and Ferre-D'Amare,A.R.\\n",
... ' TITLE Cocrystal structure of a class I preQ1 riboswitch\\n',
... ' JOURNAL Nat. Struct. Mol. Biol. 16 (3), 343-344 (2009)\\n',
... ' PUBMED 19234468\\n',
... 'COMMENT SEQRES.\\n',
... 'FEATURES Location/Qualifiers\\n',
... ' source 1..34\\n',
... ' /organism="synthetic construct"\\n',
... ' /mol_type="other RNA"\\n',
... ' /db_xref="taxon:32630"\\n',
... 'ORIGIN\\n',
... ' 1 agaggttcta gcacatccct ctataaaaaa ctaa\\n',
... '//\\n']
Now we can read it as ``DNA`` object:
>>> from skbio import DNA, RNA, Sequence
>>> dna_seq = DNA.read(gb)
>>> dna_seq
DNA
-----------------------------------------------------------------
Metadata:
'ACCESSION': '3K1V_A'
'COMMENT': 'SEQRES.'
'DEFINITION': 'Chain A, Structure Of A Mutant Class-I Preq1.'
'FEATURES': <class 'list'>
'KEYWORDS': '.'
'LOCUS': <class 'dict'>
'REFERENCE': <class 'list'>
'SOURCE': <class 'dict'>
'VERSION': '3K1V_A GI:260656459'
Positional metadata:
0: <dtype: bool>
Stats:
length: 34
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 35.29%
-----------------------------------------------------------------
0 AGAGGTTCTA GCACATCCCT CTATAAAAAA CTAA
Since this is a riboswitch molecule, we may want to read it as ``RNA``.
As the GenBank file usually have ``'t'`` instead of ``'u'`` in the
sequence, we can read it as ``RNA`` by converting ``'t'`` to ``'u'``:
>>> rna_seq = RNA.read(gb)
>>> rna_seq
RNA
-----------------------------------------------------------------
Metadata:
'ACCESSION': '3K1V_A'
'COMMENT': 'SEQRES.'
'DEFINITION': 'Chain A, Structure Of A Mutant Class-I Preq1.'
'FEATURES': <class 'list'>
'KEYWORDS': '.'
'LOCUS': <class 'dict'>
'REFERENCE': <class 'list'>
'SOURCE': <class 'dict'>
'VERSION': '3K1V_A GI:260656459'
Positional metadata:
0: <dtype: bool>
Stats:
length: 34
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 35.29%
-----------------------------------------------------------------
0 AGAGGUUCUA GCACAUCCCU CUAUAAAAAA CUAA
>>> rna_seq == dna_seq.transcribe()
True
>>> from io import StringIO
>>> with StringIO() as fh:
... print(dna_seq.write(fh, format='genbank').getvalue())
LOCUS 3K1V_A 34 bp RNA linear SYN 10-OCT-2012
DEFINITION Chain A, Structure Of A Mutant Class-I Preq1.
ACCESSION 3K1V_A
VERSION 3K1V_A GI:260656459
KEYWORDS .
SOURCE synthetic construct
ORGANISM synthetic construct
other sequences; artificial sequences.
REFERENCE 1 (bases 1 to 34)
AUTHORS Klein,D.J., Edwards,T.E. and Ferre-D'Amare,A.R.
TITLE Cocrystal structure of a class I preQ1 riboswitch
JOURNAL Nat. Struct. Mol. Biol. 16 (3), 343-344 (2009)
PUBMED 19234468
COMMENT SEQRES.
FEATURES Location/Qualifiers
source 1..34
/db_xref="taxon:32630"
/mol_type="other RNA"
/organism="synthetic construct"
ORIGIN
1 agaggttcta gcacatccct ctataaaaaa ctaa
//
<BLANKLINE>
References
----------
.. [1_] http://www.ncbi.nlm.nih.gov/nuccore/3K1V_A
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import range, zip
import re
import numpy as np
import pandas as pd
from datetime import datetime
from functools import partial
from skbio.io import create_format, GenBankFormatError
from skbio.io.format._base import (
_get_nth_sequence, _line_generator, _too_many_blanks)
from skbio.util._misc import chunk_str
from skbio.sequence import Sequence, DNA, RNA, Protein
genbank = create_format('genbank')
# Date format used in the LOCUS line of a GenBank record (e.g. 15-MAR-1991).
_TIME_FORMAT = '%d-%b-%Y'
# Section headers of a GenBank record, in canonical order.  The ordering
# is relied on when writing records back out.
_HEADERS = ['LOCUS',
            'DEFINITION',
            'ACCESSION',
            'VERSION',
            'DBSOURCE',
            'DBLINK',
            'KEYWORDS',
            'SOURCE',
            'REFERENCE',
            'COMMENT',
            'FEATURES',
            'ORIGIN']
@genbank.sniffer()
def _genbank_sniffer(fh):
    """Sniff for GenBank: the first real line must be a parseable LOCUS line."""
    if _too_many_blanks(fh, 5):
        return False, {}
    lines = _line_generator(fh, skip_blanks=True, strip=False)
    try:
        first_line = next(lines)
        _parse_locus([first_line])
    except (StopIteration, GenBankFormatError):
        # No content at all, or the first line is not a valid LOCUS line.
        return False, {}
    return True, {}
@genbank.reader(None)
def _genbank_to_generator(fh, constructor=None, **kwargs):
    """Yield one constructed sequence object per GenBank record in ``fh``."""
    for parsed in _parse_genbanks(fh):
        yield _construct(parsed, constructor, **kwargs)
@genbank.reader(Sequence)
def _genbank_to_sequence(fh, seq_num=1, **kwargs):
    """Read the ``seq_num``-th GenBank record in ``fh`` as a ``Sequence``."""
    return _construct(
        _get_nth_sequence(_parse_genbanks(fh), seq_num), Sequence, **kwargs)
@genbank.reader(DNA)
def _genbank_to_dna(fh, seq_num=1, **kwargs):
    """Read the ``seq_num``-th GenBank record in ``fh`` as a ``DNA``."""
    return _construct(
        _get_nth_sequence(_parse_genbanks(fh), seq_num), DNA, **kwargs)
@genbank.reader(RNA)
def _genbank_to_rna(fh, seq_num=1, **kwargs):
    """Read the ``seq_num``-th GenBank record in ``fh`` as an ``RNA``."""
    return _construct(
        _get_nth_sequence(_parse_genbanks(fh), seq_num), RNA, **kwargs)
@genbank.reader(Protein)
def _genbank_to_protein(fh, seq_num=1, **kwargs):
    """Read the ``seq_num``-th GenBank record in ``fh`` as a ``Protein``."""
    return _construct(
        _get_nth_sequence(_parse_genbanks(fh), seq_num), Protein, **kwargs)
@genbank.writer(None)
def _generator_to_genbank(obj, fh):
    """Write every sequence yielded by ``obj`` as its own GenBank record."""
    for seq in obj:
        _serialize_single_genbank(seq, fh)
@genbank.writer(Sequence)
def _sequence_to_genbank(obj, fh):
    # Write a single ``Sequence`` as one GenBank record.
    _serialize_single_genbank(obj, fh)
@genbank.writer(DNA)
def _dna_to_genbank(obj, fh):
    # Write a single ``DNA`` as one GenBank record.
    _serialize_single_genbank(obj, fh)
@genbank.writer(RNA)
def _rna_to_genbank(obj, fh):
    # Write a single ``RNA`` as one GenBank record ('u' is written as 't').
    _serialize_single_genbank(obj, fh)
@genbank.writer(Protein)
def _protein_to_genbank(obj, fh):
    # Write a single ``Protein`` as one GenBank (GenPept) record.
    _serialize_single_genbank(obj, fh)
def _construct(record, constructor=None, **kwargs):
    '''Build a sequence object from a parsed GenBank record.

    ``record`` is the (sequence, metadata, positional_metadata) triple
    produced by the parser.  When ``constructor`` is not given, it is
    chosen from the LOCUS unit label: 'bp' -> DNA, 'aa' -> Protein.
    '''
    seq, md, pmd = record
    # Default to lowercase, matching NCBI's ORIGIN-section convention.
    kwargs.setdefault('lowercase', True)
    if constructor is None:
        unit = md['LOCUS']['unit']
        if unit == 'bp':
            # GenBank stores RNA with 't', so nucleotide records default to DNA
            constructor = DNA
        elif unit == 'aa':
            constructor = Protein
    if constructor == RNA:
        # Read as DNA first (GenBank uses 't'), then transcribe to RNA.
        as_dna = DNA(seq, metadata=md, positional_metadata=pmd, **kwargs)
        return as_dna.transcribe()
    return constructor(seq, metadata=md, positional_metadata=pmd, **kwargs)
def _parse_genbanks(fh):
    """Yield one parsed record per GenBank entry in the file handle."""
    pending = []
    for line in _line_generator(fh, skip_blanks=True, strip=False):
        if line.startswith('//'):
            # '//' terminates a record; parse everything accumulated so far.
            yield _parse_single_genbank(pending)
            pending = []
        else:
            pending.append(line)
def _parse_single_genbank(chunks):
    '''Parse the lines of one GenBank record.

    Returns
    -------
    tuple
        (sequence string, metadata dict, positional_metadata DataFrame
        or None).
    '''
    metadata = {}
    positional_metadata = None
    sequence = ''
    # A section header is a line starting without indentation.
    section_splitter = _yield_section(
        lambda x: not x[0].isspace(), strip=False)
    for section in section_splitter(chunks):
        header = section[0].split(None, 1)[0]
        parser = _PARSER_TABLE.get(header, _parse_section_default)
        if header == 'FEATURES':
            # FEATURES needs the sequence length, which comes from the
            # LOCUS line; LOCUS always precedes FEATURES in a record.
            parser = partial(parser, length=metadata['LOCUS']['size'])
        parsed = parser(section)
        if header == 'REFERENCE':
            # REFERENCE may occur several times; collect them in a list.
            metadata.setdefault(header, []).append(parsed)
        elif header == 'ORIGIN':
            sequence = parsed
        elif header == 'FEATURES':
            metadata[header] = parsed[0]
            positional_metadata = pd.concat(parsed[1], axis=1)
        else:
            metadata[header] = parsed
    return sequence, metadata, positional_metadata
def _serialize_single_genbank(obj, fh):
    '''Write one sequence object as a GenBank record.

    Always write it in NCBI canonical way:
    1. sequence in lowercase
    2. 'u' as 't' even in RNA molecules.
    '''
    md = obj.metadata
    for header in _HEADERS:
        if header not in md:
            continue
        serializer = _SERIALIZER_TABLE.get(
            header, _serialize_section_default)
        out = serializer(header, md[header])
        # Generators satisfy iter(x) is x; strings/lists do not
        # (cf. Effective Python Item 17).
        if iter(out) is iter(out):
            for piece in out:
                fh.write(piece)
        else:
            fh.write(out)
    if isinstance(obj, RNA):
        # GenBank stores RNA sequences with 't' instead of 'u'.
        obj = obj.reverse_transcribe()
    for chunk in _serialize_origin(str(obj).lower()):
        fh.write(chunk)
    fh.write('//\n')
def _parse_locus(lines):
    '''Parse the line LOCUS.

    Column layout:
    # Positions  Contents
    # ---------  --------
    # 00:06      LOCUS
    # 06:12      spaces
    # 12:??      Locus name
    # ??:??      space
    # ??:29      Length of sequence, right-justified
    # 29:33      space, bp/aa/rc, space
    # 33:41      molecule type (can be blank): DNA, ssDNA, dsRNA, tRNA, etc.
    # 41:42      space
    # 42:51      Blank (implies linear), linear or circular
    # 51:52      space
    # 52:55      The division code (e.g. BCT, VRL, INV)
    # 55:62      space
    # 62:73      Date, in the form dd-MMM-yyyy (e.g., 15-MAR-1991)

    Returns
    -------
    dict
        Keys 'locus_name', 'size' (int), 'unit', 'mol_type', 'shape',
        'division' and 'date' (datetime).

    Raises
    ------
    GenBankFormatError
        If the line does not match the LOCUS layout.
    '''
    line = lines[0]
    # Raw strings throughout so '\s' is a regex class, not a string escape.
    pattern = (r'LOCUS'
               r' +([^\s]+)'
               r' +([0-9]+)'
               r' +(bp|aa|rc)'
               r' +(.*DNA|.*RNA)?'
               r' +(linear|circular)?'
               r' +(PRI|ROD|MAM|VRT|INV|PLN|BCT|VRL|PHG|'
               r'SYN|UNA|EST|PAT|STS|GSS|HTG|HTC|ENV|CON)'
               r' +([0-9]{2}-[A-Z]{3}-[0-9]{4})')
    matches = re.match(pattern, line)
    try:
        res = dict(zip(
            ['locus_name', 'size', 'unit', 'mol_type',
             'shape', 'division', 'date'],
            matches.groups()))
    except AttributeError:
        # re.match returned None: the line does not look like LOCUS.
        # (A bare except here would also have swallowed KeyboardInterrupt.)
        raise GenBankFormatError(
            "Could not parse the LOCUS line:\n%s" % line)
    res['size'] = int(res['size'])
    res['date'] = datetime.strptime(res['date'], _TIME_FORMAT)
    return res
def _serialize_locus(header, obj, indent=12):
    '''Serialize the LOCUS line from its parsed dict form.

    Parameters
    ----------
    obj : dict
        Output of ``_parse_locus``; ``obj['date']`` is a datetime.
    '''
    # Replace None values with '' so optional fields format as empty.
    fields = {key: value or '' for key, value in obj.items()}
    # Dates are written back upper-cased, e.g. 15-MAR-1991.
    fields['date'] = fields['date'].strftime(_TIME_FORMAT).upper()
    template = ('{header:<{indent}}{locus_name} {size} {unit}'
                ' {mol_type} {shape} {division} {date}\n')
    return template.format(header=header, indent=indent, **fields)
def _parse_reference(lines):
    '''Parse a single REFERENCE section into a dict keyed by sub-header.'''
    res = {}
    # Continuation lines of a sub-header (AUTHORS, TITLE, ...) are
    # indented by at least 11 spaces.
    continuation = ' ' * 11
    splitter = _yield_section(
        lambda x: not x.startswith(continuation),
        skip_blanks=True, strip=False)
    for part in splitter(lines):
        label, data = _parse_section_default(
            part, join_delimitor=' ', return_label=True)
        res[label] = data
    return res
def _serialize_reference(header, obj, indent=12):
'''Serialize REFERENCE.
Parameters
----------
obj : list
'''
padding = ' '
sort_order = {'REFERENCE': 0, 'AUTHORS': 1,
'TITLE': 2, 'JOURNAL': 3, 'PUBMED': 4}
for obj_i in obj:
ref_i = []
for h in sorted(obj_i, key=lambda k: sort_order.get(k, 100)):
if h == header:
s = '{h:<{indent}}{ref}'.format(
h=h, indent=indent, ref=obj_i[h])
else:
s = '{h:<{indent}}{value}'.format(
h=padding + h, indent=indent, value=obj_i[h])
ref_i.append(s)
yield '%s\n' % '\n'.join(ref_i)
def _parse_source(lines):
    '''Parse the SOURCE section into {'ORGANISM': ..., 'taxonomy': ...}.'''
    res = {}
    # Non-keyword lines in SOURCE are indented by at least 11 spaces.
    continuation = ' ' * 11
    splitter = _yield_section(
        lambda x: not x.startswith(continuation),
        skip_blanks=True, strip=False)
    # The SOURCE line itself is not informative; only ORGANISM is kept.
    _, organism = list(splitter(lines))
    res['ORGANISM'] = organism[0].split(None, 1)[1].strip()
    res['taxonomy'] = ' '.join(part.strip() for part in organism[1:])
    return res
def _serialize_source(header, obj, indent=12):
'''Serialize SOURCE.
Parameters
----------
obj : dict
'''
s = ('{header:<{indent}}{organism}\n'
'{h:<{indent}}{organism}\n'
'{space}{taxonomy}\n').format(
header=header, indent=indent,
h=' ORGANISM', organism=obj['ORGANISM'],
space=' ' * 12, taxonomy=obj['taxonomy'])
return s
def _parse_features(lines, length):
    '''Parse the FEATURES section.

    Returns
    -------
    tuple
        (list of feature dicts, list of boolean ``pd.Series`` marking
        each feature's location on the sequence).
    '''
    # Drop the 'FEATURES   Location/Qualifiers' header line if present.
    if lines[0].startswith('FEATURES'):
        lines = lines[1:]
    # Continuation lines of a feature are indented by at least 20 spaces.
    continuation = ' ' * 20
    splitter = _yield_section(
        lambda x: not x.startswith(continuation),
        skip_blanks=True, strip=False)
    features = []
    positional_metadata = []
    for index, section in enumerate(splitter(lines)):
        feature, pmd = _parse_single_feature(section, length, index)
        features.append(feature)
        positional_metadata.append(pmd)
    return features, positional_metadata
def _serialize_features(header, obj, indent=21):
    '''Serialize FEATURES; the section header goes on the first line only.'''
    for i, feature in enumerate(obj):
        serialized = _serialize_single_feature(feature, indent)
        if i == 0:
            yield '{header:<{indent}}Location/Qualifiers\n{feat}'.format(
                header=header, indent=indent, feat=serialized)
        else:
            yield serialized
def _parse_single_feature(lines, length, index):
    '''Parse a single feature of the FEATURES section.

    Parameters
    ----------
    lines : list of str
        The lines belonging to this feature.
    length : int
        Length of the whole sequence (size of the location mask).
    index : int
        Column index of this feature in the positional metadata.

    Returns
    -------
    tuple
        Tuple of a dict of `metadata` and a pandas.Series of
        `positional_metadata` for the feature.
    '''
    feature = {'index_': index}
    # Each component of a feature starts with '/', except the 1st
    # component of location.
    section_splitter = _yield_section(
        lambda x: x.startswith('/'), strip=True)
    first = True
    for section in section_splitter(lines):
        if first:
            # first section is the feature header with its location string
            first = False
            # renamed from `type` so the builtin is not shadowed
            feature_type, location = _parse_section_default(
                section, join_delimitor='', return_label=True)
            feature['type_'] = feature_type
            feature['location'] = location
            loc, loc_pmd = _parse_loc_str(location, length)
            feature.update(loc)
        else:
            # following sections are Qualifiers
            k, v = _parse_section_default(
                section, label_delimitor='=',
                join_delimitor=' ', return_label=True)
            # strip the leading '/' of the qualifier name
            k = k[1:]
            # some Qualifiers can appear multiple times
            if k in feature:
                if not isinstance(feature[k], list):
                    feature[k] = [feature[k]]
                feature[k].append(v)
            else:
                feature[k] = v
    return feature, loc_pmd
def _serialize_single_feature(obj, indent=21):
    '''Serialize one feature dict back to its GenBank text form.'''
    padding = ' ' * 8
    qualifiers = []
    for key in sorted(obj):
        # Internal bookkeeping keys (trailing '_') and the location/type
        # fields are not qualifiers.
        if key.endswith('_') or key in ('location', 'type'):
            continue
        value = obj[key]
        if isinstance(value, list):
            # Repeated qualifiers are stored as lists; emit one line each.
            qualifiers.extend(_serialize_qualifier(key, item) for item in value)
        else:
            qualifiers.append(_serialize_qualifier(key, value))
    indented = [' ' * indent + q for q in qualifiers]
    return '{header:>{indent}}{loc}\n{qualifiers}\n'.format(
        header=obj['type_'] + padding, loc=obj['location'],
        indent=indent, qualifiers='\n'.join(indented))
def _serialize_qualifier(key, value):
'''Serialize a Qualifier in a feature.
Parameters
----------
value : int, str
'''
# if value is empty
if not value:
return '/%s' % key
return '/{k}={v}'.format(k=key, v=value)
def _parse_loc_str(loc_str, length):
    '''Parse location string into strand/partial flags and a boolean mask.
    Warning: This converts coordinates to 0-based from 1-based as
    in GenBank format.
    The location descriptor can be one of the following:
    (a) a single base number. e.g. 467
    (b) a site between two indicated adjoining bases. e.g. 123^124
    (c) a single base chosen from within a specified range of bases (not
        allowed for new entries). e.g. 102.110
    (d) the base numbers delimiting a sequence span. e.g.340..565
    (e) a remote entry identifier followed by a local location
        descriptor (i.e., a-d). e.g. J00194.1:100..202
    TODO:
    handle (b), (c), (e) cases correctly
    '''
    # one flag per base of the sequence; True marks bases this feature covers
    pmd = np.zeros(length, dtype=bool)
    res = {'rc_': False,
           'left_partial_': False,
           'right_partial_': False}
    # strip away operator parentheses, e.g. 'join(12..78,134..202)'
    items = re.split('[(),]+', loc_str)
    operators = ['join', 'complement', 'order']
    if 'complement' in items:
        res['rc_'] = True
    for i in items:
        i = i.strip()
        if i in operators or not i:
            continue
        elif ':' in i:  # (e) remote entry -- not handled yet, see TODO
            index = []
        elif '..' in i:  # (d) span; NOTE: must be tested before the '.' case
            beg, end = i.split('..')
            # '<' / '>' mark a partial feature running past the shown span
            if beg.startswith('<'):
                beg = beg[1:]
                res['left_partial_'] = True
            if end.startswith('>'):
                end = end[1:]
                res['right_partial_'] = True
            beg = int(beg)
            end = int(end)
            # convert to 0-based, end-exclusive
            index = range(beg-1, end)
        elif '.' in i:  # (c) single base within a range -- not handled yet
            index = []
        elif i.isdigit():  # (a) single base, converted to 0-based
            index = int(i) - 1
        elif '^' in i:  # (b) site between two bases -- not handled yet
            index = []
        else:
            raise GenBankFormatError(
                'Could not parse location string: "%s"' %
                loc_str)
        pmd[index] = True
    return res, pd.Series(pmd)
def _parse_origin(lines):
'''Parse the ORIGIN section for sequence.
'''
sequence = []
for line in lines:
if line.startswith('ORIGIN'):
continue
# remove the number at the beg of each line
items = line.split()
sequence.append(''.join(items[1:]))
return ''.join(sequence)
def _serialize_origin(seq, indent=9):
    '''Yield the ORIGIN section lines for a sequence.

    Parameters
    ----------
    seq : str
    '''
    line_size = 60
    frag_size = 10
    for offset in range(0, len(seq), line_size):
        # 60 bases per line, split into space-separated 10-base fragments
        fragment = chunk_str(seq[offset:offset + line_size], frag_size, ' ')
        out = '{n:>{indent}} {s}\n'.format(
            n=offset + 1, indent=indent, s=fragment)
        # the very first yielded chunk carries the section header
        if offset == 0:
            out = 'ORIGIN\n' + out
        yield out
def _parse_section_default(
lines, label_delimitor=None, join_delimitor=' ', return_label=False):
'''Parse sections in default way.
Do 2 things:
1. split first line with label_delimitor for label
2. join all the lines into one str with join_delimitor.
'''
data = []
first = True
label = None
for line in lines:
if first:
items = line.split(label_delimitor, 1)
if len(items) == 2:
label, section = items
else:
label = items[0]
section = ""
data.append(section)
first = False
else:
data.append(line)
data = join_delimitor.join(i.strip() for i in data)
if return_label:
return label, data
else:
return data
def _serialize_section_default(header, obj, indent=12):
return '{header:<{indent}}{obj}\n'.format(
header=header, obj=obj, indent=indent)
def _yield_section(is_another_section, **kwargs):
    '''Build a function that yields successive sections from a file.

    Parameters
    ----------
    is_another_section : callable
        Receives a line and returns True when that line begins a new
        section.
    kwargs : dict, optional
        Keyword arguments forwarded to `_line_generator`.

    Returns
    -------
    function
        Accepts a list of lines and returns a generator yielding one
        section (a list of lines) at a time.
    '''
    def parser(lines):
        section = []
        for line in _line_generator(lines, **kwargs):
            # a section boundary flushes whatever has been collected
            if is_another_section(line) and section:
                yield section
                section = []
            section.append(line)
        # emit the trailing section, if any
        if section:
            yield section
    return parser
# Dispatch table: GenBank section header -> parser for that section.
_PARSER_TABLE = {
    'LOCUS': _parse_locus,
    'SOURCE': _parse_source,
    'REFERENCE': _parse_reference,
    'FEATURES': _parse_features,
    'ORIGIN': _parse_origin}
# Dispatch table: GenBank section header -> serializer. Note ORIGIN has
# no entry here -- presumably serialized separately via _serialize_origin.
_SERIALIZER_TABLE = {
    'LOCUS': _serialize_locus,
    'SOURCE': _serialize_source,
    'REFERENCE': _serialize_reference,
    'FEATURES': _serialize_features}
| [
"pandas.Series",
"re.split",
"skbio.io.create_format",
"skbio.io.GenBankFormatError",
"datetime.datetime.strptime",
"skbio.io.format._base._too_many_blanks",
"re.match",
"skbio.util._misc.chunk_str",
"skbio.io.format._base._line_generator",
"numpy.zeros",
"functools.partial",
"pandas.concat",
... | [((10740, 10764), 'skbio.io.create_format', 'create_format', (['"""genbank"""'], {}), "('genbank')\n", (10753, 10764), False, 'from skbio.io import create_format, GenBankFormatError\n'), ((11287, 11310), 'skbio.io.format._base._too_many_blanks', '_too_many_blanks', (['fh', '(5)'], {}), '(fh, 5)\n', (11303, 11310), False, 'from skbio.io.format._base import _get_nth_sequence, _line_generator, _too_many_blanks\n'), ((13767, 13817), 'skbio.io.format._base._line_generator', '_line_generator', (['fh'], {'skip_blanks': '(True)', 'strip': '(False)'}), '(fh, skip_blanks=True, strip=False)\n', (13782, 13817), False, 'from skbio.io.format._base import _get_nth_sequence, _line_generator, _too_many_blanks\n'), ((17215, 17238), 're.match', 're.match', (['pattern', 'line'], {}), '(pattern, line)\n', (17223, 17238), False, 'import re\n'), ((17560, 17604), 'datetime.datetime.strptime', 'datetime.strptime', (["res['date']", '_TIME_FORMAT'], {}), "(res['date'], _TIME_FORMAT)\n", (17577, 17604), False, 'from datetime import datetime\n'), ((24533, 24561), 'numpy.zeros', 'np.zeros', (['length'], {'dtype': 'bool'}), '(length, dtype=bool)\n', (24541, 24561), True, 'import numpy as np\n'), ((24670, 24697), 're.split', 're.split', (['"""[(),]+"""', 'loc_str'], {}), "('[(),]+', loc_str)\n", (24678, 24697), False, 'import re\n'), ((25683, 25697), 'pandas.Series', 'pd.Series', (['pmd'], {}), '(pmd)\n', (25692, 25697), True, 'import pandas as pd\n'), ((28053, 28085), 'skbio.io.format._base._line_generator', '_line_generator', (['lines'], {}), '(lines, **kwargs)\n', (28068, 28085), False, 'from skbio.io.format._base import _get_nth_sequence, _line_generator, _too_many_blanks\n'), ((11366, 11416), 'skbio.io.format._base._line_generator', '_line_generator', (['fh'], {'skip_blanks': '(True)', 'strip': '(False)'}), '(fh, skip_blanks=True, strip=False)\n', (11381, 11416), False, 'from skbio.io.format._base import _get_nth_sequence, _line_generator, _too_many_blanks\n'), ((14606, 14655), 
'functools.partial', 'partial', (['parser'], {'length': "metadata['LOCUS']['size']"}), "(parser, length=metadata['LOCUS']['size'])\n", (14613, 14655), False, 'from functools import partial\n'), ((17428, 17495), 'skbio.io.GenBankFormatError', 'GenBankFormatError', (['("""Could not parse the LOCUS line:\n%s""" % line)'], {}), '("""Could not parse the LOCUS line:\n%s""" % line)\n', (17446, 17495), False, 'from skbio.io import create_format, GenBankFormatError\n'), ((13517, 13573), 'skbio.sequence.DNA', 'DNA', (['seq'], {'metadata': 'md', 'positional_metadata': 'pmd'}), '(seq, metadata=md, positional_metadata=pmd, **kwargs)\n', (13520, 13573), False, 'from skbio.sequence import Sequence, DNA, RNA, Protein\n'), ((26361, 26392), 'skbio.util._misc.chunk_str', 'chunk_str', (['line', 'frag_size', '""" """'], {}), "(line, frag_size, ' ')\n", (26370, 26392), False, 'from skbio.util._misc import chunk_str\n'), ((15106, 15134), 'pandas.concat', 'pd.concat', (['parsed[1]'], {'axis': '(1)'}), '(parsed[1], axis=1)\n', (15115, 15134), True, 'import pandas as pd\n'), ((25317, 25336), 'future.builtins.range', 'range', (['(beg - 1)', 'end'], {}), '(beg - 1, end)\n', (25322, 25336), False, 'from future.builtins import range, zip\n'), ((25537, 25606), 'skbio.io.GenBankFormatError', 'GenBankFormatError', (['(\'Could not parse location string: "%s"\' % loc_str)'], {}), '(\'Could not parse location string: "%s"\' % loc_str)\n', (25555, 25606), False, 'from skbio.io import create_format, GenBankFormatError\n')] |
#!/usr/bin/env python
import sys
import serial
import time
import os.path
import cv2 as cv
import numpy as np
# load image
def load(file):
    """Read an image from disk (as color/BGR) and smooth it with a Gaussian blur."""
    image = cv.imread(file, 1)
    blurred = cv.GaussianBlur(image, (11, 11), 5)
    return blurred
# black image at the size of im
def black(im):
    """Return an all-zero (black) single-channel image matching im's height/width."""
    height, width = im.shape[:2]
    return np.zeros((height, width), np.uint8)
# taken from my earlier project:
# https://github.com/Tinggaard/set-solver/blob/alpha/cards.py#L35
def crop(cnt, *images):
    """Crop images down to the bounding rect of a contour (+2 px margin).

    Cropping first keeps the later per-piece processing cheap on the CPU.

    Parameters
    ----------
    cnt : contour as returned by cv.findContours
    *images : one or more numpy arrays sharing the first image's size

    Returns
    -------
    list of cropped slices, one per input image

    Raises
    ------
    ValueError
        If no image is given.
    TypeError
        If any argument is not a numpy array.
    """
    # `assert` is stripped under `python -O` and an empty *images tuple
    # would crash below with IndexError -- validate explicitly instead.
    if not images:
        raise ValueError('crop() requires at least one image')
    if not all(isinstance(item, np.ndarray) for item in images):
        raise TypeError('"*images" must be of type: "numpy.ndarray"')
    # The first image defines the reference frame for boundary clamping.
    reference_y, reference_x = images[0].shape[:2]
    x, y, w, h = cv.boundingRect(cnt)
    # Add a 2 px offset so contours still work properly on the crops,
    # clamping at the image borders to prevent indexing errors.
    y1 = y - 2 if 2 < y else 0
    x1 = x - 2 if 2 < x else 0
    y2 = y + h + 2 if y + h + 2 < reference_y else None
    x2 = x + w + 2 if x + w + 2 < reference_x else None
    return [img[y1:y2, x1:x2] for img in images]
# Convert bgr img to hsv
def to_hsv(img):
    """Convert a BGR image to HSV; also return the separate H, S, V channels."""
    hsv = cv.cvtColor(img.copy(), cv.COLOR_BGR2HSV)
    # OpenCV channel order: hue, saturation, value
    return hsv, hsv[:, :, 0], hsv[:, :, 1], hsv[:, :, 2]
# save img to destination
def save(dst, img):
    """Write img as a JPEG under ../out/ using dst as the base file name."""
    # renamed local so it no longer shadows the builtin `dir`
    path = os.path.join('../out/', dst + '.jpg')
    cv.imwrite(path, img)
# return a mask in the given range over the given image
def in_range(img, lower, upper):
    """Return a binary mask of the pixels of img within [lower, upper]."""
    return cv.inRange(img, lower, upper)
# black square size k*k
def kernel(k):
    """Return an elliptical k-by-k structuring element for morphology ops."""
    size = (k, k)
    return cv.getStructuringElement(cv.MORPH_ELLIPSE, size)
def apply(mask, img):
    """Keep only the pixels of img selected by mask (the rest become black)."""
    return cv.bitwise_and(img, img, mask=mask)
def inv(mask):
    """Return the bitwise inverse of a mask."""
    return cv.bitwise_not(mask)
def iterate_contours(contours, hsv):
    """Classify each candy contour by hue histogram and shape.

    Parameters
    ----------
    contours : sequence of contours (from cv.findContours)
    hsv : the HSV image the contours were found in

    Returns
    -------
    np.ndarray of [label, (x, y) center] pairs; label is one of
    'yellow', 'red', 'green', 'larve'. Unclassified contours are skipped.
    """
    pieces = []
    for contour in contours:
        # BUG FIX: the black canvas must be fresh for every contour.
        # cv.drawContours fills in place, so the original shared canvas
        # accumulated all previously drawn pieces into later masks.
        mask = cv.drawContours(black(hsv), [contour], -1, 255, -1)
        # crop to the piece's bounding box to keep processing cheap
        cp, ms = crop(contour, hsv.copy(), mask)
        candy = apply(ms, cp)
        # hues of pixels with saturation over 100 (ignore washed-out pixels)
        hue = [col[0] for row in candy for col in row if col[1] > 100]
        # 18-bin histogram over OpenCV's hue range 0..179
        hist, _ = np.histogram(hue, 18, (0, 179))
        s = sum(hist)
        # center of the piece
        (x, y), _ = cv.minEnclosingCircle(contour)
        center = (int(x), int(y))
        # minimum-area rectangle around the contour, for the aspect ratio
        rect = cv.minAreaRect(contour)
        # np.intp replaces np.int0 (alias removed in NumPy 2.0)
        box = np.intp(cv.boxPoints(rect))
        x1, y1 = box[0, 0], box[0, 1]
        x2, y2 = box[1, 0], box[1, 1]
        x3, y3 = box[2, 0], box[2, 1]
        # side lengths of the rectangle (hypotenuse of the corner deltas)
        s1 = np.hypot(x2 - x1, y2 - y1)
        s2 = np.hypot(x3 - x2, y3 - y2)
        # aspect ratio, always >= 1
        a = max((s1, s2)) / min((s1, s2))
        # yellow hue (bin 10-20 is way bigger than everything else)
        if (hist[1] > (s - hist[1]) * 2.5) and sum(hist[5:14]) == 0:
            pieces.append(['yellow', center])
            continue
        # red hue (basically only 0-10)
        if hist[0] > sum(hist[1:]) * 25:
            pieces.append(['red', center])
            continue
        # green hue (40-70 is strong)
        if sum(hist[4:7]) > s - sum(hist[4:7]):
            pieces.append(['green', center])
            continue
        # "laber larve" (elongated: aspect ratio over 2:1)
        if a > 2:
            pieces.append(['larve', center])
            continue
    return np.array(pieces)
def handle_input(colors):
    """Prompt until the user picks a candy type; return the matching centers.

    Parameters
    ----------
    colors : iterable of [label, center] pairs (see iterate_contours)

    Returns
    -------
    list of the (x, y) centers whose label matches the user's choice.
    Typing 'exit' terminates the program via sys.exit(0).
    """
    valid = ('yellow', 'red', 'green', 'larve')
    while True:
        print('Choose a candy to pick up')
        print('Available values are: yellow, red, green, larve (or exit)')
        choice = input('> ').strip().lower()
        if choice == 'exit':
            sys.exit(0)
        if choice in valid:
            matches = [c[1] for c in colors if c[0] == choice]
            print(f'Found {len(matches)} pieces of that choice')
            return matches
        # BUG FIX: the original recursed here but discarded the recursive
        # call's return value, so a valid answer on a retry returned None.
        print('Input not understood, try again')
def communicator(pieces):
    """Send the coordinates of the chosen candy pieces to the Arduino.

    Falls back to printing the coordinates when the serial port cannot
    be opened (e.g. no Arduino attached).
    """
    # port to write to; may change from machine to machine
    port = '/dev/ttyS0'
    try:
        ard = serial.Serial(port, 9600, timeout=5)
    except serial.SerialException:
        # narrow except: a bare `except:` would also swallow
        # KeyboardInterrupt / SystemExit
        print('Could not connect to the Arduino')
        print('Pretending to write...')
        for candy in pieces:
            print(f'Sending the arm to coordinate: {candy}')
        return
    # iterate over the selected piece locations
    for candy in pieces:
        ard.flush()
        # only write the x coordinate, as the pieces sit on one row (2D)
        # BUG FIX: pyserial's write() requires bytes on Python 3; passing
        # a str raised TypeError.
        ard.write(str(candy[0]).encode('ascii'))
        time.sleep(10)
        # read back the Arduino's response
        msg = ard.read(ard.inWaiting())
        print(msg)
def main():
    """Load the image named on the command line, find candy, drive the arm."""
    # fail gracefully instead of IndexError when no argument is given
    if len(sys.argv) < 2:
        print('Usage: provide an image file as the first argument')
        sys.exit(1)
    f = sys.argv[1]
    img = load(f)
    print('Loaded image')
    hsv, h, s, v = to_hsv(img)
    # HSV threshold band matching the background (inverted below to
    # select the candy instead)
    lower, upper = (0, 0, 25), (180, 130, 215)
    mask = inv(in_range(hsv, lower, upper))
    # morphological opening removes small noise specks from the mask
    k = kernel(16)
    # renamed so the local no longer shadows the builtin `open`
    opened = cv.morphologyEx(mask, cv.MORPH_OPEN, k)
    # finding the contours
    cnt, _ = cv.findContours(opened, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    contours = [c for c in cnt if cv.contourArea(c) > 2500]  # size sorting
    antal = len(contours)
    print(f'Found {antal} pieces of candy')
    print('Processing image')
    colors = iterate_contours(contours, hsv)
    # get user input
    pieces = handle_input(colors)
    # communicate with Arduino
    communicator(pieces)
# script entry point
if __name__ == '__main__':
    main()
| [
"time.sleep",
"numpy.array",
"sys.exit",
"numpy.histogram",
"cv2.contourArea",
"cv2.minAreaRect",
"numpy.hypot",
"cv2.drawContours",
"cv2.boxPoints",
"cv2.minEnclosingCircle",
"numpy.int0",
"cv2.morphologyEx",
"cv2.cvtColor",
"cv2.GaussianBlur",
"cv2.imread",
"cv2.imwrite",
"cv2.inRa... | [((151, 169), 'cv2.imread', 'cv.imread', (['file', '(1)'], {}), '(file, 1)\n', (160, 169), True, 'import cv2 as cv\n'), ((181, 214), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['img', '(11, 11)', '(5)'], {}), '(img, (11, 11), 5)\n', (196, 214), True, 'import cv2 as cv\n'), ((275, 308), 'numpy.zeros', 'np.zeros', (['im.shape[0:2]', 'np.uint8'], {}), '(im.shape[0:2], np.uint8)\n', (283, 308), True, 'import numpy as np\n'), ((827, 847), 'cv2.boundingRect', 'cv.boundingRect', (['cnt'], {}), '(cnt)\n', (842, 847), True, 'import cv2 as cv\n'), ((1277, 1311), 'cv2.cvtColor', 'cv.cvtColor', (['new', 'cv.COLOR_BGR2HSV'], {}), '(new, cv.COLOR_BGR2HSV)\n', (1288, 1311), True, 'import cv2 as cv\n'), ((1506, 1526), 'cv2.imwrite', 'cv.imwrite', (['dir', 'img'], {}), '(dir, img)\n', (1516, 1526), True, 'import cv2 as cv\n'), ((1629, 1658), 'cv2.inRange', 'cv.inRange', (['img', 'lower', 'upper'], {}), '(img, lower, upper)\n', (1639, 1658), True, 'import cv2 as cv\n'), ((1754, 1804), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_ELLIPSE', '(k, k)'], {}), '(cv.MORPH_ELLIPSE, (k, k))\n', (1778, 1804), True, 'import cv2 as cv\n'), ((1838, 1873), 'cv2.bitwise_and', 'cv.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (1852, 1873), True, 'import cv2 as cv\n'), ((1902, 1922), 'cv2.bitwise_not', 'cv.bitwise_not', (['mask'], {}), '(mask)\n', (1916, 1922), True, 'import cv2 as cv\n'), ((3897, 3913), 'numpy.array', 'np.array', (['pieces'], {}), '(pieces)\n', (3905, 3913), True, 'import numpy as np\n'), ((5776, 5815), 'cv2.morphologyEx', 'cv.morphologyEx', (['mask', 'cv.MORPH_OPEN', 'k'], {}), '(mask, cv.MORPH_OPEN, k)\n', (5791, 5815), True, 'import cv2 as cv\n'), ((5857, 5920), 'cv2.findContours', 'cv.findContours', (['open', 'cv.RETR_EXTERNAL', 'cv.CHAIN_APPROX_SIMPLE'], {}), '(open, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n', (5872, 5920), True, 'import cv2 as cv\n'), ((2293, 2337), 'cv2.drawContours', 'cv.drawContours', 
(['blk', '[contour]', '(-1)', '(255)', '(-1)'], {}), '(blk, [contour], -1, 255, -1)\n', (2308, 2337), True, 'import cv2 as cv\n'), ((2626, 2657), 'numpy.histogram', 'np.histogram', (['hue', '(18)', '(0, 179)'], {}), '(hue, 18, (0, 179))\n', (2638, 2657), True, 'import numpy as np\n'), ((2732, 2762), 'cv2.minEnclosingCircle', 'cv.minEnclosingCircle', (['contour'], {}), '(contour)\n', (2753, 2762), True, 'import cv2 as cv\n'), ((2863, 2886), 'cv2.minAreaRect', 'cv.minAreaRect', (['contour'], {}), '(contour)\n', (2877, 2886), True, 'import cv2 as cv\n'), ((2901, 2919), 'cv2.boxPoints', 'cv.boxPoints', (['rect'], {}), '(rect)\n', (2913, 2919), True, 'import cv2 as cv\n'), ((2934, 2946), 'numpy.int0', 'np.int0', (['box'], {}), '(box)\n', (2941, 2946), True, 'import numpy as np\n'), ((3133, 3159), 'numpy.hypot', 'np.hypot', (['(x2 - x1)', '(y2 - y1)'], {}), '(x2 - x1, y2 - y1)\n', (3141, 3159), True, 'import numpy as np\n'), ((3169, 3195), 'numpy.hypot', 'np.hypot', (['(x3 - x2)', '(y3 - y2)'], {}), '(x3 - x2, y3 - y2)\n', (3177, 3195), True, 'import numpy as np\n'), ((4117, 4128), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4125, 4128), False, 'import sys\n'), ((4962, 4998), 'serial.Serial', 'serial.Serial', (['port', '(9600)'], {'timeout': '(5)'}), '(port, 9600, timeout=5)\n', (4975, 4998), False, 'import serial\n'), ((5379, 5393), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (5389, 5393), False, 'import time\n'), ((5955, 5972), 'cv2.contourArea', 'cv.contourArea', (['c'], {}), '(c)\n', (5969, 5972), True, 'import cv2 as cv\n')] |
"""
<NAME>
camera.py
Construct a camera matrix and apply it to project points onto an image plane.
___
/ _ \
| / \ |
| \_/ |
\___/ ___
_|_|_/[_]\__==_
[---------------]
| O /---\ |
| | | |
| \___/ |
[---------------]
[___]
| |\\
| | \\
[ ] \\_
/|_|\ ( \
//| |\\ \ \
// | | \\ \ \
// |_| \\ \_\
// | | \\
//\ | | /\\
// \ | | / \\
// \ | | / \\
// \|_|/ \\
// [_] \\
// H \\
// H \\
// H \\
// H \\
// H \\
// \\
// \\
Lights...camera...Comp Vis!
"""
import sys
import numpy as np
from numpy import sin, cos
def getIntrinsic(f, d, ic, jc):
    """Build the intrinsic camera matrix K.

    The pixel dimension d is converted to millimeters (divide by 1,000),
    the focal length is scaled by it, and the 3x3 matrix K is assembled
    with the optical axis center (ic, jc).
    """
    scale = f / (d / 1000)
    return np.asmatrix([[scale, 0, ic],
                       [0, scale, jc],
                       [0, 0, 1]])
def getExtrinsic(rotVec, transVec):
    """Build the extrinsic camera matrix plus camera center and axis.

    The rotation vector (degrees, about x, y, z) is converted to radians
    and expanded into the rotation matrix R = Rx @ Ry @ Rz. The returned
    extrinsic matrix is [R^T | -R^T t]; the camera center is -R^T t and
    the axis direction is R^T applied to the unit z vector.

    Returns (Rt, cameraCenter, cameraAxis).
    """
    rx, ry, rz = (np.pi * rotVec) / 180
    cx, sx = cos(rx), sin(rx)
    cy, sy = cos(ry), sin(ry)
    cz, sz = cos(rz), sin(rz)
    Rx = np.asmatrix([[1, 0, 0],
                      [0, cx, -sx],
                      [0, sx, cx]])
    Ry = np.asmatrix([[cy, 0, sy],
                      [0, 1, 0],
                      [-sy, 0, cy]])
    Rz = np.asmatrix([[cz, -sz, 0],
                      [sz, cz, 0],
                      [0, 0, 1]])
    R = Rx @ Ry @ Rz
    t = transVec.transpose()
    Rinv = R.transpose()
    Rt = np.hstack((Rinv, -1 * Rinv * t))
    # camera axis: transposed rotation applied to the unit z vector
    cameraAxis = Rinv * np.asmatrix([0, 0, 1]).transpose()
    cameraCenter = -1 * Rinv * t
    return Rt, cameraCenter, cameraAxis
def output(P, points, cameraCenter, cameraAxis):
    """Print the camera matrix and each point's projection and visibility.

    For every 3D point: its 2D projection through P, whether the
    projection lands inside the 6000x4000 image, and whether the point
    lies in front of (visible) or behind (hidden) the camera.
    """
    print("Matrix M:")
    for row in P:
        print("{:.2f}, {:.2f}, {:.2f}, {:.2f}".format(
            row[0, 0], row[0, 1], row[0, 2], row[0, 3]))
    print("Projections:")
    axisInd, axisDirection = axisInfo(cameraAxis, cameraCenter)
    visible = []
    hidden = []
    for num, coord in enumerate(points):
        x3D, y3D, z3D = coord
        proj = get2D(P, coord)
        y2D, x2D = proj
        print("{}: {} {} {} => {:.1f} {:.1f} {}".format(
            num, x3D, y3D, z3D, x2D, y2D, inOrOut(proj)))
        # points on the far side of the camera plane are hidden
        if axisDirection * coord[axisInd] >= cameraCenter[axisInd]:
            hidden.append(num)
        else:
            visible.append(num)
    print("visible:{}".format("".join(" " + str(n) for n in visible)))
    print("hidden:{}".format("".join(" " + str(n) for n in hidden)))
def axisInfo(camAxis, camCenter):
    """Pick the dominant component of the camera axis and a direction sign.

    Returns the index of the largest-magnitude component of camAxis
    (e.g. [-22, 14, 3] -> 0), and +1 when the camera center's z
    component is positive, else -1.
    """
    # renamed locals so they no longer shadow the builtins min/max
    lo = np.argmin(camAxis)
    hi = np.argmax(camAxis)
    # the dominant component is the one with the largest magnitude
    if np.abs(np.min(camAxis)) > np.max(camAxis):
        axisInd = lo
    else:
        axisInd = hi
    axisDir = 1 if camCenter[2] > 0 else -1
    return axisInd, axisDir
def get2D(P, coord):
    """Project a 3D point through camera matrix P onto the image plane.

    Appends a homogeneous 1 to the point, multiplies by P, and
    dehomogenizes by the resulting third component. Returns the first
    two components of the normalized 2D coordinate.
    """
    homogeneous = np.vstack((np.asmatrix(coord).transpose(), 1))
    projected = P * homogeneous
    normalized = projected[0:2] / projected[2]
    return normalized[0, 0], normalized[1, 0]
def inOrOut(coord):
    """Return 'inside' when (x, y) lies within the 6000x4000 image, else 'outside'."""
    x, y = coord
    within = 0 <= x <= 6000 and 0 <= y <= 4000
    return "inside" if within else "outside"
if __name__ == "__main__":
    """
    Handle command line arguments
    """
    if len(sys.argv) != 3:
        print("Correct usage: p1_camera.py paramsFile pointsFile")
        sys.exit()
    else:
        paramsFile = sys.argv[1]
        pointsFile = sys.argv[2]
    """
    Open and read paramsFile
    """
    # NOTE(review): the file handles opened below are never closed --
    # consider `with open(...)` blocks.
    try:
        openParams = open(paramsFile, "r")
    except FileNotFoundError:
        print("No file {} found".format(paramsFile))
        sys.exit()
    try:
        params = list(map(float, openParams.read().split()))
    except ValueError:
        print("Params must be a floats or ints!")
        sys.exit()
    """
    Convert file info to appropriate data types
    """
    # first 3 values: rotation (degrees); next 3: translation;
    # last 4: focal length, pixel dimension, optical center (ic, jc)
    rotationVec = np.asarray(params[0:3])
    translationVec = np.asmatrix(params[3:6])
    f, d, ic, jc = params[6:10]
    """
    Open and read pointsFile
    """
    try:
        openPoints = open(pointsFile, "r")
    except FileNotFoundError:
        print("No file {} found".format(pointsFile))
        sys.exit()
    try:
        points = np.loadtxt(openPoints, dtype=np.float64)
    except ValueError:
        print("Malformed points file: {}, must be numbers".format(pointsFile))
        sys.exit()
    """
    Calculate P
    """
    # full camera matrix: intrinsic K times extrinsic [R^T | -R^T t]
    K = getIntrinsic(f, d, ic, jc)
    Rt, camCen, camAx = getExtrinsic(rotationVec, translationVec)
    P = K * Rt
    """
    Output stats
    """
    output(P, points, camCen, camAx)
| [
"numpy.asmatrix",
"numpy.sin",
"numpy.asarray",
"numpy.argmax",
"numpy.min",
"numpy.max",
"numpy.matmul",
"numpy.vstack",
"numpy.cos",
"sys.exit",
"numpy.argmin",
"numpy.loadtxt"
] | [((2010, 2058), 'numpy.asmatrix', 'np.asmatrix', (['[[s, 0, ic], [0, s, jc], [0, 0, 1]]'], {}), '([[s, 0, ic], [0, s, jc], [0, 0, 1]])\n', (2021, 2058), True, 'import numpy as np\n'), ((3232, 3249), 'numpy.matmul', 'np.matmul', (['Rx', 'Ry'], {}), '(Rx, Ry)\n', (3241, 3249), True, 'import numpy as np\n'), ((3258, 3274), 'numpy.matmul', 'np.matmul', (['R', 'Rz'], {}), '(R, Rz)\n', (3267, 3274), True, 'import numpy as np\n'), ((5178, 5196), 'numpy.argmin', 'np.argmin', (['camAxis'], {}), '(camAxis)\n', (5187, 5196), True, 'import numpy as np\n'), ((5207, 5225), 'numpy.argmax', 'np.argmax', (['camAxis'], {}), '(camAxis)\n', (5216, 5225), True, 'import numpy as np\n'), ((5743, 5764), 'numpy.vstack', 'np.vstack', (['(coord, 1)'], {}), '((coord, 1))\n', (5752, 5764), True, 'import numpy as np\n'), ((5780, 5799), 'numpy.matmul', 'np.matmul', (['P', 'coord'], {}), '(P, coord)\n', (5789, 5799), True, 'import numpy as np\n'), ((6854, 6877), 'numpy.asarray', 'np.asarray', (['params[0:3]'], {}), '(params[0:3])\n', (6864, 6877), True, 'import numpy as np\n'), ((6899, 6923), 'numpy.asmatrix', 'np.asmatrix', (['params[3:6]'], {}), '(params[3:6])\n', (6910, 6923), True, 'import numpy as np\n'), ((5260, 5275), 'numpy.max', 'np.max', (['camAxis'], {}), '(camAxis)\n', (5266, 5275), True, 'import numpy as np\n'), ((6321, 6331), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6329, 6331), False, 'import sys\n'), ((7183, 7223), 'numpy.loadtxt', 'np.loadtxt', (['openPoints'], {'dtype': 'np.float64'}), '(openPoints, dtype=np.float64)\n', (7193, 7223), True, 'import numpy as np\n'), ((3369, 3391), 'numpy.asmatrix', 'np.asmatrix', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (3380, 3391), True, 'import numpy as np\n'), ((5241, 5256), 'numpy.min', 'np.min', (['camAxis'], {}), '(camAxis)\n', (5247, 5256), True, 'import numpy as np\n'), ((5700, 5718), 'numpy.asmatrix', 'np.asmatrix', (['coord'], {}), '(coord)\n', (5711, 5718), True, 'import numpy as np\n'), ((6597, 6607), 'sys.exit', 'sys.exit', ([], 
{}), '()\n', (6605, 6607), False, 'import sys\n'), ((6760, 6770), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6768, 6770), False, 'import sys\n'), ((7145, 7155), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7153, 7155), False, 'import sys\n'), ((7334, 7344), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7342, 7344), False, 'import sys\n'), ((2860, 2867), 'numpy.cos', 'cos', (['rx'], {}), '(rx)\n', (2863, 2867), False, 'from numpy import sin, cos\n'), ((2908, 2915), 'numpy.sin', 'sin', (['rx'], {}), '(rx)\n', (2911, 2915), False, 'from numpy import sin, cos\n'), ((2917, 2924), 'numpy.cos', 'cos', (['rx'], {}), '(rx)\n', (2920, 2924), False, 'from numpy import sin, cos\n'), ((2955, 2962), 'numpy.cos', 'cos', (['ry'], {}), '(ry)\n', (2958, 2962), False, 'from numpy import sin, cos\n'), ((2970, 2977), 'numpy.sin', 'sin', (['ry'], {}), '(ry)\n', (2973, 2977), False, 'from numpy import sin, cos\n'), ((3066, 3073), 'numpy.cos', 'cos', (['ry'], {}), '(ry)\n', (3069, 3073), False, 'from numpy import sin, cos\n'), ((3101, 3108), 'numpy.cos', 'cos', (['rz'], {}), '(rz)\n', (3104, 3108), False, 'from numpy import sin, cos\n'), ((3149, 3156), 'numpy.sin', 'sin', (['rz'], {}), '(rz)\n', (3152, 3156), False, 'from numpy import sin, cos\n'), ((3158, 3165), 'numpy.cos', 'cos', (['rz'], {}), '(rz)\n', (3161, 3165), False, 'from numpy import sin, cos\n'), ((2872, 2879), 'numpy.sin', 'sin', (['rx'], {}), '(rx)\n', (2875, 2879), False, 'from numpy import sin, cos\n'), ((3054, 3061), 'numpy.sin', 'sin', (['ry'], {}), '(ry)\n', (3057, 3061), False, 'from numpy import sin, cos\n'), ((3113, 3120), 'numpy.sin', 'sin', (['rz'], {}), '(rz)\n', (3116, 3120), False, 'from numpy import sin, cos\n')] |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Interfaces under evaluation before upstreaming to nipype.interfaces.utility."""
import numpy as np
import re
import json
from collections import OrderedDict
from nipype.utils.filemanip import fname_presuffix
from nipype.interfaces.io import add_traits
from nipype.interfaces.base import (
BaseInterface,
BaseInterfaceInputSpec,
DynamicTraitedSpec,
File,
InputMultiObject,
isdefined,
SimpleInterface,
Str,
TraitedSpec,
traits,
)
class _KeySelectInputSpec(DynamicTraitedSpec):
    """Input specification for :class:`KeySelect` (data fields are added dynamically)."""

    key = Str(mandatory=True, desc="selective key")
    keys = InputMultiObject(Str, mandatory=True, min=1, desc="index of keys")
class _KeySelectOutputSpec(DynamicTraitedSpec):
    """Output specification for :class:`KeySelect` (selected fields are added dynamically)."""

    key = Str(desc="propagates selected key")
class KeySelect(BaseInterface):
    """
    An interface that operates similarly to an OrderedDict.
    >>> ks = KeySelect(keys=['MNI152NLin6Asym', 'MNI152Lin', 'fsaverage'],
    ...                fields=['field1', 'field2', 'field3'])
    >>> ks.inputs.field1 = ['fsl', 'mni', 'freesurfer']
    >>> ks.inputs.field2 = ['volume', 'volume', 'surface']
    >>> ks.inputs.field3 = [True, False, False]
    >>> ks.inputs.key = 'MNI152Lin'
    >>> ks.run().outputs
    <BLANKLINE>
    field1 = mni
    field2 = volume
    field3 = False
    key = MNI152Lin
    <BLANKLINE>
    >>> ks = KeySelect(fields=['field1', 'field2', 'field3'])
    >>> ks.inputs.keys=['MNI152NLin6Asym', 'MNI152Lin', 'fsaverage']
    >>> ks.inputs.field1 = ['fsl', 'mni', 'freesurfer']
    >>> ks.inputs.field2 = ['volume', 'volume', 'surface']
    >>> ks.inputs.field3 = [True, False, False]
    >>> ks.inputs.key = 'MNI152Lin'
    >>> ks.run().outputs
    <BLANKLINE>
    field1 = mni
    field2 = volume
    field3 = False
    key = MNI152Lin
    <BLANKLINE>
    >>> ks.inputs.field1 = ['fsl', 'mni', 'freesurfer']
    >>> ks.inputs.field2 = ['volume', 'volume', 'surface']
    >>> ks.inputs.field3 = [True, False, False]
    >>> ks.inputs.key = 'fsaverage'
    >>> ks.run().outputs
    <BLANKLINE>
    field1 = freesurfer
    field2 = surface
    field3 = False
    key = fsaverage
    <BLANKLINE>
    >>> ks.inputs.field1 = ['fsl', 'mni', 'freesurfer']
    >>> ks.inputs.field2 = ['volume', 'volume', 'surface']
    >>> ks.inputs.field3 = [True, False]  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ValueError: Trying to set an invalid value
    >>> ks.inputs.key = 'MNINLin2009cAsym'
    Traceback (most recent call last):
    ValueError: Selected key "MNINLin2009cAsym" not found in the index
    >>> ks = KeySelect(fields=['field1', 'field2', 'field3'])
    >>> ks.inputs.keys=['MNI152NLin6Asym']
    >>> ks.inputs.field1 = ['fsl']
    >>> ks.inputs.field2 = ['volume']
    >>> ks.inputs.field3 = [True]
    >>> ks.inputs.key = 'MNI152NLin6Asym'
    >>> ks.run().outputs
    <BLANKLINE>
    field1 = fsl
    field2 = volume
    field3 = True
    key = MNI152NLin6Asym
    <BLANKLINE>
    """

    input_spec = _KeySelectInputSpec
    output_spec = _KeySelectOutputSpec

    def __init__(self, keys=None, fields=None, **inputs):
        """
        Instantiate a KeySelect utility interface.
        Examples
        --------
        >>> ks = KeySelect(fields='field1')
        >>> ks.inputs
        <BLANKLINE>
        field1 = <undefined>
        key = <undefined>
        keys = <undefined>
        <BLANKLINE>
        >>> ks = KeySelect(fields='field1', field1=['a', 'b'])
        >>> ks.inputs
        <BLANKLINE>
        field1 = ['a', 'b']
        key = <undefined>
        keys = <undefined>
        <BLANKLINE>
        >>> ks = KeySelect()
        Traceback (most recent call last):
        ValueError: A list or multiplexed...
        >>> ks = KeySelect(fields='key')
        Traceback (most recent call last):
        ValueError: Some fields are invalid...
        """
        # Call constructor
        super(KeySelect, self).__init__(**inputs)
        # Handle and initiate fields
        if not fields:
            raise ValueError(
                "A list or multiplexed fields must be provided at "
                "instantiation time."
            )
        if isinstance(fields, str):
            fields = [fields]
        # dynamic fields must not collide with the static traits (key, keys)
        _invalid = set(self.input_spec.class_editable_traits()).intersection(fields)
        if _invalid:
            raise ValueError("Some fields are invalid (%s)." % ", ".join(_invalid))
        self._fields = fields
        # Attach events: validate inputs as they are set
        self.inputs.on_trait_change(self._check_len)
        if keys:
            self.inputs.keys = keys
        # Add fields in self._fields
        add_traits(self.inputs, self._fields)
        for in_field in set(self._fields).intersection(inputs.keys()):
            setattr(self.inputs, in_field, inputs[in_field])

    def _check_len(self, name, new):
        # Trait-change listener keeping 'keys', 'key' and the dynamic
        # fields mutually consistent.
        if name == "keys":
            nitems = len(new)
            if len(set(new)) != nitems:
                raise ValueError(
                    "Found duplicated entries in the index of ordered keys"
                )
        if not isdefined(self.inputs.keys):
            return
        if name == "key" and new not in self.inputs.keys:
            raise ValueError('Selected key "%s" not found in the index' % new)
        if name in self._fields:
            # every multiplexed field must be a non-string sequence whose
            # length matches 'keys'
            if isinstance(new, str) or len(new) < 1:
                raise ValueError(
                    'Trying to set an invalid value (%s) for input "%s"' % (new, name)
                )
            if len(new) != len(self.inputs.keys):
                raise ValueError(
                    'Length of value (%s) for input field "%s" does not match '
                    "the length of the indexing list." % (new, name)
                )

    def _run_interface(self, runtime):
        # Selection is computed in _list_outputs; nothing to execute here.
        return runtime

    def _list_outputs(self):
        # For every field, pick the element aligned with the selected key.
        index = self.inputs.keys.index(self.inputs.key)
        outputs = {k: getattr(self.inputs, k)[index] for k in self._fields}
        outputs["key"] = self.inputs.key
        return outputs

    def _outputs(self):
        # Mirror the dynamic fields on the output specification.
        base = super(KeySelect, self)._outputs()
        base = add_traits(base, self._fields)
        return base
class _AddTSVHeaderInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`AddTSVHeader`."""

    in_file = File(exists=True, mandatory=True, desc="input file")
    columns = traits.List(traits.Str, mandatory=True, desc="header for columns")
class _AddTSVHeaderOutputSpec(TraitedSpec):
    """Output specification for :class:`AddTSVHeader`."""

    # NOTE(review): the desc 'output average file' looks copy-pasted from
    # another interface -- confirm and reword if so.
    out_file = File(exists=True, desc="output average file")
class AddTSVHeader(SimpleInterface):
    r"""Add a header row to a TSV file
    Examples
    --------
    An example TSV:
    >>> np.savetxt('data.tsv', np.arange(30).reshape((6, 5)), delimiter='\t')
    Add headers:
    >>> addheader = AddTSVHeader()
    >>> addheader.inputs.in_file = 'data.tsv'
    >>> addheader.inputs.columns = ['a', 'b', 'c', 'd', 'e']
    >>> res = addheader.run()
    >>> df = pd.read_csv(res.outputs.out_file, delim_whitespace=True,
    ...                  index_col=None)
    >>> df.columns.ravel().tolist()
    ['a', 'b', 'c', 'd', 'e']
    >>> np.all(df.values == np.arange(30).reshape((6, 5)))
    True
    """

    input_spec = _AddTSVHeaderInputSpec
    output_spec = _AddTSVHeaderOutputSpec

    def _run_interface(self, runtime):
        # Build the output path in the working directory, replacing the
        # input's extension with '_motion.tsv'.
        out_file = fname_presuffix(
            self.inputs.in_file,
            suffix="_motion.tsv",
            newpath=runtime.cwd,
            use_ext=False,
        )
        # Rewrite the data with the requested column names as a header row;
        # comments="" prevents numpy from prefixing the header with '#'.
        data = np.loadtxt(self.inputs.in_file)
        np.savetxt(
            out_file,
            data,
            delimiter="\t",
            header="\t".join(self.inputs.columns),
            comments="",
        )
        self._results["out_file"] = out_file
        return runtime
class _JoinTSVColumnsInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc="input file")
join_file = File(exists=True, mandatory=True, desc="file to be adjoined")
side = traits.Enum("right", "left", usedefault=True, desc="where to join")
columns = traits.List(traits.Str, desc="header for columns")
class _JoinTSVColumnsOutputSpec(TraitedSpec):
    # Path to the column-joined TSV written by the interface.
    out_file = File(exists=True, desc="output TSV file")
class JoinTSVColumns(SimpleInterface):
    r"""Join the columns of two TSV files side by side.
    Examples
    --------
    An example TSV:
    >>> data = np.arange(30).reshape((6, 5))
    >>> np.savetxt('data.tsv', data[:, :3], delimiter='\t')
    >>> np.savetxt('add.tsv', data[:, 3:], delimiter='\t')
    Join without naming headers:
    >>> join = JoinTSVColumns()
    >>> join.inputs.in_file = 'data.tsv'
    >>> join.inputs.join_file = 'add.tsv'
    >>> res = join.run()
    >>> df = pd.read_csv(res.outputs.out_file, delim_whitespace=True,
    ...                  index_col=None, dtype=float, header=None)
    >>> df.columns.ravel().tolist() == list(range(5))
    True
    >>> np.all(df.values.astype(int) == data)
    True
    Adding column names:
    >>> join = JoinTSVColumns()
    >>> join.inputs.in_file = 'data.tsv'
    >>> join.inputs.join_file = 'add.tsv'
    >>> join.inputs.columns = ['a', 'b', 'c', 'd', 'e']
    >>> res = join.run()
    >>> res.outputs.out_file  # doctest: +ELLIPSIS
    '...data_joined.tsv'
    >>> df = pd.read_csv(res.outputs.out_file, delim_whitespace=True,
    ...                  index_col=None)
    >>> df.columns.ravel().tolist()
    ['a', 'b', 'c', 'd', 'e']
    >>> np.all(df.values == np.arange(30).reshape((6, 5)))
    True
    >>> join = JoinTSVColumns()
    >>> join.inputs.in_file = 'data.tsv'
    >>> join.inputs.join_file = 'add.tsv'
    >>> join.inputs.side = 'left'
    >>> join.inputs.columns = ['a', 'b', 'c', 'd', 'e']
    >>> res = join.run()
    >>> df = pd.read_csv(res.outputs.out_file, delim_whitespace=True,
    ...                  index_col=None)
    >>> df.columns.ravel().tolist()
    ['a', 'b', 'c', 'd', 'e']
    >>> np.all(df.values == np.hstack((data[:, 3:], data[:, :3])))
    True
    """

    input_spec = _JoinTSVColumnsInputSpec
    output_spec = _JoinTSVColumnsOutputSpec

    def _run_interface(self, runtime):
        """Concatenate ``in_file`` and ``join_file`` line by line."""
        out_file = fname_presuffix(
            self.inputs.in_file,
            suffix="_joined.tsv",
            newpath=runtime.cwd,
            use_ext=False,
        )
        # Optional header row written above the joined data.
        header = ""
        if isdefined(self.inputs.columns) and self.inputs.columns:
            header = "\t".join(self.inputs.columns)
        with open(self.inputs.in_file) as ifh:
            data = ifh.read().splitlines(keepends=False)
        with open(self.inputs.join_file) as ifh:
            join = ifh.read().splitlines(keepends=False)
        # ``data``/``join`` hold one entry per line, i.e. per row.
        # BUGFIX: the message previously said "columns" although the check
        # compares line (row) counts.
        if len(data) != len(join):
            raise ValueError("Number of rows in datasets do not match")
        merged = []
        for d, j in zip(data, join):
            # side == "left" puts the joined file's columns first.
            line = "%s\t%s" % ((j, d) if self.inputs.side == "left" else (d, j))
            merged.append(line)
        if header:
            merged.insert(0, header)
        with open(out_file, "w") as ofh:
            ofh.write("\n".join(merged))
        self._results["out_file"] = out_file
        return runtime
class _DictMergeInputSpec(BaseInterfaceInputSpec):
    # Ordered list of dictionaries; later entries win on key collisions.
    in_dicts = traits.List(
        traits.Either(traits.Dict, traits.Instance(OrderedDict)),
        desc="Dictionaries to be merged. In the event of a collision, values "
        "from dictionaries later in the list receive precedence.",
    )
class _DictMergeOutputSpec(TraitedSpec):
    # Union of all input dictionaries.
    out_dict = traits.Dict(desc="Merged dictionary")
class DictMerge(SimpleInterface):
    """Merge (ordered) dictionaries."""

    input_spec = _DictMergeInputSpec
    output_spec = _DictMergeOutputSpec

    def _run_interface(self, runtime):
        # Later dictionaries overwrite earlier ones on key collisions.
        merged = {}
        for dictionary in self.inputs.in_dicts:
            merged.update(dictionary)
        self._results["out_dict"] = merged
        return runtime
class _TSV2JSONInputSpec(BaseInterfaceInputSpec):
    # TSV file whose rows become entries of the output JSON.
    in_file = File(exists=True, mandatory=True, desc="Input TSV file")
    index_column = traits.Str(
        mandatory=True,
        desc="Name of the column in the TSV to be used "
        "as the top-level key in the JSON. All "
        "remaining columns will be assigned as "
        "nested keys.",
    )
    # When None, ``_tsv2json`` returns a dictionary instead of writing a file.
    output = traits.Either(
        None,
        File,
        desc="Path where the output file is to be saved. "
        "If this is `None`, then a JSON-compatible "
        "dictionary is returned instead.",
    )
    # Key/value pairs copied into every entry of the JSON.
    additional_metadata = traits.Either(
        None,
        traits.Dict,
        traits.Instance(OrderedDict),
        usedefault=True,
        desc="Any additional metadata that "
        "should be applied to all "
        "entries in the JSON.",
    )
    # Columns removed from the TSV before conversion.
    drop_columns = traits.Either(
        None,
        traits.List(),
        usedefault=True,
        desc="List of columns in the TSV to be " "dropped from the JSON.",
    )
    # BIDS-style casing: snake_case index keys, CamelCase nested keys.
    enforce_case = traits.Bool(
        True,
        usedefault=True,
        desc="Enforce snake case for top-level keys " "and camel case for nested keys",
    )
class _TSV2JSONOutputSpec(TraitedSpec):
    # Either the path of the written JSON file or an in-memory dictionary
    # (when no output path was requested).
    output = traits.Either(
        traits.Dict,
        File(exists=True),
        traits.Instance(OrderedDict),
        desc="Output dictionary or JSON file",
    )
class TSV2JSON(SimpleInterface):
    """Convert metadata from TSV format to JSON format."""

    input_spec = _TSV2JSONInputSpec
    output_spec = _TSV2JSONOutputSpec

    def _run_interface(self, runtime):
        # If no explicit output path was given, derive one from the input
        # filename inside the working directory.
        if isdefined(self.inputs.output):
            target = self.inputs.output
        else:
            target = fname_presuffix(
                self.inputs.in_file, suffix=".json", newpath=runtime.cwd, use_ext=False
            )
        self._results["output"] = _tsv2json(
            in_tsv=self.inputs.in_file,
            out_json=target,
            index_column=self.inputs.index_column,
            additional_metadata=self.inputs.additional_metadata,
            drop_columns=self.inputs.drop_columns,
            enforce_case=self.inputs.enforce_case,
        )
        return runtime
def _tsv2json(
in_tsv,
out_json,
index_column,
additional_metadata=None,
drop_columns=None,
enforce_case=True,
):
"""
Convert metadata from TSV format to JSON format.
Parameters
----------
in_tsv: str
Path to the metadata in TSV format.
out_json: str
Path where the metadata should be saved in JSON format after
conversion. If this is None, then a dictionary is returned instead.
index_column: str
Name of the column in the TSV to be used as an index (top-level key in
the JSON).
additional_metadata: dict
Any additional metadata that should be applied to all entries in the
JSON.
drop_columns: list
List of columns from the input TSV to be dropped from the JSON.
enforce_case: bool
Indicates whether BIDS case conventions should be followed. Currently,
this means that index fields (column names in the associated data TSV)
use snake case and other fields use camel case.
Returns
-------
str
Path to the metadata saved in JSON format.
"""
import pandas as pd
# Adapted from https://dev.to/rrampage/snake-case-to-camel-case-and- ...
# back-using-regular-expressions-and-python-m9j
re_to_camel = r"(.*?)_([a-zA-Z0-9])"
re_to_snake = r"(^.+?|.*?)((?<![_A-Z])[A-Z]|(?<![_0-9])[0-9]+)"
def snake(match):
return "{}_{}".format(match.group(1).lower(), match.group(2).lower())
def camel(match):
return "{}{}".format(match.group(1), match.group(2).upper())
# from fmriprep
def less_breakable(a_string):
"""hardens the string to different envs (i.e. case insensitive, no
whitespace, '#'"""
return "".join(a_string.split()).strip("#")
drop_columns = drop_columns or []
additional_metadata = additional_metadata or {}
tsv_data = pd.read_csv(in_tsv, "\t")
for k, v in additional_metadata.items():
tsv_data[k] = [v] * len(tsv_data.index)
for col in drop_columns:
tsv_data.drop(labels=col, axis="columns", inplace=True)
tsv_data.set_index(index_column, drop=True, inplace=True)
if enforce_case:
tsv_data.index = [
re.sub(re_to_snake, snake, less_breakable(i), 0).lower()
for i in tsv_data.index
]
tsv_data.columns = [
re.sub(re_to_camel, camel, less_breakable(i).title(), 0).replace(
"Csf", "CSF"
)
for i in tsv_data.columns
]
json_data = tsv_data.to_json(orient="index")
json_data = json.JSONDecoder(object_pairs_hook=OrderedDict).decode(json_data)
for i in json_data:
json_data[i].update(additional_metadata)
if out_json is None:
return json_data
with open(out_json, "w") as f:
json.dump(json_data, f, indent=4)
return out_json
| [
"nipype.interfaces.base.InputMultiObject",
"nipype.interfaces.base.isdefined",
"nipype.utils.filemanip.fname_presuffix",
"nipype.interfaces.base.traits.Instance",
"nipype.interfaces.base.traits.Str",
"pandas.read_csv",
"nipype.interfaces.io.add_traits",
"nipype.interfaces.base.traits.List",
"nipype.... | [((645, 686), 'nipype.interfaces.base.Str', 'Str', ([], {'mandatory': '(True)', 'desc': '"""selective key"""'}), "(mandatory=True, desc='selective key')\n", (648, 686), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((698, 764), 'nipype.interfaces.base.InputMultiObject', 'InputMultiObject', (['Str'], {'mandatory': '(True)', 'min': '(1)', 'desc': '"""index of keys"""'}), "(Str, mandatory=True, min=1, desc='index of keys')\n", (714, 764), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((825, 860), 'nipype.interfaces.base.Str', 'Str', ([], {'desc': '"""propagates selected key"""'}), "(desc='propagates selected key')\n", (828, 860), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((6334, 6386), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '"""input file"""'}), "(exists=True, mandatory=True, desc='input file')\n", (6338, 6386), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((6401, 6467), 'nipype.interfaces.base.traits.List', 'traits.List', (['traits.Str'], {'mandatory': '(True)', 'desc': '"""header for columns"""'}), "(traits.Str, mandatory=True, desc='header for columns')\n", (6412, 6467), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((6529, 6574), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'desc': '"""output 
average file"""'}), "(exists=True, desc='output average file')\n", (6533, 6574), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((7882, 7934), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '"""input file"""'}), "(exists=True, mandatory=True, desc='input file')\n", (7886, 7934), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((7951, 8012), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '"""file to be adjoined"""'}), "(exists=True, mandatory=True, desc='file to be adjoined')\n", (7955, 8012), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((8024, 8091), 'nipype.interfaces.base.traits.Enum', 'traits.Enum', (['"""right"""', '"""left"""'], {'usedefault': '(True)', 'desc': '"""where to join"""'}), "('right', 'left', usedefault=True, desc='where to join')\n", (8035, 8091), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((8106, 8156), 'nipype.interfaces.base.traits.List', 'traits.List', (['traits.Str'], {'desc': '"""header for columns"""'}), "(traits.Str, desc='header for columns')\n", (8117, 8156), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((8220, 8261), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'desc': '"""output TSV file"""'}), "(exists=True, desc='output TSV file')\n", 
(8224, 8261), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((11523, 11560), 'nipype.interfaces.base.traits.Dict', 'traits.Dict', ([], {'desc': '"""Merged dictionary"""'}), "(desc='Merged dictionary')\n", (11534, 11560), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((11992, 12048), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '"""Input TSV file"""'}), "(exists=True, mandatory=True, desc='Input TSV file')\n", (11996, 12048), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((12068, 12242), 'nipype.interfaces.base.traits.Str', 'traits.Str', ([], {'mandatory': '(True)', 'desc': '"""Name of the column in the TSV to be used as the top-level key in the JSON. All remaining columns will be assigned as nested keys."""'}), "(mandatory=True, desc=\n 'Name of the column in the TSV to be used as the top-level key in the JSON. All remaining columns will be assigned as nested keys.'\n )\n", (12078, 12242), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((12302, 12462), 'nipype.interfaces.base.traits.Either', 'traits.Either', (['None', 'File'], {'desc': '"""Path where the output file is to be saved. If this is `None`, then a JSON-compatible dictionary is returned instead."""'}), "(None, File, desc=\n 'Path where the output file is to be saved. 
If this is `None`, then a JSON-compatible dictionary is returned instead.'\n )\n", (12315, 12462), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((12960, 13076), 'nipype.interfaces.base.traits.Bool', 'traits.Bool', (['(True)'], {'usedefault': '(True)', 'desc': '"""Enforce snake case for top-level keys and camel case for nested keys"""'}), "(True, usedefault=True, desc=\n 'Enforce snake case for top-level keys and camel case for nested keys')\n", (12971, 13076), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((16021, 16046), 'pandas.read_csv', 'pd.read_csv', (['in_tsv', '"""\t"""'], {}), "(in_tsv, '\\t')\n", (16032, 16046), True, 'import pandas as pd\n'), ((4719, 4756), 'nipype.interfaces.io.add_traits', 'add_traits', (['self.inputs', 'self._fields'], {}), '(self.inputs, self._fields)\n', (4729, 4756), False, 'from nipype.interfaces.io import add_traits\n'), ((6213, 6243), 'nipype.interfaces.io.add_traits', 'add_traits', (['base', 'self._fields'], {}), '(base, self._fields)\n', (6223, 6243), False, 'from nipype.interfaces.io import add_traits\n'), ((7366, 7465), 'nipype.utils.filemanip.fname_presuffix', 'fname_presuffix', (['self.inputs.in_file'], {'suffix': '"""_motion.tsv"""', 'newpath': 'runtime.cwd', 'use_ext': '(False)'}), "(self.inputs.in_file, suffix='_motion.tsv', newpath=runtime.\n cwd, use_ext=False)\n", (7381, 7465), False, 'from nipype.utils.filemanip import fname_presuffix\n'), ((7535, 7566), 'numpy.loadtxt', 'np.loadtxt', (['self.inputs.in_file'], {}), '(self.inputs.in_file)\n', (7545, 7566), True, 'import numpy as np\n'), ((10169, 10268), 'nipype.utils.filemanip.fname_presuffix', 'fname_presuffix', (['self.inputs.in_file'], {'suffix': '"""_joined.tsv"""', 'newpath': 'runtime.cwd', 
'use_ext': '(False)'}), "(self.inputs.in_file, suffix='_joined.tsv', newpath=runtime.\n cwd, use_ext=False)\n", (10184, 10268), False, 'from nipype.utils.filemanip import fname_presuffix\n'), ((12590, 12618), 'nipype.interfaces.base.traits.Instance', 'traits.Instance', (['OrderedDict'], {}), '(OrderedDict)\n', (12605, 12618), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((12820, 12833), 'nipype.interfaces.base.traits.List', 'traits.List', ([], {}), '()\n', (12831, 12833), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((13205, 13222), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)'}), '(exists=True)\n', (13209, 13222), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((13232, 13260), 'nipype.interfaces.base.traits.Instance', 'traits.Instance', (['OrderedDict'], {}), '(OrderedDict)\n', (13247, 13260), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((16955, 16988), 'json.dump', 'json.dump', (['json_data', 'f'], {'indent': '(4)'}), '(json_data, f, indent=4)\n', (16964, 16988), False, 'import json\n'), ((5169, 5196), 'nipype.interfaces.base.isdefined', 'isdefined', (['self.inputs.keys'], {}), '(self.inputs.keys)\n', (5178, 5196), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((10355, 10385), 'nipype.interfaces.base.isdefined', 'isdefined', (['self.inputs.columns'], {}), 
'(self.inputs.columns)\n', (10364, 10385), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((11282, 11310), 'nipype.interfaces.base.traits.Instance', 'traits.Instance', (['OrderedDict'], {}), '(OrderedDict)\n', (11297, 11310), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((13539, 13568), 'nipype.interfaces.base.isdefined', 'isdefined', (['self.inputs.output'], {}), '(self.inputs.output)\n', (13548, 13568), False, 'from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, isdefined, SimpleInterface, Str, TraitedSpec, traits\n'), ((13591, 13683), 'nipype.utils.filemanip.fname_presuffix', 'fname_presuffix', (['self.inputs.in_file'], {'suffix': '""".json"""', 'newpath': 'runtime.cwd', 'use_ext': '(False)'}), "(self.inputs.in_file, suffix='.json', newpath=runtime.cwd,\n use_ext=False)\n", (13606, 13683), False, 'from nipype.utils.filemanip import fname_presuffix\n'), ((16721, 16768), 'json.JSONDecoder', 'json.JSONDecoder', ([], {'object_pairs_hook': 'OrderedDict'}), '(object_pairs_hook=OrderedDict)\n', (16737, 16768), False, 'import json\n')] |
# Add parent folder to path so that the ``src`` package is importable when
# this test file is executed directly from its own directory.
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import unittest
import numpy as np
from src.Equations.KineticEnergy import KineticEnergy
from src.Common import particle_dtype
class test_kinetic_energy(unittest.TestCase):
    def test(self):
        """KineticEnergy is zero for an all-zero particle array and equals
        sum(0.5 * m * v^2) once masses and velocities are set."""
        count = 100
        particles = np.zeros(count, dtype=particle_dtype)

        # All masses and velocities start at zero, so the energy vanishes.
        self.assertEqual(KineticEnergy(len(particles), particles), 0)

        # Unit mass moving at unit speed along x for every particle:
        # E = count * 0.5 * m * v^2.
        particles['m'] = 1
        particles['vx'] = 1
        self.assertEqual(
            KineticEnergy(len(particles), particles), count * 0.5 * 1 * 1
        )
if __name__ == "__main__":
    # Use the unittest runner instead of instantiating the TestCase and
    # calling the test method by hand: this reports results properly and
    # sets a meaningful exit status.
    unittest.main()
"numpy.zeros",
"os.path.join"
] | [((62, 93), 'os.path.join', 'os.path.join', (['sys.path[0]', '""".."""'], {}), "(sys.path[0], '..')\n", (74, 93), False, 'import sys, os\n'), ((321, 356), 'numpy.zeros', 'np.zeros', (['num'], {'dtype': 'particle_dtype'}), '(num, dtype=particle_dtype)\n', (329, 356), True, 'import numpy as np\n')] |
#-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Copyright 2004-2005 by Enthought, Inc.
#
# The code has been adapted by <NAME> to work with GPUs
# using Cocos from the SciPy code available at
# https://github.com/scipy/scipy/blob/master/scipy/stats/kde.py
#
# The open source license of the original code is reproduced below:
#
# Copyright (c) 2001-2002 Enthought, Inc. 2003-2019, SciPy Developers.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#-------------------------------------------------------------------------------
# Standard library imports.
import math
from abc import ABC, abstractmethod
from dataclasses import dataclass
from cached_property import cached_property
import numbers
import typing as tp
import warnings
from cocos.multi_processing.device_pool import ComputeDevicePool
from cocos.multi_processing.single_device_batch_processing import map_combine_single_device
from cocos.multi_processing.utilities import generate_slices_with_number_of_batches
import cocos.numerics as cn
from cocos.numerics.data_types import NumericArray
import cocos.device as cd
from cocos.numerics.numerical_package_selector import select_num_pack
# SciPy imports.
from scipy import linalg, special
from scipy.special import logsumexp
from scipy._lib._util import check_random_state
from numpy import (asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi,
sqrt, ravel, power, atleast_1d, squeeze, sum, transpose,
ones, cov)
import numpy as np
# Local imports.
# # from . import mvn
# from scipy.stats import mvn
# from ._stats import gaussian_kernel_estimate
#
# from scipy.stats._stats import gaussian_kernel_estimate
__all__ = ['gaussian_kde']
def _split_points_into_batches(points: NumericArray,
                               number_of_points_per_batch: int) \
        -> tp.List[tp.List[NumericArray]]:
    """Cut the (d, n) ``points`` array into column-wise batches.

    Each batch is wrapped in a single-element list so it can serve directly
    as a positional-argument list for the batch processors.
    """
    total_points = points.shape[1]
    # NumPy slicing clamps ``start + batch`` past the end automatically.
    return [
        [points[:, start:start + number_of_points_per_batch]]
        for start in range(0, total_points, number_of_points_per_batch)
    ]
def _check_array_at_right_location_and_convert(array,
                                               gpu: bool,
                                               dtype: np.generic = np.float32):
    """Move ``array`` to the requested device and cast it to ``dtype``.

    NumPy arrays are moved to the GPU when ``gpu`` is True; Cocos arrays are
    brought back to the host when it is False.
    """
    if gpu and isinstance(array, np.ndarray):
        array = cn.array(array)
    elif not gpu and isinstance(array, cn.ndarray):
        array = np.array(array)

    if array.dtype != dtype:
        array = array.astype(dtype)

    return array
def ensure_consistent_numeric_arrays(arrays: tp.Iterable[tp.Optional[NumericArray]],
                                     gpu: bool,
                                     dtype: np.generic = np.float32):
    """Normalize device and dtype of every array; ``None`` entries pass through."""
    normalized = []
    for array in arrays:
        if array is None:
            normalized.append(None)
        else:
            normalized.append(
                _check_array_at_right_location_and_convert(array=array,
                                                           gpu=gpu,
                                                           dtype=dtype))
    return tuple(normalized)
def _verify_and_get_shape_of_datapoints_datavalues_and_evaluation_points(points: NumericArray,
                                                                         values: NumericArray,
                                                                         xi: NumericArray) \
        -> tp.Tuple[int, int, int]:
    """Check shape compatibility of the KDE inputs and return ``(n, m, d)``.

    n: number of data points, m: number of evaluation points, d: dimension.
    Raises ValueError when values are multivariate (p != 1) or when ``xi``'s
    trailing dimension does not match ``points``.
    """
    n = points.shape[0]
    d = points.shape[1] if points.ndim > 1 else 1
    m = xi.shape[0]
    p = values.shape[1] if values.ndim > 1 else 1

    if p != 1:
        raise ValueError('p != 1 is not supported')

    if xi.shape[1] != d:
        raise ValueError(f"points and xi must have same trailing dim but the shape of xi is {xi.shape}")

    return n, m, d
def gaussian_kernel_estimate_vectorized_whitened(whitening: NumericArray,
                                                 whitened_points: NumericArray,
                                                 values: NumericArray,
                                                 xi: NumericArray,
                                                 norm: float,
                                                 dtype: np.generic,
                                                 gpu: bool) -> NumericArray:
    """
    Evaluate a Gaussian kernel density estimate from pre-whitened data points.

    Parameters
    ----------
    whitening : (d, d) whitening transform; applied here to ``xi`` only —
        ``whitened_points`` must already have been multiplied by it upstream
        (see ``gaussian_kernel_estimate_vectorized``).
    whitened_points : (n, d) data points, already whitened.
    values : (n, p) values attached to the data points (p == 1 enforced by
        the shape check below).
    xi : (m, d) evaluation points in the original (un-whitened) coordinates.
    norm : scalar normalization constant multiplied into the kernel.
    dtype : dtype used for the computation.
    gpu : if True, the computation runs via the cocos numerics package.

    Returns
    -------
    Kernel estimate evaluated at the m points of ``xi``.
    """
    n, m, d = \
        _verify_and_get_shape_of_datapoints_datavalues_and_evaluation_points(points=whitened_points,
                                                                             values=values,
                                                                             xi=xi)
    # Move all arrays to the requested device (host or GPU).
    whitened_points, values, xi, whitening = \
        ensure_consistent_numeric_arrays((whitened_points, values, xi, whitening), gpu)

    # num_pack is numpy on the host, cocos.numerics on the GPU.
    num_pack = select_num_pack(gpu)
    whitened_points = whitened_points.astype(dtype, copy=False)
    # Whiten the evaluation points so distances become Euclidean.
    whitened_xi = num_pack.dot(xi, whitening).astype(dtype, copy=False)
    values = values.astype(dtype, copy=False)

    # Create the result array and evaluate the weighted sum
    # Broadcast (n, 1, d) against (1, m, d) to get all pairwise residuals.
    whitened_points = whitened_points.reshape((n, 1, d))
    whitened_xi = whitened_xi.reshape((1, m, d))

    residual = whitened_points - whitened_xi
    arg = residual * residual
    del residual  # free the (n, m, d) intermediate as early as possible
    if d > 1:
        assert arg.shape == (n, m, d)
        arg = num_pack.sum(arg, axis=2)
    else:
        arg = arg.reshape((n, m))
    if not gpu:
        assert arg.shape == (n, m)
    arg = num_pack.exp(- 0.5 * arg) * norm
    if not gpu:
        assert arg.shape == (n, m)

    # estimate = num_pack.dot(arg.T, values)
    # Weighted sum over the n data points; relies on (n, 1) * (n, m) broadcast.
    estimate = (values * arg).sum(axis=0)

    if estimate.ndim > 1:
        estimate = estimate.squeeze()

    if gpu:
        # Make sure the asynchronous GPU computation has finished.
        cd.sync()

    return estimate
def gaussian_kernel_estimate_vectorized(points: NumericArray,
                                        values: NumericArray,
                                        xi: NumericArray,
                                        precision: NumericArray,
                                        dtype: np.generic,
                                        gpu: bool = False) \
        -> NumericArray:
    """
    def gaussian_kernel_estimate(points, real[:, :] values, xi, precision)
    Evaluate a multivariate Gaussian kernel estimate.
    Parameters
    ----------
    points : array_like with shape (n, d)
        Data points to estimate from in d dimenions.
    values : real[:, :] with shape (n, p)
        Multivariate values associated with the data points.
    xi : array_like with shape (m, d)
        Coordinates to evaluate the estimate at in d dimensions.
    precision : array_like with shape (d, d)
        Precision matrix for the Gaussian kernel.
    dtype : the result dtype
    gpu : whether to compute the gaussian kernel estimate on the gpu
    Returns
    -------
    estimate : double[:, :] with shape (m, p)
        Multivariate Gaussian kernel estimate evaluated at the input coordinates.
    """
    # FIX: removed four debug print() calls that reported the argument types
    # on every invocation, and deleted large blocks of dead commented-out
    # code duplicated in gaussian_kernel_estimate_vectorized_whitened.
    num_pack = select_num_pack(gpu)

    n, m, d = \
        _verify_and_get_shape_of_datapoints_datavalues_and_evaluation_points(points=points,
                                                                             values=values,
                                                                             xi=xi)

    # Move all arrays to the requested device (host or GPU).
    points, values, xi, precision = \
        ensure_consistent_numeric_arrays((points, values, xi, precision), gpu)

    # Rescale the data: whiten the data points with the Cholesky factor of
    # the precision matrix; xi is whitened inside the helper below.
    whitening = num_pack.linalg.cholesky(precision).astype(dtype, copy=False)
    points = num_pack.dot(points, whitening).astype(dtype, copy=False)
    values = values.astype(dtype, copy=False)

    # Evaluate the normalisation of the whitened Gaussian kernel.
    norm = (2 * np.pi) ** (- d / 2) * num_pack.prod(num_pack.diag(whitening))

    return gaussian_kernel_estimate_vectorized_whitened(whitening=whitening,
                                                        whitened_points=points,
                                                        xi=xi,
                                                        values=values,
                                                        norm=norm,
                                                        dtype=dtype,
                                                        gpu=gpu)
def gaussian_kernel_estimate(points, values, xi, precision, dtype):
    """
    def gaussian_kernel_estimate(points, real[:, :] values, xi, precision)
    Evaluate a multivariate Gaussian kernel estimate.
    Parameters
    ----------
    points : array_like with shape (n, d)
        Data points to estimate from in d dimenions.
    values : real[:, :] with shape (n, p)
        Multivariate values associated with the data points.
    xi : array_like with shape (m, d)
        Coordinates to evaluate the estimate at in d dimensions.
    precision : array_like with shape (d, d)
        Precision matrix for the Gaussian kernel.
    Returns
    -------
    estimate : double[:, :] with shape (m, p)
        Multivariate Gaussian kernel estimate evaluated at the input coordinates.
    """
    n, d = points.shape[0], points.shape[1]
    m = xi.shape[0]
    p = values.shape[1]

    if p != 1:
        raise ValueError('p != 1 is not supported')
    if xi.shape[1] != d:
        raise ValueError("points and xi must have same trailing dim")
    if precision.shape[0] != d or precision.shape[1] != d:
        raise ValueError("precision matrix must match data dims")

    # Whiten both the data points and the evaluation points with the
    # Cholesky factor of the precision matrix.
    whitening = np.linalg.cholesky(precision).astype(dtype, copy=False)
    white_points = np.dot(points, whitening).astype(dtype, copy=False)
    white_xi = np.dot(xi, whitening).astype(dtype, copy=False)
    cast_values = values.astype(dtype, copy=False)

    # Normalisation constant of the whitened Gaussian kernel.
    norm = (2 * np.pi) ** (- d / 2)
    for k in range(d):
        norm *= whitening[k, k]

    # Accumulate the weighted kernel contributions point by point
    # (pure-Python reference implementation of the vectorized variants).
    estimate = np.zeros((m, p), dtype)
    for i in range(n):
        for j in range(m):
            squared_distance = 0
            for k in range(d):
                diff = white_points[i, k] - white_xi[j, k]
                squared_distance += diff * diff
            weight = np.exp(-squared_distance / 2) * norm
            for k in range(p):
                estimate[j, k] += cast_values[i, k] * weight

    return np.asarray(estimate)
@dataclass(frozen=True)
class GaussianKDEInformation:
    """Immutable summary of a KDE dataset, passed to bandwidth selectors."""
    points: np.ndarray  # (d, n) shaped array of datapoints
    weights: np.ndarray  # (d, n) shaped array of weights, optional
    dimension: int  # data dimension
    n: int  # number of data points
    neff: float  # effective sample size
# Signature of a bandwidth (covariance factor) selector function.
CovarianceFactorFunctionType = tp.Callable[[GaussianKDEInformation], float]

# String identifiers of the built-in bandwidth selection rules.
SCOTTS_FACTOR_STRING = 'scotts'
SILVERMAN_FACTOR_STRING = 'silverman'
def compute_scotts_factor(kde_info: GaussianKDEInformation) -> float:
return power(kde_info.neff, -1.0 / (kde_info.dimension + 4))
def compute_silverman_factor(kde_info: GaussianKDEInformation) -> float:
d = kde_info.dimension
neff = kde_info.neff
return power(neff * (d + 2.0) / 4.0, -1.0 / (d + 4))
# class CovarianceFactor(ABC):
# @abstractmethod
# def compute_covariance_factor(self, kde_info: GaussianKDEInformation) -> float:
# pass
#
#
# class ScottsFactor(CovarianceFactor):
# def compute_covariance_factor(self, kde_info: GaussianKDEInformation) -> float:
# return power(kde_info.neff, -1.0 / (kde_info.dimension + 4))
#
#
# class SilvermanFactor(CovarianceFactor):
# def compute_covariance_factor(self, kde_info: GaussianKDEInformation) -> float:
# d = kde_info.dimension
# neff = kde_info.neff
# return power(neff * (d + 2.0) / 4.0, -1.0 / (d + 4))
#
#
# class LambdaCovarianceFactor(CovarianceFactor):
# def __init__(self, covariance_factor_fun: tp.Callable[[GaussianKDEInformation], float]):
# self._covariance_factor_fun = covariance_factor_fun
#
# def compute_covariance_factor(self, kde_info: GaussianKDEInformation) -> float:
# return self._covariance_factor_fun(kde_info)
class gaussian_kde:
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
        The method used to calculate the estimator bandwidth. This can be
        'scotts', 'silverman', a scalar constant or a callable. If a scalar,
        this will be used directly as `kde.factor`. If a callable, it should
        take a `GaussianKDEInformation` instance as only parameter and return
        a scalar. If None (default), 'scotts' is used. See Notes for more details.
weights : array_like, optional
weights of datapoints. This must be the same shape as dataset.
If None (default), the samples are assumed to be equally weighted
gpu: whether to evaluate the kernel density estimate on the gpu
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
neff : int
Effective number of datapoints.
.. versionadded:: 1.2.0
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
In the case of unequally weighted points, `scotts_factor` becomes::
neff**(-1./(d+4)),
with ``neff`` the effective number of datapoints.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
or in the case of unequally weighted points::
(neff * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
With a set of weighted samples, the effective number of datapoints ``neff``
is defined by::
neff = sum(weights)^2 / sum(weights^2)
as detailed in [5]_.
References
----------
.. [1] <NAME>, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] <NAME>, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] <NAME>, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] <NAME> and <NAME>, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
.. [5] <NAME>., 1969, Journal of the Royal Statistical Society.
Series A (General), 132, 272
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
    def __init__(self,
                 dataset: NumericArray,
                 bw_method: tp.Optional[tp.Union[CovarianceFactorFunctionType,
                                                 str,
                                                 tp.Callable,
                                                 numbers.Number]] = None,
                 weights: tp.Optional[NumericArray] = None,
                 gpu: bool = False):
        """
        Initialize the KDE from datapoints and select a bandwidth rule.

        Args:
            dataset: datapoints, shape (d, n); 1-D input is promoted to 2-D
            bw_method: 'scotts', 'silverman', a scalar, a callable taking a
                GaussianKDEInformation, or None (defaults to Scott's rule)
            weights: optional per-datapoint weights of length n; normalized
                to sum to one
            gpu: if True, store data as float32 and evaluate on the GPU
        """
        self._num_pack = select_num_pack(gpu)
        self._gpu = gpu
        self.dataset = atleast_2d(asarray(dataset))
        if not self.dataset.size > 1:
            raise ValueError("`dataset` input should have multiple elements.")
        self.d, self.n = self.dataset.shape
        if weights is not None:
            weights = atleast_1d(weights).astype(float)
            # NOTE(review): normalization happens before the ndim/length checks
            # below, so an invalid weights array is divided through before
            # being rejected (harmless, but validation-first would be cleaner).
            weights /= np.sum(weights)
            if weights.ndim != 1:
                raise ValueError("`weights` input should be one-dimensional.")
            if len(weights) != self.n:
                raise ValueError("`weights` input should be of length n")
            # NOTE(review): `_neff` appears unused -- the `neff` cached
            # property recomputes the effective sample size from the stored
            # weights; confirm no external user before removing.
            self._neff = 1.0/np.sum(weights**2)
        else:
            # Unweighted samples get uniform weights 1/n.
            weights = ones(self.n) / self.n
        if gpu:
            # The GPU path works in single precision.
            dtype = np.float32
            weights = weights.astype(dtype)
            self.dataset = self.dataset.astype(dtype)
        self._weights = weights
        self._covariance_factor = \
            self._get_covariance_factor_function_from_bandwidth_type(bw_method)
        self._compute_covariance()
def _check_and_adjust_dimensions_of_points(self, points: np.ndarray) \
-> np.ndarray:
points = atleast_2d(asarray(points))
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
raise ValueError(f"points have dimension {d}, "
f"dataset has dimension {self.d}")
return points
def evaluate(self, points):
"""
Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = self._check_and_adjust_dimensions_of_points(points)
output_dtype = np.common_type(self.covariance, points)
if True:
# result = gaussian_kernel_estimate_vectorized(points=self.dataset.T,
# values=self.weights[:, None],
# xi=points.T,
# precision=self.inv_cov,
# dtype=output_dtype,
# gpu=self._gpu)
result = gaussian_kernel_estimate_vectorized_whitened(
whitening=self.whitening,
whitened_points=self.whitened_points,
values=self.weights[:, None],
xi=points.T,
norm=self.normalization_constant,
dtype=output_dtype,
gpu=self._gpu)
return result
else:
result = gaussian_kernel_estimate(points=self.dataset.T,
values=self.weights[:, None],
xi=points.T,
precision=self.inv_cov,
dtype=output_dtype)
return result[:, 0]
__call__ = evaluate
def evaluate_in_batches(self,
points: NumericArray,
maximum_number_of_elements_per_batch: int) \
-> np.ndarray:
"""
Evaluates a Gaussian KDE in batches and stores the results in main memory.
Args:
points:
numeric array with shape (d, m) containing the points at which to evaluate the kernel
density estimate
maximum_number_of_elements_per_batch:
maximum number of data points times evaluation points to process in a single batch
Returns:
a m-dimensional NumPy array of kernel density estimates
"""
points_per_batch = math.floor(maximum_number_of_elements_per_batch / (self.n * self.d))
args_list = _split_points_into_batches(points, points_per_batch)
result = \
map_combine_single_device(f=self.evaluate,
combination=lambda x: np.hstack(x),
args_list=args_list)
return result
    def evaluate_in_batches_on_multiple_devices(self,
                                                points: NumericArray,
                                                maximum_number_of_elements_per_batch: int,
                                                compute_device_pool: ComputeDevicePool) \
            -> np.ndarray:
        """
        Evaluates a Gaussian KDE in batches on multiple gpus and stores the results in main memory.

        Args:
            points:
                numeric array with shape (d, m) containing the points at which to evaluate the kernel
                density estimate
            maximum_number_of_elements_per_batch:
                maximum number of data points times evaluation points to process in a single batch

        Returns:
            a m-dimensional NumPy array of kernel density estimates
        """
        # Multi-device dispatch manages its own device placement; a KDE that is
        # already pinned to one GPU cannot be fanned out.
        if self.gpu:
            raise ValueError('Multi GPU evaluation requires gaussian_kde.gpu = False.')
        points = self._check_and_adjust_dimensions_of_points(points)
        number_of_points = points.shape[1]
        # One outer slice of the evaluation points per device.
        args_list = []
        for begin_index, end_index in generate_slices_with_number_of_batches(number_of_points,
                                                                              compute_device_pool.number_of_devices):
            args_list.append([points[:, begin_index:end_index]])
        # Same keyword arguments replicated per device. NOTE: list-repetition
        # shares one dict object across entries; safe here because it is only
        # read by the workers.
        kwargs_list = compute_device_pool.number_of_devices * \
            [
                {'maximum_number_of_elements_per_batch': maximum_number_of_elements_per_batch,
                 'n': self.n,
                 'd': self.d}
            ]

        # Worker run on each device: splits its slice into memory-bounded
        # batches and evaluates each one. Closes over `self` for the cached
        # whitening data.
        def f(points_internal,
              maximum_number_of_elements_per_batch: int,
              n: int,
              d: int):
            points_per_batch = math.floor(maximum_number_of_elements_per_batch / (n * d))
            args_list_internal = _split_points_into_batches(points_internal, points_per_batch)

            # Per-batch kernel evaluation; gpu=True so each worker uses its
            # assigned device, in single precision.
            def f_internal(points_internal_internal):
                return gaussian_kernel_estimate_vectorized_whitened(
                    whitening=self.whitening,
                    whitened_points=self.whitened_points,
                    values=self.weights[:, None],
                    xi=points_internal_internal.T,
                    norm=self.normalization_constant,
                    dtype=np.float32,
                    gpu=True)

            result = \
                map_combine_single_device(f=f_internal,
                                          combination=lambda x: np.hstack(x),
                                          args_list=args_list_internal)
            return result

        # Concatenate the per-device slices back into one (m,) result.
        result = \
            compute_device_pool.map_combine(f=f,
                                            combination=lambda x: np.hstack(x),
                                            args_list=args_list,
                                            kwargs_list=kwargs_list)
        return result
# def evaluate_in_batches_on_multiple_gpus(self,
# points: NumericArray,
# maximum_number_of_elements_per_batch: int,
# gpu_pool: ComputeDevicePool) \
# -> np.ndarray:
# """
# Evaluates a Gaussian KDE in batches on multiple gpus and stores the results in main memory.
#
# Args:
# points:
# numeric array with shape (d, m) containing the points at which to evaluate the kernel
# density estimate
# maximum_number_of_elements_per_batch:
# maximum number of data points times evaluation points to process in a single batch
#
# Returns:
# a m-dimensional NumPy array of kernel density estimates
# """
# if self.gpu:
# raise ValueError('Multi GPU evaluation requires gaussian_kde.gpu = False.')
#
# points = self._check_and_adjust_dimensions_of_points(points)
#
# # number_of_points = points.shape[1]
# points_per_batch = math.floor(maximum_number_of_elements_per_batch / (self.n * self.d))
#
# args_list = _split_points_into_batches(points, points_per_batch)
#
# def f(x):
# result = gaussian_kernel_estimate_vectorized_whitened(
# whitening=self.whitening,
# whitened_points=self.whitened_points,
# values=self.weights[:, None],
# xi=x.T,
# norm=self.normalization_constant,
# dtype=np.float32,
# gpu=True)
#
# return result
#
# result = \
# gpu_pool.map_combine(f=f,
# combination=lambda x: np.hstack(x),
# args_list=args_list)
#
# return result
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
mean : aray_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
# This will raise LinAlgError if the new cov matrix is not s.p.d
# cho_factor returns (ndarray, bool) where bool is a flag for whether
# or not ndarray is upper or lower triangular
sum_cov_chol = linalg.cho_factor(sum_cov)
diff = self.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies)*self.weights, axis=0) / norm_const
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.sum(self.weights*(
special.ndtr(normalized_high) -
special.ndtr(normalized_low)))
return value
# def integrate_box(self, low_bounds, high_bounds, maxpts=None):
# """Computes the integral of a pdf over a rectangular interval.
#
# Parameters
# ----------
# low_bounds : array_like
# A 1-D array containing the lower bounds of integration.
# high_bounds : array_like
# A 1-D array containing the upper bounds of integration.
# maxpts : int, optional
# The maximum number of points to use for integration.
#
# Returns
# -------
# value : scalar
# The result of the integral.
#
# """
# if maxpts is not None:
# extra_kwds = {'maxpts': maxpts}
# else:
# extra_kwds = {}
#
# value, inform = mvn.mvnun_weighted(low_bounds, high_bounds,
# self.dataset, self.weights,
# self.covariance, **extra_kwds)
# if inform:
# msg = ('An integral in mvn.mvnun requires more points than %s' %
# (self.d * 1000))
# warnings.warn(msg)
#
# return value
    def integrate_kde(self, other):
        """
        Computes the integral of the product of this kernel density estimate
        with another.

        Parameters
        ----------
        other : gaussian_kde instance
            The other kde.

        Returns
        -------
        value : scalar
            The result of the integral.

        Raises
        ------
        ValueError
            If the KDEs have different dimensionality.
        """
        if other.d != self.d:
            raise ValueError("KDEs are not the same dimensionality")
        # we want to iterate over the smallest number of points
        if other.n < self.n:
            small = other
            large = self
        else:
            small = self
            large = other
        # The product of two Gaussian kernels integrates to a Gaussian with
        # the summed covariance; factor it once and reuse for every center.
        sum_cov = small.covariance + large.covariance
        sum_cov_chol = linalg.cho_factor(sum_cov)
        result = 0.0
        for i in range(small.n):
            mean = small.dataset[:, i, newaxis]
            diff = large.dataset - mean
            # cho_solve applies sum_cov^{-1} to each column of diff.
            tdiff = linalg.cho_solve(sum_cov_chol, diff)
            # `sum` is numpy's sum (axis keyword), imported at module level.
            energies = sum(diff * tdiff, axis=0) / 2.0
            result += sum(exp(-energies)*large.weights, axis=0)*small.weights[i]
        # Diagonal product of the Cholesky factor equals sqrt(det(sum_cov)).
        sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
        norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
        result /= norm_const
        return result
def resample(self, size=None, seed=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the effective number of samples in the underlying
dataset.
seed : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional
This parameter defines the object to use for drawing random
variates.
If `seed` is `None` the `~np.random.RandomState` singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with seed.
If `seed` is already a ``RandomState`` or ``Generator`` instance,
then that object is used.
Default is None.
Specify `seed` for reproducible drawing of random variates.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = int(self.neff)
random_state = check_random_state(seed)
norm = transpose(random_state.multivariate_normal(
zeros((self.d,), float), self.covariance, size=size
))
indices = random_state.choice(self.n, size=size, p=self.weights)
means = self.dataset[:, indices]
return means + norm
@staticmethod
def _get_covariance_factor_function_from_bandwidth_type(
bw_method: tp.Optional[tp.Union[CovarianceFactorFunctionType,
str,
tp.Callable,
numbers.Number]] = None) \
-> CovarianceFactorFunctionType:
"""
Infers the bandwidth selection method from
Args:
bw_method: either 'scotts' or 'silverman' or a scalar or a function returning a float
Returns:
covariance factor function
"""
if bw_method is None:
return compute_scotts_factor
elif isinstance(bw_method, str):
if bw_method == SCOTTS_FACTOR_STRING:
return compute_scotts_factor
elif bw_method == SILVERMAN_FACTOR_STRING:
return compute_silverman_factor
else:
raise ValueError(f'bw_method={bw_method} is not supported')
elif callable(bw_method):
return bw_method
elif np.isscalar(bw_method):
return lambda kde_info: bw_method
else:
raise ValueError(f'bw_method {bw_method} is not supported')
    def _compute_covariance(self):
        """
        Computes the covariance matrix for each Gaussian kernel using
        covariance_factor().
        """
        kde_info = GaussianKDEInformation(dimension=self.d,
                                          n=self.n,
                                          neff=self.neff,
                                          points=self.dataset,
                                          weights=self.weights)
        self.factor = self._covariance_factor(kde_info)
        # Cache covariance and inverse covariance of the data so that
        # recomputing with a new factor reuses the expensive part.
        if not hasattr(self, '_data_inv_cov'):
            self._data_covariance = \
                atleast_2d(cov(self.dataset,
                               rowvar=True,
                               bias=False,
                               aweights=self.weights))
            self._data_inv_cov = linalg.inv(self._data_covariance)
        # Kernel covariance is the (weighted) data covariance scaled by the
        # squared bandwidth factor.
        self.covariance = self._data_covariance * self.factor**2
        self.inv_cov = self._data_inv_cov / self.factor**2
        # sqrt(det(2*pi*Sigma)); used by logpdf to normalize kernel weights.
        self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))
    def pdf(self, x: np.ndarray) -> NumericArray:
        """
        Evaluate the estimated pdf on a provided set of points.

        Notes
        -----
        This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
        docstring for more details.
        """
        return self.evaluate(x)
    def logpdf(self, x: np.ndarray) -> np.ndarray:
        """
        Evaluate the log of the estimated pdf on a provided set of points.

        Computes log-density directly via logsumexp over per-kernel energies
        (numerically safer than log(evaluate(x))). Raises ValueError when the
        point dimensionality does not match the KDE's.
        """
        points = atleast_2d(x)
        d, m = points.shape
        if d != self.d:
            if d == 1 and m == self.d:
                # points was passed in as a row vector
                points = reshape(points, (self.d, 1))
                m = 1
            else:
                msg = "points have dimension %s, dataset has dimension %s" % (d,
                                                                              self.d)
                raise ValueError(msg)
        # Loop over whichever of (data, points) is smaller, allocating the
        # energy buffer accordingly.
        if m >= self.n:
            # there are more points than data, so loop over data
            energy = zeros((self.n, m), dtype=float)
            for i in range(self.n):
                diff = self.dataset[:, i, newaxis] - points
                # Quadratic form diff^T inv_cov diff / 2, per point.
                tdiff = dot(self.inv_cov, diff)
                energy[i] = sum(diff*tdiff, axis=0) / 2.0
            # Weights divided by _norm_factor fold the Gaussian normalization
            # into the logsumexp coefficients.
            result = logsumexp(-energy.T,
                               b=self.weights / self._norm_factor, axis=1)
        else:
            # loop over points
            result = zeros((m,), dtype=float)
            for i in range(m):
                diff = self.dataset - points[:, i, newaxis]
                tdiff = dot(self.inv_cov, diff)
                energy = sum(diff * tdiff, axis=0) / 2.0
                result[i] = logsumexp(-energy, b=self.weights /
                                      self._norm_factor)
        return result
    @property
    def gpu(self) -> bool:
        # True when evaluation runs on the GPU (data stored as float32).
        return self._gpu

    @property
    def weights(self) -> np.ndarray:
        # Normalized sample weights, shape (n,), summing to one.
        return self._weights

    @cached_property
    def neff(self) -> float:
        # Effective sample size 1 / sum(w_i^2); equals n for uniform weights.
        return 1.0/np.sum(self.weights*self.weights)
    @cached_property
    def whitening(self) -> NumericArray:
        # Cholesky factor L of the precision matrix (inv_cov = L @ L^T).
        # Multiplying points by L maps Mahalanobis distance under inv_cov to
        # plain Euclidean distance in the whitened space; cached so repeated
        # evaluations skip the factorization.
        gpu = self.gpu
        num_pack = select_num_pack(gpu)
        precision = \
            ensure_consistent_numeric_arrays((self.inv_cov, ), gpu)[0]
        return num_pack.linalg.cholesky(precision)
    @cached_property
    def whitened_points(self) -> NumericArray:
        # Dataset projected into the whitened space, shape (n, d): rows are
        # datapoints multiplied by the precision's Cholesky factor. Cached on
        # the evaluation device (CPU or GPU) for reuse across evaluate calls.
        gpu = self.gpu
        num_pack = select_num_pack(gpu)
        points = \
            ensure_consistent_numeric_arrays((self.dataset.T, ), gpu)[0]
        return num_pack.dot(points, self.whitening)
    @cached_property
    def normalization_constant(self) -> float:
        # Gaussian kernel normalization: (2*pi)^(-d/2) * prod(diag(L)), where
        # L is the precision's Cholesky factor; prod(diag(L)) equals
        # sqrt(det(inv_cov)) = 1/sqrt(det(covariance)).
        gpu = self.gpu
        num_pack = select_num_pack(gpu)
        return (2 * np.pi) ** (- self.d / 2) * num_pack.prod(num_pack.diag(self.whitening))
# def evaluate_gaussian_kde_in_batches(kde: gaussian_kde,
# points: NumericArray,
# maximum_number_of_elements_per_batch: int) \
# -> np.ndarray:
# """
# Evaluates a Gaussian KDE in batches and stores the results in main memory.
#
# Args:
# kde: a gaussian_kde object
# points:
# numeric array with shape (d, m) containing the points at which to evaluate the kernel
# density estimate
# maximum_number_of_elements_per_batch:
# maximum number of data points times evaluation points to process in a single batch
#
# Returns:
# a m-dimensional NumPy array of kernel density estimates
# """
# number_of_points = points.shape[1]
# points_per_batch = math.floor(maximum_number_of_elements_per_batch / (kde.n * kde.d))
#
# n_begin = 0
#
# output_array = np.zeros((number_of_points, ), dtype=points.dtype)
#
# while n_begin < number_of_points:
# n_end = min(n_begin + points_per_batch, number_of_points)
# output_array[n_begin:n_end] = np.array(kde.evaluate(points[:, n_begin:n_end]))
# n_begin = n_end
#
# return output_array
def evaluate_gaussian_kde_in_batches(kde: gaussian_kde,
                                     points: NumericArray,
                                     maximum_number_of_elements_per_batch: int) \
        -> np.ndarray:
    """
    Evaluates a Gaussian KDE in batches and stores the results in main memory.

    Args:
        kde: a gaussian_kde object
        points:
            numeric array with shape (d, m) containing the points at which to
            evaluate the kernel density estimate
        maximum_number_of_elements_per_batch:
            maximum number of data points times evaluation points to process
            in a single batch

    Returns:
        a m-dimensional NumPy array of kernel density estimates
    """
    # Clamp to at least one point per batch: a budget below n * d elements
    # would otherwise floor to a batch size of 0 and batching could never
    # progress.
    points_per_batch = max(
        1, math.floor(maximum_number_of_elements_per_batch / (kde.n * kde.d)))
    args_list = _split_points_into_batches(points, points_per_batch)
    # np.hstack serves directly as the combiner; no lambda wrapper needed.
    return map_combine_single_device(f=kde.evaluate,
                                     combination=np.hstack,
                                     args_list=args_list)
| [
"numpy.sqrt",
"math.floor",
"numpy.hstack",
"dataclasses.dataclass",
"cocos.numerics.numerical_package_selector.select_num_pack",
"numpy.array",
"numpy.cov",
"scipy.special.logsumexp",
"numpy.atleast_2d",
"scipy.linalg.cho_solve",
"numpy.reshape",
"numpy.isscalar",
"scipy.linalg.cho_factor",... | [((13173, 13195), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (13182, 13195), False, 'from dataclasses import dataclass\n'), ((6335, 6355), 'cocos.numerics.numerical_package_selector.select_num_pack', 'select_num_pack', (['gpu'], {}), '(gpu)\n', (6350, 6355), False, 'from cocos.numerics.numerical_package_selector import select_num_pack\n'), ((8514, 8534), 'cocos.numerics.numerical_package_selector.select_num_pack', 'select_num_pack', (['gpu'], {}), '(gpu)\n', (8529, 8534), False, 'from cocos.numerics.numerical_package_selector import select_num_pack\n'), ((12786, 12809), 'numpy.zeros', 'np.zeros', (['(m, p)', 'dtype'], {}), '((m, p), dtype)\n', (12794, 12809), True, 'import numpy as np\n'), ((13149, 13169), 'numpy.asarray', 'np.asarray', (['estimate'], {}), '(estimate)\n', (13159, 13169), True, 'import numpy as np\n'), ((13701, 13754), 'numpy.power', 'power', (['kde_info.neff', '(-1.0 / (kde_info.dimension + 4))'], {}), '(kde_info.neff, -1.0 / (kde_info.dimension + 4))\n', (13706, 13754), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((13893, 13938), 'numpy.power', 'power', (['(neff * (d + 2.0) / 4.0)', '(-1.0 / (d + 4))'], {}), '(neff * (d + 2.0) / 4.0, -1.0 / (d + 4))\n', (13898, 13938), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((44407, 44473), 'math.floor', 'math.floor', (['(maximum_number_of_elements_per_batch / (kde.n * kde.d))'], {}), '(maximum_number_of_elements_per_batch / (kde.n * kde.d))\n', (44417, 44473), False, 'import math\n'), ((4049, 4064), 'cocos.numerics.array', 'cn.array', (['array'], {}), '(array)\n', (4057, 4064), True, 'import cocos.numerics as cn\n'), ((4132, 4147), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (4140, 4147), True, 
'import numpy as np\n'), ((7252, 7261), 'cocos.device.sync', 'cd.sync', ([], {}), '()\n', (7259, 7261), True, 'import cocos.device as cd\n'), ((20688, 20708), 'cocos.numerics.numerical_package_selector.select_num_pack', 'select_num_pack', (['gpu'], {}), '(gpu)\n', (20703, 20708), False, 'from cocos.numerics.numerical_package_selector import select_num_pack\n'), ((22968, 23007), 'numpy.common_type', 'np.common_type', (['self.covariance', 'points'], {}), '(self.covariance, points)\n', (22982, 23007), True, 'import numpy as np\n'), ((25079, 25147), 'math.floor', 'math.floor', (['(maximum_number_of_elements_per_batch / (self.n * self.d))'], {}), '(maximum_number_of_elements_per_batch / (self.n * self.d))\n', (25089, 25147), False, 'import math\n'), ((26600, 26699), 'cocos.multi_processing.utilities.generate_slices_with_number_of_batches', 'generate_slices_with_number_of_batches', (['number_of_points', 'compute_device_pool.number_of_devices'], {}), '(number_of_points,\n compute_device_pool.number_of_devices)\n', (26638, 26699), False, 'from cocos.multi_processing.utilities import generate_slices_with_number_of_batches\n'), ((31323, 31338), 'numpy.atleast_2d', 'atleast_2d', (['cov'], {}), '(cov)\n', (31333, 31338), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((31909, 31935), 'scipy.linalg.cho_factor', 'linalg.cho_factor', (['sum_cov'], {}), '(sum_cov)\n', (31926, 31935), False, 'from scipy import linalg, special\n'), ((31988, 32024), 'scipy.linalg.cho_solve', 'linalg.cho_solve', (['sum_cov_chol', 'diff'], {}), '(sum_cov_chol, diff)\n', (32004, 32024), False, 'from scipy import linalg, special\n'), ((32965, 33000), 'numpy.ravel', 'ravel', (['((low - self.dataset) / stdev)'], {}), '((low - self.dataset) / stdev)\n', (32970, 33000), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, 
transpose, ones, cov\n'), ((33027, 33063), 'numpy.ravel', 'ravel', (['((high - self.dataset) / stdev)'], {}), '((high - self.dataset) / stdev)\n', (33032, 33063), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((35268, 35294), 'scipy.linalg.cho_factor', 'linalg.cho_factor', (['sum_cov'], {}), '(sum_cov)\n', (35285, 35294), False, 'from scipy import linalg, special\n'), ((36957, 36981), 'scipy._lib._util.check_random_state', 'check_random_state', (['seed'], {}), '(seed)\n', (36975, 36981), False, 'from scipy._lib._util import check_random_state\n'), ((40100, 40113), 'numpy.atleast_2d', 'atleast_2d', (['x'], {}), '(x)\n', (40110, 40113), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((41752, 41772), 'cocos.numerics.numerical_package_selector.select_num_pack', 'select_num_pack', (['gpu'], {}), '(gpu)\n', (41767, 41772), False, 'from cocos.numerics.numerical_package_selector import select_num_pack\n'), ((42029, 42049), 'cocos.numerics.numerical_package_selector.select_num_pack', 'select_num_pack', (['gpu'], {}), '(gpu)\n', (42044, 42049), False, 'from cocos.numerics.numerical_package_selector import select_num_pack\n'), ((42306, 42326), 'cocos.numerics.numerical_package_selector.select_num_pack', 'select_num_pack', (['gpu'], {}), '(gpu)\n', (42321, 42326), False, 'from cocos.numerics.numerical_package_selector import select_num_pack\n'), ((12358, 12387), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['precision'], {}), '(precision)\n', (12376, 12387), True, 'import numpy as np\n'), ((12428, 12453), 'numpy.dot', 'np.dot', (['points', 'whitening'], {}), '(points, whitening)\n', (12434, 12453), True, 'import numpy as np\n'), ((12490, 12511), 'numpy.dot', 'np.dot', (['xi', 'whitening'], {}), '(xi, whitening)\n', (12496, 12511), True, 'import numpy as 
np\n'), ((20767, 20783), 'numpy.asarray', 'asarray', (['dataset'], {}), '(dataset)\n', (20774, 20783), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((21059, 21074), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (21065, 21074), True, 'import numpy as np\n'), ((21869, 21884), 'numpy.asarray', 'asarray', (['points'], {}), '(points)\n', (21876, 21884), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((27466, 27524), 'math.floor', 'math.floor', (['(maximum_number_of_elements_per_batch / (n * d))'], {}), '(maximum_number_of_elements_per_batch / (n * d))\n', (27476, 27524), False, 'import math\n'), ((31294, 31307), 'numpy.squeeze', 'squeeze', (['mean'], {}), '(mean)\n', (31301, 31307), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((32053, 32081), 'numpy.diagonal', 'np.diagonal', (['sum_cov_chol[0]'], {}), '(sum_cov_chol[0])\n', (32064, 32081), True, 'import numpy as np\n'), ((32104, 32141), 'numpy.power', 'power', (['(2 * pi)', '(sum_cov.shape[0] / 2.0)'], {}), '(2 * pi, sum_cov.shape[0] / 2.0)\n', (32109, 32141), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((32173, 32198), 'numpy.sum', 'sum', (['(diff * tdiff)'], {'axis': '(0)'}), '(diff * tdiff, axis=0)\n', (32176, 32198), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((35457, 35493), 'scipy.linalg.cho_solve', 'linalg.cho_solve', (['sum_cov_chol', 'diff'], {}), '(sum_cov_chol, diff)\n', (35473, 35493), False, 'from scipy import linalg, special\n'), ((35659, 
35687), 'numpy.diagonal', 'np.diagonal', (['sum_cov_chol[0]'], {}), '(sum_cov_chol[0])\n', (35670, 35687), True, 'import numpy as np\n'), ((35710, 35747), 'numpy.power', 'power', (['(2 * pi)', '(sum_cov.shape[0] / 2.0)'], {}), '(2 * pi, sum_cov.shape[0] / 2.0)\n', (35715, 35747), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((39392, 39425), 'scipy.linalg.inv', 'linalg.inv', (['self._data_covariance'], {}), '(self._data_covariance)\n', (39402, 39425), False, 'from scipy import linalg, special\n'), ((39584, 39620), 'scipy.linalg.det', 'linalg.det', (['(2 * pi * self.covariance)'], {}), '(2 * pi * self.covariance)\n', (39594, 39620), False, 'from scipy import linalg, special\n'), ((40613, 40644), 'numpy.zeros', 'zeros', (['(self.n, m)'], {'dtype': 'float'}), '((self.n, m), dtype=float)\n', (40618, 40644), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((40868, 40932), 'scipy.special.logsumexp', 'logsumexp', (['(-energy.T)'], {'b': '(self.weights / self._norm_factor)', 'axis': '(1)'}), '(-energy.T, b=self.weights / self._norm_factor, axis=1)\n', (40877, 40932), False, 'from scipy.special import logsumexp\n'), ((41030, 41054), 'numpy.zeros', 'zeros', (['(m,)'], {'dtype': 'float'}), '((m,), dtype=float)\n', (41035, 41054), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((41613, 41648), 'numpy.sum', 'np.sum', (['(self.weights * self.weights)'], {}), '(self.weights * self.weights)\n', (41619, 41648), True, 'import numpy as np\n'), ((13028, 13044), 'numpy.exp', 'np.exp', (['(-arg / 2)'], {}), '(-arg / 2)\n', (13034, 13044), True, 'import numpy as np\n'), ((21330, 21350), 'numpy.sum', 'np.sum', (['(weights ** 2)'], {}), '(weights ** 2)\n', 
(21336, 21350), True, 'import numpy as np\n'), ((21385, 21397), 'numpy.ones', 'ones', (['self.n'], {}), '(self.n)\n', (21389, 21397), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((22058, 22086), 'numpy.reshape', 'reshape', (['points', '(self.d, 1)'], {}), '(points, (self.d, 1))\n', (22065, 22086), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((32913, 32934), 'numpy.sqrt', 'sqrt', (['self.covariance'], {}), '(self.covariance)\n', (32917, 32934), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((35518, 35543), 'numpy.sum', 'sum', (['(diff * tdiff)'], {'axis': '(0)'}), '(diff * tdiff, axis=0)\n', (35521, 35543), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((37053, 37076), 'numpy.zeros', 'zeros', (['(self.d,)', 'float'], {}), '((self.d,), float)\n', (37058, 37076), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((39199, 39264), 'numpy.cov', 'cov', (['self.dataset'], {'rowvar': '(True)', 'bias': '(False)', 'aweights': 'self.weights'}), '(self.dataset, rowvar=True, bias=False, aweights=self.weights)\n', (39202, 39264), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((40286, 40314), 'numpy.reshape', 'reshape', (['points', '(self.d, 1)'], {}), '(points, (self.d, 1))\n', (40293, 40314), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, 
sum, transpose, ones, cov\n'), ((40765, 40788), 'numpy.dot', 'dot', (['self.inv_cov', 'diff'], {}), '(self.inv_cov, diff)\n', (40768, 40788), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((41170, 41193), 'numpy.dot', 'dot', (['self.inv_cov', 'diff'], {}), '(self.inv_cov, diff)\n', (41173, 41193), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((41279, 41333), 'scipy.special.logsumexp', 'logsumexp', (['(-energy)'], {'b': '(self.weights / self._norm_factor)'}), '(-energy, b=self.weights / self._norm_factor)\n', (41288, 41333), False, 'from scipy.special import logsumexp\n'), ((44666, 44678), 'numpy.hstack', 'np.hstack', (['x'], {}), '(x)\n', (44675, 44678), True, 'import numpy as np\n'), ((21002, 21021), 'numpy.atleast_1d', 'atleast_1d', (['weights'], {}), '(weights)\n', (21012, 21021), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((25357, 25369), 'numpy.hstack', 'np.hstack', (['x'], {}), '(x)\n', (25366, 25369), True, 'import numpy as np\n'), ((28491, 28503), 'numpy.hstack', 'np.hstack', (['x'], {}), '(x)\n', (28500, 28503), True, 'import numpy as np\n'), ((32226, 32240), 'numpy.exp', 'exp', (['(-energies)'], {}), '(-energies)\n', (32229, 32240), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((33127, 33156), 'scipy.special.ndtr', 'special.ndtr', (['normalized_high'], {}), '(normalized_high)\n', (33139, 33156), False, 'from scipy import linalg, special\n'), ((33183, 33211), 'scipy.special.ndtr', 'special.ndtr', (['normalized_low'], {}), '(normalized_low)\n', (33195, 33211), False, 'from scipy import linalg, special\n'), 
((38355, 38377), 'numpy.isscalar', 'np.isscalar', (['bw_method'], {}), '(bw_method)\n', (38366, 38377), True, 'import numpy as np\n'), ((40817, 40842), 'numpy.sum', 'sum', (['(diff * tdiff)'], {'axis': '(0)'}), '(diff * tdiff, axis=0)\n', (40820, 40842), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((41219, 41244), 'numpy.sum', 'sum', (['(diff * tdiff)'], {'axis': '(0)'}), '(diff * tdiff, axis=0)\n', (41222, 41244), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n'), ((28243, 28255), 'numpy.hstack', 'np.hstack', (['x'], {}), '(x)\n', (28252, 28255), True, 'import numpy as np\n'), ((35576, 35590), 'numpy.exp', 'exp', (['(-energies)'], {}), '(-energies)\n', (35579, 35590), False, 'from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, ones, cov\n')] |
import numpy as np
def schlawin(x):
    """Polynomial approximation of the complete elliptic integral of the
    second kind E(m), evaluated at the complementary parameter x = 1 - m.

    The coefficients are those of Abramowitz & Stegun eq. 17.3.36
    (|error| <= 4e-5 on 0 <= x <= 1).  The original expression produced
    NaN at x == 0 because the x*log(1/x) term evaluated 0 * inf; the true
    limit is 0 (so E(1) = 1), which is now returned exactly.

    Accepts a scalar or a numpy array; returns a value of matching shape.
    """
    # Polynomial part coefficients: 1 + a1*x + a2*x^2 + a3*x^3 + a4*x^4.
    a1 = 0.44325141463
    a2 = 0.06260601220
    a3 = 0.04757383546
    a4 = 0.01736506451
    # Logarithmic part coefficients: (b1*x + ... + b4*x^4) * ln(1/x).
    b1 = 0.24998368310
    b2 = 0.09200180037
    b3 = 0.04069697526
    b4 = 0.00526449639
    x = np.asarray(x, dtype=float)
    poly = 1 + x * (a1 + x * (a2 + x * (a3 + x * a4)))
    # Guard x == 0: lim_{x->0} x*log(1/x) = 0, but evaluating it directly
    # yields 0 * inf = NaN (plus a divide-by-zero RuntimeWarning).
    safe_x = np.where(x == 0.0, 1.0, x)
    log_part = np.where(x == 0.0, 0.0,
                        x * (b1 + x * (b2 + x * (b3 + x * b4))) * np.log(1 / safe_x))
    result = poly + log_part
    # Preserve scalar-in / scalar-out behaviour of the original.
    return result[()] if result.ndim == 0 else result
| [
"numpy.log"
] | [((302, 315), 'numpy.log', 'np.log', (['(1 / x)'], {}), '(1 / x)\n', (308, 315), True, 'import numpy as np\n')] |
import logging
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import sys
import cv2
import math
from sklearn.metrics import accuracy_score, f1_score
class ClassifyEnv(gym.Env):
  """Classification as an unsupervised OpenAI Gym RL problem.

  Wraps a feature matrix / label vector pair (e.g. scikit-learn digits,
  MNIST, or the text embeddings below) so a policy receives a batch of
  examples as the observation and emits per-class scores as the action;
  the reward is the negative mean cross-entropy of those scores.
  """
  def __init__(self, trainSet, target, batch):
    """
    trainSet -- input data, [nSamples x nInputs]
    target   -- labels, [nSamples x 1]
    batch    -- number of examples per batch (Spam: 75 / iMDb: 400)
    """
    self.t = 0        # Current batch number
    self.t_limit = 0  # Number of batches per episode (0 = single-step episode)
    self.batch = batch
    self.seed()
    self.viewer = None
    self.trainSet = trainSet
    self.target = target
    # NOTE(review): spaces are built from scalar arrays, yielding 0-d Box
    # spaces; kept as-is since downstream code may rely on that shape.
    # (Removed the unused `nInputs`/`high` locals from the original.)
    self.action_space = spaces.Box(np.array(0, dtype=np.float32),
                                   np.array(1, dtype=np.float32))
    self.observation_space = spaces.Box(np.array(0, dtype=np.float32),
                                        np.array(1, dtype=np.float32))
    self.state = None
    self.trainOrder = None
    self.currIndx = None
  def seed(self, seed=None):
    ''' Randomly select from training set'''
    self.np_random, seed = seeding.np_random(seed)
    return [seed]
  def reset(self):
    ''' Initialize State'''
    # NOTE(review): uses global np.random, not self.np_random, so seed()
    # does not control the shuffle -- confirm whether that is intended.
    self.trainOrder = np.random.permutation(len(self.target))
    self.t = 0  # timestep
    self.currIndx = self.trainOrder[self.t:self.t + self.batch]
    self.state = self.trainSet[self.currIndx, :]
    return self.state
  def step(self, action):
    '''
    Judge Classification, increment to next batch
    action - [batch x output] - softmax output

    Returns (obs, reward, done, info, accuracy); note the extra fifth
    element relative to the standard Gym API -- callers depend on it.
    '''
    y = self.target[self.currIndx]
    m = y.shape[0]
    accuracy = np.mean(np.argmax(action, axis=1) == y)
    # Cross-entropy of the softmax scores against the true labels.
    log_likelihood = -np.log(action[range(m), y])
    loss = np.sum(log_likelihood) / m
    reward = -loss
    if self.t_limit > 0:  # We are doing batches
      reward *= (1 / self.t_limit)  # average reward over the episode
      self.t += 1
      done = False
      if self.t >= self.t_limit:
        done = True
      self.currIndx = self.trainOrder[(self.t * self.batch):
                                      (self.t * self.batch + self.batch)]
      self.state = self.trainSet[self.currIndx, :]
    else:
      done = True
    obs = self.state
    return obs, reward, done, {}, accuracy
# -- Data Sets ----------------------------------------------------------- -- #
# SPAM CLASSIFICATION
def spam(embedding_style, max_features):
  '''
  Return Kaggle spam training data
  (embeddings & labels)
  '''
  import pandas as pd
  print("...Reading data...")
  frame = pd.read_csv("data/spam_classify/train.csv")
  texts = list(frame.v2)
  raw_labels = list(frame.v1)
  if embedding_style == "ascii":
    # Character-level ASCII embedding
    print("spam_ascii_" + str(max_features))
    from domain.text_vectorizers import ASCIIVectorizer
    vec = ASCIIVectorizer(max_features)
    z, labels = vec.transform(texts, raw_labels)
  elif embedding_style == "count":
    # Bag-of-words counts
    print("spam_count_" + str(max_features))
    from domain.text_vectorizers import BoWVectorizer
    vec = BoWVectorizer(max_features=max_features)
    vec.fit(texts)
    z, labels = vec.transform(texts, raw_labels)
  elif embedding_style == "lstm":
    # BiLSTM mean/max pooling (default: max)
    print("spam_lstm_" + str(max_features))
    from domain.text_vectorizers import BiLSTMVectorizer
    vec = BiLSTMVectorizer(vocab_size=max_features)
    z, labels = vec.transform(texts, raw_labels, bsize=32)
  return z, labels
# BINARY SENTIMENT ANALYSIS
def imdb(embedding_style, max_features):
  '''
  Return movie review training data
  (embeddings & labels)
  '''
  import pandas as pd
  print("...Reading data...")
  frame = pd.read_csv("data/sen_imdb/train.csv")
  texts = list(frame.text)
  raw_labels = list(frame.pos)
  if embedding_style == "ascii":
    # Character-level ASCII embedding
    print("imdb_ascii_" + str(max_features))
    from domain.text_vectorizers import ASCIIVectorizer
    vec = ASCIIVectorizer(max_features)
    z, labels = vec.transform(texts, raw_labels)
    print(z.shape)
    print(z[0])
    print(labels.shape)
  elif embedding_style == "count":
    # Bag-of-words counts
    print("imdb_count_" + str(max_features))
    from domain.text_vectorizers import BoWVectorizer
    vec = BoWVectorizer(max_features=max_features)
    vec.fit(texts)
    z, labels = vec.transform(texts, raw_labels)
  elif embedding_style == "lstm":
    # BiLSTM mean/max pooling (default: max)
    print("imdb_lstm_" + str(max_features))
    from domain.text_vectorizers import BiLSTMVectorizer
    vec = BiLSTMVectorizer(vocab_size=max_features)
    z, labels = vec.transform(texts, raw_labels, bsize=32)
  return z, labels
### Spectrogram dataset
import skimage.measure
from scipy.io import wavfile
from scipy import signal
from sklearn.model_selection import train_test_split
import glob
def create_spectrogram(file_name, window_size=20, step_size=10, eps=1e-10):
    """Creates a flattened 8x8 log-spectrogram from a WAV file.

    Parameters
    ----------
    file_name : path to the .wav file
    window_size : spectrogram window length in milliseconds
    step_size : hop length in milliseconds
    eps : floor added before the log to avoid log(0) on silent bins
    """
    sample_rate, audio = wavfile.read(file_name)
    nperseg = int(round(window_size * sample_rate / 1e3))
    noverlap = int(round(step_size * sample_rate / 1e3))
    _, _, spec = signal.spectrogram(audio, fs=sample_rate,
                                    window='hann',
                                    nperseg=nperseg,
                                    noverlap=noverlap,
                                    detrend=False)
    # Create log spectrogram
    spectrogram = np.log(spec.astype(np.float32) + eps)
    # Max pooling
    spectrogram = skimage.measure.block_reduce(spectrogram, (13, 13), np.max)
    # BUG FIX: cv2.resize's third positional argument is `dst`, not the
    # interpolation flag; pass it by keyword so bicubic resampling is used.
    spectrogram = cv2.resize(spectrogram, (8, 8), interpolation=cv2.INTER_CUBIC).flatten()
    return spectrogram
def speech_mnist(train=False):
  """Build the spoken-digit dataset from datasets/speech_mnist/<word>/*.wav.

  Each clip becomes a 64-dim flattened 8x8 log-spectrogram.  The original
  had an unreachable second `return` (the train split); that intent is now
  exposed as a backward-compatible parameter.

  Parameters
  ----------
  train : bool
      When True return the 80% training split; otherwise (default, matching
      the original behaviour) return the 20% test split.
  """
  print("Creating speech_mnist dataset")
  X = np.empty((2350 * 10, 64))
  y = np.empty(2350 * 10)
  numbers = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
  for n, number in enumerate(numbers):
    # Sort for a deterministic sample order before the seeded split.
    paths = sorted(glob.glob(f"datasets/speech_mnist/{number}/*.wav"))
    for i, path in enumerate(paths):
      X[n * 2350 + i, :] = create_spectrogram(path).flatten()
      y[n * 2350 + i] = n
  Xtr, Xte, ytr, yte = train_test_split(X, y, test_size=0.2, random_state=123)
  print("done")
  if train:
    return Xtr, ytr.astype(np.uint8)
  return Xte, yte.astype(np.uint8)
def speech_yesno(train=False):
  """Build the yes/no speech dataset from datasets/speech_yesno/<word>/*.wav.

  Each clip becomes a 64-dim flattened 8x8 log-spectrogram.  The original
  had an unreachable second `return` (the train split); that intent is now
  exposed as a backward-compatible parameter.

  Parameters
  ----------
  train : bool
      When True return the 80% training split; otherwise (default, matching
      the original behaviour) return the 20% test split.
  """
  print("Creating speech_yesno dataset")
  X = np.empty((2375 * 2, 64))
  y = np.empty(2375 * 2)
  categories = ['no', 'yes']
  for i, cat in enumerate(categories):
    # Sort for a deterministic sample order before the seeded split.
    paths = sorted(glob.glob(f"datasets/speech_yesno/{cat}/*.wav"))
    print(len(paths))
    for j, path in enumerate(paths):
      X[i * 2375 + j, :] = create_spectrogram(path).flatten()
      y[i * 2375 + j] = i
  Xtr, Xte, ytr, yte = train_test_split(X, y, test_size=0.2, random_state=123)
  print("done")
  if train:
    return Xtr, ytr.astype(np.uint8)
  return Xte, yte.astype(np.uint8)
| [
"cv2.resize",
"pandas.read_csv",
"domain.text_vectorizers.BiLSTMVectorizer",
"sklearn.model_selection.train_test_split",
"scipy.signal.spectrogram",
"numpy.argmax",
"numpy.array",
"numpy.sum",
"scipy.io.wavfile.read",
"numpy.empty",
"domain.text_vectorizers.ASCIIVectorizer",
"domain.text_vecto... | [((2937, 2980), 'pandas.read_csv', 'pd.read_csv', (['"""data/spam_classify/train.csv"""'], {}), "('data/spam_classify/train.csv')\n", (2948, 2980), True, 'import pandas as pd\n'), ((4135, 4173), 'pandas.read_csv', 'pd.read_csv', (['"""data/sen_imdb/train.csv"""'], {}), "('data/sen_imdb/train.csv')\n", (4146, 4173), True, 'import pandas as pd\n'), ((5502, 5525), 'scipy.io.wavfile.read', 'wavfile.read', (['file_name'], {}), '(file_name)\n', (5514, 5525), False, 'from scipy.io import wavfile\n'), ((5663, 5774), 'scipy.signal.spectrogram', 'signal.spectrogram', (['audio'], {'fs': 'sample_rate', 'window': '"""hann"""', 'nperseg': 'nperseg', 'noverlap': 'noverlap', 'detrend': '(False)'}), "(audio, fs=sample_rate, window='hann', nperseg=nperseg,\n noverlap=noverlap, detrend=False)\n", (5681, 5774), False, 'from scipy import signal\n'), ((6338, 6363), 'numpy.empty', 'np.empty', (['(2350 * 10, 64)'], {}), '((2350 * 10, 64))\n', (6346, 6363), True, 'import numpy as np\n'), ((6368, 6387), 'numpy.empty', 'np.empty', (['(2350 * 10)'], {}), '(2350 * 10)\n', (6376, 6387), True, 'import numpy as np\n'), ((6762, 6817), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(123)'}), '(X, y, test_size=0.2, random_state=123)\n', (6778, 6817), False, 'from sklearn.model_selection import train_test_split\n'), ((6969, 6993), 'numpy.empty', 'np.empty', (['(2375 * 2, 64)'], {}), '((2375 * 2, 64))\n', (6977, 6993), True, 'import numpy as np\n'), ((6998, 7016), 'numpy.empty', 'np.empty', (['(2375 * 2)'], {}), '(2375 * 2)\n', (7006, 7016), True, 'import numpy as np\n'), ((7348, 7403), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(123)'}), '(X, y, test_size=0.2, random_state=123)\n', (7364, 7403), False, 'from sklearn.model_selection import train_test_split\n'), ((916, 941), 'numpy.array', 'np.array', (['([1.0] * nInputs)'], {}), '([1.0] 
* nInputs)\n', (924, 941), True, 'import numpy as np\n'), ((1386, 1409), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (1403, 1409), False, 'from gym.utils import seeding\n'), ((3205, 3234), 'domain.text_vectorizers.ASCIIVectorizer', 'ASCIIVectorizer', (['max_features'], {}), '(max_features)\n', (3220, 3234), False, 'from domain.text_vectorizers import ASCIIVectorizer\n'), ((4401, 4430), 'domain.text_vectorizers.ASCIIVectorizer', 'ASCIIVectorizer', (['max_features'], {}), '(max_features)\n', (4416, 4430), False, 'from domain.text_vectorizers import ASCIIVectorizer\n'), ((6534, 6584), 'glob.glob', 'glob.glob', (['f"""datasets/speech_mnist/{number}/*.wav"""'], {}), "(f'datasets/speech_mnist/{number}/*.wav')\n", (6543, 6584), False, 'import glob\n'), ((7099, 7146), 'glob.glob', 'glob.glob', (['f"""datasets/speech_yesno/{cat}/*.wav"""'], {}), "(f'datasets/speech_yesno/{cat}/*.wav')\n", (7108, 7146), False, 'import glob\n'), ((883, 901), 'numpy.shape', 'np.shape', (['trainSet'], {}), '(trainSet)\n', (891, 901), True, 'import numpy as np\n'), ((975, 1004), 'numpy.array', 'np.array', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (983, 1004), True, 'import numpy as np\n'), ((1042, 1071), 'numpy.array', 'np.array', (['(1)'], {'dtype': 'np.float32'}), '(1, dtype=np.float32)\n', (1050, 1071), True, 'import numpy as np\n'), ((1112, 1141), 'numpy.array', 'np.array', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (1120, 1141), True, 'import numpy as np\n'), ((1179, 1208), 'numpy.array', 'np.array', (['(1)'], {'dtype': 'np.float32'}), '(1, dtype=np.float32)\n', (1187, 1208), True, 'import numpy as np\n'), ((2153, 2175), 'numpy.sum', 'np.sum', (['log_likelihood'], {}), '(log_likelihood)\n', (2159, 2175), True, 'import numpy as np\n'), ((3460, 3500), 'domain.text_vectorizers.BoWVectorizer', 'BoWVectorizer', ([], {'max_features': 'max_features'}), '(max_features=max_features)\n', (3473, 3500), False, 'from 
domain.text_vectorizers import BoWVectorizer\n'), ((4715, 4755), 'domain.text_vectorizers.BoWVectorizer', 'BoWVectorizer', ([], {'max_features': 'max_features'}), '(max_features=max_features)\n', (4728, 4755), False, 'from domain.text_vectorizers import BoWVectorizer\n'), ((6186, 6234), 'cv2.resize', 'cv2.resize', (['spectrogram', '(8, 8)', 'cv2.INTER_CUBIC'], {}), '(spectrogram, (8, 8), cv2.INTER_CUBIC)\n', (6196, 6234), False, 'import cv2\n'), ((2061, 2086), 'numpy.argmax', 'np.argmax', (['action'], {'axis': '(1)'}), '(action, axis=1)\n', (2070, 2086), True, 'import numpy as np\n'), ((3795, 3836), 'domain.text_vectorizers.BiLSTMVectorizer', 'BiLSTMVectorizer', ([], {'vocab_size': 'max_features'}), '(vocab_size=max_features)\n', (3811, 3836), False, 'from domain.text_vectorizers import BiLSTMVectorizer\n'), ((5049, 5090), 'domain.text_vectorizers.BiLSTMVectorizer', 'BiLSTMVectorizer', ([], {'vocab_size': 'max_features'}), '(vocab_size=max_features)\n', (5065, 5090), False, 'from domain.text_vectorizers import BiLSTMVectorizer\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import numpy as np
from arch.api.utils import log_utils
from federatedml.model_base import ModelBase
from federatedml.param.onehot_encoder_param import OneHotEncoderParam
from federatedml.protobuf.generated import onehot_param_pb2, onehot_meta_pb2
from federatedml.statistic.data_overview import get_header
from federatedml.util import consts
LOGGER = log_utils.getLogger()
MODEL_PARAM_NAME = 'OneHotParam'
MODEL_META_NAME = 'OneHotMeta'
MODEL_NAME = 'OneHotEncoder'
class OneHotInnerParam(object):
    """Book-keeping for which columns of a schema get one-hot encoded.

    Tracks the original header, a name->index lookup, the selected column
    indexes/names, and the post-transform result header.
    """
    def __init__(self):
        self.col_name_maps = {}
        self.header = []
        self.transform_indexes = []
        self.transform_names = []
        self.result_header = []
    def set_header(self, header):
        """Record the schema header and extend the name -> index lookup."""
        self.header = header
        self.col_name_maps.update({name: pos for pos, name in enumerate(header)})
    def set_result_header(self, result_header: list):
        """Store a defensive copy of the transformed header."""
        self.result_header = result_header.copy()
    def set_transform_all(self):
        """Select every column for encoding."""
        self.transform_indexes = list(range(len(self.header)))
        self.transform_names = self.header
    def add_transform_indexes(self, transform_indexes):
        """Select columns by index; out-of-range indexes are warned and skipped."""
        for idx in transform_indexes:
            if idx >= len(self.header):
                LOGGER.warning("Adding a index that out of header's bound")
                continue
            if idx in self.transform_indexes:
                continue
            self.transform_indexes.append(idx)
            self.transform_names.append(self.header[idx])
    def add_transform_names(self, transform_names):
        """Select columns by name; unknown names are warned and skipped."""
        for col_name in transform_names:
            idx = self.col_name_maps.get(col_name)
            if idx is None:
                LOGGER.warning("Adding a col_name that is not exist in header")
                continue
            if idx in self.transform_indexes:
                continue
            self.transform_indexes.append(idx)
            self.transform_names.append(self.header[idx])
class TransferPair(object):
    """Per-column record of observed values and their generated one-hot headers."""
    def __init__(self, name):
        self.name = name
        self._seen_values = set()
        self._value_to_header = {}
    def add_value(self, value):
        """Register a newly observed value, assigning it a transformed header.

        Raises ValueError once the column exceeds consts.ONE_HOT_LIMIT
        distinct values.
        """
        if value in self._seen_values:
            return
        self._seen_values.add(value)
        if len(self._seen_values) > consts.ONE_HOT_LIMIT:
            raise ValueError("Input data should not have more than {} possible value when doing one-hot encode"
                             .format(consts.ONE_HOT_LIMIT))
        self._value_to_header[value] = '_'.join([str(x) for x in [self.name, value]])
    @property
    def values(self):
        return list(self._seen_values)
    @property
    def transformed_headers(self):
        # Order matches the `values` property so the two lists stay aligned.
        return [self._value_to_header[v] for v in self.values]
    def query_name_by_value(self, value):
        """Return the transformed header for `value`, or None if unseen."""
        if value in self._seen_values:
            return self._value_to_header.get(value)
        return None
class OneHotEncoder(ModelBase):
    """Federated one-hot encoder component.

    fit() scans the configured columns of a distributed table to collect the
    distinct values per column (self.col_maps: {col_name: TransferPair}),
    then transform() expands each selected column into one 0/1 indicator
    column per observed value.  The fitted mapping round-trips through
    protobuf via export_model() / _load_model().
    """
    def __init__(self):
        super(OneHotEncoder, self).__init__()
        # {col_name: TransferPair} learned during fit (or restored from model)
        self.col_maps = {}
        self.schema = {}
        self.output_data = None
        self.model_param = OneHotEncoderParam()
        self.inner_param: OneHotInnerParam = None
    def _init_model(self, model_param):
        # Framework hook: receive the user-configured parameters.
        self.model_param = model_param
        # self.cols_index = model_param.cols
    def fit(self, data_instances):
        """Learn the value -> indicator-column mapping, then transform the data.

        Returns the transformed data_instances.
        """
        self._init_params(data_instances)
        # Collect per-partition value maps, then merge them pairwise.
        f1 = functools.partial(self.record_new_header,
                               inner_param=self.inner_param)
        self.col_maps = data_instances.mapPartitions(f1).reduce(self.merge_col_maps)
        LOGGER.debug("Before set_schema in fit, schema is : {}, header: {}".format(self.schema,
                                                                                   self.inner_param.header))
        self._transform_schema()
        data_instances = self.transform(data_instances)
        LOGGER.debug("After transform in fit, schema is : {}, header: {}".format(self.schema,
                                                                                 self.inner_param.header))
        return data_instances
    def transform(self, data_instances):
        """Apply the fitted one-hot mapping to every instance."""
        self._init_params(data_instances)
        LOGGER.debug("In Onehot transform, ori_header: {}, transfered_header: {}".format(
            self.inner_param.header, self.inner_param.result_header
        ))
        # First row is fetched purely for debug logging.
        one_data = data_instances.first()[1].features
        LOGGER.debug("Before transform, data is : {}".format(one_data))
        f = functools.partial(self.transfer_one_instance,
                              col_maps=self.col_maps,
                              inner_param=self.inner_param)
        new_data = data_instances.mapValues(f)
        self.set_schema(new_data)
        one_data = new_data.first()[1].features
        LOGGER.debug("transfered data is : {}".format(one_data))
        return new_data
    def _transform_schema(self):
        """Compute the post-encoding header: untouched columns keep their name,
        encoded columns are replaced by their per-value indicator headers."""
        header = self.inner_param.header.copy()
        LOGGER.debug("[Result][OneHotEncoder]Before one-hot, "
                     "data_instances schema is : {}".format(self.inner_param.header))
        result_header = []
        for col_name in header:
            if col_name not in self.col_maps:
                result_header.append(col_name)
                continue
            pair_obj = self.col_maps[col_name]
            new_headers = pair_obj.transformed_headers
            result_header.extend(new_headers)
        self.inner_param.set_result_header(result_header)
        LOGGER.debug("[Result][OneHotEncoder]After one-hot, data_instances schema is : {}".format(header))
    def _init_params(self, data_instances):
        """Lazily capture the schema and build the inner column-selection
        state from model_param (idempotent after the first call)."""
        if len(self.schema) == 0:
            self.schema = data_instances.schema
        if self.inner_param is not None:
            return
        self.inner_param = OneHotInnerParam()
        # self.schema = data_instances.schema
        LOGGER.debug("In _init_params, schema is : {}".format(self.schema))
        header = get_header(data_instances)
        self.inner_param.set_header(header)
        # -1 is the sentinel for "encode every column".
        if self.model_param.transform_col_indexes == -1:
            self.inner_param.set_transform_all()
        else:
            self.inner_param.add_transform_indexes(self.model_param.transform_col_indexes)
            self.inner_param.add_transform_names(self.model_param.transform_col_names)
    @staticmethod
    def record_new_header(data, inner_param: OneHotInnerParam):
        """
        Generate a new schema based on data value. Each new value will generate a new header.

        Runs once per partition; values are truncated to int before being
        recorded.

        Returns
        -------
        col_maps: a dict in which keys are original header, values are dicts. The dicts in value
        e.g.
        cols_map = {"x1": {1 : "x1_1"},
                    ...}
        """
        col_maps = {}
        for col_name in inner_param.transform_names:
            col_maps[col_name] = TransferPair(col_name)
        for _, instance in data:
            feature = instance.features
            for col_idx, col_name in zip(inner_param.transform_indexes, inner_param.transform_names):
                pair_obj = col_maps.get(col_name)
                feature_value = int(feature[col_idx])
                pair_obj.add_value(feature_value)
        return col_maps
    @staticmethod
    def encode_new_header(col_name, feature_value):
        # Mirrors TransferPair's header naming: "<col>_<value>".
        return '_'.join([str(x) for x in [col_name, feature_value]])
    @staticmethod
    def merge_col_maps(col_map1, col_map2):
        """Reduce step: union two partition-level col_maps in place (into
        col_map1), merging the value sets of shared columns."""
        if col_map1 is None and col_map2 is None:
            return None
        if col_map1 is None:
            return col_map2
        if col_map2 is None:
            return col_map1
        for col_name, pair_obj in col_map2.items():
            if col_name not in col_map1:
                col_map1[col_name] = pair_obj
                continue
            else:
                col_1_obj = col_map1[col_name]
                for value in pair_obj.values:
                    col_1_obj.add_value(value)
        return col_map1
    @staticmethod
    def transfer_one_instance(instance, col_maps, inner_param):
        """Rewrite one instance's feature vector into the result-header layout.

        Pass-through columns keep their value; encoded columns set a 1 in
        the indicator slot matching their value (unseen values leave all
        indicators at 0).  Mutates and returns `instance`.
        """
        feature = instance.features
        result_header = inner_param.result_header
        # new_feature = [0 for _ in result_header]
        _transformed_value = {}
        for idx, col_name in enumerate(inner_param.header):
            value = feature[idx]
            if col_name in result_header:
                _transformed_value[col_name] = value
            elif col_name not in col_maps:
                continue
            else:
                pair_obj = col_maps.get(col_name)
                new_col_name = pair_obj.query_name_by_value(value)
                if new_col_name is None:
                    continue
                _transformed_value[new_col_name] = 1
        new_feature = [_transformed_value[x] if x in _transformed_value else 0 for x in result_header]
        feature_array = np.array(new_feature)
        instance.features = feature_array
        return instance
    def set_schema(self, data_instance):
        # Publish the transformed header on the output table's schema.
        self.schema['header'] = self.inner_param.result_header
        data_instance.schema = self.schema
    def _get_meta(self):
        """Build the protobuf meta object (selected columns + header)."""
        meta_protobuf_obj = onehot_meta_pb2.OneHotMeta(transform_col_names=self.inner_param.transform_names,
                                                    header=self.inner_param.header,
                                                    need_run=self.need_run)
        return meta_protobuf_obj
    def _get_param(self):
        """Build the protobuf param object: per-column value/header lists
        (values serialized as strings) plus the result header."""
        pb_dict = {}
        for col_name, pair_obj in self.col_maps.items():
            values = [str(x) for x in pair_obj.values]
            value_dict_obj = onehot_param_pb2.ColsMap(values=values,
                                                     transformed_headers=pair_obj.transformed_headers)
            pb_dict[col_name] = value_dict_obj
        result_obj = onehot_param_pb2.OneHotParam(col_map=pb_dict,
                                                  result_header=self.inner_param.result_header)
        return result_obj
    def export_model(self):
        """Return the {meta, param} protobuf dict, reusing a cached copy if
        a model was previously loaded."""
        if self.model_output is not None:
            LOGGER.debug("Model output is : {}".format(self.model_output))
            return self.model_output
        meta_obj = self._get_meta()
        param_obj = self._get_param()
        result = {
            MODEL_META_NAME: meta_obj,
            MODEL_PARAM_NAME: param_obj
        }
        return result
    def _load_model(self, model_dict):
        """Restore inner_param and col_maps from a previously exported model."""
        self._parse_need_run(model_dict, MODEL_META_NAME)
        model_param = list(model_dict.get('model').values())[0].get(MODEL_PARAM_NAME)
        model_meta = list(model_dict.get('model').values())[0].get(MODEL_META_NAME)
        self.model_output = {
            MODEL_META_NAME: model_meta,
            MODEL_PARAM_NAME: model_param
        }
        self.inner_param = OneHotInnerParam()
        self.inner_param.set_header(list(model_meta.header))
        self.inner_param.add_transform_names(list(model_meta.transform_col_names))
        col_maps = dict(model_param.col_map)
        self.col_maps = {}
        for col_name, cols_map_obj in col_maps.items():
            if col_name not in self.col_maps:
                self.col_maps[col_name] = TransferPair(col_name)
            pair_obj = self.col_maps[col_name]
            for feature_value in list(cols_map_obj.values):
                # NOTE(review): eval() turns the stored string back into a
                # number; acceptable only because model files are trusted
                # input -- flagging for a safer parse (e.g. int()).
                pair_obj.add_value(eval(feature_value))
        self.inner_param.set_result_header(list(model_param.result_header))
| [
"federatedml.protobuf.generated.onehot_param_pb2.ColsMap",
"federatedml.param.onehot_encoder_param.OneHotEncoderParam",
"federatedml.protobuf.generated.onehot_param_pb2.OneHotParam",
"numpy.array",
"arch.api.utils.log_utils.getLogger",
"federatedml.statistic.data_overview.get_header",
"functools.partial... | [((1031, 1052), 'arch.api.utils.log_utils.getLogger', 'log_utils.getLogger', ([], {}), '()\n', (1050, 1052), False, 'from arch.api.utils import log_utils\n'), ((3866, 3886), 'federatedml.param.onehot_encoder_param.OneHotEncoderParam', 'OneHotEncoderParam', ([], {}), '()\n', (3884, 3886), False, 'from federatedml.param.onehot_encoder_param import OneHotEncoderParam\n'), ((4154, 4225), 'functools.partial', 'functools.partial', (['self.record_new_header'], {'inner_param': 'self.inner_param'}), '(self.record_new_header, inner_param=self.inner_param)\n', (4171, 4225), False, 'import functools\n'), ((5262, 5365), 'functools.partial', 'functools.partial', (['self.transfer_one_instance'], {'col_maps': 'self.col_maps', 'inner_param': 'self.inner_param'}), '(self.transfer_one_instance, col_maps=self.col_maps,\n inner_param=self.inner_param)\n', (5279, 5365), False, 'import functools\n'), ((6739, 6765), 'federatedml.statistic.data_overview.get_header', 'get_header', (['data_instances'], {}), '(data_instances)\n', (6749, 6765), False, 'from federatedml.statistic.data_overview import get_header\n'), ((9641, 9662), 'numpy.array', 'np.array', (['new_feature'], {}), '(new_feature)\n', (9649, 9662), True, 'import numpy as np\n'), ((9931, 10072), 'federatedml.protobuf.generated.onehot_meta_pb2.OneHotMeta', 'onehot_meta_pb2.OneHotMeta', ([], {'transform_col_names': 'self.inner_param.transform_names', 'header': 'self.inner_param.header', 'need_run': 'self.need_run'}), '(transform_col_names=self.inner_param.\n transform_names, header=self.inner_param.header, need_run=self.need_run)\n', (9957, 10072), False, 'from federatedml.protobuf.generated import onehot_param_pb2, onehot_meta_pb2\n'), ((10613, 10709), 'federatedml.protobuf.generated.onehot_param_pb2.OneHotParam', 'onehot_param_pb2.OneHotParam', ([], {'col_map': 'pb_dict', 'result_header': 'self.inner_param.result_header'}), '(col_map=pb_dict, result_header=self.\n inner_param.result_header)\n', (10641, 
10709), False, 'from federatedml.protobuf.generated import onehot_param_pb2, onehot_meta_pb2\n'), ((10400, 10494), 'federatedml.protobuf.generated.onehot_param_pb2.ColsMap', 'onehot_param_pb2.ColsMap', ([], {'values': 'values', 'transformed_headers': 'pair_obj.transformed_headers'}), '(values=values, transformed_headers=pair_obj.\n transformed_headers)\n', (10424, 10494), False, 'from federatedml.protobuf.generated import onehot_param_pb2, onehot_meta_pb2\n')] |
import os
import argparse
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoConfig
from transformers.optimization import get_linear_schedule_with_warmup, Adafactor
import nlp
from rouge_score import rouge_scorer
import pytorch_lightning as pl
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel
from longformer import LongformerEncoderDecoderForConditionalGeneration, LongformerEncoderDecoderConfig
from longformer.sliding_chunks import pad_to_window_size
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
"""From fairseq"""
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
count = (~pad_mask).sum()
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
count = nll_loss.numel()
nll_loss = nll_loss.sum() / count
smooth_loss = smooth_loss.sum() / count
eps_i = epsilon / lprobs.size(-1)
loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
return loss, nll_loss
class SummarizationDataset(Dataset):
    """Wraps a HuggingFace dataset split, tokenizing article/abstract pairs
    and truncating to the configured maximum lengths."""
    def __init__(self, hf_dataset, tokenizer, max_input_len, max_output_len):
        self.hf_dataset = hf_dataset
        self.tokenizer = tokenizer
        self.max_input_len = max_input_len
        self.max_output_len = max_output_len
    def __len__(self):
        return len(self.hf_dataset)
    def __getitem__(self, idx):
        entry = self.hf_dataset[idx]
        encode = self.tokenizer.encode
        input_ids = encode(entry['article'], truncation=True, max_length=self.max_input_len)
        output_ids = encode(entry['abstract'], truncation=True, max_length=self.max_output_len)
        if self.tokenizer.bos_token_id is None:  # pegasus: no BOS, start decoder with PAD
            output_ids = [self.tokenizer.pad_token_id] + output_ids
        return torch.tensor(input_ids), torch.tensor(output_ids)
    @staticmethod
    def collate_fn(batch):
        # A hack to know if this is bart or pegasus: inspect the final (EOS)
        # token of the first encoder sequence.  DDP doesn't like global
        # variables nor class-level member variables, hence the inference.
        last_token = batch[0][0][-1].item()
        if last_token == 2:
            pad_token_id = 1  # AutoTokenizer.from_pretrained('facebook/bart-base').pad_token_id
        elif last_token == 1:
            pad_token_id = 0  # AutoTokenizer.from_pretrained('google/pegasus-large').pad_token_id
        else:
            assert False
        input_ids, output_ids = zip(*batch)
        pad = torch.nn.utils.rnn.pad_sequence
        return (pad(list(input_ids), batch_first=True, padding_value=pad_token_id),
                pad(list(output_ids), batch_first=True, padding_value=pad_token_id))
class Summarizer(pl.LightningModule):
    """LightningModule fine-tuning a seq2seq model (BART/Pegasus or their
    Longformer encoder-decoder variants) for abstractive summarization."""

    def __init__(self, params):
        super().__init__()
        self.args = params
        self.hparams = params
        self.tokenizer = AutoTokenizer.from_pretrained(self.args.tokenizer, use_fast=True)
        if 'long' in self.args.model_path:
            # Longformer encoder-decoder: configure sparse attention before loading weights.
            config = LongformerEncoderDecoderConfig.from_pretrained(self.args.model_path)
            config.attention_dropout = self.args.attention_dropout
            config.gradient_checkpointing = self.args.grad_ckpt
            config.attention_mode = self.args.attention_mode
            config.attention_window = [self.args.attention_window] * config.encoder_layers
            self.model = LongformerEncoderDecoderForConditionalGeneration.from_pretrained(
                self.args.model_path, config=config)
        else:
            config = AutoConfig.from_pretrained(self.args.model_path)
            config.attention_dropout = self.args.attention_dropout
            self.model = AutoModelForSeq2SeqLM.from_pretrained(
                self.args.model_path, config=config)
        # Dataloaders are built lazily by _get_dataloader and cached here.
        self.train_dataloader_object = self.val_dataloader_object = self.test_dataloader_object = None

    def _prepare_input(self, input_ids):
        """Build the attention mask; for Longformer, also set global attention
        on the first token and pad the sequence to the attention window."""
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device)
        attention_mask[input_ids == self.tokenizer.pad_token_id] = 0
        if isinstance(self.model, LongformerEncoderDecoderForConditionalGeneration):
            attention_mask[:, 0] = 2  # global attention on one token for all model params to be used, which is important for gradient checkpointing to work
            if self.args.attention_mode == 'sliding_chunks':
                half_padding_mod = self.model.config.attention_window[0]
            elif self.args.attention_mode == 'sliding_chunks_no_overlap':
                half_padding_mod = self.model.config.attention_window[0] / 2
            else:
                raise NotImplementedError
            input_ids, attention_mask = pad_to_window_size(  # ideally, should be moved inside the LongformerModel
                input_ids, attention_mask, half_padding_mod, self.tokenizer.pad_token_id)
        return input_ids, attention_mask

    def forward(self, input_ids, output_ids):
        """Teacher-forced forward pass.

        Returns a one-element list [loss], where loss is plain cross-entropy
        when --label_smoothing is 0 and label-smoothed NLL otherwise.
        """
        input_ids, attention_mask = self._prepare_input(input_ids)
        # Shift: decoder consumes tokens [0..n-2], predicts tokens [1..n-1].
        decoder_input_ids = output_ids[:, :-1]
        decoder_attention_mask = (decoder_input_ids != self.tokenizer.pad_token_id)
        labels = output_ids[:, 1:].clone()
        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            use_cache=False,)
        lm_logits = outputs[0]
        if self.args.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = torch.nn.CrossEntropyLoss(ignore_index=self.tokenizer.pad_token_id)
            assert lm_logits.shape[-1] == self.model.config.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), labels.view(-1))
        else:
            lprobs = torch.nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, labels, self.args.label_smoothing, ignore_index=self.tokenizer.pad_token_id
            )
        return [loss]

    def training_step(self, batch, batch_nb):
        """One optimization step; logs loss, lr, batch sizes and GPU memory."""
        output = self.forward(*batch)
        loss = output[0]
        lr = loss.new_zeros(1) + self.trainer.optimizers[0].param_groups[0]['lr']
        tensorboard_logs = {'train_loss': loss, 'lr': lr,
                            'input_size': batch[0].numel(),
                            'output_size': batch[1].numel(),
                            'mem': torch.cuda.memory_allocated(loss.device) / 1024 ** 3 if torch.cuda.is_available() else 0}
        return {'loss': loss, 'log': tensorboard_logs}

    def validation_step(self, batch, batch_nb):
        """Compute validation loss and ROUGE scores on greedily decoded summaries."""
        # Freeze parameters so generation during validation cannot build grads.
        for p in self.model.parameters():
            p.requires_grad = False
        outputs = self.forward(*batch)
        vloss = outputs[0]
        input_ids, output_ids = batch
        input_ids, attention_mask = self._prepare_input(input_ids)
        generated_ids = self.model.generate(input_ids=input_ids, attention_mask=attention_mask,
                                            use_cache=True, max_length=self.args.max_output_len,
                                            num_beams=1)
        generated_str = self.tokenizer.batch_decode(generated_ids.tolist(), skip_special_tokens=True)
        gold_str = self.tokenizer.batch_decode(output_ids.tolist(), skip_special_tokens=True)
        scorer = rouge_scorer.RougeScorer(rouge_types=['rouge1', 'rouge2', 'rougeL', 'rougeLsum'], use_stemmer=False)
        rouge1 = rouge2 = rougel = rougelsum = 0.0
        for ref, pred in zip(gold_str, generated_str):
            score = scorer.score(ref, pred)
            rouge1 += score['rouge1'].fmeasure
            rouge2 += score['rouge2'].fmeasure
            rougel += score['rougeL'].fmeasure
            rougelsum += score['rougeLsum'].fmeasure
        rouge1 /= len(generated_str)
        rouge2 /= len(generated_str)
        rougel /= len(generated_str)
        rougelsum /= len(generated_str)
        # ROUGE floats are promoted to tensors so DDP can all-reduce them.
        return {'vloss': vloss,
                'rouge1': vloss.new_zeros(1) + rouge1,
                'rouge2': vloss.new_zeros(1) + rouge2,
                'rougeL': vloss.new_zeros(1) + rougel,
                'rougeLsum': vloss.new_zeros(1) + rougelsum, }

    def validation_epoch_end(self, outputs):
        """Average per-step metrics, all-reducing across ranks under DDP."""
        # Re-enable grads frozen in validation_step before training resumes.
        for p in self.model.parameters():
            p.requires_grad = True
        names = ['vloss', 'rouge1', 'rouge2', 'rougeL', 'rougeLsum']
        metrics = []
        for name in names:
            metric = torch.stack([x[name] for x in outputs]).mean()
            if self.trainer.use_ddp:
                torch.distributed.all_reduce(metric, op=torch.distributed.ReduceOp.SUM)
                metric /= self.trainer.world_size
            metrics.append(metric)
        logs = dict(zip(names, metrics))
        print(logs)
        return {'avg_val_loss': logs['vloss'], 'log': logs, 'progress_bar': logs}

    def test_step(self, batch, batch_nb):
        """Testing reuses the validation logic."""
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        """Aggregate test metrics and print them."""
        result = self.validation_epoch_end(outputs)
        print(result)

    def configure_optimizers(self):
        """Adam or Adafactor with a linear warmup schedule (constant LR in --debug)."""
        if self.args.adafactor:
            optimizer = Adafactor(self.model.parameters(), lr=self.args.lr, scale_parameter=False, relative_step=False)
        else:
            optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.lr)
        if self.args.debug:
            return optimizer  # const LR
        num_gpus = torch.cuda.device_count() if torch.cuda.is_available() else 1
        num_steps = self.args.dataset_size * self.args.epochs / num_gpus / self.args.grad_accum / self.args.batch_size
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=self.args.warmup, num_training_steps=num_steps
        )
        return [optimizer], [{"scheduler": scheduler, "interval": "step"}]

    def _get_dataloader(self, current_dataloader, split_name, is_train):
        """Build (once) and cache a DataLoader for the given dataset split."""
        if current_dataloader is not None:
            return current_dataloader
        dataset = SummarizationDataset(hf_dataset=self.hf_datasets[split_name], tokenizer=self.tokenizer,
                                       max_input_len=self.args.max_input_len, max_output_len=self.args.max_output_len)
        sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=is_train) if self.trainer.use_ddp else None
        return DataLoader(dataset, batch_size=self.args.batch_size, shuffle=(sampler is None),
                          num_workers=self.args.num_workers, sampler=sampler,
                          collate_fn=SummarizationDataset.collate_fn)

    @pl.data_loader
    def train_dataloader(self):
        self.train_dataloader_object = self._get_dataloader(self.train_dataloader_object, 'train', is_train=True)
        return self.train_dataloader_object

    @pl.data_loader
    def val_dataloader(self):
        self.val_dataloader_object = self._get_dataloader(self.val_dataloader_object, 'validation', is_train=False)
        return self.val_dataloader_object

    @pl.data_loader
    def test_dataloader(self):
        self.test_dataloader_object = self._get_dataloader(self.test_dataloader_object, 'test', is_train=False)
        return self.test_dataloader_object

    def configure_ddp(self, model, device_ids):
        """Wrap the model in LightningDistributedDataParallel.

        Bug fix: the original unpacked ``**self._ddp_kwargs["find_unused_parameters"]``
        — a bool, so the call raised TypeError at runtime. Unpack the whole
        kwargs dict instead, after defaulting find_unused_parameters to False.
        """
        self._ddp_kwargs["find_unused_parameters"] = self._ddp_kwargs.get(
            "find_unused_parameters", False
        )
        model = LightningDistributedDataParallel(
            model,
            device_ids=device_ids,
            **self._ddp_kwargs
        )
        return model

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Register all CLI options for training and evaluation on ``parser``."""
        parser.add_argument("--save_dir", type=str, default='summarization')
        parser.add_argument("--save_prefix", type=str, default='test')
        parser.add_argument("--batch_size", type=int, default=16, help="Batch size")
        parser.add_argument("--grad_accum", type=int, default=1, help="number of gradient accumulation steps")
        parser.add_argument("--gpus", type=int, default=-1,
                            help="Number of gpus. 0 for CPU")
        parser.add_argument("--warmup", type=int, default=1000, help="Number of warmup steps")
        parser.add_argument("--lr", type=float, default=0.00003, help="Maximum learning rate")
        parser.add_argument("--val_every", type=float, default=1.0, help="Number of training steps between validations")
        parser.add_argument("--val_percent_check", default=1.00, type=float, help='Percent of validation data used')
        parser.add_argument("--num_workers", type=int, default=0, help="Number of data loader workers")
        parser.add_argument("--seed", type=int, default=1234, help="Seed")
        parser.add_argument("--epochs", type=int, default=5, help="Number of epochs")
        parser.add_argument("--disable_checkpointing", action='store_true', help="No logging or checkpointing")
        parser.add_argument("--max_output_len", type=int, default=256,
                            help="maximum num of wordpieces/summary. Used for training and testing")
        parser.add_argument("--max_input_len", type=int, default=512,
                            help="maximum num of wordpieces/summary. Used for training and testing")
        parser.add_argument("--test", action='store_true', help="Test only, no training")
        parser.add_argument("--model_path", type=str, default='facebook/bart-base',
                            help="Path to the checkpoint directory or model name")
        parser.add_argument("--tokenizer", type=str, default='facebook/bart-base')
        parser.add_argument("--no_progress_bar", action='store_true', help="no progress bar. Good for printing")
        parser.add_argument("--fp32", action='store_true', help="default is fp16. Use --fp32 to switch to fp32")
        parser.add_argument("--debug", action='store_true', help="debug run")
        parser.add_argument("--resume_ckpt", type=str, help="Path of a checkpoint to resume from")
        parser.add_argument("--from_pretrained", type=str, default=None,
                            help="Path to a checkpoint to load model weights but not training state")
        parser.add_argument('--grad_ckpt', action='store_true', help='Enable gradient checkpointing to save memory')
        parser.add_argument("--attention_dropout", type=float, default=0.1, help="attention dropout")
        parser.add_argument("--attention_mode", type=str, default='sliding_chunks', help="Longformer attention mode")
        parser.add_argument("--attention_window", type=int, default=512, help="Attention window")
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--adafactor", action='store_true', help="Use adafactor optimizer")
        return parser
def main(args):
    """Seed all RNGs, build the model, dataset and trainer, then train/test."""
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
    if args.from_pretrained is not None:
        # Warm-start weights from a checkpoint; training state is not resumed here.
        model = Summarizer.load_from_checkpoint(args.from_pretrained, args)
    else:
        model = Summarizer(args)
    # NOTE(review): downloads the arxiv split of scientific_papers on first run.
    model.hf_datasets = nlp.load_dataset('scientific_papers', 'arxiv')
    logger = pl_loggers.CometLogger(save_dir='logs/')
    # Keep the 5 best checkpoints ranked by average validation loss.
    checkpoint_callback = ModelCheckpoint(
        filepath=os.path.join(args.save_dir, args.save_prefix, "checkpoints"),
        save_top_k=5,
        verbose=True,
        monitor='avg_val_loss',
        mode='min',
        period=-1,
        prefix=''
    )
    print(args)
    args.dataset_size = 203037  # hardcode dataset size. Needed to compute number of steps for the lr scheduler
    trainer = pl.Trainer(gpus=args.gpus, distributed_backend='ddp' if torch.cuda.is_available() else None,
                         track_grad_norm=-1,
                         max_epochs=args.epochs if not args.debug else 100,
                         max_steps=None if not args.debug else 1,
                         replace_sampler_ddp=False,
                         accumulate_grad_batches=args.grad_accum,
                         val_check_interval=args.val_every if not args.debug else 1,
                         num_sanity_val_steps=2 if not args.debug else 0,
                         check_val_every_n_epoch=1 if not args.debug else 1,
                         val_percent_check=args.val_percent_check,
                         test_percent_check=args.val_percent_check,
                         logger=logger,
                         checkpoint_callback=checkpoint_callback if not args.disable_checkpointing else False,
                         show_progress_bar=not args.no_progress_bar,
                         use_amp=not args.fp32, amp_level='O2',
                         resume_from_checkpoint=args.resume_ckpt,
                         )
    if not args.test:
        trainer.fit(model)
    # Always run the test pass, even after training.
    trainer.test(model)
if __name__ == "__main__":
    # Build the CLI from the model's option set and launch training/testing.
    # NOTE(review): relies on `argparse` and `os` being imported earlier in the file.
    main_arg_parser = argparse.ArgumentParser(description="summarization")
    parser = Summarizer.add_model_specific_args(main_arg_parser, os.getcwd())
    args = parser.parse_args()
    main(args)
| [
"torch.nn.CrossEntropyLoss",
"torch.nn.utils.rnn.pad_sequence",
"torch.cuda.device_count",
"torch.utils.data.distributed.DistributedSampler",
"torch.cuda.is_available",
"transformers.AutoTokenizer.from_pretrained",
"nlp.load_dataset",
"transformers.optimization.get_linear_schedule_with_warmup",
"arg... | [((15429, 15451), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (15440, 15451), False, 'import random\n'), ((15456, 15481), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (15470, 15481), True, 'import numpy as np\n'), ((15486, 15514), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (15503, 15514), False, 'import torch\n'), ((15522, 15547), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15545, 15547), False, 'import torch\n'), ((15781, 15827), 'nlp.load_dataset', 'nlp.load_dataset', (['"""scientific_papers"""', '"""arxiv"""'], {}), "('scientific_papers', 'arxiv')\n", (15797, 15827), False, 'import nlp\n'), ((15842, 15882), 'pytorch_lightning.loggers.CometLogger', 'pl_loggers.CometLogger', ([], {'save_dir': '"""logs/"""'}), "(save_dir='logs/')\n", (15864, 15882), True, 'from pytorch_lightning import loggers as pl_loggers\n'), ((17560, 17612), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""summarization"""'}), "(description='summarization')\n", (17583, 17612), False, 'import argparse\n'), ((2907, 3000), 'torch.nn.utils.rnn.pad_sequence', 'torch.nn.utils.rnn.pad_sequence', (['input_ids'], {'batch_first': '(True)', 'padding_value': 'pad_token_id'}), '(input_ids, batch_first=True, padding_value=\n pad_token_id)\n', (2938, 3000), False, 'import torch\n'), ((3017, 3111), 'torch.nn.utils.rnn.pad_sequence', 'torch.nn.utils.rnn.pad_sequence', (['output_ids'], {'batch_first': '(True)', 'padding_value': 'pad_token_id'}), '(output_ids, batch_first=True, padding_value\n =pad_token_id)\n', (3048, 3111), False, 'import torch\n'), ((3326, 3391), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self.args.tokenizer'], {'use_fast': '(True)'}), '(self.args.tokenizer, use_fast=True)\n', (3355, 3391), False, 'from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoConfig\n'), ((4391, 4461), 
'torch.ones', 'torch.ones', (['input_ids.shape'], {'dtype': 'torch.long', 'device': 'input_ids.device'}), '(input_ids.shape, dtype=torch.long, device=input_ids.device)\n', (4401, 4461), False, 'import torch\n'), ((7902, 8006), 'rouge_score.rouge_scorer.RougeScorer', 'rouge_scorer.RougeScorer', ([], {'rouge_types': "['rouge1', 'rouge2', 'rougeL', 'rougeLsum']", 'use_stemmer': '(False)'}), "(rouge_types=['rouge1', 'rouge2', 'rougeL',\n 'rougeLsum'], use_stemmer=False)\n", (7926, 8006), False, 'from rouge_score import rouge_scorer\n'), ((10209, 10321), 'transformers.optimization.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'self.args.warmup', 'num_training_steps': 'num_steps'}), '(optimizer, num_warmup_steps=self.args.\n warmup, num_training_steps=num_steps)\n', (10240, 10321), False, 'from transformers.optimization import get_linear_schedule_with_warmup, Adafactor\n'), ((10936, 11118), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'self.args.batch_size', 'shuffle': '(sampler is None)', 'num_workers': 'self.args.num_workers', 'sampler': 'sampler', 'collate_fn': 'SummarizationDataset.collate_fn'}), '(dataset, batch_size=self.args.batch_size, shuffle=sampler is\n None, num_workers=self.args.num_workers, sampler=sampler, collate_fn=\n SummarizationDataset.collate_fn)\n', (10946, 11118), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((11985, 12098), 'pytorch_lightning.overrides.data_parallel.LightningDistributedDataParallel', 'LightningDistributedDataParallel', (['model'], {'device_ids': 'device_ids'}), "(model, device_ids=device_ids, **self.\n _ddp_kwargs['find_unused_parameters'])\n", (12017, 12098), False, 'from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel\n'), ((15557, 15594), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (15583, 15594), False, 'import torch\n'), ((17678, 
17689), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (17687, 17689), False, 'import os\n'), ((2303, 2326), 'torch.tensor', 'torch.tensor', (['input_ids'], {}), '(input_ids)\n', (2315, 2326), False, 'import torch\n'), ((2328, 2352), 'torch.tensor', 'torch.tensor', (['output_ids'], {}), '(output_ids)\n', (2340, 2352), False, 'import torch\n'), ((3457, 3525), 'longformer.LongformerEncoderDecoderConfig.from_pretrained', 'LongformerEncoderDecoderConfig.from_pretrained', (['self.args.model_path'], {}), '(self.args.model_path)\n', (3503, 3525), False, 'from longformer import LongformerEncoderDecoderForConditionalGeneration, LongformerEncoderDecoderConfig\n'), ((3834, 3940), 'longformer.LongformerEncoderDecoderForConditionalGeneration.from_pretrained', 'LongformerEncoderDecoderForConditionalGeneration.from_pretrained', (['self.args.model_path'], {'config': 'config'}), '(self.args.\n model_path, config=config)\n', (3898, 3940), False, 'from longformer import LongformerEncoderDecoderForConditionalGeneration, LongformerEncoderDecoderConfig\n'), ((3988, 4036), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['self.args.model_path'], {}), '(self.args.model_path)\n', (4014, 4036), False, 'from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoConfig\n'), ((4129, 4203), 'transformers.AutoModelForSeq2SeqLM.from_pretrained', 'AutoModelForSeq2SeqLM.from_pretrained', (['self.args.model_path'], {'config': 'config'}), '(self.args.model_path, config=config)\n', (4166, 4203), False, 'from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoConfig\n'), ((5158, 5255), 'longformer.sliding_chunks.pad_to_window_size', 'pad_to_window_size', (['input_ids', 'attention_mask', 'half_padding_mod', 'self.tokenizer.pad_token_id'], {}), '(input_ids, attention_mask, half_padding_mod, self.\n tokenizer.pad_token_id)\n', (5176, 5255), False, 'from longformer.sliding_chunks import pad_to_window_size\n'), ((6085, 6152), 'torch.nn.CrossEntropyLoss', 
'torch.nn.CrossEntropyLoss', ([], {'ignore_index': 'self.tokenizer.pad_token_id'}), '(ignore_index=self.tokenizer.pad_token_id)\n', (6110, 6152), False, 'import torch\n'), ((6348, 6398), 'torch.nn.functional.log_softmax', 'torch.nn.functional.log_softmax', (['lm_logits'], {'dim': '(-1)'}), '(lm_logits, dim=-1)\n', (6379, 6398), False, 'import torch\n'), ((10037, 10062), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10060, 10062), False, 'import torch\n'), ((10008, 10033), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (10031, 10033), False, 'import torch\n'), ((10812, 10886), 'torch.utils.data.distributed.DistributedSampler', 'torch.utils.data.distributed.DistributedSampler', (['dataset'], {'shuffle': 'is_train'}), '(dataset, shuffle=is_train)\n', (10859, 10886), False, 'import torch\n'), ((15944, 16004), 'os.path.join', 'os.path.join', (['args.save_dir', 'args.save_prefix', '"""checkpoints"""'], {}), "(args.save_dir, args.save_prefix, 'checkpoints')\n", (15956, 16004), False, 'import os\n'), ((7051, 7076), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7074, 7076), False, 'import torch\n'), ((9121, 9192), 'torch.distributed.all_reduce', 'torch.distributed.all_reduce', (['metric'], {'op': 'torch.distributed.ReduceOp.SUM'}), '(metric, op=torch.distributed.ReduceOp.SUM)\n', (9149, 9192), False, 'import torch\n'), ((16346, 16371), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (16369, 16371), False, 'import torch\n'), ((6995, 7035), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', (['loss.device'], {}), '(loss.device)\n', (7022, 7035), False, 'import torch\n'), ((9021, 9060), 'torch.stack', 'torch.stack', (['[x[name] for x in outputs]'], {}), '([x[name] for x in outputs])\n', (9032, 9060), False, 'import torch\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import pandas as pd
import math
def moving_average(a, n=7):
    """Return the n-point moving average of sequence ``a`` (default window 7).

    Uses a cumulative sum so each window average is computed in O(1).
    The result has len(a) - n + 1 elements.
    """
    csum = np.cumsum(a, dtype=float)
    # Deliberately not `csum[n:] -= csum[:-n]`: the slices overlap, and an
    # in-place op would read already-modified values.
    csum[n:] = csum[n:] - csum[:-n]
    return csum[n - 1:] / n
# Read the spreadsheet and build the per-day arrays for plotting:
df = pd.read_excel('dados.xlsx')
casos = []
mortes = []
recuperados = []
dias = []
for i in range(len(df.values)):
    casos.append(df.values[i][1])
    mortes.append(df.values[i][2])
    recuperados.append(df.values[i][3])
    dias.append(df.values[i][4])
# Daily new cases: first difference of the cumulative case count (day 0 -> 0).
casos_dia = [0]
for i in range(len(casos)):
    if i+1<=len(casos)-1:
        novos_casos = casos[i+1] - casos[i]
        casos_dia.append(novos_casos)
# Active cases: confirmed minus recovered minus deaths.
casos_ativos = []
for i in range(len(casos)):
    ativos = (casos[i] - recuperados[i]) - mortes[i]
    casos_ativos.append(ativos)
# Write daily and active cases back into the spreadsheet.
dados_atualizados = [casos,mortes,recuperados,dias,casos_dia,casos_ativos]
df = pd.DataFrame(dados_atualizados,index=['Casos Confirmados','Óbitos','Recuperados','Dia','Casos Diários','Casos Ativos']).T
df.to_excel('dados.xlsx')
# Mortality rate: deaths as a percentage of confirmed cases, 2 decimals.
taxa = round((mortes[len(mortes)-1] * 100) / casos[len(casos)-1],2)
# Legend strings shown in the plots.
string_mortalidade = 'Taxa de mortalidade: ' + str(taxa) + '%'
string_ativos = 'Casos ativos: ' + str(int(casos_ativos[len(casos_ativos)-1]))
string_mortes = 'Total de mortes: ' + str(mortes[len(mortes)-1])
# Estimate the reproduction number R.
# NOTE(review): this assumes logistic growth with a hard-coded carrying
# capacity of 34691 — confirm the formula and constant against the source.
r = math.exp((math.log(34691/((1/(casos[len(casos)-1]/(casos[0]*34691)))-1)))/(len(dias)))
print('Número R: ' + str(r))
# 7-day smoothed series for plotting.
y_casos = moving_average(casos)
y_casos_dia = moving_average(casos_dia)
y_recuperados = moving_average(recuperados)
y_mortes = moving_average(mortes)
y_casos_ativos = moving_average(casos_ativos)
x = np.arange(1,len(y_casos)+1,1)
# Figure 1: cumulative, active and recovered cases.
f1 = plt.figure(1)
blue_patch = mpatches.Patch(color='blue',label='Casos confirmados')
purple_patch = mpatches.Patch(color='purple',label='Casos ativos')
green_patch = mpatches.Patch(color='green',label='Recuperados')
taxa_patch = mpatches.Patch(color = 'white', label = string_mortalidade)
ativos_patch = mpatches.Patch(color = 'white', label = string_ativos)
legenda = 'Dias (04/06/2020 - '+str(int(dias[len(dias)-1]))+'/08/2021 )'
plt.plot(x,y_casos,color='blue')
plt.plot(x,y_recuperados,color='green')
plt.plot(x,y_casos_ativos,color='purple')
plt.ylabel('Média móvel de 7 dias')
plt.xlabel(legenda)
plt.grid(True)
plt.legend(handles=[blue_patch,purple_patch,green_patch,taxa_patch,ativos_patch])
# Figure 2: daily new cases and deaths.
f2 = plt.figure(2)
orange_patch = mpatches.Patch(color='orange',label='Casos diários')
red_patch = mpatches.Patch(color='red',label='Óbitos confirmados')
mortes_patch = mpatches.Patch(color='white',label=string_mortes)
plt.plot(x,y_mortes,color='red')
plt.plot(x,y_casos_dia,color='orange')
plt.ylabel('Média móvel de 7 dias')
plt.xlabel(legenda)
plt.grid(True)
plt.legend(handles=[red_patch,orange_patch,taxa_patch,mortes_patch])
plt.show() | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.patches.Patch",
"pandas.read_excel",
"pandas.DataFrame",
"numpy.cumsum",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((309, 336), 'pandas.read_excel', 'pd.read_excel', (['"""dados.xlsx"""'], {}), "('dados.xlsx')\n", (322, 336), True, 'import pandas as pd\n'), ((1952, 1965), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1962, 1965), True, 'import matplotlib.pyplot as plt\n'), ((1979, 2034), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""blue"""', 'label': '"""Casos confirmados"""'}), "(color='blue', label='Casos confirmados')\n", (1993, 2034), True, 'import matplotlib.patches as mpatches\n'), ((2049, 2101), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""purple"""', 'label': '"""Casos ativos"""'}), "(color='purple', label='Casos ativos')\n", (2063, 2101), True, 'import matplotlib.patches as mpatches\n'), ((2115, 2165), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""green"""', 'label': '"""Recuperados"""'}), "(color='green', label='Recuperados')\n", (2129, 2165), True, 'import matplotlib.patches as mpatches\n'), ((2178, 2233), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""white"""', 'label': 'string_mortalidade'}), "(color='white', label=string_mortalidade)\n", (2192, 2233), True, 'import matplotlib.patches as mpatches\n'), ((2253, 2303), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""white"""', 'label': 'string_ativos'}), "(color='white', label=string_ativos)\n", (2267, 2303), True, 'import matplotlib.patches as mpatches\n'), ((2383, 2417), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_casos'], {'color': '"""blue"""'}), "(x, y_casos, color='blue')\n", (2391, 2417), True, 'import matplotlib.pyplot as plt\n'), ((2416, 2457), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_recuperados'], {'color': '"""green"""'}), "(x, y_recuperados, color='green')\n", (2424, 2457), True, 'import matplotlib.pyplot as plt\n'), ((2456, 2499), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_casos_ativos'], {'color': '"""purple"""'}), "(x, y_casos_ativos, color='purple')\n", (2464, 2499), 
True, 'import matplotlib.pyplot as plt\n'), ((2499, 2534), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Média móvel de 7 dias"""'], {}), "('Média móvel de 7 dias')\n", (2509, 2534), True, 'import matplotlib.pyplot as plt\n'), ((2536, 2555), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['legenda'], {}), '(legenda)\n', (2546, 2555), True, 'import matplotlib.pyplot as plt\n'), ((2556, 2570), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2564, 2570), True, 'import matplotlib.pyplot as plt\n'), ((2571, 2660), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[blue_patch, purple_patch, green_patch, taxa_patch, ativos_patch]'}), '(handles=[blue_patch, purple_patch, green_patch, taxa_patch,\n ativos_patch])\n', (2581, 2660), True, 'import matplotlib.pyplot as plt\n'), ((2659, 2672), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (2669, 2672), True, 'import matplotlib.pyplot as plt\n'), ((2688, 2741), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""orange"""', 'label': '"""Casos diários"""'}), "(color='orange', label='Casos diários')\n", (2702, 2741), True, 'import matplotlib.patches as mpatches\n'), ((2753, 2808), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""red"""', 'label': '"""Óbitos confirmados"""'}), "(color='red', label='Óbitos confirmados')\n", (2767, 2808), True, 'import matplotlib.patches as mpatches\n'), ((2823, 2873), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""white"""', 'label': 'string_mortes'}), "(color='white', label=string_mortes)\n", (2837, 2873), True, 'import matplotlib.patches as mpatches\n'), ((2874, 2908), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_mortes'], {'color': '"""red"""'}), "(x, y_mortes, color='red')\n", (2882, 2908), True, 'import matplotlib.pyplot as plt\n'), ((2907, 2947), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_casos_dia'], {'color': '"""orange"""'}), "(x, y_casos_dia, color='orange')\n", (2915, 2947), True, 
'import matplotlib.pyplot as plt\n'), ((2947, 2982), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Média móvel de 7 dias"""'], {}), "('Média móvel de 7 dias')\n", (2957, 2982), True, 'import matplotlib.pyplot as plt\n'), ((2984, 3003), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['legenda'], {}), '(legenda)\n', (2994, 3003), True, 'import matplotlib.pyplot as plt\n'), ((3004, 3018), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3012, 3018), True, 'import matplotlib.pyplot as plt\n'), ((3019, 3090), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[red_patch, orange_patch, taxa_patch, mortes_patch]'}), '(handles=[red_patch, orange_patch, taxa_patch, mortes_patch])\n', (3029, 3090), True, 'import matplotlib.pyplot as plt\n'), ((3089, 3099), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3097, 3099), True, 'import matplotlib.pyplot as plt\n'), ((161, 186), 'numpy.cumsum', 'np.cumsum', (['a'], {'dtype': 'float'}), '(a, dtype=float)\n', (170, 186), True, 'import numpy as np\n'), ((1025, 1154), 'pandas.DataFrame', 'pd.DataFrame', (['dados_atualizados'], {'index': "['Casos Confirmados', 'Óbitos', 'Recuperados', 'Dia', 'Casos Diários',\n 'Casos Ativos']"}), "(dados_atualizados, index=['Casos Confirmados', 'Óbitos',\n 'Recuperados', 'Dia', 'Casos Diários', 'Casos Ativos'])\n", (1037, 1154), True, 'import pandas as pd\n')] |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from numpy.random import RandomState
from numpy.random import mtrand
def randomu(seed, di=None, binomial=None, double=False, gamma=False,
            normal=False, poisson=False):
    """
    Replicates the randomu function avaiable within IDL
    (Interactive Data Language, EXELISvis).
    Returns an array of uniformly distributed random numbers of the
    specified dimensions.
    The randomu function returns one or more pseudo-random numbers
    with one or more of the following distributions:
    Uniform (default)
    Gaussian
    binomial
    gamma
    poisson
    :param seed:
        If seed is not of type mtrand.RandomState, then a new state is
        initialised. Othersise seed will be used to generate the random
        values.
    :param di:
        A list specifying the dimensions of the resulting array. If di
        is a scalar then randomu returns a scalar.
        Dimensions are D1, D2, D3...D8 (x,y,z,lambda...).
        The list will be inverted to suit Python's inverted dimensions
        i.e. (D3,D2,D1).
    :param binomial:
        Set this keyword to a list of length 2, [n,p], to generate
        random deviates from a binomial distribution. If an event
        occurs with probablility p, with n trials, then the number of
        times it occurs has a binomial distribution.
    :param double:
        If set to True, then randomu will return a double precision
        random numbers.
    :param gamma:
        Set this keyword to an integer order i > 0 to generate random
        deviates from a gamm distribution.
    :param normal:
        If set to True, then random deviates will be generated from a
        normal distribution.
    :param poisson:
        Set this keyword to the mean number of events occurring during
        a unit of time. The poisson keword returns a random deviate
        drawn from a poisson distribution with that mean.
    :return:
        A NumPy array of uniformly distributed random numbers of the
        specified dimensions, and the RandomState used to draw them.
    :raises TypeError:
        If di is neither a list nor None.
    :raises ValueError:
        If more than 8 dimensions are given, if binomial is malformed,
        or if more than one distribution keyword is set.
    Example:
        >>> seed = None
        >>> x, sd = randomu(seed, [10,10])
        >>> x, sd = randomu(seed, [100,100], binomial=[10,0.5])
        >>> x, sd = randomu(seed, [100,100], gamma=2)
        >>> # 200x by 100y array of normally distributed values
        >>> x, sd = randomu(seed, [200,100], normal=True)
        >>> # 1000 deviates from a poisson distribution with a mean of 1.5
        >>> x, sd = randomu(seed, [1000], poisson=1.5)
        >>> # Return a scalar from a uniform distribution
        >>> x, sd = randomu(seed)
    :author:
        <NAME>, <EMAIL>, <EMAIL>
    :copyright:
        Copyright (c) 2014, <NAME>
        All rights reserved. Distributed under the FreeBSD (BSD 2-clause)
        license; see the project's license file for the full terms.
    """
    # Output precision: IDL's randomu defaults to single precision.
    if double:
        dtype = 'float64'
    else:
        dtype = 'float32'

    # Re-use an existing RandomState so repeated calls continue the same
    # pseudo-random stream; otherwise start a fresh, OS-seeded state.
    # (isinstance, unlike the original `type(...) !=`, also accepts
    # RandomState subclasses.)
    # http://stackoverflow.com/questions/5836335/consistenly-create-same-random-numpy-array
    if not isinstance(seed, mtrand.RandomState):
        seed = RandomState()

    if di is not None:
        if type(di) is not list:
            raise TypeError("Dimensions must be a list or None.")
        if len(di) > 8:
            raise ValueError("Error. More than 8 dimensions specified.")
        # Invert the dimensions list: IDL orders axes (x, y, z, ...) while
        # NumPy orders them (..., z, y, x).
        dims = di[::-1]
    else:
        dims = 1

    # Python has issues with overflow:
    # OverflowError: Python int too large to convert to C long
    # Occurs with Long and ULong
    #if Long:
    #    res = seed.random_integers(0, 2**31-1, dims)
    #    if di is None:
    #        res = res[0]
    #    return res, seed
    #if ULong:
    #    res = seed.random_integers(0, 2**32-1, dims)
    #    if di is None:
    #        res = res[0]
    #    return res, seed

    # At most one distribution keyword may be set; fixed the original,
    # which printed a message and returned None instead of the documented
    # (result, seed) tuple.
    distributions = 0
    kwds = [binomial, gamma, normal, poisson]
    for kwd in kwds:
        if kwd:
            distributions += 1
    if distributions > 1:
        raise ValueError("Conflicting keywords: set at most one distribution.")

    if binomial:
        if len(binomial) != 2:
            msg = "Error. binomial must contain [n,p] trials & probability."
            raise ValueError(msg)
        n = binomial[0]
        p = binomial[1]
        res = seed.binomial(n, p, dims)
    elif gamma:
        res = seed.gamma(gamma, size=dims)
    elif normal:
        res = seed.normal(0, 1, dims)
    elif poisson:
        res = seed.poisson(poisson, dims)
    else:
        res = seed.uniform(size=dims)

    res = res.astype(dtype)

    # No dimensions given: unwrap the length-1 array to a scalar.
    if di is None:
        res = res[0]

    return res, seed
| [
"numpy.random.RandomState"
] | [((5125, 5138), 'numpy.random.RandomState', 'RandomState', ([], {}), '()\n', (5136, 5138), False, 'from numpy.random import RandomState\n')] |
import os
import numpy as np
def get_all_file(filepath):
    """Recursively collect paths of files containing 'acc_' under *filepath*.

    Args:
        filepath: root directory to scan.

    Returns:
        Sorted list of matching file paths (including subdirectories).
    """
    file_list = []
    for fi in os.listdir(filepath):
        fi_d = os.path.join(filepath, fi)
        if os.path.isdir(fi_d):
            # Bug fix: the original discarded the recursive call's return
            # value, so 'acc_' files inside subdirectories were silently lost.
            file_list.extend(get_all_file(fi_d))
        elif 'acc_' in fi_d:
            file_list.append(fi_d)
    file_list.sort()
    return file_list
def add_label(file_list):
    """Return labels counting down from ``len(file_list) - 1`` to ``0``."""
    return list(range(len(file_list) - 1, -1, -1))
def get_one_bearing(path):
    """Return ``(files, labels)`` for one bearing directory.

    ``files`` is the sorted 'acc_' file list from :func:`get_all_file`;
    ``labels`` is its countdown labelling from :func:`add_label`.
    """
    file_list = get_all_file(path)
    return file_list, add_label(file_list)
def get_all_bearings_list(path_list):
    """Gather samples and labels from every bearing directory under each path.

    Args:
        path_list: list of dataset root directories; each immediate
            subdirectory is treated as one bearing.

    Returns:
        Tuple ``(x_all, y_all)`` of flat numpy arrays (file paths, labels).
    """
    x_all = []
    y_all = []
    for path in path_list:
        for fi in os.listdir(path):
            fi_d = os.path.join(path, fi)
            if os.path.isdir(fi_d):
                one_bearing_x, one_bearing_y = get_one_bearing(fi_d)
                x_all.append(one_bearing_x)
                y_all.append(one_bearing_y)
    try:
        x_all = np.concatenate(x_all)
        y_all = np.concatenate(y_all)
    except ValueError:
        # np.concatenate raises ValueError for an empty/ragged sequence;
        # the original bare `except:` treated this as the one-bearing case.
        print('just one bearing!')
    x_all = np.reshape(x_all, (-1,))
    y_all = np.reshape(y_all, (-1,))
    return x_all, y_all
def get_all_bearings(path):
    """Gather samples and labels from all bearing subdirectories of *path*.

    Args:
        path: dataset root directory; each immediate subdirectory is
            treated as one bearing.

    Returns:
        Tuple ``(x_all, y_all)`` of flat numpy arrays (file paths, labels).
    """
    x_all = []
    y_all = []
    for fi in os.listdir(path):
        fi_d = os.path.join(path, fi)
        if os.path.isdir(fi_d):
            # NOTE(review): joining fi_d with itself is kept from the
            # original. For an absolute `path` the second component wins and
            # this is simply fi_d; for a relative `path` it yields e.g.
            # "a/b/a/b" -- presumably matching the FEMTO layout, verify.
            one_bearing_file = os.path.join(fi_d, fi_d)
            one_bearing_x, one_bearing_y = get_one_bearing(one_bearing_file)
            x_all.append(one_bearing_x)
            y_all.append(one_bearing_y)
    x_all = np.concatenate(x_all)
    x_all = np.reshape(x_all, (-1,))
    y_all = np.concatenate(y_all)
    y_all = y_all.reshape(-1)
    return x_all, y_all
if __name__ == "__main__":
    # Alternate Windows location:
    # r'G:\Bearing\bearing\data_set\FEMTOBearingDataSet\Training_set\Learning_set'
    data_root = r'/home/sunyaqiang/Myfile/bearing/data_set/FEMTOBearingDataSet/Test_set/Test_set'
    samples, labels = get_all_bearings(data_root)
    print(samples, labels)
"os.listdir",
"numpy.reshape",
"os.path.join",
"os.path.isdir",
"numpy.concatenate"
] | [((69, 89), 'os.listdir', 'os.listdir', (['filepath'], {}), '(filepath)\n', (79, 89), False, 'import os\n'), ((1494, 1518), 'numpy.reshape', 'np.reshape', (['x_all', '(-1,)'], {}), '(x_all, (-1,))\n', (1504, 1518), True, 'import numpy as np\n'), ((1562, 1583), 'numpy.reshape', 'np.reshape', (['y_all', '(-1)'], {}), '(y_all, -1)\n', (1572, 1583), True, 'import numpy as np\n'), ((1673, 1689), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1683, 1689), False, 'import os\n'), ((2055, 2076), 'numpy.concatenate', 'np.concatenate', (['x_all'], {}), '(x_all)\n', (2069, 2076), True, 'import numpy as np\n'), ((2089, 2113), 'numpy.reshape', 'np.reshape', (['x_all', '(-1,)'], {}), '(x_all, (-1,))\n', (2099, 2113), True, 'import numpy as np\n'), ((2125, 2146), 'numpy.concatenate', 'np.concatenate', (['y_all'], {}), '(y_all)\n', (2139, 2146), True, 'import numpy as np\n'), ((145, 171), 'os.path.join', 'os.path.join', (['filepath', 'fi'], {}), '(filepath, fi)\n', (157, 171), False, 'import os\n'), ((183, 202), 'os.path.isdir', 'os.path.isdir', (['fi_d'], {}), '(fi_d)\n', (196, 202), False, 'import os\n'), ((959, 975), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (969, 975), False, 'import os\n'), ((1374, 1395), 'numpy.concatenate', 'np.concatenate', (['x_all'], {}), '(x_all)\n', (1388, 1395), True, 'import numpy as np\n'), ((1412, 1433), 'numpy.concatenate', 'np.concatenate', (['y_all'], {}), '(y_all)\n', (1426, 1433), True, 'import numpy as np\n'), ((1775, 1797), 'os.path.join', 'os.path.join', (['path', 'fi'], {}), '(path, fi)\n', (1787, 1797), False, 'import os\n'), ((1809, 1828), 'os.path.isdir', 'os.path.isdir', (['fi_d'], {}), '(fi_d)\n', (1822, 1828), False, 'import os\n'), ((1020, 1042), 'os.path.join', 'os.path.join', (['path', 'fi'], {}), '(path, fi)\n', (1032, 1042), False, 'import os\n'), ((1058, 1077), 'os.path.isdir', 'os.path.isdir', (['fi_d'], {}), '(fi_d)\n', (1071, 1077), False, 'import os\n'), ((1861, 1885), 'os.path.join', 
'os.path.join', (['fi_d', 'fi_d'], {}), '(fi_d, fi_d)\n', (1873, 1885), False, 'import os\n')] |
import numpy as np
import pandas as pd
from datetime import datetime as dt, timedelta
import sys
N_DATASETS = 3
args = sys.argv[1:]

if __name__ == "__main__":
    # A single command-line argument overrides the number of datasets.
    if len(args) == 1:
        N_DATASETS = int(args[0])
    for i in range(1, N_DATASETS + 1):
        # Each dataset spans a random number of days starting from "now".
        start = dt.now()
        span = int(np.random.randint(low=20, high=100, size=1)[0])
        stamps = pd.date_range(start, start + timedelta(days=span))
        readings = np.random.randint(low=0, high=1000, size=span)
        # zip() truncates to the shorter sequence (the `span` readings).
        frame = pd.DataFrame(zip(stamps, readings), columns=['timestamp', 'value'])
        frame.to_csv(f'random_data_{i}.csv', index=False)
"datetime.datetime.now",
"numpy.random.randint",
"pandas.date_range",
"datetime.timedelta"
] | [((277, 285), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (283, 285), True, 'from datetime import datetime as dt, timedelta\n'), ((420, 455), 'pandas.date_range', 'pd.date_range', (['start_date', 'end_date'], {}), '(start_date, end_date)\n', (433, 455), True, 'import pandas as pd\n'), ((472, 518), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(1000)', 'size': 'days'}), '(low=0, high=1000, size=days)\n', (489, 518), True, 'import numpy as np\n'), ((383, 403), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (392, 403), False, 'from datetime import datetime as dt, timedelta\n'), ((305, 348), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(20)', 'high': '(100)', 'size': '(1)'}), '(low=20, high=100, size=1)\n', (322, 348), True, 'import numpy as np\n')] |
"""
Compute the principal eigenvector of a matrix using power iteration.
See also numpy.linalg.eig which calculates all the eigenvalues and
eigenvectors.
"""
from typing import Tuple
import numpy as np
def _normalise(nvec: np.ndarray) -> np.ndarray:
    """Scale *nvec* to unit Euclidean length.

    A zero vector divides 0/0 and yields NaNs; the invalid-value warning
    is deliberately suppressed.
    """
    with np.errstate(invalid="ignore"):
        scaled = nvec / np.sqrt(nvec @ nvec)
    return scaled
def _squared_error(vector_1: np.ndarray, vector_2: np.ndarray) -> float:
    """Return the Euclidean distance between the two numpy arrays.

    NOTE(review): despite the name, this returns the *square root* of the
    squared error (i.e. the L2 distance), matching the original behaviour.
    """
    residual = vector_1 - vector_2
    return np.sqrt(residual @ residual)
def _power_iteration(mat: np.array, initial: np.ndarray) -> np.ndarray:
    """
    Generator of successive normalised power-iteration approximations.

    Params
    ------
    mat: numpy.array
        The matrix to use for multiplication iteration
    initial: numpy.array, None
        The initial state. Will be set to np.array([1, 1, ...]) if None

    Yields
    ------
    Successive normalised powers (mat ^ k) * initial
    """
    current = initial
    while True:
        current = _normalise(mat @ current)
        yield current
def principal_eigenvector(
    mat: np.array, maximum_iterations=1000, max_error=1e-3
) -> Tuple[np.ndarray, float]:
    """
    Estimate the (normalised) principal eigenvector of the given matrix.

    Params
    ------
    mat: numpy.array
        The matrix to use for multiplication iteration
    maximum_iterations: int, None
        The maximum number of iterations of the approximation
    max_error: float, 1e-8
        Exit criterion -- error threshold of the difference of successive steps

    Returns
    -------
    ndarray
        Eigenvector estimate for the input matrix
    float
        Eigenvalue corresponding to the returned eigenvector
    """
    matrix = np.array(mat)
    guess = np.ones(matrix.shape[0])
    # Treat a falsy iteration cap (None/0) as "iterate until converged".
    if not maximum_iterations:
        maximum_iterations = float("inf")
    previous = guess
    for step, vector in enumerate(_power_iteration(mat, initial=guess)):
        if step > maximum_iterations:
            break
        if _squared_error(vector, previous) < max_error:
            break
        previous = vector
    # Rayleigh quotient: eigenvalue estimate for `vector`, as a plain float.
    eigenvalue = float(((matrix @ vector) @ vector) / (vector @ vector))
    return vector, eigenvalue
| [
"numpy.sqrt",
"numpy.ones",
"numpy.errstate",
"numpy.array",
"numpy.dot"
] | [((604, 614), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (611, 614), True, 'import numpy as np\n'), ((1792, 1805), 'numpy.array', 'np.array', (['mat'], {}), '(mat)\n', (1800, 1805), True, 'import numpy as np\n'), ((1845, 1858), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (1852, 1858), True, 'import numpy as np\n'), ((308, 337), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (319, 337), True, 'import numpy as np\n'), ((363, 383), 'numpy.sqrt', 'np.sqrt', (['(nvec @ nvec)'], {}), '(nvec @ nvec)\n', (370, 383), True, 'import numpy as np\n'), ((1076, 1092), 'numpy.dot', 'np.dot', (['mat', 'vec'], {}), '(mat, vec)\n', (1082, 1092), True, 'import numpy as np\n')] |
"""CIFAR10
****************************************
This is a dataset of 50,000 32x32 color training images and 10,000 test
images, labeled over 10 categories. See more info at the
`CIFAR homepage <https://www.cs.toronto.edu/~kriz/cifar.html>`_
The classes are:
- airplane
- automobile
- bird
- cat
- deer
- dog
- frog
- horse
- ship
- truck
Returns:
Tuple of NumPy arrays: ``(x_train, y_train), (x_test, y_test)``.
**x_train**: uint8 NumPy array of grayscale image data with shapes
``(50000, 32, 32, 3)``, containing the training data. Pixel values range
from 0 to 255.
**y_train**: uint8 NumPy array of labels (integers in range 0-9)
with shape ``(50000, 1)`` for the training data.
**x_test**: uint8 NumPy array of grayscale image data with shapes
(10000, 32, 32, 3), containing the test data. Pixel values range
from 0 to 255.
**y_test**: uint8 NumPy array of labels (integers in range 0-9)
with shape ``(10000, 1)`` for the test data.
Example:
.. code-block::
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
assert x_train.shape == (50000, 32, 32, 3)
assert x_test.shape == (10000, 32, 32, 3)
assert y_train.shape == (50000, 1)
assert y_test.shape == (10000, 1)
"""
import os
from typing import Tuple
import numpy as np
import pickle
from tensorflow.keras import backend
from mltk.utils.archive_downloader import download_verify_extract
from mltk.utils.path import create_user_dir
from mltk.utils.logger import get_logger
from keras_preprocessing.image.utils import array_to_img
INPUT_SHAPE = (32,32,3)
CLASSES = [
'airplane',
'automobile',
'bird',
'cat',
'deer',
'dog',
'frog',
'horse',
'ship',
'truck'
]
def load_data() -> Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]:
    """Download the dataset, extract, load into memory,
    and return as a tuple of numpy arrays

    Returns:
        Tuple of NumPy arrays: ``(x_train, y_train), (x_test, y_test)``
    """
    # Fetch the official CIFAR-10 python archive (hash-verified) and extract
    # it under the user's datasets directory; `path` is the extraction dir.
    path = download_verify_extract(
        url='https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
        dest_subdir='datasets/cifar10',
        file_hash='6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce',
        show_progress=True,
        remove_root_dir=True
    )

    num_train_samples = 50000

    # Pre-allocate training arrays in channels-first (N, C, H, W) layout,
    # matching the on-disk batch format.
    x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
    y_train = np.empty((num_train_samples,), dtype='uint8')

    # The training set ships as five pickled batches of 10000 samples each.
    for i in range(1, 6):
        fpath = os.path.join(path, 'data_batch_' + str(i))
        (x_train[(i - 1) * 10000:i * 10000, :, :, :],
         y_train[(i - 1) * 10000:i * 10000]) = _load_batch(fpath)

    fpath = os.path.join(path, 'test_batch')
    x_test, y_test = _load_batch(fpath)

    # Reshape labels into (N, 1) column vectors.
    y_train = np.reshape(y_train, (len(y_train), 1))
    y_test = np.reshape(y_test, (len(y_test), 1))

    # Transpose to (N, H, W, C) when the Keras backend is channels-last.
    if backend.image_data_format() == 'channels_last':
        x_train = x_train.transpose(0, 2, 3, 1)
        x_test = x_test.transpose(0, 2, 3, 1)

    # Ensure the test split uses the same dtypes as the training split.
    x_test = x_test.astype(x_train.dtype)
    y_test = y_test.astype(y_train.dtype)

    return (x_train, y_train), (x_test, y_test)
def load_data_directory() -> str:
    """Download the dataset, extract all sample images to a directory,
    and return the path to the directory.

    Each sample type is extract to its corresponding subdirectory, e.g.:

    ~/.mltk/datasets/cifar10/airplane
    ~/.mltk/datasets/cifar10/automobile
    ...

    Returns:
        Path to extract directory:
    """
    dataset_dir = f'{create_user_dir()}/datasets/cifar10'

    (x_train, y_train), (x_test, y_test) = load_data()

    # The directory layout does not separate train/test, so combine them.
    x_samples = np.concatenate((x_train, x_test))
    y_samples = np.concatenate((y_train, y_test))

    # Per-class sample counts, used below to detect a partial/stale extract.
    class_ids, class_counts = np.unique(y_samples, return_counts=True)
    expected_class_counts = {}
    for class_id, class_count in zip(class_ids, class_counts):
        expected_class_counts[CLASSES[class_id]] = class_count

    for class_id, class_label in enumerate(CLASSES):
        dataset_class_dir = f'{dataset_dir}/{class_label}'
        os.makedirs(dataset_class_dir, exist_ok=True)
        class_count = len(os.listdir(dataset_class_dir))
        # Only (re)generate a class directory whose file count is wrong,
        # i.e. a previous extract was incomplete or never ran.
        if class_count != expected_class_counts[class_label]:
            get_logger().warning(f'Generating {dataset_class_dir}')
            sample_count = 0
            for x, y in zip(x_samples, y_samples):
                if class_id != y:
                    continue
                sample_path = f'{dataset_class_dir}/{sample_count}.jpg'
                sample_count += 1
                img = array_to_img(x, scale=False, dtype='uint8')
                img.save(sample_path)

    return dataset_dir
def _load_batch(fpath, label_key='labels'):
    """Parse one pickled CIFAR batch file.

    Arguments:
        fpath: path of the file to parse.
        label_key: key for the label entry in the unpickled dictionary.

    Returns:
        A tuple ``(data, labels)`` where ``data`` has shape ``(N, 3, 32, 32)``.
    """
    with open(fpath, 'rb') as f:
        raw = pickle.load(f, encoding='bytes')
    # The batches were pickled under Python 2, so the dict keys come back as
    # bytes; decode them to str for normal string-key access.
    batch = {key.decode('utf8'): value for key, value in raw.items()}
    images = batch['data']
    images = images.reshape(images.shape[0], 3, 32, 32)
    return images, batch[label_key]
"os.listdir",
"numpy.unique",
"os.makedirs",
"mltk.utils.logger.get_logger",
"keras_preprocessing.image.utils.array_to_img",
"os.path.join",
"pickle.load",
"numpy.empty",
"mltk.utils.archive_downloader.download_verify_extract",
"numpy.concatenate",
"mltk.utils.path.create_user_dir",
"tensorflo... | [((2006, 2263), 'mltk.utils.archive_downloader.download_verify_extract', 'download_verify_extract', ([], {'url': '"""https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"""', 'dest_subdir': '"""datasets/cifar10"""', 'file_hash': '"""6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce"""', 'show_progress': '(True)', 'remove_root_dir': '(True)'}), "(url=\n 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz', dest_subdir=\n 'datasets/cifar10', file_hash=\n '6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce',\n show_progress=True, remove_root_dir=True)\n", (2029, 2263), False, 'from mltk.utils.archive_downloader import download_verify_extract\n'), ((2336, 2391), 'numpy.empty', 'np.empty', (['(num_train_samples, 3, 32, 32)'], {'dtype': '"""uint8"""'}), "((num_train_samples, 3, 32, 32), dtype='uint8')\n", (2344, 2391), True, 'import numpy as np\n'), ((2406, 2451), 'numpy.empty', 'np.empty', (['(num_train_samples,)'], {'dtype': '"""uint8"""'}), "((num_train_samples,), dtype='uint8')\n", (2414, 2451), True, 'import numpy as np\n'), ((2670, 2702), 'os.path.join', 'os.path.join', (['path', '"""test_batch"""'], {}), "(path, 'test_batch')\n", (2682, 2702), False, 'import os\n'), ((3636, 3669), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_test)'], {}), '((x_train, x_test))\n', (3650, 3669), True, 'import numpy as np\n'), ((3686, 3719), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {}), '((y_train, y_test))\n', (3700, 3719), True, 'import numpy as np\n'), ((3751, 3791), 'numpy.unique', 'np.unique', (['y_samples'], {'return_counts': '(True)'}), '(y_samples, return_counts=True)\n', (3760, 3791), True, 'import numpy as np\n'), ((2855, 2882), 'tensorflow.keras.backend.image_data_format', 'backend.image_data_format', ([], {}), '()\n', (2880, 2882), False, 'from tensorflow.keras import backend\n'), ((4071, 4116), 'os.makedirs', 'os.makedirs', (['dataset_class_dir'], {'exist_ok': '(True)'}), 
'(dataset_class_dir, exist_ok=True)\n', (4082, 4116), False, 'import os\n'), ((4991, 5023), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (5002, 5023), False, 'import pickle\n'), ((3526, 3543), 'mltk.utils.path.create_user_dir', 'create_user_dir', ([], {}), '()\n', (3541, 3543), False, 'from mltk.utils.path import create_user_dir\n'), ((4143, 4172), 'os.listdir', 'os.listdir', (['dataset_class_dir'], {}), '(dataset_class_dir)\n', (4153, 4172), False, 'import os\n'), ((4577, 4620), 'keras_preprocessing.image.utils.array_to_img', 'array_to_img', (['x'], {'scale': '(False)', 'dtype': '"""uint8"""'}), "(x, scale=False, dtype='uint8')\n", (4589, 4620), False, 'from keras_preprocessing.image.utils import array_to_img\n'), ((4249, 4261), 'mltk.utils.logger.get_logger', 'get_logger', ([], {}), '()\n', (4259, 4261), False, 'from mltk.utils.logger import get_logger\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.