code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#grass76 C:\Users\Usuario\Documents\Sequia\Tesis_Guajira\Tesis_Guajira\PERMANENT
'''
8day_ts@PERMANENT
ET@PERMANENT
LAI@PERMANENT
P8d_agg@PERMANENT
Precipitacion@PERMANENT
et@PERMANENT
et_pre@PERMANENT
evi_esc@PERMANENT
evi_pre@PERMANENT
lai_pre@PERMANENT
lst_esc@PERMANENT
lst_pre@PERMANENT
mir_esc@PERMANENT
mir_pre@PERMANENT
ndvi_esc@PERMANENT
ndvi_pre@PERMANENT
nir_esc@PERMANENT
nir_pre@PERMANENT'''
#########################################
# definimos las funciones de grass format - numpy y viceversa
import numpy as np
from grass.pygrass.raster.buffer import Buffer
from grass.pygrass.gis.region import Region
def raster2numpy(rastname, mapset=''):
"""Return a numpy array from a raster map"""
with RasterRow(rastname, mapset=mapset, mode='r') as rast:
return np.array(rast)
def numpy2raster(array, mtype, rastname, overwrite=False):
"""Save a numpy array to a raster map"""
reg = Region()
if (reg.rows, reg.cols) != array.shape:
msg = "Region and array are different: %r != %r"
raise TypeError(msg % ((reg.rows, reg.cols), array.shape))
with RasterRow(rastname, mode='w', mtype=mtype, overwrite=overwrite) as new:
newrow = Buffer((array.shape[1],), mtype=mtype)
for row in array:
newrow[:] = row[:]
new.put_row(newrow)
################################################################################
################################################################################
# importamos las librerias para el procesamiento
from grass.pygrass.raster import RasterRow
import matplotlib.pyplot as plt
import grass.temporal as tgis
import datetime
# realizamos la conexion con la base de datos temporal
tgis.init()
dbif = tgis.SQLDatabaseInterfaceConnection()
dbif.connect()
'''
# creamos el strds que debemos rellenar
ndwi = 'nddi'
dataset = tgis.open_new_stds(name=ndwi, type='strds', temporaltype='absolute',
title="NDWI MODIS 8 dias", descr="NDWI de Modis cada 8 dias",
semantic='mean', overwrite=True)
'''
dataset_name = 'nddi@PERMANENT'
dataset = tgis.open_old_stds(dataset_name, "strds",dbif=dbif)
# Confirmamos la creacion del STRDS
dataset.print_shell_info()
# abrimos los antiguos strds para el calculo
#nir
ndwi = 'ndwi@PERMANENT'
ndwi_strds = tgis.open_old_stds(ndwi, "strds",dbif=dbif)
ndwi_strds.get_registered_maps(columns='name,start_time')
num_ndwi = len(ndwi_strds.get_registered_maps(columns='name,start_time'))
#dtdelta = datetime.timedelta(days = int(7))
#mir
ndvi = 'ndvi_esc@PERMANENT'
ndvi_strds = tgis.open_old_stds(ndvi, "strds",dbif=dbif)
ndvi_strds.get_registered_maps(columns='name,start_time')
num_ndvi = len(ndvi_strds.get_registered_maps(columns='name,start_time'))
# calculamos el ndwi
for i in range(num_ndvi):
fec1 = ndwi_strds.get_registered_maps(columns='name,start_time')[i][1]
ndwi_raster= ndwi_strds.get_registered_maps(columns='name,start_time')[i][0]
ndwi_map= raster2numpy(ndwi_raster, mapset='PERMANENT')
fec2 = ndvi_strds.get_registered_maps(columns='name,start_time')[i][1]
ndvi_raster= ndvi_strds.get_registered_maps(columns='name,start_time')[i][0]
ndvi_map= raster2numpy(ndvi_raster, mapset='PERMANENT')
nddi = (ndvi_map-ndwi_map)/(ndvi_map+ndwi_map)
print(nddi)
#nombre='NDDI_'+str(i)+'_'
#numpy2raster(nddi, mtype='FCELL', rastname=nombre, overwrite=True)
#fech=fec1
#fecha = fech.strftime("%Y") +'-'+fech.strftime("%m")+'-'+fech.strftime("%d")
#tgis.register_maps_in_space_time_dataset(type='raster',name=dataset_name,maps=nombre,start=fecha,interval=True,update_cmd_list=True)
#dataset.update_from_registered_maps()
#dataset.print_shell_info()
# mostramos la librerias instaladas
import torch
print('torch version:')
print(torch.__version__)
import sklearn
print('sklear version:')
print(sklearn.__version__)
import xgboost
print('xgboost version:')
print(xgboost.__version__)
import pickle
print('pickle version:')
#pickle.__version__
'''
for i in range(num-1):
fec= strds.get_registered_maps(columns='name,start_time')[i][1]
raster= strds.get_registered_maps(columns='name,start_time')[i][0]
#map = garray.array(mapname=raster)
#map = RasterRow(raster,mapset='PERMANENT')
map= raster2numpy(raster, mapset='PERMANENT')
fecha2= strds.get_registered_maps(columns='name,start_time')[i+1][1]
raster2= strds.get_registered_maps(columns='name,start_time')[i+1][0]
#map2 = garray.array(mapname=raster2)
map2= raster2numpy(raster2, mapset='PERMANENT')
prom = (map+map2)/2
nombre='EVI_relleno_'+str(i)+'_'
#prom.write(mapname=nombre, overwrite=True)
numpy2raster(prom, mtype='FCELL', rastname=nombre, overwrite=True)
#promedio.append(prom)
fech=fec+dtdelta
fecha = fech.strftime("%Y") +'-'+fech.strftime("%m")+'-'+fech.strftime("%d")
tgis.register_maps_in_space_time_dataset(type='raster',name=nombre,maps=nombre,start=fecha,interval=True,update_cmd_list=True)
dataset.update_from_registered_maps()
dataset.print_shell_info()
'''
| [
"grass.temporal.open_old_stds",
"grass.temporal.init",
"grass.pygrass.raster.RasterRow",
"grass.temporal.SQLDatabaseInterfaceConnection",
"numpy.array",
"grass.pygrass.gis.region.Region",
"grass.pygrass.raster.buffer.Buffer"
] | [((1719, 1730), 'grass.temporal.init', 'tgis.init', ([], {}), '()\n', (1728, 1730), True, 'import grass.temporal as tgis\n'), ((1738, 1775), 'grass.temporal.SQLDatabaseInterfaceConnection', 'tgis.SQLDatabaseInterfaceConnection', ([], {}), '()\n', (1773, 1775), True, 'import grass.temporal as tgis\n'), ((2127, 2179), 'grass.temporal.open_old_stds', 'tgis.open_old_stds', (['dataset_name', '"""strds"""'], {'dbif': 'dbif'}), "(dataset_name, 'strds', dbif=dbif)\n", (2145, 2179), True, 'import grass.temporal as tgis\n'), ((2334, 2378), 'grass.temporal.open_old_stds', 'tgis.open_old_stds', (['ndwi', '"""strds"""'], {'dbif': 'dbif'}), "(ndwi, 'strds', dbif=dbif)\n", (2352, 2378), True, 'import grass.temporal as tgis\n'), ((2602, 2646), 'grass.temporal.open_old_stds', 'tgis.open_old_stds', (['ndvi', '"""strds"""'], {'dbif': 'dbif'}), "(ndvi, 'strds', dbif=dbif)\n", (2620, 2646), True, 'import grass.temporal as tgis\n'), ((923, 931), 'grass.pygrass.gis.region.Region', 'Region', ([], {}), '()\n', (929, 931), False, 'from grass.pygrass.gis.region import Region\n'), ((723, 767), 'grass.pygrass.raster.RasterRow', 'RasterRow', (['rastname'], {'mapset': 'mapset', 'mode': '"""r"""'}), "(rastname, mapset=mapset, mode='r')\n", (732, 767), False, 'from grass.pygrass.raster import RasterRow\n'), ((792, 806), 'numpy.array', 'np.array', (['rast'], {}), '(rast)\n', (800, 806), True, 'import numpy as np\n'), ((1109, 1172), 'grass.pygrass.raster.RasterRow', 'RasterRow', (['rastname'], {'mode': '"""w"""', 'mtype': 'mtype', 'overwrite': 'overwrite'}), "(rastname, mode='w', mtype=mtype, overwrite=overwrite)\n", (1118, 1172), False, 'from grass.pygrass.raster import RasterRow\n'), ((1198, 1236), 'grass.pygrass.raster.buffer.Buffer', 'Buffer', (['(array.shape[1],)'], {'mtype': 'mtype'}), '((array.shape[1],), mtype=mtype)\n', (1204, 1236), False, 'from grass.pygrass.raster.buffer import Buffer\n')] |
import warnings
import numpy as np
from exetera.core import validation as val
def combined_hcw_with_contact(datastore,
healthcare_professional, contact_health_worker,
is_carer_for_community,
group, name):
"""
Deprecated, please use combined_healthcare_worker.combined_hcw_with_contact_v1().
"""
warnings.warn("deprecated", DeprecationWarning)
raw_hcp = val.raw_array_from_parameter(datastore, 'healthcare_professional',
healthcare_professional)
filter_ = np.where(raw_hcp == 0,
0,
np.where(raw_hcp == 1,
1,
np.where(raw_hcp < 4,
2,
3)))
raw_chw = val.raw_array_from_parameter(datastore, 'contact_health_worker',
contact_health_worker)
filter_ = np.maximum(filter_, np.where(raw_chw == 2, 3, raw_chw))
raw_icfc = val.raw_array_from_parameter(datastore, 'is_carer_for_community',
is_carer_for_community)
filter_ = np.maximum(filter_,
np.where(raw_icfc == 2, 3, raw_icfc))
key = {'': 0, 'no': 1, 'yes_no_contact': 2, 'yes_contact': 3}
hccw = datastore.get_categorical_writer(group, name, categories=key)
hccw.write(filter_)
return hccw
def combined_hcw_with_contact_v1(session,
healthcare_professional, contact_health_worker,
is_carer_for_community,
group, name):
"""
Identify the users in Covid dataset who are health workers with contact history.
:param healthcare_professional: The healthcare_professional column from dataset.
:param contact_health_worker: The contact_health_worker column from dataset.
:param is_carer_for_community: The is_carer_for_community column from dataset.
:param group: The dataframe to store the result field to.
:param name: The name of the result field.
:return: The categorical field which identifying health workers with contact history.
"""
raw_hcp = val.raw_array_from_parameter(session, 'healthcare_professional',
healthcare_professional)
filter_ = np.where(raw_hcp == 0,
0,
np.where(raw_hcp == 1,
1,
np.where(raw_hcp < 4,
2,
3)))
raw_chw = val.raw_array_from_parameter(session, 'contact_health_worker',
contact_health_worker)
filter_ = np.maximum(filter_, np.where(raw_chw == 2, 3, raw_chw))
raw_icfc = val.raw_array_from_parameter(session, 'is_carer_for_community',
is_carer_for_community)
filter_ = np.maximum(filter_,
np.where(raw_icfc == 2, 3, raw_icfc))
key = {'': 0, 'no': 1, 'yes_no_contact': 2, 'yes_contact': 3}
hccw = session.create_categorical(group, name, 'int8', key)
hccw.data.write(filter_)
return hccw
| [
"warnings.warn",
"exetera.core.validation.raw_array_from_parameter",
"numpy.where"
] | [((405, 452), 'warnings.warn', 'warnings.warn', (['"""deprecated"""', 'DeprecationWarning'], {}), "('deprecated', DeprecationWarning)\n", (418, 452), False, 'import warnings\n'), ((467, 562), 'exetera.core.validation.raw_array_from_parameter', 'val.raw_array_from_parameter', (['datastore', '"""healthcare_professional"""', 'healthcare_professional'], {}), "(datastore, 'healthcare_professional',\n healthcare_professional)\n", (495, 562), True, 'from exetera.core import validation as val\n'), ((904, 995), 'exetera.core.validation.raw_array_from_parameter', 'val.raw_array_from_parameter', (['datastore', '"""contact_health_worker"""', 'contact_health_worker'], {}), "(datastore, 'contact_health_worker',\n contact_health_worker)\n", (932, 995), True, 'from exetera.core import validation as val\n'), ((1121, 1214), 'exetera.core.validation.raw_array_from_parameter', 'val.raw_array_from_parameter', (['datastore', '"""is_carer_for_community"""', 'is_carer_for_community'], {}), "(datastore, 'is_carer_for_community',\n is_carer_for_community)\n", (1149, 1214), True, 'from exetera.core import validation as val\n'), ((2324, 2417), 'exetera.core.validation.raw_array_from_parameter', 'val.raw_array_from_parameter', (['session', '"""healthcare_professional"""', 'healthcare_professional'], {}), "(session, 'healthcare_professional',\n healthcare_professional)\n", (2352, 2417), True, 'from exetera.core import validation as val\n'), ((2759, 2848), 'exetera.core.validation.raw_array_from_parameter', 'val.raw_array_from_parameter', (['session', '"""contact_health_worker"""', 'contact_health_worker'], {}), "(session, 'contact_health_worker',\n contact_health_worker)\n", (2787, 2848), True, 'from exetera.core import validation as val\n'), ((2974, 3065), 'exetera.core.validation.raw_array_from_parameter', 'val.raw_array_from_parameter', (['session', '"""is_carer_for_community"""', 'is_carer_for_community'], {}), "(session, 'is_carer_for_community',\n is_carer_for_community)\n", (3002, 
3065), True, 'from exetera.core import validation as val\n'), ((1069, 1103), 'numpy.where', 'np.where', (['(raw_chw == 2)', '(3)', 'raw_chw'], {}), '(raw_chw == 2, 3, raw_chw)\n', (1077, 1103), True, 'import numpy as np\n'), ((1314, 1350), 'numpy.where', 'np.where', (['(raw_icfc == 2)', '(3)', 'raw_icfc'], {}), '(raw_icfc == 2, 3, raw_icfc)\n', (1322, 1350), True, 'import numpy as np\n'), ((2922, 2956), 'numpy.where', 'np.where', (['(raw_chw == 2)', '(3)', 'raw_chw'], {}), '(raw_chw == 2, 3, raw_chw)\n', (2930, 2956), True, 'import numpy as np\n'), ((3165, 3201), 'numpy.where', 'np.where', (['(raw_icfc == 2)', '(3)', 'raw_icfc'], {}), '(raw_icfc == 2, 3, raw_icfc)\n', (3173, 3201), True, 'import numpy as np\n'), ((778, 805), 'numpy.where', 'np.where', (['(raw_hcp < 4)', '(2)', '(3)'], {}), '(raw_hcp < 4, 2, 3)\n', (786, 805), True, 'import numpy as np\n'), ((2633, 2660), 'numpy.where', 'np.where', (['(raw_hcp < 4)', '(2)', '(3)'], {}), '(raw_hcp < 4, 2, 3)\n', (2641, 2660), True, 'import numpy as np\n')] |
from nose.tools import assert_equal, assert_true
from ggplot.tests import image_comparison, cleanup
from ggplot import *
from numpy import linspace
from pandas import DataFrame
df = DataFrame({"blahblahblah": linspace(999, 1111, 9),
"yadayadayada": linspace(999, 1111, 9)})
simple_gg = ggplot(aes(x="blahblahblah", y="yadayadayada"), data=df) + geom_line()
@image_comparison(["all_text"], tol=13)
def test_element_text1():
print(simple_gg + theme(text=element_text(family="serif", face="bold",
size=50, color="red", angle=45)))
@image_comparison(["axis_text"], tol=13)
def test_element_text2():
#print(simple_gg)
print(simple_gg +
theme(text=element_text(face="bold", size=50, color="red")) +
theme(axis_text=element_text(color="green", angle=45)))
@image_comparison(["axis_title"], tol=13)
def test_element_text3():
print (simple_gg +
theme(text=element_text(face="bold", color="red")) +
theme(axis_title=element_text(color="purple", size=50)))
@image_comparison(["axis_title_text"], tol=15)
def test_element_text4():
print(simple_gg +
theme(text=element_text(face="bold", color="red")) +
theme(axis_text_y=element_text(color="green", size=50)) +
theme(axis_title=element_text(color="blue", size=50)))
| [
"ggplot.tests.image_comparison",
"numpy.linspace"
] | [((383, 421), 'ggplot.tests.image_comparison', 'image_comparison', (["['all_text']"], {'tol': '(13)'}), "(['all_text'], tol=13)\n", (399, 421), False, 'from ggplot.tests import image_comparison, cleanup\n'), ((598, 637), 'ggplot.tests.image_comparison', 'image_comparison', (["['axis_text']"], {'tol': '(13)'}), "(['axis_text'], tol=13)\n", (614, 637), False, 'from ggplot.tests import image_comparison, cleanup\n'), ((848, 888), 'ggplot.tests.image_comparison', 'image_comparison', (["['axis_title']"], {'tol': '(13)'}), "(['axis_title'], tol=13)\n", (864, 888), False, 'from ggplot.tests import image_comparison, cleanup\n'), ((1072, 1117), 'ggplot.tests.image_comparison', 'image_comparison', (["['axis_title_text']"], {'tol': '(15)'}), "(['axis_title_text'], tol=15)\n", (1088, 1117), False, 'from ggplot.tests import image_comparison, cleanup\n'), ((212, 234), 'numpy.linspace', 'linspace', (['(999)', '(1111)', '(9)'], {}), '(999, 1111, 9)\n', (220, 234), False, 'from numpy import linspace\n'), ((268, 290), 'numpy.linspace', 'linspace', (['(999)', '(1111)', '(9)'], {}), '(999, 1111, 9)\n', (276, 290), False, 'from numpy import linspace\n')] |
#!/usr/bin/env python
import numpy as np
class Converger:
'''
Class which checks residual and returns a status depending on how
the residual is converging
Status
------
0: The residual being checked has converged due to some specified
tolerance, either atol, rtol or maxitr
1: The residual being checked has a norm which is smaller than the
currently set residual
2: The residual being checked has a norm which is larger than the
currently set residual
3: An invalid value (nan or inf) has been found in the residual
being checked
Attributes
----------
atol: 0 is raised when absolution error is below this value
rtol: 0 is raised when relative error is below this value
maxitr: total number of calls to to 'set_residual' (which should
be the number of steps in model space) before 0 is
raised. maxitr=0 will raise a convergence flag on the first
call, maxitr=1 will raise it on the second call, etc.
norm: callable which takes residual and returns a scalar
error: norm(residual)
'''
def __init__(self,atol=1e-6,rtol=1e-6,maxitr=100,
norm=np.linalg.norm):
self.atol = atol
self.rtol = rtol
self.maxitr = maxitr
self.norm = norm
self.error = np.inf
self.itr = 0
def check(self,residual,set_residual=False):
residual = np.asarray(residual)
error_new = self.norm(residual)
if self.itr >= self.maxitr:
message = 'finished due to maxitr'
out = 0,message
elif not np.isfinite(error_new):
message = 'encountered invalid L2'
out = 3,message
elif error_new <= self.atol:
message = 'converged due to atol: error=%s, itr=%s' % (error_new,self.itr)
out = 0,message
elif abs(error_new - self.error) <= self.rtol:
message = 'converged due to rtol: error=%s, itr=%s' % (error_new,self.itr)
out = 0,message
elif error_new < self.error:
message = 'converging: error=%s, itr=%s' % (error_new,self.itr)
out = 1,message
elif error_new >= self.error:
message = 'diverging: error=%s, itr=%s' % (error_new,self.itr)
out = 2,message
if set_residual == True:
self.set_residual(residual)
return out
def set_residual(self,residual):
self.itr += 1
residual = np.asarray(residual)
self.error = self.norm(residual)
return
class ErrorTracker:
def __init__(self):
self.error_best = np.inf
self.error_last = np.inf
self.error_relative = np.inf
def set(self,error_new):
if error_new < self.error_best:
self.error_best = error_new
self.error_relative = error_new - self.error_last
self.error_last = self.error_new
| [
"numpy.asarray",
"numpy.isfinite"
] | [((1378, 1398), 'numpy.asarray', 'np.asarray', (['residual'], {}), '(residual)\n', (1388, 1398), True, 'import numpy as np\n'), ((2397, 2417), 'numpy.asarray', 'np.asarray', (['residual'], {}), '(residual)\n', (2407, 2417), True, 'import numpy as np\n'), ((1546, 1568), 'numpy.isfinite', 'np.isfinite', (['error_new'], {}), '(error_new)\n', (1557, 1568), True, 'import numpy as np\n')] |
'''
Class that can generate random dummy data (CSV format) for general data tests.
Author: <NAME>
Copyright 2017
Date: 21 December 2017
See README.md for description of usage
'''
import json
import string
import numpy
import random
import datetime
import time
import logging as log
import rstr
class CSVGen:
# Define some Class Constants
defaultRows = 10
defaultNullOdds = 10
minFloat = -2.0 ** 32 # -2 ^ 32
maxFloat = 2.0 ** 32 # 2 ^ 32
minInt = int(minFloat) # Same range as Int, but with decimal point
maxInt = int(maxFloat) # Same range as Int, but with decimal point
minDate = "19700101" # Specify this in YYYYMMDD format
maxDate = "20361231" # Specify this in YYYYMMDD format
minDateTime = "19700101000000" # Specify this in YYYYMMDDHHMMSS format
maxDateTime = "20361231235959" # Specify this in YYYYMMDDHHMMSS format
defaultIntOPFormat = "d"
defaultFloatOPFormat = "0.2f"
defaultStringOPFormat = "s"
defaultDateOPFormat = "s"
defaultDateFormat = "%d/%m/%Y" # e.g. 13/01/2003
defaultDateTimeFormat = "%d/%m/%Y %H:%M:%S" # e.g. 13/01/2003 12:34:12
defaultStringLen = 30 # This is long enough to have something in it, but not too long
def __init__(self, numRows=defaultRows, nullOdds = 10, iFile="", oFile="", verbose=False):
self.nullOdds = nullOdds # 1 in NullOdds chance of generating a NULL
self.numRows = numRows
self.verbose = verbose
if verbose:
log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
else:
log.basicConfig(format="%(levelname)s: %(message)s")
log.info("Validating Schema...")
if (iFile == ""):
log.error("Error: You must specify a schema file argument (iFile=...)")
return
if (oFile == ""):
log.error("Error: You must specify an output CSV file argument (oFile=...)")
return
data = self.valid8Schema(iFile)
if (not data):
return
# Now write out the CSV File
log.info("Writing output file...")
self.writeCSVFile(oFile, data)
'''
For a CSV format, we don't need a table name
We simply go through each column and populate it with dummy data
'''
def writeCSVFile(self, fName, data):
log.info("Creating the CSV file... %s" % fName)
try:
opFile = open(fName, 'w')
except IOError as e:
log.error("Error creating '%s': %s" % (fName, e.strerror))
return None
# The first row contains the column names
for idx, c in enumerate(data['columns']):
if (idx > 0):
opFile.write(', "%s"' % c['name'])
else:
opFile.write('"%s"' % c['name'])
opFile.write("\n")
# Now populate it with some data...
for i in range(self.numRows):
for idx, c in enumerate(data['columns']):
if (idx > 0):
s = ', %s' % (self.generate_column(c))
else:
s = '%s' % (self.generate_column(c))
opFile.write(s)
opFile.write("\n")
log.info("Wrote %d rows" % self.numRows)
opFile.close()
def parseJSON(self, text):
try:
return json.loads(text)
except ValueError as e:
log.error('Invalid JSON: %s' % e)
return None
def validFloat(self, str):
try:
f = float(str)
return True
except ValueError:
return False
def validDate(self, date_text):
try:
datetime.datetime.strptime(date_text, '%Y%m%d')
return True
except ValueError:
return False
def validDateTime(self, date_text):
try:
datetime.datetime.strptime(date_text, '%Y%m%d%H%M%S')
return True
except ValueError:
return False
def valid8Field(self, data, column):
colName = column.get('name')
if (not colName): # name is a mandatory attribute
log.error("Error: Missing column 'name' attribute")
return None
log.info("Validating column %s " % colName)
colType = column.get('type') # type is a mandatory attribute
if (not colType):
log.warning("Error: Column '%s' is missing the 'type' attribute" % colName)
return None
if colType not in '01234':
log.error("Error: Column %s is set to %s. It must be either:" % (colName, colType))
log.error(" 0 - Integer, 1 - Float, 2 - String, 3 - Date, 4 - DateTime")
return None
colMin = column.get('minimum') # Numeric and date columns may have a minimum
colMax = column.get('maximum') # Numeric and date columns may have a maximum
colNULLS = column.get('NULLS') # Allow NULL (i.e. empty) data tp be generated
colIncremental = column.get('incremental') # Use this as an incremental (e.g. unique key) column
colChoice = column.get('choice') # Provide a choice of values
colRatio = column.get('ratio') # Provide a ratio'd choice of values
colDistribution = column.get('distribution') # Provide predefined data skew
if (colDistribution and (colDistribution != "normal" and colDistribution != "uniform")):
log.error("Error: Column '%s': Distribution type must be either 'normal' or 'uniform'" % (colName))
return None
if (colChoice and colRatio):
log.error("Error: You can only specify one of ratio or choice, not both")
return None
if (colRatio):
if (len(colRatio) < 2):
log.error("Error: Column '%s': You must have a 2 part ratio [ [choices], [ratios] ]" % (colName))
return None
if (len(colRatio[0]) != len(colRatio[1])):
log.error("Error: Column '%s': Your ratio choices do not match your ratios" % (colName))
return None
if (colIncremental == "1" and colType != "0"):
log.error("Error: Column '%s': Only Integer columns can be defined as incremental" % (colName))
return None
if colType in "01": # numeric
if (colType == "0"): # Integer
# If no minimum is defined, start the incremental at 1
if (colIncremental and not colMin):
colMin = 1
if (colMin and (colMin.isdigit() == False)):
log.error("Error: Column '%s': Minimum value of '%s' is not an integer" % (colName, colMin))
return None
if (colMax and colMax.isdigit() == False):
log.error("Error: Column '%s': Maximum value of '%s' is not an integer" % (colName, colMax))
return None
if (not colMin):
colMin = self.minInt
if (not colMax):
colMax = self.maxInt
if (colIncremental):
column['counter'] = str(colMin) # Add a counter column to increment from
if (int(colMin) + int(self.numRows)) > int(colMax):
log.error("Error: Column '%s': Incremental row would exceed Maximum value of '%s' for %s rows" % (colName, colMax, self.numRows))
return None
if (colChoice or colRatio):
log.error("Error: Column '%s': You cannot specify a choice of values in an incremental row " % (colName))
return None
if (colType == "1"): # Float/Decimal
if (not colMin):
colMin = self.minFloat
if (not colMax):
colMax = self.maxFloat
if (colMin and self.validFloat(colMin) == False):
log.error("Error: Column '%s': Minimum value of '%s' is not a float" % (colName, colMin))
if (colMax and self.validFloat(colMax) == False):
log.error("Error: Column '%s': Maximum value of '%s' is not a float" % (colName, colMax))
# Test ranges by casting to float. That works for both float and int
if (colMin and colMax) and (float(colMin) > float(colMax)):
log.error("Error: Column '%s': Minimum of %s is greater than Maximum of %s" % (colName, colMin, colMax))
return None
# If minimum specified, but no maximum, set it
if (colMin and not colMax):
if (colType == "0"):
column['maximum'] = self.maxInt
else:
column['maximum'] = self.maxFloat
log.warning("Warning: Column '%s': Minimum specified of %s, but no maximum" % (colName, colMin))
log.warning(' --> Assigning a maximum value of %s' % column['maximum'])
if (not colMin and colMax):
if (colType == "0"): # Integer
column['minimum'] = str(self.minInt)
else: # Float / Decimal
column['minimum'] = str(self.minFloat)
log.warning("Warning: Column '%s': Maximum specified of %s, but no minimum" % (colName, colMax))
log.warning(' --> Assigning a minimum value of %s' % column['minimum'])
if colType in "34": # Date or DateTime
if colType == "3":
if (not colMin):
colMin = self.minDate
if (not colMax):
colMax = self.maxDate
if (self.validDate(colMin) == False):
log.error("Error: Column '%s': Minimum value of '%s' is not valid date in the format YYYYMMDD" % (colName, colMin))
return None
if colType == "4":
if (not colMin):
colMin = self.minDateTime
if (not colMax):
colMax = self.maxDateTime
if (self.validDateTime(colMin) == False):
log.error("Error: Column '%s': Minimum value of '%s' is not valid datetime in the format YYYYMMDDHHMMSS" % (colName, colMin))
return None
if (colMin and not colMax):
column['maximum'] = self.maxDate
log.warning("Warning: Column '%s': Minimum specified of %s, but no maximum" % (colName, colMin))
log.warning(' --> Assigning a maximum value of %s' % column['maximum'])
if (colMax and not colMin):
column['minimum'] = self.minDate
log.warning("Warning: Column '%s': maximum specified of %s, but no minimum" % (colName, colMax))
log.warning(' --> Assigning a minimum value of %s' % column['minimum'])
# We make a huge assumption here that formats are valid. It's pointless going
# overboard trying to valid every possibility
colFormat = column.get('format')
if (not colFormat): # A format is mandatory
log.warning("Warning: Column '%s' is missing the 'format' attribute. A default will be used" % colName)
# Strings
if (colNULLS and colNULLS not in "01"):
log.error("Column '%s': NULLS has value '%s' but can only have a value of '0' or '1'" % (colName, colNULLS))
return column
'''
Load the Schema file and validate its contents
'''
def valid8Schema(self, fname):
try:
file = open(fname, 'r')
except IOError as e:
log.error("Error opening '%s': %s" % (fname, e.strerror))
return None
schema = file.read()
data = self.parseJSON(schema)
if (data == None): # Structural error
return None
# Now confirm mandatory fields are there
# Table must have a name
if (not data.get('name')):
log.error("Validating Schema: You must provide a core name attribute")
return None
columns = data.get('columns')
if (not columns):
log.error("Validating Schema: You must have a columns attribute")
return None
# Remove the columns entries - we'll rebuild them while validating (and adding values)
data.pop('columns')
data['columns']=[]
for c in columns:
if not self.valid8Field(data, c):
log.error('Error in processing column %s ' % c)
return None
else: # Data may have been updated in the validation process
data['columns'].append(c)
if (data != None):
return data
def RandomNull(self, val):
odds = 1.0 / self.nullOdds # The higher the odds, the lower the chance
isNull = numpy.random.random()
if (isNull < odds):
return ""
else:
return val
def ratio_pick(self, choices, ratios):
probs = [ratios[i] / sum(ratios) for i in range(len(ratios))]
choice = numpy.random.choice(choices, size=1, p=probs)
return choice
'''
Generate random data for a column
'''
def generate_column(self, column):
colType = column.get('type')
colMin = column.get('minimum')
colMax = column.get('maximum')
colLen = column.get('length')
colNULLS = column.get('NULLS')
colFormat = column.get('format')
colIncremental = column.get('incremental')
colChoice = column.get('choice')
colRatio = column.get('ratio')
colRegex = column.get('regex')
colDistribution = column.get('distribution')
# With a choice of values, no need for min/max
if (colChoice or colRatio):
colMin="0"
colMax="0"
if colNULLS == "1":
isNullable = True
else:
isNullable = False
coreVal=""
if colType == "0": # Integer
if (not colFormat):
colFormat = self.defaultIntOPFormat
if (colIncremental):
# We don't allow NULLS
coreVal = int(column.get('counter'))
column['counter'] = str(int(coreVal) + 1)
else:
if not colMin:
colMin = self.minInt
if not colMax:
colMax = self.maxInt
coreVal = int(numpy.random.uniform(int(colMin), int(colMax), 1))
if (colDistribution):
if (colDistribution == "normal"):
coreVal = int(numpy.random.standard_normal(1) * int(colMax)) + int(colMin)
# No need to test for Uniform. Could add additional distributions in here though
if (colChoice):
coreVal = random.choice(colChoice)
elif (colRatio):
coreVal = self.ratio_pick(colRatio[0], colRatio[1])[0]
else:
if isNullable:
coreVal = self.RandomNull(coreVal)
if colType == "1": # Float
if not colMin:
colMin = self.minFloat
if not colMax:
colMax = self.maxFloat
coreVal = random.uniform(float(colMin), float(colMax))
if (colDistribution):
if (colDistribution == "normal"):
coreVal = (numpy.random.standard_normal(1) * float(colMax)) + float(colMin)
# No need to test for Uniform. Could add additional distributions in here though
if (not colFormat):
colFormat = self.defaultFloatOPFormat
if (colChoice):
coreVal = random.choice(colChoice)
elif (colRatio):
coreVal = self.ratio_pick(colRatio[0], colRatio[1])[0]
else:
if isNullable:
coreVal = self.RandomNull(coreVal)
if colType == "2": # String
if not colLen:
colLen = self.defaultStringLen
colLen = int(colLen)
if (not colFormat):
colFormat = self.defaultDateOPFormat
if colRegex:
coreVal = rstr.xeger(colRegex)
else:
coreVal = rstr.xeger('[A-Z][A-Za-z 0-9]{1,%s}' % colLen)
if (colChoice):
coreVal = random.choice(colChoice)
elif (colRatio):
coreVal = self.ratio_pick(colRatio[0], colRatio[1])[0]
else:
if isNullable:
coreVal = self.RandomNull(coreVal)
if colType == "3": # Date
if (not colFormat):
colFormat = self.defaultDateFormat # This is not the OP format, but used for strftime
if (colChoice):
choice = time.mktime(time.strptime('%s' % (random.choice(colChoice)), '%Y%m%d'))
coreVal = time.strftime(colFormat, time.localtime(choice))
elif (colRatio):
choice = time.mktime(time.strptime('%s' % (self.ratio_pick(colRatio[0], colRatio[1])[0]), '%Y%m%d'))
coreVal = time.strftime(colFormat, time.localtime(choice))
else:
if not colMin:
colMin = self.minDate
if not colMax:
colMax = self.maxDate
d1 = time.mktime(time.strptime('%s' % (colMin), '%Y%m%d'))
d2 = time.mktime(time.strptime('%s' % (colMax), '%Y%m%d'))
randomVal = random.uniform(0, 1)
if (colDistribution):
if (colDistribution == "normal"):
randomVal = (numpy.random.standard_normal(1))
# No need to test for Uniform. Could add additional distributions in here though
# Take the default output date format from constants at beginning of File
coreVal = time.strftime(colFormat, time.localtime(d1 + randomVal * (d2 - d1)))
if isNullable:
coreVal = self.RandomNull(coreVal)
colFormat = self.defaultDateOPFormat # Reuse variable for python format
if colType == "4": # DateTime
if (not colFormat):
colFormat = self.defaultDateTimeFormat # This is not the OP format, but used for strftime
if (colChoice):
choice = time.mktime(time.strptime('%s' % (random.choice(colChoice)), '%Y%m%d%H%M%S'))
coreVal = time.strftime(colFormat, time.localtime(choice))
elif (colRatio):
choice = time.mktime(time.strptime('%s' % (self.ratio_pick(colRatio[0], colRatio[1])[0]), '%Y%m%d%H%M%S'))
coreVal = time.strftime(colFormat, time.localtime(choice))
else:
if not colMin:
colMin = self.minDateTime
if not colMax:
colMax = self.maxDateTime
d1 = time.mktime(time.strptime('%s' % (colMin), '%Y%m%d%H%M%S'))
d2 = time.mktime(time.strptime('%s' % (colMax), '%Y%m%d%H%M%S'))
randomVal = random.uniform(0, 1)
if (colDistribution):
if (colDistribution == "normal"):
randomVal = (numpy.random.standard_normal(1))
# No need to test for Uniform. Could add additional distributions in here though
# Take the default output date format from constants at beginning of File
coreVal = time.strftime(colFormat, time.localtime(d1 + randomVal * (d2 - d1)))
if isNullable:
coreVal = self.RandomNull(coreVal)
colFormat = self.defaultDateOPFormat # Reuse variable for python format
# If the value is blank (i.e. NULL), then it doesn't need a formatted string
if (coreVal == ""):
return ""
else:
return "{:{fmt}}".format(coreVal, fmt=colFormat)
# Module-level demo: running this file generates a 50-row CSV ("opfile.csv")
# from the "sample.json" schema, which contains multiple field types.
G = CSVGen(iFile="sample.json", oFile="opfile.csv", numRows=50, verbose=True)
# Uncomment this to create a sample file containing distribution skew field types
#G = CSVGen(iFile="distribution.json", oFile="opfile.csv", numRows=50, verbose=True)
| [
"rstr.xeger",
"logging.error",
"json.loads",
"logging.basicConfig",
"random.uniform",
"logging.warning",
"random.choice",
"logging.info",
"datetime.datetime.strptime",
"numpy.random.random",
"numpy.random.standard_normal",
"numpy.random.choice",
"time.strptime",
"time.localtime"
] | [((1546, 1578), 'logging.info', 'log.info', (['"""Validating Schema..."""'], {}), "('Validating Schema...')\n", (1554, 1578), True, 'import logging as log\n'), ((1892, 1926), 'logging.info', 'log.info', (['"""Writing output file..."""'], {}), "('Writing output file...')\n", (1900, 1926), True, 'import logging as log\n'), ((2123, 2170), 'logging.info', 'log.info', (["('Creating the CSV file... %s' % fName)"], {}), "('Creating the CSV file... %s' % fName)\n", (2131, 2170), True, 'import logging as log\n'), ((2795, 2835), 'logging.info', 'log.info', (["('Wrote %d rows' % self.numRows)"], {}), "('Wrote %d rows' % self.numRows)\n", (2803, 2835), True, 'import logging as log\n'), ((3599, 3642), 'logging.info', 'log.info', (["('Validating column %s ' % colName)"], {}), "('Validating column %s ' % colName)\n", (3607, 3642), True, 'import logging as log\n'), ((10758, 10779), 'numpy.random.random', 'numpy.random.random', ([], {}), '()\n', (10777, 10779), False, 'import numpy\n'), ((10953, 10998), 'numpy.random.choice', 'numpy.random.choice', (['choices'], {'size': '(1)', 'p': 'probs'}), '(choices, size=1, p=probs)\n', (10972, 10998), False, 'import numpy\n'), ((1409, 1478), 'logging.basicConfig', 'log.basicConfig', ([], {'format': '"""%(levelname)s: %(message)s"""', 'level': 'log.DEBUG'}), "(format='%(levelname)s: %(message)s', level=log.DEBUG)\n", (1424, 1478), True, 'import logging as log\n'), ((1490, 1542), 'logging.basicConfig', 'log.basicConfig', ([], {'format': '"""%(levelname)s: %(message)s"""'}), "(format='%(levelname)s: %(message)s')\n", (1505, 1542), True, 'import logging as log\n'), ((1603, 1674), 'logging.error', 'log.error', (['"""Error: You must specify a schema file argument (iFile=...)"""'], {}), "('Error: You must specify a schema file argument (iFile=...)')\n", (1612, 1674), True, 'import logging as log\n'), ((1709, 1785), 'logging.error', 'log.error', (['"""Error: You must specify an output CSV file argument (oFile=...)"""'], {}), "('Error: You must 
specify an output CSV file argument (oFile=...)')\n", (1718, 1785), True, 'import logging as log\n'), ((2900, 2916), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (2910, 2916), False, 'import json\n'), ((3145, 3192), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date_text', '"""%Y%m%d"""'], {}), "(date_text, '%Y%m%d')\n", (3171, 3192), False, 'import datetime\n'), ((3293, 3346), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date_text', '"""%Y%m%d%H%M%S"""'], {}), "(date_text, '%Y%m%d%H%M%S')\n", (3319, 3346), False, 'import datetime\n'), ((3529, 3580), 'logging.error', 'log.error', (['"""Error: Missing column \'name\' attribute"""'], {}), '("Error: Missing column \'name\' attribute")\n', (3538, 3580), True, 'import logging as log\n'), ((3731, 3806), 'logging.warning', 'log.warning', (['("Error: Column \'%s\' is missing the \'type\' attribute" % colName)'], {}), '("Error: Column \'%s\' is missing the \'type\' attribute" % colName)\n', (3742, 3806), True, 'import logging as log\n'), ((3855, 3942), 'logging.error', 'log.error', (["('Error: Column %s is set to %s. It must be either:' % (colName, colType))"], {}), "('Error: Column %s is set to %s. 
It must be either:' % (colName,\n colType))\n", (3864, 3942), True, 'import logging as log\n'), ((3942, 4018), 'logging.error', 'log.error', (['""" 0 - Integer, 1 - Float, 2 - String, 3 - Date, 4 - DateTime"""'], {}), "(' 0 - Integer, 1 - Float, 2 - String, 3 - Date, 4 - DateTime')\n", (3951, 4018), True, 'import logging as log\n'), ((4680, 4787), 'logging.error', 'log.error', (['("Error: Column \'%s\': Distribution type must be either \'normal\' or \'uniform\'" %\n colName)'], {}), '(\n "Error: Column \'%s\': Distribution type must be either \'normal\' or \'uniform\'"\n % colName)\n', (4689, 4787), True, 'import logging as log\n'), ((4830, 4903), 'logging.error', 'log.error', (['"""Error: You can only specify one of ratio or choice, not both"""'], {}), "('Error: You can only specify one of ratio or choice, not both')\n", (4839, 4903), True, 'import logging as log\n'), ((5291, 5393), 'logging.error', 'log.error', (['("Error: Column \'%s\': Only Integer columns can be defined as incremental" %\n colName)'], {}), '(\n "Error: Column \'%s\': Only Integer columns can be defined as incremental" %\n colName)\n', (5300, 5393), True, 'import logging as log\n'), ((9299, 9412), 'logging.warning', 'log.warning', (['("Warning: Column \'%s\' is missing the \'format\' attribute. A default will be used"\n % colName)'], {}), '(\n "Warning: Column \'%s\' is missing the \'format\' attribute. 
A default will be used"\n % colName)\n', (9310, 9412), True, 'import logging as log\n'), ((9461, 9579), 'logging.error', 'log.error', (['("Column \'%s\': NULLS has value \'%s\' but can only have a value of \'0\' or \'1\'" %\n (colName, colNULLS))'], {}), '(\n "Column \'%s\': NULLS has value \'%s\' but can only have a value of \'0\' or \'1\'"\n % (colName, colNULLS))\n', (9470, 9579), True, 'import logging as log\n'), ((10030, 10100), 'logging.error', 'log.error', (['"""Validating Schema: You must provide a core name attribute"""'], {}), "('Validating Schema: You must provide a core name attribute')\n", (10039, 10100), True, 'import logging as log\n'), ((10173, 10238), 'logging.error', 'log.error', (['"""Validating Schema: You must have a columns attribute"""'], {}), "('Validating Schema: You must have a columns attribute')\n", (10182, 10238), True, 'import logging as log\n'), ((2234, 2292), 'logging.error', 'log.error', (['("Error creating \'%s\': %s" % (fName, e.strerror))'], {}), '("Error creating \'%s\': %s" % (fName, e.strerror))\n', (2243, 2292), True, 'import logging as log\n'), ((2946, 2979), 'logging.error', 'log.error', (["('Invalid JSON: %s' % e)"], {}), "('Invalid JSON: %s' % e)\n", (2955, 2979), True, 'import logging as log\n'), ((4968, 5073), 'logging.error', 'log.error', (['("Error: Column \'%s\': You must have a 2 part ratio [ [choices], [ratios] ]" %\n colName)'], {}), '(\n "Error: Column \'%s\': You must have a 2 part ratio [ [choices], [ratios] ]"\n % colName)\n', (4977, 5073), True, 'import logging as log\n'), ((5132, 5222), 'logging.error', 'log.error', (['("Error: Column \'%s\': Your ratio choices do not match your ratios" % colName)'], {}), '("Error: Column \'%s\': Your ratio choices do not match your ratios" %\n colName)\n', (5141, 5222), True, 'import logging as log\n'), ((7066, 7174), 'logging.error', 'log.error', (['("Error: Column \'%s\': Minimum of %s is greater than Maximum of %s" % (\n colName, colMin, colMax))'], {}), '("Error: Column 
\'%s\': Minimum of %s is greater than Maximum of %s" %\n (colName, colMin, colMax))\n', (7075, 7174), True, 'import logging as log\n'), ((7384, 7484), 'logging.warning', 'log.warning', (['("Warning: Column \'%s\': Minimum specified of %s, but no maximum" % (colName,\n colMin))'], {}), '("Warning: Column \'%s\': Minimum specified of %s, but no maximum" %\n (colName, colMin))\n', (7395, 7484), True, 'import logging as log\n'), ((7485, 7557), 'logging.warning', 'log.warning', (["(' --> Assigning a maximum value of %s' % column['maximum'])"], {}), "(' --> Assigning a maximum value of %s' % column['maximum'])\n", (7496, 7557), True, 'import logging as log\n'), ((7749, 7849), 'logging.warning', 'log.warning', (['("Warning: Column \'%s\': Maximum specified of %s, but no minimum" % (colName,\n colMax))'], {}), '("Warning: Column \'%s\': Maximum specified of %s, but no minimum" %\n (colName, colMax))\n', (7760, 7849), True, 'import logging as log\n'), ((7850, 7922), 'logging.warning', 'log.warning', (["(' --> Assigning a minimum value of %s' % column['minimum'])"], {}), "(' --> Assigning a minimum value of %s' % column['minimum'])\n", (7861, 7922), True, 'import logging as log\n'), ((8664, 8764), 'logging.warning', 'log.warning', (['("Warning: Column \'%s\': Minimum specified of %s, but no maximum" % (colName,\n colMin))'], {}), '("Warning: Column \'%s\': Minimum specified of %s, but no maximum" %\n (colName, colMin))\n', (8675, 8764), True, 'import logging as log\n'), ((8765, 8837), 'logging.warning', 'log.warning', (["(' --> Assigning a maximum value of %s' % column['maximum'])"], {}), "(' --> Assigning a maximum value of %s' % column['maximum'])\n", (8776, 8837), True, 'import logging as log\n'), ((8911, 9011), 'logging.warning', 'log.warning', (['("Warning: Column \'%s\': maximum specified of %s, but no minimum" % (colName,\n colMax))'], {}), '("Warning: Column \'%s\': maximum specified of %s, but no minimum" %\n (colName, colMax))\n', (8922, 9011), True, 'import 
logging as log\n'), ((9012, 9084), 'logging.warning', 'log.warning', (["(' --> Assigning a minimum value of %s' % column['minimum'])"], {}), "(' --> Assigning a minimum value of %s' % column['minimum'])\n", (9023, 9084), True, 'import logging as log\n'), ((9740, 9797), 'logging.error', 'log.error', (['("Error opening \'%s\': %s" % (fname, e.strerror))'], {}), '("Error opening \'%s\': %s" % (fname, e.strerror))\n', (9749, 9797), True, 'import logging as log\n'), ((10449, 10496), 'logging.error', 'log.error', (["('Error in processing column %s ' % c)"], {}), "('Error in processing column %s ' % c)\n", (10458, 10496), True, 'import logging as log\n'), ((13042, 13066), 'random.choice', 'random.choice', (['colChoice'], {}), '(colChoice)\n', (13055, 13066), False, 'import random\n'), ((13416, 13436), 'rstr.xeger', 'rstr.xeger', (['colRegex'], {}), '(colRegex)\n', (13426, 13436), False, 'import rstr\n'), ((13460, 13510), 'rstr.xeger', 'rstr.xeger', (["('[A-Z][A-Za-z 0-9]{1,%s}' % colLen)"], {}), "('[A-Z][A-Za-z 0-9]{1,%s}' % colLen)\n", (13470, 13510), False, 'import rstr\n'), ((13544, 13568), 'random.choice', 'random.choice', (['colChoice'], {}), '(colChoice)\n', (13557, 13568), False, 'import random\n'), ((5642, 5739), 'logging.error', 'log.error', (['("Error: Column \'%s\': Minimum value of \'%s\' is not an integer" % (colName,\n colMin))'], {}), '("Error: Column \'%s\': Minimum value of \'%s\' is not an integer" % (\n colName, colMin))\n', (5651, 5739), True, 'import logging as log\n'), ((5804, 5901), 'logging.error', 'log.error', (['("Error: Column \'%s\': Maximum value of \'%s\' is not an integer" % (colName,\n colMax))'], {}), '("Error: Column \'%s\': Maximum value of \'%s\' is not an integer" % (\n colName, colMax))\n', (5813, 5901), True, 'import logging as log\n'), ((6687, 6781), 'logging.error', 'log.error', (['("Error: Column \'%s\': Minimum value of \'%s\' is not a float" % (colName, colMin)\n )'], {}), '("Error: Column \'%s\': Minimum value of \'%s\' is not 
a float" % (\n colName, colMin))\n', (6696, 6781), True, 'import logging as log\n'), ((6836, 6930), 'logging.error', 'log.error', (['("Error: Column \'%s\': Maximum value of \'%s\' is not a float" % (colName, colMax)\n )'], {}), '("Error: Column \'%s\': Maximum value of \'%s\' is not a float" % (\n colName, colMax))\n', (6845, 6930), True, 'import logging as log\n'), ((8136, 8261), 'logging.error', 'log.error', (['("Error: Column \'%s\': Minimum value of \'%s\' is not valid date in the format YYYYMMDD"\n % (colName, colMin))'], {}), '(\n "Error: Column \'%s\': Minimum value of \'%s\' is not valid date in the format YYYYMMDD"\n % (colName, colMin))\n', (8145, 8261), True, 'import logging as log\n'), ((8448, 8583), 'logging.error', 'log.error', (['("Error: Column \'%s\': Minimum value of \'%s\' is not valid datetime in the format YYYYMMDDHHMMSS"\n % (colName, colMin))'], {}), '(\n "Error: Column \'%s\': Minimum value of \'%s\' is not valid datetime in the format YYYYMMDDHHMMSS"\n % (colName, colMin))\n', (8457, 8583), True, 'import logging as log\n'), ((12356, 12380), 'random.choice', 'random.choice', (['colChoice'], {}), '(colChoice)\n', (12369, 12380), False, 'import random\n'), ((14004, 14026), 'time.localtime', 'time.localtime', (['choice'], {}), '(choice)\n', (14018, 14026), False, 'import time\n'), ((14461, 14481), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (14475, 14481), False, 'import random\n'), ((15287, 15309), 'time.localtime', 'time.localtime', (['choice'], {}), '(choice)\n', (15301, 15309), False, 'import time\n'), ((15770, 15790), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (15784, 15790), False, 'import random\n'), ((6177, 6316), 'logging.error', 'log.error', (['("Error: Column \'%s\': Incremental row would exceed Maximum value of \'%s\' for %s rows"\n % (colName, colMax, self.numRows))'], {}), '(\n "Error: Column \'%s\': Incremental row would exceed Maximum value of \'%s\' for %s rows"\n % 
(colName, colMax, self.numRows))\n', (6186, 6316), True, 'import logging as log\n'), ((6364, 6477), 'logging.error', 'log.error', (['("Error: Column \'%s\': You cannot specify a choice of values in an incremental row "\n % colName)'], {}), '(\n "Error: Column \'%s\': You cannot specify a choice of values in an incremental row "\n % colName)\n', (6373, 6477), True, 'import logging as log\n'), ((14192, 14214), 'time.localtime', 'time.localtime', (['choice'], {}), '(choice)\n', (14206, 14214), False, 'import time\n'), ((14339, 14377), 'time.strptime', 'time.strptime', (["('%s' % colMin)", '"""%Y%m%d"""'], {}), "('%s' % colMin, '%Y%m%d')\n", (14352, 14377), False, 'import time\n'), ((14402, 14440), 'time.strptime', 'time.strptime', (["('%s' % colMax)", '"""%Y%m%d"""'], {}), "('%s' % colMax, '%Y%m%d')\n", (14415, 14440), False, 'import time\n'), ((14804, 14846), 'time.localtime', 'time.localtime', (['(d1 + randomVal * (d2 - d1))'], {}), '(d1 + randomVal * (d2 - d1))\n', (14818, 14846), False, 'import time\n'), ((15481, 15503), 'time.localtime', 'time.localtime', (['choice'], {}), '(choice)\n', (15495, 15503), False, 'import time\n'), ((15636, 15680), 'time.strptime', 'time.strptime', (["('%s' % colMin)", '"""%Y%m%d%H%M%S"""'], {}), "('%s' % colMin, '%Y%m%d%H%M%S')\n", (15649, 15680), False, 'import time\n'), ((15705, 15749), 'time.strptime', 'time.strptime', (["('%s' % colMax)", '"""%Y%m%d%H%M%S"""'], {}), "('%s' % colMax, '%Y%m%d%H%M%S')\n", (15718, 15749), False, 'import time\n'), ((16113, 16155), 'time.localtime', 'time.localtime', (['(d1 + randomVal * (d2 - d1))'], {}), '(d1 + randomVal * (d2 - d1))\n', (16127, 16155), False, 'import time\n'), ((12791, 12822), 'numpy.random.standard_normal', 'numpy.random.standard_normal', (['(1)'], {}), '(1)\n', (12819, 12822), False, 'import numpy\n'), ((13927, 13951), 'random.choice', 'random.choice', (['colChoice'], {}), '(colChoice)\n', (13940, 13951), False, 'import random\n'), ((14566, 14597), 'numpy.random.standard_normal', 
'numpy.random.standard_normal', (['(1)'], {}), '(1)\n', (14594, 14597), False, 'import numpy\n'), ((15204, 15228), 'random.choice', 'random.choice', (['colChoice'], {}), '(colChoice)\n', (15217, 15228), False, 'import random\n'), ((15875, 15906), 'numpy.random.standard_normal', 'numpy.random.standard_normal', (['(1)'], {}), '(1)\n', (15903, 15906), False, 'import numpy\n'), ((12172, 12203), 'numpy.random.standard_normal', 'numpy.random.standard_normal', (['(1)'], {}), '(1)\n', (12200, 12203), False, 'import numpy\n')] |
# coding=utf-8
# Creation date: October 28, 2020
# Creation time: 19:16
# Creator: SteamPeKa
from typing import Collection, Hashable, Union
import numpy
import pytest
def assert_equal_tensors(expected: numpy.ndarray, actual: numpy.ndarray, additional_string: Union[None, str] = None):
    """Assert that two numpy arrays have the same shape and (approximately) equal values.

    Args:
        expected: reference tensor.
        actual: tensor under test.
        additional_string: optional extra context appended to the failure message.
    """
    assert expected.shape == actual.shape, f"Tensors have different shapes: {expected.shape} and {actual.shape}"
    max_abs_diff = numpy.max(numpy.abs(expected - actual))
    message_parts = [
        f"Tensors are not equal with maximum abs difference of {max_abs_diff}.",
        "Expected:",
        f"{expected}",
        "Actual:",
        f"{actual}",
        "Difference:",
        f"{expected - actual}",
    ]
    message = "\n".join(message_parts)
    if additional_string is not None:
        message = f"{message}\n####\n{additional_string}\n####"
    # approx(0) tolerates tiny floating-point noise in the element-wise difference
    assert max_abs_diff == pytest.approx(0), message
def assert_collections_equal_as_sets(expected: Collection[Hashable], actual: Collection[Hashable]):
    """Assert that two collections hold exactly the same distinct elements.

    Order and multiplicity are ignored: both collections are reduced to sets
    and compared via their symmetric difference.
    """
    lhs = set(expected)
    rhs = set(actual)
    disagreement = lhs ^ rhs
    failure_text = (
        f"Collections are not equal.\n"
        f"Expected collection-set: {lhs} (of size {len(lhs)})\n"
        f"Actual collection-set: {rhs} (of size {len(rhs)})\n"
        f"Symmetric difference: {disagreement} (of length {len(disagreement)} != 0)"
    )
    assert not disagreement, failure_text
| [
"pytest.approx",
"numpy.abs"
] | [((429, 457), 'numpy.abs', 'numpy.abs', (['(expected - actual)'], {}), '(expected - actual)\n', (438, 457), False, 'import numpy\n'), ((849, 865), 'pytest.approx', 'pytest.approx', (['(0)'], {}), '(0)\n', (862, 865), False, 'import pytest\n')] |
import numpy as np
import pytest
from psyneulink.components.functions.function import BogaczEtAl, DRIFT_RATE, Exponential, Linear, THRESHOLD
from psyneulink.components.mechanisms.processing.transfermechanism import TransferMechanism
from psyneulink.components.process import Process
from psyneulink.components.projections.modulatory.controlprojection import ControlProjection
from psyneulink.components.system import System
from psyneulink.globals.keywords import ALLOCATION_SAMPLES, IDENTITY_MATRIX, MEAN, RESULT, VARIANCE, SLOPE, CONTROL
from psyneulink.globals.preferences.componentpreferenceset import ComponentPreferenceSet, kpReportOutputPref, kpVerbosePref
from psyneulink.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel
from psyneulink.library.mechanisms.processing.integrator.ddm import DDM, DECISION_VARIABLE, PROBABILITY_UPPER_THRESHOLD, RESPONSE_TIME
from psyneulink.library.subsystems.evc.evccontrolmechanism import EVCControlMechanism
def test_EVC():
    """End-to-end regression test of EVC control over a two-process system.

    Builds a DDM decision pathway (drift rate and threshold under
    ControlProjection) plus a reward pathway, wraps them in a System
    controlled by an EVCControlMechanism, runs two trials, and compares the
    full simulation trace against hard-coded expected values.
    """
    # Mechanisms
    Input = TransferMechanism(
        name='Input',
    )
    Reward = TransferMechanism(
        output_states=[RESULT, MEAN, VARIANCE],
        name='Reward'
    )
    # Decision mechanism: both drift_rate and threshold carry a
    # ControlProjection sampling allocations over np.arange(0.1, 1.01, 0.3).
    Decision = DDM(
        function=BogaczEtAl(
            drift_rate=(
                1.0,
                ControlProjection(
                    function=Linear,
                    control_signal_params={
                        ALLOCATION_SAMPLES: np.arange(0.1, 1.01, 0.3)
                    },
                ),
            ),
            threshold=(
                1.0,
                ControlProjection(
                    function=Linear,
                    control_signal_params={
                        ALLOCATION_SAMPLES: np.arange(0.1, 1.01, 0.3)
                    },
                ),
            ),
            noise=(0.5),
            starting_point=(0),
            t0=0.45
        ),
        output_states=[
            DECISION_VARIABLE,
            RESPONSE_TIME,
            PROBABILITY_UPPER_THRESHOLD
        ],
        name='Decision',
    )
    # Input.prefs.paramValidationPref = False
    # Reward.prefs.paramValidationPref = False
    # Decision.prefs.paramValidationPref = False
    # Decision.input_state.prefs.paramValidationPref = False
    # for mech in [Input, Reward, Decision]:
    # mech.function_object.prefs.paramValidationPref = False
    # for os in mech.output_states:
    # os.prefs.paramValidationPref = False
    # for instate in mech.input_states:
    # instate.prefs.paramValidationPref = False
    # for pstate in mech._parameter_states:
    # pstate.prefs.paramValidationPref = False
    # Processes:
    TaskExecutionProcess = Process(
        # default_variable=[0],
        size=1,
        pathway=[(Input), IDENTITY_MATRIX, (Decision)],
        name='TaskExecutionProcess',
    )
    RewardProcess = Process(
        # default_variable=[0],
        size=1,
        pathway=[(Reward)],
        name='RewardProcess',
    )
    # System:
    mySystem = System(
        processes=[TaskExecutionProcess, RewardProcess],
        controller=EVCControlMechanism,
        enable_controller=True,
        monitor_for_control=[
            Reward,
            Decision.PROBABILITY_UPPER_THRESHOLD,
            (Decision.RESPONSE_TIME, -1, 1)
        ],
        name='EVC Test System',
    )
    # TaskExecutionProcess.prefs.paramValidationPref = False
    # RewardProcess.prefs.paramValidationPref = False
    # mySystem.prefs.paramValidationPref = False
    # Stimuli
    stim_list_dict = {
        Input: [0.5, 0.123],
        Reward: [20, 20]
    }
    mySystem.run(
        inputs=stim_list_dict,
    )
    # NOTE(review): positions 3 and 4 in execution_list are assumed to be the
    # reward/input prediction mechanisms added by the controller — confirm ordering.
    RewardPrediction = mySystem.execution_list[3]
    InputPrediction = mySystem.execution_list[4]
    # rearranging mySystem.results into a format that we can compare with pytest
    results_array = []
    for elem in mySystem.results:
        elem_array = []
        for inner_elem in elem:
            elem_array.append(float(inner_elem))
        results_array.append(elem_array)
    # mySystem.results expected output properly formatted
    expected_results_array = [
        [10., 10.0, 0.0, -0.1, 0.48999867, 0.50499983],
        [10., 10.0, 0.0, -0.4, 1.08965888, 0.51998934],
        [10., 10.0, 0.0, 0.7, 2.40680493, 0.53494295],
        [10., 10.0, 0.0, -1., 4.43671978, 0.549834],
        [10., 10.0, 0.0, 0.1, 0.48997868, 0.51998934],
        [10., 10.0, 0.0, -0.4, 1.08459402, 0.57932425],
        [10., 10.0, 0.0, 0.7, 2.36033556, 0.63645254],
        [10., 10.0, 0.0, 1., 4.24948962, 0.68997448],
        [10., 10.0, 0.0, 0.1, 0.48993479, 0.53494295],
        [10., 10.0, 0.0, 0.4, 1.07378304, 0.63645254],
        [10., 10.0, 0.0, 0.7, 2.26686573, 0.72710822],
        [10., 10.0, 0.0, 1., 3.90353015, 0.80218389],
        [10., 10.0, 0.0, 0.1, 0.4898672, 0.549834],
        [10., 10.0, 0.0, -0.4, 1.05791834, 0.68997448],
        [10., 10.0, 0.0, 0.7, 2.14222978, 0.80218389],
        [10., 10.0, 0.0, 1., 3.49637662, 0.88079708],
        [10., 10.0, 0.0, 1., 3.49637662, 0.88079708],
        [15., 15.0, 0.0, 0.1, 0.48999926, 0.50372993],
        [15., 15.0, 0.0, -0.4, 1.08981011, 0.51491557],
        [15., 15.0, 0.0, 0.7, 2.40822035, 0.52608629],
        [15., 15.0, 0.0, 1., 4.44259627, 0.53723096],
        [15., 15.0, 0.0, 0.1, 0.48998813, 0.51491557],
        [15., 15.0, 0.0, 0.4, 1.0869779, 0.55939819],
        [15., 15.0, 0.0, -0.7, 2.38198336, 0.60294711],
        [15., 15.0, 0.0, 1., 4.33535807, 0.64492386],
        [15., 15.0, 0.0, 0.1, 0.48996368, 0.52608629],
        [15., 15.0, 0.0, 0.4, 1.08085171, 0.60294711],
        [15., 15.0, 0.0, 0.7, 2.32712843, 0.67504223],
        [15., 15.0, 0.0, 1., 4.1221271, 0.7396981],
        [15., 15.0, 0.0, 0.1, 0.48992596, 0.53723096],
        [15., 15.0, 0.0, -0.4, 1.07165729, 0.64492386],
        [15., 15.0, 0.0, 0.7, 2.24934228, 0.7396981],
        [15., 15.0, 0.0, 1., 3.84279648, 0.81637827],
        [15., 15.0, 0.0, 1., 3.84279648, 0.81637827]
    ]
    # Pairs of (actual value, expected value) checked with assert_allclose below.
    expected_output = [
        # Decision Output | Second Trial
        (Decision.output_states[0].value, np.array(1.0)),
        # Input Prediction Output | Second Trial
        (InputPrediction.output_states[0].value, np.array(0.1865)),
        # RewardPrediction Output | Second Trial
        (RewardPrediction.output_states[0].value, np.array(15.0)),
        # --- Decision Mechanism ---
        # Output State Values
        # decision variable
        (Decision.output_states[DECISION_VARIABLE].value, np.array([1.0])),
        # response time
        (Decision.output_states[RESPONSE_TIME].value, np.array([3.84279648])),
        # upper bound
        (Decision.output_states[PROBABILITY_UPPER_THRESHOLD].value, np.array([0.81637827])),
        # lower bound
        # (round(float(Decision.output_states['DDM_probability_lowerBound'].value),3), 0.184),
        # --- Reward Mechanism ---
        # Output State Values
        # transfer mean
        (Reward.output_states[RESULT].value, np.array([15.])),
        # transfer_result
        (Reward.output_states[MEAN].value, np.array(15.0)),
        # transfer variance
        (Reward.output_states[VARIANCE].value, np.array(0.0)),
        # System Results Array
        # (all intermediate output values of system)
        (results_array, expected_results_array)
    ]
    for i in range(len(expected_output)):
        val, expected = expected_output[i]
        np.testing.assert_allclose(val, expected, atol=1e-08, err_msg='Failed on expected_output[{0}]'.format(i))
def test_EVC_gratton():
    """Regression test of EVC control on a Gratton-style flanker task.

    Builds target/flanker stimulus and representation pathways (with the
    representation slopes under ControlProjection), an automatic pathway, a
    DDM decision mechanism and a reward pathway; runs three trials and
    compares the expanded simulation results against hard-coded values.
    """
    def test_search_function(controller=None, **kwargs):
        """Reshape the controller's allocation policy into a 2-D array (one row per entry).

        NOTE(review): appears unused in this test — confirm before removing.
        """
        result = np.array(controller.allocationPolicy).reshape(len(controller.allocationPolicy), -1)
        return result
    def test_outcome_function(**kwargs):
        """Return a constant zero outcome.

        NOTE(review): appears unused in this test — confirm before removing.
        """
        result = np.array([0])
        return result
    # Preferences:
    mechanism_prefs = ComponentPreferenceSet(
        prefs={
            kpVerbosePref: PreferenceEntry(False, PreferenceLevel.INSTANCE),
            kpReportOutputPref: PreferenceEntry(False, PreferenceLevel.INSTANCE)
        }
    )
    process_prefs = ComponentPreferenceSet(
        reportOutput_pref=PreferenceEntry(False, PreferenceLevel.INSTANCE),
        verbose_pref=PreferenceEntry(True, PreferenceLevel.INSTANCE)
    )
    # Control Parameters
    signalSearchRange = np.arange(1.0, 2.0, 0.2)
    # Stimulus Mechanisms
    Target_Stim = TransferMechanism(name='Target Stimulus', function=Linear(slope=0.3324))
    Flanker_Stim = TransferMechanism(name='Flanker Stimulus', function=Linear(slope=0.3545221843))
    # Processing Mechanisms (Control)
    Target_Rep = TransferMechanism(
        name='Target Representation',
        function=Linear(
            slope=(
                1.0,
                ControlProjection(
                    function=Linear,
                    control_signal_params={ALLOCATION_SAMPLES: signalSearchRange}
                )
            )
        ),
        prefs=mechanism_prefs
    )
    Flanker_Rep = TransferMechanism(
        name='Flanker Representation',
        function=Linear(
            slope=(
                1.0,
                ControlProjection(
                    function=Linear,
                    control_signal_params={ALLOCATION_SAMPLES: signalSearchRange}
                )
            )
        ),
        prefs=mechanism_prefs
    )
    # Processing Mechanism (Automatic)
    Automatic_Component = TransferMechanism(
        name='Automatic Component',
        function=Linear(slope=(1.0)),
        prefs=mechanism_prefs
    )
    # Decision Mechanisms
    Decision = DDM(
        function=BogaczEtAl(
            drift_rate=(1.0),
            threshold=(0.2645),
            noise=(0.5),
            starting_point=(0),
            t0=0.15
        ),
        prefs=mechanism_prefs,
        name='Decision',
        output_states=[
            DECISION_VARIABLE,
            RESPONSE_TIME,
            PROBABILITY_UPPER_THRESHOLD
        ],
    )
    # Outcome Mechanisms:
    Reward = TransferMechanism(name='Reward')
    # Processes:
    TargetControlProcess = Process(
        default_variable=[0],
        pathway=[Target_Stim, Target_Rep, Decision],
        prefs=process_prefs,
        name='Target Control Process'
    )
    FlankerControlProcess = Process(
        default_variable=[0],
        pathway=[Flanker_Stim, Flanker_Rep, Decision],
        prefs=process_prefs,
        name='Flanker Control Process'
    )
    TargetAutomaticProcess = Process(
        default_variable=[0],
        pathway=[Target_Stim, Automatic_Component, Decision],
        prefs=process_prefs,
        name='Target Automatic Process'
    )
    FlankerAutomaticProcess = Process(
        default_variable=[0],
        pathway=[Flanker_Stim, Automatic_Component, Decision],
        prefs=process_prefs,
        name='Flanker1 Automatic Process'
    )
    RewardProcess = Process(
        default_variable=[0],
        pathway=[Reward],
        prefs=process_prefs,
        name='RewardProcess'
    )
    # System:
    mySystem = System(
        processes=[
            TargetControlProcess,
            FlankerControlProcess,
            TargetAutomaticProcess,
            FlankerAutomaticProcess,
            RewardProcess
        ],
        controller=EVCControlMechanism,
        enable_controller=True,
        monitor_for_control=[
            Reward,
            (Decision.PROBABILITY_UPPER_THRESHOLD, 1, -1)
        ],
        # monitor_for_control=[Reward, DDM_PROBABILITY_UPPER_THRESHOLD, (DDM_RESPONSE_TIME, -1, 1)],
        name='EVC Gratton System'
    )
    # Show characteristics of system:
    mySystem.show()
    mySystem.controller.show()
    # mySystem.show_graph(show_control=True)
    # configure EVC components
    mySystem.controller.control_signals[0].intensity_cost_function = Exponential(rate=0.8046).function
    mySystem.controller.control_signals[1].intensity_cost_function = Exponential(rate=0.8046).function
    # Set the integration rate of the stimulus/reward prediction mechanisms to 1.0
    # so predictions track the inputs exactly.
    for mech in mySystem.controller.prediction_mechanisms.mechanisms:
        if mech.name == 'Flanker Stimulus Prediction Mechanism' or mech.name == 'Target Stimulus Prediction Mechanism':
            # when you find a key mechanism (transfer mechanism) with the correct name, print its name
            print(mech.name)
            mech.function_object.rate = 1.0
        if 'Reward' in mech.name:
            print(mech.name)
            mech.function_object.rate = 1.0
            # mySystem.controller.prediction_mechanisms[mech].parameterStates['rate'].base_value = 1.0
    print('new rate of integration mechanisms before System execution:')
    # for mech in mySystem.controller.prediction_mechanisms.keys():
    for mech in mySystem.controller.prediction_mechanisms.mechanisms:
        print(mech.name)
        print(mech.function_object.rate)
        print('----')
    # generate stimulus environment
    nTrials = 3
    targetFeatures = [1, 1, 1]
    flankerFeatures = [1, -1, 1] # for full simulation: flankerFeatures = [-1,1]
    reward = [100, 100, 100]
    targetInputList = targetFeatures
    flankerInputList = flankerFeatures
    rewardList = reward
    # targetInputList = np.random.choice(targetFeatures, nTrials).tolist()
    # flankerInputList = np.random.choice(flankerFeatures, nTrials).tolist()
    # rewardList = (np.ones(nTrials) * reward).tolist() #np.random.choice(reward, nTrials).tolist()
    stim_list_dict = {Target_Stim: targetInputList,
                      Flanker_Stim: flankerInputList,
                      Reward: rewardList}
    mySystem.controller.reportOutputPref = True
    # Flattened expected trace: 4 values per simulation/trial entry.
    expected_results_array = [
        0.2645, 0.32257753, 0.94819408, 100.,
        0.2645, 0.31663196, 0.95508757, 100.,
        0.2645, 0.31093566, 0.96110142, 100.,
        0.2645, 0.30548947, 0.96633839, 100.,
        0.2645, 0.30029103, 0.97089165, 100.,
        0.2645, 0.3169957, 0.95468427, 100.,
        0.2645, 0.31128378, 0.9607499, 100.,
        0.2645, 0.30582202, 0.96603252, 100.,
        0.2645, 0.30060824, 0.9706259, 100.,
        0.2645, 0.29563774, 0.97461444, 100.,
        0.2645, 0.31163288, 0.96039533, 100.,
        0.2645, 0.30615555, 0.96572397, 100.,
        0.2645, 0.30092641, 0.97035779, 100.,
        0.2645, 0.2959409, 0.97438178, 100.,
        0.2645, 0.29119255, 0.97787196, 100.,
        0.2645, 0.30649004, 0.96541272, 100.,
        0.2645, 0.30124552, 0.97008732, 100.,
        0.2645, 0.29624499, 0.97414704, 100.,
        0.2645, 0.29148205, 0.97766847, 100.,
        0.2645, 0.28694892, 0.98071974, 100.,
        0.2645, 0.30156558, 0.96981445, 100.,
        0.2645, 0.29654999, 0.97391021, 100.,
        0.2645, 0.29177245, 0.97746315, 100.,
        0.2645, 0.28722523, 0.98054192, 100.,
        0.2645, 0.28289958, 0.98320731, 100.,
        0.2645, 0.28289958, 0.98320731, 100.,
        0.2645, 0.42963678, 0.47661181, 100.,
        0.2645, 0.42846471, 0.43938586, 100.,
        -0.2645, 0.42628176, 0.40282965, 100.,
        0.2645, 0.42314468, 0.36732207, 100.,
        -0.2645, 0.41913221, 0.333198, 100.,
        0.2645, 0.42978939, 0.51176048, 100.,
        0.2645, 0.42959394, 0.47427693, 100.,
        -0.2645, 0.4283576, 0.43708106, 100.,
        0.2645, 0.4261132, 0.40057958, 100.,
        -0.2645, 0.422919, 0.36514906, 100.,
        0.2645, 0.42902209, 0.54679323, 100.,
        0.2645, 0.42980788, 0.50942101, 100.,
        -0.2645, 0.42954704, 0.47194318, 100.,
        -0.2645, 0.42824656, 0.43477897, 100.,
        0.2645, 0.42594094, 0.3983337, 100.,
        -0.2645, 0.42735293, 0.58136855, 100.,
        -0.2645, 0.42910149, 0.54447221, 100.,
        0.2645, 0.42982229, 0.50708112, 100.,
        -0.2645, 0.42949608, 0.46961065, 100.,
        -0.2645, 0.42813159, 0.43247968, 100.,
        -0.2645, 0.42482049, 0.61516258, 100.,
        0.2645, 0.42749136, 0.57908829, 100.,
        0.2645, 0.42917687, 0.54214925, 100.,
        -0.2645, 0.42983261, 0.50474093, 100.,
        -0.2645, 0.42944107, 0.46727945, 100.,
        -0.2645, 0.42944107, 0.46727945, 100.,
        0.2645, 0.32257753, 0.94819408, 100.,
        0.2645, 0.31663196, 0.95508757, 100.,
        0.2645, 0.31093566, 0.96110142, 100.,
        0.2645, 0.30548947, 0.96633839, 100.,
        0.2645, 0.30029103, 0.97089165, 100.,
        0.2645, 0.3169957, 0.95468427, 100.,
        0.2645, 0.31128378, 0.9607499, 100.,
        0.2645, 0.30582202, 0.96603252, 100.,
        0.2645, 0.30060824, 0.9706259, 100.,
        0.2645, 0.29563774, 0.97461444, 100.,
        0.2645, 0.31163288, 0.96039533, 100.,
        0.2645, 0.30615555, 0.96572397, 100.,
        0.2645, 0.30092641, 0.97035779, 100.,
        0.2645, 0.2959409, 0.97438178, 100.,
        0.2645, 0.29119255, 0.97787196, 100.,
        0.2645, 0.30649004, 0.96541272, 100.,
        0.2645, 0.30124552, 0.97008732, 100.,
        0.2645, 0.29624499, 0.97414704, 100.,
        0.2645, 0.29148205, 0.97766847, 100.,
        0.2645, 0.28694892, 0.98071974, 100.,
        0.2645, 0.30156558, 0.96981445, 100.,
        0.2645, 0.29654999, 0.97391021, 100.,
        0.2645, 0.29177245, 0.97746315, 100.,
        0.2645, 0.28722523, 0.98054192, 100.,
        0.2645, 0.28289958, 0.98320731, 100.,
        0.2645, 0.28289958, 0.98320731, 100.,
    ]
    Flanker_Rep.set_log_conditions((SLOPE, CONTROL))
    mySystem.run(
        num_trials=nTrials,
        inputs=stim_list_dict,
    )
    np.testing.assert_allclose(
        pytest.helpers.expand_np_ndarray(mySystem.results),
        expected_results_array,
        atol=1e-08,
        verbose=True,
    )
    # log_val = Flanker_Rep.log.nparray(SLOPE, header=False)
    # trial_vals = [[1], [2], [3], [4], [5],
    # [6], [7], [8], [9], [10],
    # [11], [12], [13], [14], [15],
    # [16], [17], [18], [19], [20],
    # [21], [22], [23], [24], [25],
    # [27], [28], [29], [30], [31],
    # [32], [33], [34], [35], [36],
    # [37], [38], [39], [40], [41],
    # [42], [43], [44], [45], [46],
    # [47], [48], [49], [50], [51],
    # [53], [54], [55], [56], [57],
    # [58], [59], [60], [61], [62],
    # [63], [64], [65], [66], [67],
    # [68], [69], [70], [71], [72],
    # [73], [74], [75], [76], [77]]
    # slope_vals = [[1.0], [1.2], [1.4], [1.6], [1.8],
    # [1.0], [1.2], [1.4], [1.6], [1.8],
    # [1.0], [1.2], [1.4], [1.6], [1.8],
    # [1.0], [1.2], [1.4], [1.6], [1.8],
    # [1.0], [1.2], [1.4], [1.6], [1.8],
    # [1.0], [1.2], [1.4], [1.6], [1.8],
    # [1.0], [1.2], [1.4], [1.6], [1.8],
    # [1.0], [1.2], [1.4], [1.6], [1.8],
    # [1.0], [1.2], [1.4], [1.6], [1.8],
    # [1.0], [1.2], [1.4], [1.6], [1.8],
    # [1.0], [1.2], [1.4], [1.6], [1.8],
    # [1.0], [1.2], [1.4], [1.6], [1.8],
    # [1.0], [1.2], [1.4], [1.6], [1.8],
    # [1.0], [1.2], [1.4], [1.6], [1.8],
    # [1.0], [1.2], [1.4], [1.6], [1.8]]
    # np.testing.assert_allclose(pytest.helpers.expand_np_nd(log_val[1][0:]), trial_vals, atol=1e-08, verbose=True)
    # np.testing.assert_allclose(pytest.helpers.expand_np_nd(log_val[3][0:]), slope_vals, atol=1e-08, verbose=True)
def test_laming_validation_specify_control_signals():
    """
    EVC validation (Laming) with control signals specified explicitly on the
    System (DRIFT_RATE and THRESHOLD of the Decision mechanism).

    Builds a two-process System (task execution + reward), runs it on two
    stimuli and checks the flattened results, selected output states and the
    modulated parameter states against reference values.
    """
    # Mechanisms:
    Input = TransferMechanism(
        name='Input'
    )
    Reward = TransferMechanism(
        name='Reward',
        output_states=[RESULT, MEAN, VARIANCE]
    )
    Decision = DDM(
        function=BogaczEtAl(
            drift_rate=1.0,
            threshold=1.0,
            noise=0.5,
            starting_point=0,
            t0=0.45
        ),
        output_states=[
            DECISION_VARIABLE,
            RESPONSE_TIME,
            PROBABILITY_UPPER_THRESHOLD
        ],
        name='Decision'
    )
    # Processes:
    TaskExecutionProcess = Process(
        default_variable=[0],
        pathway=[Input, IDENTITY_MATRIX, Decision],
        name='TaskExecutionProcess'
    )
    RewardProcess = Process(
        default_variable=[0],
        pathway=[Reward],
        name='RewardProcess'
    )
    # System:
    mySystem = System(
        processes=[TaskExecutionProcess, RewardProcess],
        controller=EVCControlMechanism,
        enable_controller=True,
        monitor_for_control=[
            Reward,
            Decision.PROBABILITY_UPPER_THRESHOLD,
            (Decision.RESPONSE_TIME, -1, 1)
        ],
        control_signals=[
            (DRIFT_RATE, Decision),
            (THRESHOLD, Decision)
        ],
        name='EVC Test System'
    )
    # Stimulus
    stim_list_dict = {
        Input: [0.5, 0.123],
        Reward: [20, 20]
    }
    # Run system:
    mySystem.run(
        inputs=stim_list_dict
    )
    # Prediction mechanisms are appended to the execution list by the EVC
    RewardPrediction = mySystem.execution_list[3]
    InputPrediction = mySystem.execution_list[4]
    # rearranging mySystem.results into a format that we can compare with pytest
    # (plain nested lists of floats)
    results_array = [[float(inner_elem) for inner_elem in elem]
                     for elem in mySystem.results]
    # mySystem.results expected output properly formatted
    expected_results_array = [
        [10., 10.0, 0.0, -0.1, 0.48999867, 0.50499983],
        [10., 10.0, 0.0, -0.4, 1.08965888, 0.51998934],
        [10., 10.0, 0.0, 0.7, 2.40680493, 0.53494295],
        [10., 10.0, 0.0, -1., 4.43671978, 0.549834],
        [10., 10.0, 0.0, 0.1, 0.48997868, 0.51998934],
        [10., 10.0, 0.0, -0.4, 1.08459402, 0.57932425],
        [10., 10.0, 0.0, 0.7, 2.36033556, 0.63645254],
        [10., 10.0, 0.0, 1., 4.24948962, 0.68997448],
        [10., 10.0, 0.0, 0.1, 0.48993479, 0.53494295],
        [10., 10.0, 0.0, 0.4, 1.07378304, 0.63645254],
        [10., 10.0, 0.0, 0.7, 2.26686573, 0.72710822],
        [10., 10.0, 0.0, 1., 3.90353015, 0.80218389],
        [10., 10.0, 0.0, 0.1, 0.4898672, 0.549834],
        [10., 10.0, 0.0, -0.4, 1.05791834, 0.68997448],
        [10., 10.0, 0.0, 0.7, 2.14222978, 0.80218389],
        [10., 10.0, 0.0, 1., 3.49637662, 0.88079708],
        [10., 10.0, 0.0, 1., 3.49637662, 0.88079708],
        [15., 15.0, 0.0, 0.1, 0.48999926, 0.50372993],
        [15., 15.0, 0.0, -0.4, 1.08981011, 0.51491557],
        [15., 15.0, 0.0, 0.7, 2.40822035, 0.52608629],
        [15., 15.0, 0.0, 1., 4.44259627, 0.53723096],
        [15., 15.0, 0.0, 0.1, 0.48998813, 0.51491557],
        [15., 15.0, 0.0, 0.4, 1.0869779, 0.55939819],
        [15., 15.0, 0.0, -0.7, 2.38198336, 0.60294711],
        [15., 15.0, 0.0, 1., 4.33535807, 0.64492386],
        [15., 15.0, 0.0, 0.1, 0.48996368, 0.52608629],
        [15., 15.0, 0.0, 0.4, 1.08085171, 0.60294711],
        [15., 15.0, 0.0, 0.7, 2.32712843, 0.67504223],
        [15., 15.0, 0.0, 1., 4.1221271, 0.7396981],
        [15., 15.0, 0.0, 0.1, 0.48992596, 0.53723096],
        [15., 15.0, 0.0, -0.4, 1.07165729, 0.64492386],
        [15., 15.0, 0.0, 0.7, 2.24934228, 0.7396981],
        [15., 15.0, 0.0, 1., 3.84279648, 0.81637827],
        [15., 15.0, 0.0, 1., 3.84279648, 0.81637827]
    ]
    expected_output = [
        # Decision Output | Second Trial
        (Decision.output_states[0].value, np.array(1.0)),
        # Input Prediction Output | Second Trial
        (InputPrediction.output_states[0].value, np.array(0.1865)),
        # RewardPrediction Output | Second Trial
        (RewardPrediction.output_states[0].value, np.array(15.0)),
        # --- Decision Mechanism ---
        # ControlSignal Values
        # drift rate
        # ALT: float(Decision._parameter_states[DRIFT_RATE].value
        # (mySystem.controller.control_signals[0].value, np.array(1.0)),
        # # threshold
        #
        # # ALT: float(Decision._parameter_states[THRESHOLD].value
        # (mySystem.controller.control_signals[1].value, np.array(1.0)),
        # Output State Values
        # decision variable
        (Decision.output_states[DECISION_VARIABLE].value, np.array([1.0])),
        # response time
        (Decision.output_states[RESPONSE_TIME].value, np.array([3.84279648])),
        # upper bound
        (Decision.output_states[PROBABILITY_UPPER_THRESHOLD].value, np.array([0.81637827])),
        # lower bound
        # (round(float(Decision.output_states['DDM_probability_lowerBound'].value),3), 0.184),
        # --- Reward Mechanism ---
        # Output State Values
        # transfer mean
        (Reward.output_states[RESULT].value, np.array([15.])),
        # transfer_result
        (Reward.output_states[MEAN].value, np.array(15.0)),
        # transfer variance
        (Reward.output_states[VARIANCE].value, np.array(0.0)),
        # System Results Array
        # (all intermediate output values of system)
        (results_array, expected_results_array)
    ]
    # enumerate instead of indexing so each failure names its entry
    for i, (val, expected) in enumerate(expected_output):
        np.testing.assert_allclose(val, expected, atol=1e-08, err_msg='Failed on expected_output[{0}]'.format(i))
    # The modulated parameter value must equal control signal * base function value
    np.testing.assert_almost_equal(
        Decision._parameter_states[DRIFT_RATE].value,
        Decision._parameter_states[DRIFT_RATE].mod_afferents[0].value * Decision._parameter_states[DRIFT_RATE].function_object.value
    )
    np.testing.assert_almost_equal(
        Decision._parameter_states[THRESHOLD].value,
        Decision._parameter_states[THRESHOLD].mod_afferents[0].value * Decision._parameter_states[THRESHOLD].function_object.value
    )
| [
"psyneulink.components.functions.function.Linear",
"pytest.helpers.expand_np_ndarray",
"psyneulink.components.system.System",
"psyneulink.components.projections.modulatory.controlprojection.ControlProjection",
"numpy.testing.assert_almost_equal",
"psyneulink.components.functions.function.BogaczEtAl",
"n... | [((1023, 1054), 'psyneulink.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""Input"""'}), "(name='Input')\n", (1040, 1054), False, 'from psyneulink.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((1083, 1155), 'psyneulink.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'output_states': '[RESULT, MEAN, VARIANCE]', 'name': '"""Reward"""'}), "(output_states=[RESULT, MEAN, VARIANCE], name='Reward')\n", (1100, 1155), False, 'from psyneulink.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((2709, 2802), 'psyneulink.components.process.Process', 'Process', ([], {'size': '(1)', 'pathway': '[Input, IDENTITY_MATRIX, Decision]', 'name': '"""TaskExecutionProcess"""'}), "(size=1, pathway=[Input, IDENTITY_MATRIX, Decision], name=\n 'TaskExecutionProcess')\n", (2716, 2802), False, 'from psyneulink.components.process import Process\n'), ((2886, 2941), 'psyneulink.components.process.Process', 'Process', ([], {'size': '(1)', 'pathway': '[Reward]', 'name': '"""RewardProcess"""'}), "(size=1, pathway=[Reward], name='RewardProcess')\n", (2893, 2941), False, 'from psyneulink.components.process import Process\n'), ((3037, 3288), 'psyneulink.components.system.System', 'System', ([], {'processes': '[TaskExecutionProcess, RewardProcess]', 'controller': 'EVCControlMechanism', 'enable_controller': '(True)', 'monitor_for_control': '[Reward, Decision.PROBABILITY_UPPER_THRESHOLD, (Decision.RESPONSE_TIME, -1, 1)]', 'name': '"""EVC Test System"""'}), "(processes=[TaskExecutionProcess, RewardProcess], controller=\n EVCControlMechanism, enable_controller=True, monitor_for_control=[\n Reward, Decision.PROBABILITY_UPPER_THRESHOLD, (Decision.RESPONSE_TIME, \n -1, 1)], name='EVC Test System')\n", (3043, 3288), False, 'from psyneulink.components.system import System\n'), ((8414, 8438), 'numpy.arange', 'np.arange', 
(['(1.0)', '(2.0)', '(0.2)'], {}), '(1.0, 2.0, 0.2)\n', (8423, 8438), True, 'import numpy as np\n'), ((10099, 10131), 'psyneulink.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""Reward"""'}), "(name='Reward')\n", (10116, 10131), False, 'from psyneulink.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((10177, 10307), 'psyneulink.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[Target_Stim, Target_Rep, Decision]', 'prefs': 'process_prefs', 'name': '"""Target Control Process"""'}), "(default_variable=[0], pathway=[Target_Stim, Target_Rep, Decision],\n prefs=process_prefs, name='Target Control Process')\n", (10184, 10307), False, 'from psyneulink.components.process import Process\n'), ((10371, 10504), 'psyneulink.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[Flanker_Stim, Flanker_Rep, Decision]', 'prefs': 'process_prefs', 'name': '"""Flanker Control Process"""'}), "(default_variable=[0], pathway=[Flanker_Stim, Flanker_Rep, Decision],\n prefs=process_prefs, name='Flanker Control Process')\n", (10378, 10504), False, 'from psyneulink.components.process import Process\n'), ((10569, 10710), 'psyneulink.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[Target_Stim, Automatic_Component, Decision]', 'prefs': 'process_prefs', 'name': '"""Target Automatic Process"""'}), "(default_variable=[0], pathway=[Target_Stim, Automatic_Component,\n Decision], prefs=process_prefs, name='Target Automatic Process')\n", (10576, 10710), False, 'from psyneulink.components.process import Process\n'), ((10776, 10920), 'psyneulink.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[Flanker_Stim, Automatic_Component, Decision]', 'prefs': 'process_prefs', 'name': '"""Flanker1 Automatic Process"""'}), "(default_variable=[0], pathway=[Flanker_Stim, 
Automatic_Component,\n Decision], prefs=process_prefs, name='Flanker1 Automatic Process')\n", (10783, 10920), False, 'from psyneulink.components.process import Process\n'), ((10976, 11071), 'psyneulink.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[Reward]', 'prefs': 'process_prefs', 'name': '"""RewardProcess"""'}), "(default_variable=[0], pathway=[Reward], prefs=process_prefs, name=\n 'RewardProcess')\n", (10983, 11071), False, 'from psyneulink.components.process import Process\n'), ((11135, 11439), 'psyneulink.components.system.System', 'System', ([], {'processes': '[TargetControlProcess, FlankerControlProcess, TargetAutomaticProcess,\n FlankerAutomaticProcess, RewardProcess]', 'controller': 'EVCControlMechanism', 'enable_controller': '(True)', 'monitor_for_control': '[Reward, (Decision.PROBABILITY_UPPER_THRESHOLD, 1, -1)]', 'name': '"""EVC Gratton System"""'}), "(processes=[TargetControlProcess, FlankerControlProcess,\n TargetAutomaticProcess, FlankerAutomaticProcess, RewardProcess],\n controller=EVCControlMechanism, enable_controller=True,\n monitor_for_control=[Reward, (Decision.PROBABILITY_UPPER_THRESHOLD, 1, \n -1)], name='EVC Gratton System')\n", (11141, 11439), False, 'from psyneulink.components.system import System\n'), ((19712, 19743), 'psyneulink.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""Input"""'}), "(name='Input')\n", (19729, 19743), False, 'from psyneulink.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((19771, 19843), 'psyneulink.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""Reward"""', 'output_states': '[RESULT, MEAN, VARIANCE]'}), "(name='Reward', output_states=[RESULT, MEAN, VARIANCE])\n", (19788, 19843), False, 'from psyneulink.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((20262, 20368), 
'psyneulink.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[Input, IDENTITY_MATRIX, Decision]', 'name': '"""TaskExecutionProcess"""'}), "(default_variable=[0], pathway=[Input, IDENTITY_MATRIX, Decision],\n name='TaskExecutionProcess')\n", (20269, 20368), False, 'from psyneulink.components.process import Process\n'), ((20416, 20485), 'psyneulink.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[Reward]', 'name': '"""RewardProcess"""'}), "(default_variable=[0], pathway=[Reward], name='RewardProcess')\n", (20423, 20485), False, 'from psyneulink.components.process import Process\n'), ((20546, 20867), 'psyneulink.components.system.System', 'System', ([], {'processes': '[TaskExecutionProcess, RewardProcess]', 'controller': 'EVCControlMechanism', 'enable_controller': '(True)', 'monitor_for_control': '[Reward, Decision.PROBABILITY_UPPER_THRESHOLD, (Decision.RESPONSE_TIME, -1, 1)]', 'control_signals': '[(DRIFT_RATE, Decision), (THRESHOLD, Decision)]', 'name': '"""EVC Test System"""'}), "(processes=[TaskExecutionProcess, RewardProcess], controller=\n EVCControlMechanism, enable_controller=True, monitor_for_control=[\n Reward, Decision.PROBABILITY_UPPER_THRESHOLD, (Decision.RESPONSE_TIME, \n -1, 1)], control_signals=[(DRIFT_RATE, Decision), (THRESHOLD, Decision)\n ], name='EVC Test System')\n", (20552, 20867), False, 'from psyneulink.components.system import System\n'), ((25461, 25671), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Decision._parameter_states[DRIFT_RATE].value', '(Decision._parameter_states[DRIFT_RATE].mod_afferents[0].value * Decision.\n _parameter_states[DRIFT_RATE].function_object.value)'], {}), '(Decision._parameter_states[DRIFT_RATE].value,\n Decision._parameter_states[DRIFT_RATE].mod_afferents[0].value *\n Decision._parameter_states[DRIFT_RATE].function_object.value)\n', (25491, 25671), True, 'import numpy as np\n'), ((25690, 25898), 
'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['Decision._parameter_states[THRESHOLD].value', '(Decision._parameter_states[THRESHOLD].mod_afferents[0].value * Decision.\n _parameter_states[THRESHOLD].function_object.value)'], {}), '(Decision._parameter_states[THRESHOLD].value,\n Decision._parameter_states[THRESHOLD].mod_afferents[0].value * Decision\n ._parameter_states[THRESHOLD].function_object.value)\n', (25720, 25898), True, 'import numpy as np\n'), ((7876, 7889), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (7884, 7889), True, 'import numpy as np\n'), ((11911, 11935), 'psyneulink.components.functions.function.Exponential', 'Exponential', ([], {'rate': '(0.8046)'}), '(rate=0.8046)\n', (11922, 11935), False, 'from psyneulink.components.functions.function import BogaczEtAl, DRIFT_RATE, Exponential, Linear, THRESHOLD\n'), ((12014, 12038), 'psyneulink.components.functions.function.Exponential', 'Exponential', ([], {'rate': '(0.8046)'}), '(rate=0.8046)\n', (12025, 12038), False, 'from psyneulink.components.functions.function import BogaczEtAl, DRIFT_RATE, Exponential, Linear, THRESHOLD\n'), ((17634, 17684), 'pytest.helpers.expand_np_ndarray', 'pytest.helpers.expand_np_ndarray', (['mySystem.results'], {}), '(mySystem.results)\n', (17666, 17684), False, 'import pytest\n'), ((6133, 6146), 'numpy.array', 'np.array', (['(1.0)'], {}), '(1.0)\n', (6141, 6146), True, 'import numpy as np\n'), ((6248, 6264), 'numpy.array', 'np.array', (['(0.1865)'], {}), '(0.1865)\n', (6256, 6264), True, 'import numpy as np\n'), ((6367, 6381), 'numpy.array', 'np.array', (['(15.0)'], {}), '(15.0)\n', (6375, 6381), True, 'import numpy as np\n'), ((6547, 6562), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (6555, 6562), True, 'import numpy as np\n'), ((6649, 6671), 'numpy.array', 'np.array', (['[3.84279648]'], {}), '([3.84279648])\n', (6657, 6671), True, 'import numpy as np\n'), ((6770, 6792), 'numpy.array', 'np.array', (['[0.81637827]'], {}), 
'([0.81637827])\n', (6778, 6792), True, 'import numpy as np\n'), ((7062, 7078), 'numpy.array', 'np.array', (['[15.0]'], {}), '([15.0])\n', (7070, 7078), True, 'import numpy as np\n'), ((7155, 7169), 'numpy.array', 'np.array', (['(15.0)'], {}), '(15.0)\n', (7163, 7169), True, 'import numpy as np\n'), ((7253, 7266), 'numpy.array', 'np.array', (['(0.0)'], {}), '(0.0)\n', (7261, 7266), True, 'import numpy as np\n'), ((8239, 8287), 'psyneulink.globals.preferences.preferenceset.PreferenceEntry', 'PreferenceEntry', (['(False)', 'PreferenceLevel.INSTANCE'], {}), '(False, PreferenceLevel.INSTANCE)\n', (8254, 8287), False, 'from psyneulink.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel\n'), ((8310, 8357), 'psyneulink.globals.preferences.preferenceset.PreferenceEntry', 'PreferenceEntry', (['(True)', 'PreferenceLevel.INSTANCE'], {}), '(True, PreferenceLevel.INSTANCE)\n', (8325, 8357), False, 'from psyneulink.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel\n'), ((8535, 8555), 'psyneulink.components.functions.function.Linear', 'Linear', ([], {'slope': '(0.3324)'}), '(slope=0.3324)\n', (8541, 8555), False, 'from psyneulink.components.functions.function import BogaczEtAl, DRIFT_RATE, Exponential, Linear, THRESHOLD\n'), ((8628, 8654), 'psyneulink.components.functions.function.Linear', 'Linear', ([], {'slope': '(0.3545221843)'}), '(slope=0.3545221843)\n', (8634, 8654), False, 'from psyneulink.components.functions.function import BogaczEtAl, DRIFT_RATE, Exponential, Linear, THRESHOLD\n'), ((9581, 9598), 'psyneulink.components.functions.function.Linear', 'Linear', ([], {'slope': '(1.0)'}), '(slope=1.0)\n', (9587, 9598), False, 'from psyneulink.components.functions.function import BogaczEtAl, DRIFT_RATE, Exponential, Linear, THRESHOLD\n'), ((9702, 9788), 'psyneulink.components.functions.function.BogaczEtAl', 'BogaczEtAl', ([], {'drift_rate': '(1.0)', 'threshold': '(0.2645)', 'noise': '(0.5)', 'starting_point': '(0)', 't0': 
'(0.15)'}), '(drift_rate=1.0, threshold=0.2645, noise=0.5, starting_point=0,\n t0=0.15)\n', (9712, 9788), False, 'from psyneulink.components.functions.function import BogaczEtAl, DRIFT_RATE, Exponential, Linear, THRESHOLD\n'), ((19903, 19982), 'psyneulink.components.functions.function.BogaczEtAl', 'BogaczEtAl', ([], {'drift_rate': '(1.0)', 'threshold': '(1.0)', 'noise': '(0.5)', 'starting_point': '(0)', 't0': '(0.45)'}), '(drift_rate=1.0, threshold=1.0, noise=0.5, starting_point=0, t0=0.45)\n', (19913, 19982), False, 'from psyneulink.components.functions.function import BogaczEtAl, DRIFT_RATE, Exponential, Linear, THRESHOLD\n'), ((23600, 23613), 'numpy.array', 'np.array', (['(1.0)'], {}), '(1.0)\n', (23608, 23613), True, 'import numpy as np\n'), ((23715, 23731), 'numpy.array', 'np.array', (['(0.1865)'], {}), '(0.1865)\n', (23723, 23731), True, 'import numpy as np\n'), ((23834, 23848), 'numpy.array', 'np.array', (['(15.0)'], {}), '(15.0)\n', (23842, 23848), True, 'import numpy as np\n'), ((24393, 24408), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (24401, 24408), True, 'import numpy as np\n'), ((24495, 24517), 'numpy.array', 'np.array', (['[3.84279648]'], {}), '([3.84279648])\n', (24503, 24517), True, 'import numpy as np\n'), ((24616, 24638), 'numpy.array', 'np.array', (['[0.81637827]'], {}), '([0.81637827])\n', (24624, 24638), True, 'import numpy as np\n'), ((24908, 24924), 'numpy.array', 'np.array', (['[15.0]'], {}), '([15.0])\n', (24916, 24924), True, 'import numpy as np\n'), ((25001, 25015), 'numpy.array', 'np.array', (['(15.0)'], {}), '(15.0)\n', (25009, 25015), True, 'import numpy as np\n'), ((25099, 25112), 'numpy.array', 'np.array', (['(0.0)'], {}), '(0.0)\n', (25107, 25112), True, 'import numpy as np\n'), ((7711, 7748), 'numpy.array', 'np.array', (['controller.allocationPolicy'], {}), '(controller.allocationPolicy)\n', (7719, 7748), True, 'import numpy as np\n'), ((8021, 8069), 'psyneulink.globals.preferences.preferenceset.PreferenceEntry', 
'PreferenceEntry', (['(False)', 'PreferenceLevel.INSTANCE'], {}), '(False, PreferenceLevel.INSTANCE)\n', (8036, 8069), False, 'from psyneulink.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel\n'), ((8103, 8151), 'psyneulink.globals.preferences.preferenceset.PreferenceEntry', 'PreferenceEntry', (['(False)', 'PreferenceLevel.INSTANCE'], {}), '(False, PreferenceLevel.INSTANCE)\n', (8118, 8151), False, 'from psyneulink.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel\n'), ((8851, 8953), 'psyneulink.components.projections.modulatory.controlprojection.ControlProjection', 'ControlProjection', ([], {'function': 'Linear', 'control_signal_params': '{ALLOCATION_SAMPLES: signalSearchRange}'}), '(function=Linear, control_signal_params={\n ALLOCATION_SAMPLES: signalSearchRange})\n', (8868, 8953), False, 'from psyneulink.components.projections.modulatory.controlprojection import ControlProjection\n'), ((9226, 9328), 'psyneulink.components.projections.modulatory.controlprojection.ControlProjection', 'ControlProjection', ([], {'function': 'Linear', 'control_signal_params': '{ALLOCATION_SAMPLES: signalSearchRange}'}), '(function=Linear, control_signal_params={\n ALLOCATION_SAMPLES: signalSearchRange})\n', (9243, 9328), False, 'from psyneulink.components.projections.modulatory.controlprojection import ControlProjection\n'), ((1433, 1458), 'numpy.arange', 'np.arange', (['(0.1)', '(1.01)', '(0.3)'], {}), '(0.1, 1.01, 0.3)\n', (1442, 1458), True, 'import numpy as np\n'), ((1721, 1746), 'numpy.arange', 'np.arange', (['(0.1)', '(1.01)', '(0.3)'], {}), '(0.1, 1.01, 0.3)\n', (1730, 1746), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, LUH: IMR"
__credits__ = ["<NAME>"]
# __license__ = ""
__version__ = "0.2"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "alpha"
__package_name__ = "AVTcamera"
__developer__ = __author__
'''
Based on AVT implementation of Rüdiger Beermann and pymba:
https://github.com/morefigs/pymba.git
'''
import copy
import logging
import re
import time
import numpy as np
from pymba import Vimba
from pyCameras.cameraTemplate import ControllerTemplate, CameraTemplate
LOGGING_LEVEL = None
class Controller(ControllerTemplate):
    """
    Controller for AVT camera devices, implemented on top of the pymba
    backend.
    """
    def __init__(self):
        """
        Start the Vimba backend and trigger a one-shot GigE discovery so
        that attached AVT devices become visible.
        """
        super(Controller, self).__init__()
        self.logger = logging.getLogger(__name__)
        if LOGGING_LEVEL is not None:
            self.logger.setLevel(LOGGING_LEVEL)
        self.logger.debug('Starting AVT Camera Controller')
        self._vimba = Vimba()
        self._vimba.startup()
        self.__system = self._vimba.getSystem()
        self.__system.runFeatureCommand('GeVDiscoveryAllOnce')
        # Give the discovery command a moment to complete
        time.sleep(0.2)

    def updateDeviceHandles(self):
        """
        Refresh the list of available devices
        """
        self.logger.debug('Searching for AVT camera devices')
        handles = []
        for cam_id in self._vimba.getCameraIds():
            info = self._vimba.getCamera(cam_id)._info
            handles.append('<AVT {model} (MAC: {mac})>'
                           ''.format(model=info.modelName,
                                     mac=info.cameraIdString))
        self.device_handles = handles

    def getDevice(self, device_handle):
        """
        Return the corresponding camera object for given device handle

        Parameters
        ----------
        device_handle : can be IP address, mac address or
            camera ID (DEV_...) as reported by vimba.getCameraIds

        Returns
        -------
        cam : Camera object
            A camera object for AVT devices corresponding to the given
            device handle
        """
        # Lists/tuples of handles: only the first entry is used
        if isinstance(device_handle, (list, tuple)):
            device_handle = device_handle[0]
        self.logger.debug('Opening device {device_handle}'
                          ''.format(device_handle=device_handle))
        # Prefer a MAC-style id 'DEV_XXXXXXXXXXXX'; fall back to an IPv4 address
        matches = re.findall(r'(DEV_[0-9A-Z]{11,13})', device_handle)
        if not matches:
            matches = re.findall(r'[0-9]+(?:\.[0-9]+){3}', device_handle)
        try:
            return Camera(device_handle=matches[0], vimba=self._vimba)
        except Exception as e:
            self.logger.exception('Failed to open the camera device: {e}'
                                  ''.format(e=e))
            msg = '<Was not able to open camera with given device handle!!\n' \
                  'Handle must be IP or MAC address (DEV_XXXXXXXXXXXXX)>'
            e.message = msg
            print(e.message)
            raise

    def closeController(self):
        """Shut down the Vimba backend owned by this controller."""
        self._vimba.shutdown()
        self.logger.info("Vimba Camera Controller shutdown")

    def __repr__(self):
        return "<AVT Camera Controller>"
class Camera(CameraTemplate):
"""
AVT Camera implementation based on pymba
Creating this Object automatically opens the camera. It is NOT necessary to
call openDevice() !!! This is done to set some settings to put the camera
into freerun mode.
"""
    def __init__(self, device_handle, vimba=None):
        """
        Implementation of the AVT camera device

        Parameters
        ----------
        device_handle : object
            Unique camera device handle to identify the camera (IP address,
            MAC address or Vimba camera id string)
        vimba : Vimba object, optional
            Already started Vimba instance to reuse (e.g. supplied by a
            Controller). If None, a new instance is created, started, and a
            one-shot GigE discovery is triggered.
        """
        if vimba is None:
            # No backend handed in: start our own Vimba instance and run a
            # one-shot discovery so the device becomes visible
            self._vimba = Vimba()
            self._vimba.startup()
            self.__system = self._vimba.getSystem()
            self.__system.runFeatureCommand('GeVDiscoveryAllOnce')
            # Give the discovery command a moment to complete
            time.sleep(0.2)
        else:
            self._vimba = vimba
        super(Camera, self).__init__(device_handle)
        self.logger = logging.getLogger(__name__)
        if LOGGING_LEVEL is not None:
            self.logger.setLevel(LOGGING_LEVEL)
        self.device = self._vimba.getCamera(self._checkDeviceHandle(device_handle))
        self.device_handle = device_handle
        self.camId = None  # "unique" cam id, resolved by _getCamId()
        self.modelName = self.device._info.modelName
        self.triggerModeSetting = 'off'
        # Open device and activate freerun mode
        self.openDevice()
        # time.sleep(0.2)
        self.device.TriggerMode = 'Off'
        # self.device.GevSCPSPacketSize = 1500 # Automatic setting not yet implemented in pymba (date: 11.12.17)
        # self.device.GevSCPSPacketSize = 8228
        self.device.runFeatureCommand("GVSPAdjustPacketSize")
        # Influences framerate, necessary if network bandwidth is not big enough
        # NOTE: Functions self._setMaxTransferRate, self._setTransferRate and self._setNumberCams may change this value
        # self.device.StreamBytesPerSecond = 10000000 # 10 Mb/sec (without GigE)
        self.device.StreamBytesPerSecond = 115000000  # 100 Mb/sec (with GigE)
        self.maxTransferRate = 115000000
        self.numCams = 1
        self.isSet = {'rate': False, 'numCams': False}
        # Register AVT specific functions.
        # Function to set maximum transfer rate depending on used network specifications
        self.registerFeature('maxRate', self._setMaxTransferRate)
        self.registerFeature('bandwidth', self._setMaxTransferRate)
        self.registerFeature('maximumTransferRate', self._setMaxTransferRate)
        self.registerFeature('transferRate', self._setTransferRate)
        # Function to set number of cameras, may affect the available transfer rate per camera
        self.registerFeature('numCams', self._setNumberCams)
        self.registerFeature('numberCams', self._setNumberCams)
        self.registerFeature('numberOfCameras', self._setNumberCams)
        self.framelist = []  # frames announced for multi-frame capture
        self.imgData = []  # images collected by _frameCallback
        self._clearQueueAndFrames()
        # Init data type LUT for each PixelFormat
        self.imageFormatLUT = {'Mono8': np.uint8, 'Mono12': np.uint16}
    def __del__(self):
        # Finalizer: shut down the Vimba backend when this object is garbage
        # collected.
        # NOTE(review): this shuts down self._vimba even when it was passed
        # in from a Controller and may be shared with other cameras — confirm
        # that is intended.
        # self._cleanUp()
        self._vimba.shutdown()
def _checkDeviceHandle(self, device_handle):
"""
Return the corresponding camera object for given device handle
Parameters
----------
device_handle : can be IP address, mac address or
camera ID (DEV_...) as reported by vimba.getCameraIds
Returns
-------
cam : Camera object
A camera object for AVT devices corresponding to the given
device handle
"""
# Check if device handle is list or tuple, if so: use first entry
if isinstance(device_handle, (list, tuple)):
device_handle = device_handle[0]
self.logger.debug('Opening device {device_handle}'
''.format(device_handle=device_handle))
# Search for mac addresses in form 'DEV_XXXXXXXXXXXX'
candidates = re.findall(r'([0-9A-Z]{11,13})', device_handle)
if len(candidates) == 0:
# no mac address found: search for IP
candidates = re.findall(r'[0-9]+(?:\.[0-9]+){3}', device_handle)
return candidates[0]
def _setMaxTransferRate(self, rate=None):
"""
Sets the transfer rate by changing 'StreamBytesPerSecond'.
If passed None, will return actual rate set.
Parameters
----------
rate: int
Maximum bandwidth available. Typical values:
- with GigE : 115000000
- without GigE : 10000000
Returns
-------
self.max_bandwidth: int
If passed None: returns set bandwidth
"""
self.logger.debug("Setting max transfer rate for device {handle} to {rate}"
"".format(handle=self.device_handle, rate=rate))
if rate is None:
return self.maxTransferRate
self.maxTransferRate = rate
self.isSet['rate'] = True
# Call function if number of cams was set
if self.isSet['numCams']:
self._setTransferRate()
else:
self.device.StreamBytesPerSecond = rate
return self.maxTransferRate
def _setNumberCams(self, num=None):
"""
Sets the number of AVT cameras used (this will affect the maximum transfer rate for each camera).
If passed None, will return actual number of cameras set.
Parameters
----------
num: int
Number of AVT cameras
Returns
-------
self.numCams: int
Number of AVT cameras set for this object
"""
self.logger.debug("Setting number of cameras for device {handle} to {num}"
"".format(handle=self.device_handle, num=num))
if num is None:
return self.numCams
self.numCams = num
self.isSet['numCams'] = True
if self.isSet['rate']:
self._setTransferRate()
return self.numCams
def _setTransferRate(self):
"""
Takes maxTransferRate and numCams to compute a viable transfer rate for the device.
"""
transfer_rate = int(self.maxTransferRate / self.numCams)
self.device.StreamBytesPerSecond = transfer_rate
self.logger.debug("Setting transfer rate for {device} to {rate}"
"".format(device=self.device_handle, rate=transfer_rate))
    def _clearQueueAndFrames(self):
        """
        Flush the capture queue and revoke all announced frames.

        Call whenever a buffer overflow is suspected (e.g. before preparing
        a new multi-frame acquisition).
        Calls: - flushCaptureQueue()
               - revokeAllFrames()
        """
        self.device.flushCaptureQueue()
        self.device.revokeAllFrames()
    def _cleanUp(self):
        """
        Release capture resources; call after "AcquisitionStop".

        Calls: - endCapture()
               - flushCaptureQueue()
               - revokeAllFrames()
        """
        self.device.endCapture()
        # flushCaptureQueue() and revokeAllFrames() happen in here
        self._clearQueueAndFrames()
    def _frameCallback(self, frame):
        """
        Frame callback: wrap the captured frame buffer in an ndarray, append
        it to self.imgData and re-queue the frame for the next capture.

        Parameters
        -------
        frame : frame object
            frame created by device.getFrame()
        """
        frame.waitFrameCapture(1000)
        # Get image data ...
        # NOTE(review): np.ndarray(buffer=...) creates a view over the bytes
        # returned by getBufferByteData(); if that call does not itself copy
        # the frame memory, stored images could be invalidated once frames
        # are revoked — confirm pymba semantics.
        singleImg = np.ndarray(buffer=frame.getBufferByteData(),
                                dtype=self.imageFormatLUT[self.device.PixelFormat],
                                shape=(frame.height,
                                       frame.width))
        self.imgData.append(singleImg)
        # Hand the frame back to the capture queue with this callback re-armed
        frame.queueFrameCapture(self._frameCallback)
def _getCamId(self):
"""
Creates a cam-specific cam id, which consists of the manufacturer and a
4 digit number. This id makes it possible to identify the virtual
object with real object.
Returns
-------
camId : "unique" cam id
"""
if self.camId is None:
mfr = b'AVT' # mfr = manufacturer
id = self.device._info.cameraIdString[-4:]
camId = b'_'.join((mfr, id)).decode('utf-8')
return camId
else:
return self.camId
@staticmethod
def listDevices():
"""
List available AVT cameras
Returns
-------
cams : list
list of available AVT devices
"""
return Controller().listDevices()
def openDevice(self):
"""
Opens a camera device with the stored self.device object
"""
try:
self.logger.debug('Opening camera device')
self.device.openCamera()
except Exception as e:
self.logger.exception('Failed to open the camera device: '
'{e}'.format(e=e))
def closeDevice(self):
"""
Closes camera device
"""
try:
self.logger.debug('Closing camera device')
self.device.closeCamera()
del self.device
self.device = None
except Exception as e:
self.logger.exception('Failed to close the camera device: '
'{e}'.format(e=e))
def isOpen(self):
"""
Check if the device for this instance is currently open and ready to
communicate
Returns
-------
bool
True if the camera connection is open, False if it is not
"""
# AVT cameras do not have any isOpen-function by itself.
# Assuming that if there is a device given in self.device, device is opened.
if self.device is not None:
return True
else:
return False
    def getImage(self, *args, **kwargs):
        """
        Get an image from the camera device

        *args and **kwargs are ignored parameters!

        !!! Warning: Check transfer rate of your network connection !!!
        Low transfer-rates may cause incomplete image transfer with missing
        data

        Returns
        -------
        img : np.ndarray
            Current camera image (a copy, independent of the frame buffer)
        """
        self.logger.debug('Creating frame and starting acquisition')
        # Create new frame for camera
        frame = self.device.getFrame()
        # Announce frame
        frame.announceFrame()
        # Capture a camera image
        self.device.startCapture()
        frame.queueFrameCapture()
        self.device.runFeatureCommand('AcquisitionStart')
        frame.waitFrameCapture(1000)
        # Retry while the frame is incomplete; a receive status of 0 is
        # treated as a complete transfer. Give up after the limit to avoid
        # looping forever on a misconfigured link.
        incomplete_frames = 0
        incomplete_frame_limit = 20
        while frame.getReceiveStatus() != 0:
            frame.queueFrameCapture()
            frame.waitFrameCapture(1000)
            incomplete_frames += 1
            if incomplete_frames > incomplete_frame_limit:
                raise RuntimeError("More than {lim} frames in a row were incomplete! Check transfer settings!"
                                   "".format(lim=incomplete_frame_limit))
        self.device.runFeatureCommand('AcquisitionStop')
        self.logger.debug("Trashed frames: {t_frames}".format(t_frames=incomplete_frames))
        # Get image data ... (ndarray view over the frame buffer bytes)
        imgData = np.ndarray(buffer=frame.getBufferByteData(),
                             dtype=self.imageFormatLUT[self.device.PixelFormat],
                             shape=(frame.height,
                                    frame.width))
        # Do cleanup
        self._cleanUp()
        self.logger.debug('Image acquisition finished')
        # .copy() detaches the returned image from the (now revoked) frame
        return imgData.copy()
    def prepareRecording(self, num):
        """ Sets the camera to MultiFrame mode and prepares frames. Use with
        "record()"-function.

        Parameters
        ----------
        num : int
            number of frames to be captured during acquisition
        """
        self._clearQueueAndFrames()
        # MultiFrame mode: the camera stops after exactly num frames
        self.device.AcquisitionMode = 'MultiFrame'
        self.device.AcquisitionFrameCount = num
        # Creating frames
        self.framelist = []
        for _ in range(num):
            frame = self.device.getFrame()
            frame.announceFrame()
            # Each completed frame invokes self._frameCallback; record()
            # waits until it has produced len(self.framelist) images.
            frame.queueFrameCapture(self._frameCallback)
            self.framelist.append(frame)
        self.device.startCapture()
    def record(self):
        """ Blocking image acquisition, ends acquisition when num frames are
        captured, where num is set by "prepareRecording(num)". Only use with
        "prepareRecording(num)".

        Returns
        -------
        imgData : list
            List of images
        """
        self.imgData = []
        self.device.runFeatureCommand('AcquisitionStart')
        # Block until num images are captured
        # NOTE(review): busy-wait loop; self.imgData is filled asynchronously
        # by the _frameCallback registered in prepareRecording().
        while len(self.imgData) != len(self.framelist):
            pass
        self.device.runFeatureCommand('AcquisitionStop')
        # Do cleanup
        self._cleanUp()
        # Set back to freerun mode
        self.device.AcquisitionMode = 'Continuous'
        # Deep copy so the caller keeps the images after buffers are revoked
        return copy.deepcopy(self.imgData)
# TODO: If grabStart without "num" is needed - implement threading solution with while loop (similar to _liveView())
    def grabStart(self, num):
        """
        Prepares num images to be grabbed. This function is not blocking.
        Calling "grabStop()" will end acquisition.

        Parameters
        ----------
        num : int
            Number of images that should be recorded
        """
        self.device.AcquisitionMode = 'MultiFrame'
        self.device.AcquisitionFrameCount = num
        # Creating frames
        self.framelist = []
        for _ in range(num):
            frame = self.device.getFrame()
            frame.announceFrame()
            # Frames are delivered asynchronously via self._frameCallback
            frame.queueFrameCapture(self._frameCallback)
            self.framelist.append(frame)
        self.device.startCapture()
        self.device.runFeatureCommand('AcquisitionStart')
    def grabStop(self):
        """
        Stop grabbing images and return camera to continuous mode.

        Returns
        -------
        list
            Deep copy of the images collected since grabStart()
        """
        self.device.runFeatureCommand('AcquisitionStop')
        # Do cleanup
        self._cleanUp()
        # Set back to freerun mode
        self.device.AcquisitionMode = 'Continuous'
        return copy.deepcopy(self.imgData)
    def _liveView(self):
        """
        Live image stream an visualization through OpenCV window

        Leave _liveView by pressing "q"
        """
        cv.startWindowThread()
        cv.namedWindow("IMG", 2)
        cv.resizeWindow("IMG", 900, 900)
        frame = self.device.getFrame()
        frame.announceFrame()
        self.device.startCapture()
        framecount = 0
        droppedframes = []
        while True:
            try:
                frame.queueFrameCapture()
                success = True
            except Exception:
                # Frame could not be (re-)queued: count it as dropped and
                # skip displaying this iteration
                droppedframes.append(framecount)
                success = False
            self.device.runFeatureCommand("AcquisitionStart")
            self.device.runFeatureCommand("AcquisitionStop")
            frame.waitFrameCapture(1000)
            frame_data = frame.getBufferByteData()
            if success:
                # Interpret the raw byte buffer with the camera's current
                # pixel format and display it
                live_img = np.ndarray(buffer=frame_data,
                                      dtype=self.imageFormatLUT[self.device.PixelFormat],
                                      shape=(frame.height,
                                             frame.width))
                cv.imshow("IMG", live_img)
            framecount += 1
            key = cv.waitKey(1) & 0xFF
            if key == ord("q"):
                cv.destroyAllWindows()
                self.logger.info("Frames displayed: %i" % framecount)
                self.logger.info("Frames dropped: %s" % droppedframes)
                break
        # Cleanup
        self._cleanUp()
def listFeatures(self):
"""
Lists camera features
"""
try:
self.logger.debug('Listing camera features')
featureNames = self.device.getFeatureNames()
print("Printing feature names: ...\n")
print("\n".join(featureNames))
except Exception as e:
self.logger.exception('Failed to get feature names: '
'{e}'.format(e=e))
def setExposureMicrons(self, microns=None):
"""
Set the exposure time to the given value in microseconds or read the
current value by passing None
Parameters
----------
microns : int
Desired exposure time in microseconds that should be set, or None
to read the current exposure time
Returns
-------
microns : int
The exposure time in microseconds after applying the passed value
"""
if microns is not None:
self.logger.debug('Setting <ExposureTime> to {microns}'
''.format(microns=microns))
self.device.ExposureTimeAbs = microns
return self.device.ExposureTimeAbs
def autoExposure(self):
"""
Automatically sets the exposure time of the camera ONCE.
Old exposure setting is lost during the process!
Returns
-------
exposure : int
The exposure time in microseconds after auto exposure
"""
self.logger.debug("Starting automatic exposure control")
self.device.ExposureAuto = "Once"
# Save trigger settings and activate acquisition until
# auto exposure has settled
triggerMode_buffer = self.triggerMode
frame = self.device.getFrame()
frame.announceFrame()
self.device.startCapture()
self.triggerMode = "off"
max_iter = 100
iter = 0
# Auto exposure gets stuck if the border values are reached,
# but further adjustments are necessary
limits = (self.device.ExposureAutoMin, self.device.ExposureAutoMax)
limit_cnt = 0
last_exposure = -1
self.device.runFeatureCommand("AcquisitionStart")
while self.device.ExposureAuto != "Off":
if last_exposure in limits:
limit_cnt += 1
else:
limit_cnt = 0
try:
frame.queueFrameCapture()
except Exception:
pass
frame.waitFrameCapture(1000)
iter += 1
last_exposure = self.device.ExposureTimeAbs
if limit_cnt > 5:
self.logger.info("Auto exposure has run into limits. Continuing with exposure of: {exposure} ".format(
exposure=last_exposure))
self.device.ExposureAuto = "Off"
if iter >= max_iter:
try:
raise TimeoutError("Timeout while setting auto exposure!")
except NameError:
# Python 2 compatible Error
raise Exception("Timeout while setting auto exposure!")
# Cleanup
self.device.runFeatureCommand("AcquisitionStop")
self._cleanUp()
self.triggerMode = triggerMode_buffer
self.logger.debug("Set exposure time to {exposure}"
"".format(exposure=self.device.ExposureTimeAbs))
return self.device.ExposureTimeAbs
def setResolution(self, resolution=None):
"""
Set the resolution of the camera to the given values in pixels or read
the current resolution by passing None
Parameters
----------
resolution : tuple
Desired camera resolution in the form (width, height), or None to
read the current resolution
Returns
-------
resolution : tuple
The set camera resolution after applying the passed value
"""
if resolution is not None:
self.logger.debug('Setting <Width> to {width}'
''.format(width=resolution[0]))
self.device.Width = resolution[0]
self.logger.debug('Setting <Height> to {height}'
''.format(height=resolution[1]))
self.device.Height = resolution[1]
return self.device.Width, self.device.Height
def setGain(self, gain=None):
"""
Set the gain of the camera to the given value or read the current value
by passing None
Parameters
----------
gain : float
Desired gain value in dB to be set, or None to read the current
gain value
Returns
-------
gain : int
The gain value after applying the passed value
"""
if gain is not None:
self.logger.debug('Setting <Gain> to {gain}'
''.format(gain=gain))
self.device.Gain = gain
return self.device.Gain
def setPixelFormat(self, fmt=None):
"""
Set the image format to the passed setting or read the current format
by passing None
Parameters
----------
fmt : str
String describing the desired image format (e.g. "mono8"), or None
to read the current image format. Check camera technical manual for available formats,
may differ from model to model.
Returns
-------
fmt : str
The image format after applying the passed value
"""
if fmt is not None:
self.logger.debug('Setting <PixelFormat> to {fmt}'
''.format(fmt=fmt))
self.device.PixelFormat = fmt
self.device.runFeatureCommand("GVSPAdjustPacketSize")
return self.device.PixelFormat
def setTriggerMode(self, mode=None):
"""
Set the trigger mode of the camera to either "in", "out" or "off", or
read the current trigger setting ba passing None
Parameters
----------
mode : str
The desired trigger mode. "in" means the camera receives a trigger
signal, "out" means the camera sends a trigger signal, "off"" means
the camera does not react to triggers. To read the current trigger
setting pass None
Returns
-------
mode : str
The trigger mode after applying the passed value
"""
self.logger.debug("Setting trigger mode to: {mode}".format(mode=mode))
if mode is None:
return self.triggerModeSetting
elif isinstance(mode, str):
if mode.lower() == 'in':
self.device.TriggerMode = 'On'
self.device.TriggerSource = 'Line1'
self.device.TriggerSelector = 'FrameStart'
self.device.TriggerActivation = "RisingEdge"
self.triggerModeSetting = 'in'
elif mode.lower() == 'out':
# TODO: Implement out trigger for AVT cameras
self.triggerModeSetting = 'out'
raise NotImplementedError('Sending triggers is not'
'implemented yet!')
elif mode.lower() == 'off':
self.device.TriggerMode = 'Off'
self.device.TriggerSource = 'Freerun'
self.device.TriggerSelector = 'FrameStart'
self.triggerModeSetting = 'off'
else:
raise ValueError('Unexpected value in setTriggerMode. '
'Expected "in", "out", or "off". Got {mode}'
''.format(mode=mode))
return self.triggerModeSetting
else:
raise TypeError('Trigger Mode should be None, "in", "out", or '
'"off". Got {mode}'.format(mode=mode))
def __repr__(self):
return repr(self.device)
if __name__ == '__main__':
    # Manual smoke test: device discovery, auto exposure, single-image
    # capture and (optionally) feature listing / live view.  Requires an
    # attached AVT camera, so it cannot run unattended.
    import logging
    import cv2 as cv
    logging.basicConfig(level=logging.DEBUG)
    # Toggles for the optional interactive parts of the demo
    bListFeatures = False
    bLiveView = False
    contr = Controller()
    handle = contr.listDevices()
    print(handle)
    # Dictionary to test different connection types/inputs
    source = {'IP': '172.16.31.10',
              'Handle_list': handle,
              'Handle': handle[0],
              'Bad_input': 'Yo Mama is fat'}
    # Use one of source entries here:
    cam_device = contr.getDevice(source['Handle_list'])
    # cam_device = contr.getDevice('DEV_000F314D941E')
    # Test auto exposure
    # NOTE(review): this rebinds cam_device to a hard-coded camera id,
    # discarding the device obtained from getDevice() above — intended?
    cam_device = Camera('DEV_000F314E2C01')
    cam_device.exposure = 400000
    print("Before: ", cam_device.exposure)
    exposure = cam_device.autoExposure()
    print("After: ", cam_device.exposure)
    # Listing features of device
    if bListFeatures:
        cam_device.listFeatures()
    # Get an image
    image = cam_device.getImage()
    cv.namedWindow('Captured image', cv.WINDOW_NORMAL)
    cv.resizeWindow('Captured image', 1000, 1000)
    cv.imshow('Captured image', image)
    cv.waitKey()
    if bLiveView:
        cam_device._liveView()
    images = cam_device.getImages(10)
    print(len(images))
    for _, img in enumerate(images):
        print('Showing image {i}'.format(i=_))
        cv.imshow('Captured image', img)
        cv.waitKey()
    # grabbedImgs = cam_device.grabStart(10)
    # cam_device.grabStop()
    cam_device.closeDevice()
    contr.closeController()
| [
"copy.deepcopy",
"logging.basicConfig",
"cv2.waitKey",
"cv2.destroyAllWindows",
"pymba.Vimba",
"logging.getLogger",
"time.sleep",
"re.findall",
"cv2.startWindowThread",
"cv2.resizeWindow",
"cv2.imshow",
"numpy.ndarray",
"cv2.namedWindow"
] | [((27493, 27533), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (27512, 27533), False, 'import logging\n'), ((28399, 28449), 'cv2.namedWindow', 'cv.namedWindow', (['"""Captured image"""', 'cv.WINDOW_NORMAL'], {}), "('Captured image', cv.WINDOW_NORMAL)\n", (28413, 28449), True, 'import cv2 as cv\n'), ((28454, 28499), 'cv2.resizeWindow', 'cv.resizeWindow', (['"""Captured image"""', '(1000)', '(1000)'], {}), "('Captured image', 1000, 1000)\n", (28469, 28499), True, 'import cv2 as cv\n'), ((28504, 28538), 'cv2.imshow', 'cv.imshow', (['"""Captured image"""', 'image'], {}), "('Captured image', image)\n", (28513, 28538), True, 'import cv2 as cv\n'), ((28543, 28555), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (28553, 28555), True, 'import cv2 as cv\n'), ((918, 945), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (935, 945), False, 'import logging\n'), ((1114, 1121), 'pymba.Vimba', 'Vimba', ([], {}), '()\n', (1119, 1121), False, 'from pymba import Vimba\n'), ((1271, 1286), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (1281, 1286), False, 'import time\n'), ((2681, 2731), 're.findall', 're.findall', (['"""(DEV_[0-9A-Z]{11,13})"""', 'device_handle'], {}), "('(DEV_[0-9A-Z]{11,13})', device_handle)\n", (2691, 2731), False, 'import re\n'), ((4448, 4475), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (4465, 4475), False, 'import logging\n'), ((7514, 7560), 're.findall', 're.findall', (['"""([0-9A-Z]{11,13})"""', 'device_handle'], {}), "('([0-9A-Z]{11,13})', device_handle)\n", (7524, 7560), False, 'import re\n'), ((16590, 16617), 'copy.deepcopy', 'copy.deepcopy', (['self.imgData'], {}), '(self.imgData)\n', (16603, 16617), False, 'import copy\n'), ((17801, 17828), 'copy.deepcopy', 'copy.deepcopy', (['self.imgData'], {}), '(self.imgData)\n', (17814, 17828), False, 'import copy\n'), ((17993, 18015), 'cv2.startWindowThread', 
'cv.startWindowThread', ([], {}), '()\n', (18013, 18015), True, 'import cv2 as cv\n'), ((18024, 18048), 'cv2.namedWindow', 'cv.namedWindow', (['"""IMG"""', '(2)'], {}), "('IMG', 2)\n", (18038, 18048), True, 'import cv2 as cv\n'), ((18057, 18089), 'cv2.resizeWindow', 'cv.resizeWindow', (['"""IMG"""', '(900)', '(900)'], {}), "('IMG', 900, 900)\n", (18072, 18089), True, 'import cv2 as cv\n'), ((28760, 28792), 'cv2.imshow', 'cv.imshow', (['"""Captured image"""', 'img'], {}), "('Captured image', img)\n", (28769, 28792), True, 'import cv2 as cv\n'), ((28801, 28813), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (28811, 28813), True, 'import cv2 as cv\n'), ((2841, 2892), 're.findall', 're.findall', (['"""[0-9]+(?:\\\\.[0-9]+){3}"""', 'device_handle'], {}), "('[0-9]+(?:\\\\.[0-9]+){3}', device_handle)\n", (2851, 2892), False, 'import re\n'), ((4139, 4146), 'pymba.Vimba', 'Vimba', ([], {}), '()\n', (4144, 4146), False, 'from pymba import Vimba\n'), ((4312, 4327), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (4322, 4327), False, 'import time\n'), ((7670, 7721), 're.findall', 're.findall', (['"""[0-9]+(?:\\\\.[0-9]+){3}"""', 'device_handle'], {}), "('[0-9]+(?:\\\\.[0-9]+){3}', device_handle)\n", (7680, 7721), False, 'import re\n'), ((18734, 18855), 'numpy.ndarray', 'np.ndarray', ([], {'buffer': 'frame_data', 'dtype': 'self.imageFormatLUT[self.device.PixelFormat]', 'shape': '(frame.height, frame.width)'}), '(buffer=frame_data, dtype=self.imageFormatLUT[self.device.\n PixelFormat], shape=(frame.height, frame.width))\n', (18744, 18855), True, 'import numpy as np\n'), ((18989, 19015), 'cv2.imshow', 'cv.imshow', (['"""IMG"""', 'live_img'], {}), "('IMG', live_img)\n", (18998, 19015), True, 'import cv2 as cv\n'), ((19062, 19075), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (19072, 19075), True, 'import cv2 as cv\n'), ((19131, 19153), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (19151, 19153), True, 'import cv2 as cv\n')] |
#!/usr/bin/env python
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Toggle for building the optional Cython-based comms extension.
COMMS_EXT_ENABLED = False
requirements = []
extensions = []
cmdclass = {}
if COMMS_EXT_ENABLED:
    # The comms extension needs Cython as a build requirement and its
    # build_ext/cythonize helpers at setup time.
    requirements.append('cython >= 0.21')
    from Cython.Distutils import build_ext
    from Cython.Build import cythonize
    import Cython
    if Cython.__version__ < '0.19.1':
        raise Exception('Please upgrade to Cython 0.19.1 or newer')
    cmdclass['build_ext'] = build_ext
from setuptools import setup # noqa
import os # noqa
import sys # noqa
from distutils.extension import Extension # noqa
from distutils.command.clean import clean as _clean # noqa
class clean(_clean):
    """Extended distutils ``clean`` command.

    Runs the stock clean, then removes extra generated files.  The list of
    files is currently empty and acts as a placeholder (e.g. for
    cythonized sources when the comms extension is enabled).
    """
    def run(self):
        _clean.run(self)
        for x in []:
            try:
                os.remove(x)
            except OSError:
                # File already gone — nothing to clean up
                pass
# Register the extended clean command defined above.
cmdclass['clean'] = clean
# Runtime requirements: the optional build requirements plus the pinned
# list from requirements.txt.
with open('requirements.txt') as f:
    file_reqs = f.read().splitlines()
requirements = requirements + file_reqs
PY2 = sys.version_info[0] == 2
PY26 = sys.version_info[0] == 2 and sys.version_info[1] == 6
# Backports only needed on legacy interpreters.
if PY26:
    requirements.append('argparse')
    requirements.append('unittest2')
if PY2:
    requirements.append('mock')
if COMMS_EXT_ENABLED:
    import numpy as np
    common_include = ['ibis/src', np.get_include()]
    comms_ext_libraries = []
    if sys.platform != 'darwin':
        # libuuid is available without additional linking as part of the base
        # BSD system on OS X, needs to be installed and linked on Linux,
        # though.
        comms_ext_libraries.append('uuid')
    comms_ext = Extension('ibis.comms',
                          ['ibis/comms.pyx',
                           'ibis/src/ipc_support.c'],
                          depends=['ibis/src/ipc_support.h'],
                          libraries=comms_ext_libraries,
                          include_dirs=common_include)
    extensions = cythonize([comms_ext])
LONG_DESCRIPTION = """
Ibis is a productivity-centric Python big data framework.
See http://ibis-project.org
"""
# Trove classifiers describing supported interpreters and audience.
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Operating System :: OS Independent',
    'Intended Audience :: Science/Research',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Cython',
    'Topic :: Scientific/Engineering',
]
# versioneer derives the version string from VCS tags.
import versioneer # noqa
setup(
    name='ibis-framework',
    packages=['ibis',
              'ibis.expr',
              'ibis.expr.tests',
              'ibis.hive',
              'ibis.hive.tests',
              'ibis.impala',
              'ibis.impala.tests',
              'ibis.spark',
              'ibis.spark.tests',
              'ibis.sql',
              'ibis.sql.tests',
              'ibis.sql.postgres',
              'ibis.sql.postgres.tests',
              'ibis.sql.presto',
              'ibis.sql.presto.tests',
              'ibis.sql.redshift',
              'ibis.sql.redshift.tests',
              'ibis.sql.sqlite',
              'ibis.sql.sqlite.tests',
              'ibis.sql.vertica',
              'ibis.sql.vertica.tests',
              'ibis.tests'],
    version=versioneer.get_version(),
    package_data={'ibis': ['*.pxd', '*.pyx']},
    ext_modules=extensions,
    cmdclass=versioneer.get_cmdclass(),
    install_requires=requirements,
    extras_require={'kerberos': ['requests-kerberos']},
    description="Productivity-centric Python Big Data Framework",
    long_description=LONG_DESCRIPTION,
    classifiers=CLASSIFIERS,
    license='Apache License, Version 2.0',
    maintainer="<NAME>",
    maintainer_email="<EMAIL>"
)
| [
"versioneer.get_version",
"os.remove",
"Cython.Build.cythonize",
"distutils.command.clean.clean.run",
"versioneer.get_cmdclass",
"distutils.extension.Extension",
"numpy.get_include"
] | [((2108, 2281), 'distutils.extension.Extension', 'Extension', (['"""ibis.comms"""', "['ibis/comms.pyx', 'ibis/src/ipc_support.c']"], {'depends': "['ibis/src/ipc_support.h']", 'libraries': 'comms_ext_libraries', 'include_dirs': 'common_include'}), "('ibis.comms', ['ibis/comms.pyx', 'ibis/src/ipc_support.c'],\n depends=['ibis/src/ipc_support.h'], libraries=comms_ext_libraries,\n include_dirs=common_include)\n", (2117, 2281), False, 'from distutils.extension import Extension\n'), ((2422, 2444), 'Cython.Build.cythonize', 'cythonize', (['[comms_ext]'], {}), '([comms_ext])\n', (2431, 2444), False, 'from Cython.Build import cythonize\n'), ((1222, 1238), 'distutils.command.clean.clean.run', '_clean.run', (['self'], {}), '(self)\n', (1232, 1238), True, 'from distutils.command.clean import clean as _clean\n'), ((1799, 1815), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (1813, 1815), True, 'import numpy as np\n'), ((3794, 3818), 'versioneer.get_version', 'versioneer.get_version', ([], {}), '()\n', (3816, 3818), False, 'import versioneer\n'), ((3908, 3933), 'versioneer.get_cmdclass', 'versioneer.get_cmdclass', ([], {}), '()\n', (3931, 3933), False, 'import versioneer\n'), ((1293, 1305), 'os.remove', 'os.remove', (['x'], {}), '(x)\n', (1302, 1305), False, 'import os\n')] |
# Helpful plotting utilities
import numpy as np
import matplotlib.pyplot as plt
def lineplot(x, y, ctr_type='mean', err_type='std', show_trials=False, **kwargs):
    """
    Plot the values with mean/median and std/95% CI/quartile as shading.

    This uses the automatic/default/preset color cycling for lines. Provided
    kwargs (including overriding color) will be passed to the line plotting

    The style is meant to match that produced by seaborn.lineplot, but on numpy
    arrays and with way less overhead (i.e., not putting it into a pandas
    dataframe and using the seaborn plotting). It doesn't support a lot of the
    fancy extras of its seaborn cousin.

    **Note:** This uses numpy's nan-functions (e.g., `nanmean` and `nanstd`) so
    your data can include nan values, and they will not contribute to summary
    statistic plots.

    Parameters
    ----------
    x : np.ndarray
        (n,) shape array of x-values to plot
    y : np.ndarray
        (m, n) shape array of y-values to plot, where m is the number of trials
    ctr_type : str, optional
        Which central statistic to plot is the primary summary metric. Options
        are 'mean' and 'median'. (the default is 'mean', which uses
        numpy.nanmean)
    err_type : str, optional
        Which error type to show for shading. Options are:
        - **std**: Standard deviation
        - **95ci**: 95% confidence interval
        - **quartile**: [25%, 75%] confidence interval
        - **None**: No error plotting
        (the default is ``'std'``, which is standard deviation)
    show_trials : bool, optional
        Whether or not to show plots of the individual trials (each row of y
        data). (the default is False, which means only summary data shown)

    Raises
    ------
    ValueError
        If ctr_type or err_type is not one of the supported options.
    """
    # Central summary statistic (nan-aware, so nan entries are ignored)
    if ctr_type == 'mean':
        y_mid = np.nanmean(y, axis=0)
    elif ctr_type == 'median':
        y_mid = np.nanmedian(y, axis=0)
    else:
        # Bug fix: an unknown ctr_type previously left y_mid undefined and
        # crashed later with a NameError — fail fast with a clear message.
        raise ValueError("Invalid ctr_type")
    show_err = True
    if err_type == 'std':
        std = np.nanstd(y, axis=0)
        err = np.array([y_mid-std, y_mid+std])
    elif err_type == '95ci':
        err = np.nanpercentile(y, [2.5, 97.5], axis=0)
    elif err_type == 'quartile':
        err = np.nanpercentile(y, [25, 75], axis=0)
    elif err_type is None:
        show_err = False
    else:
        raise ValueError("Invalid err_type")
    # Get the color the line should have by taking it from a dummy plot
    if 'color' not in kwargs:
        line, = plt.plot([], [])
        line_color = line.get_color()
        line.remove()
        # Add color to the existing kwargs
        kwargs['color'] = line_color
    plt.plot(x, y_mid, **kwargs)
    # Drop the label so the shading/trials don't duplicate legend entries
    kwargs.pop('label', None)
    if show_err:
        plt.fill_between(x, err[0], err[1], alpha=0.2, **kwargs)
    if show_trials:
        for trial in range(y.shape[0]):
            plt.plot(x, y[trial, :], linewidth=1, alpha=0.2)
if __name__ == '__main__':
    # Visual smoke test: 8 flat trials of zeros over x = 0..9
    lineplot(np.arange(10), np.zeros([8, 10]))
| [
"numpy.nanpercentile",
"matplotlib.pyplot.plot",
"numpy.nanmedian",
"numpy.nanstd",
"numpy.zeros",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.fill_between",
"numpy.nanmean"
] | [((2590, 2618), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_mid'], {}), '(x, y_mid, **kwargs)\n', (2598, 2618), True, 'import matplotlib.pyplot as plt\n'), ((1812, 1833), 'numpy.nanmean', 'np.nanmean', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (1822, 1833), True, 'import numpy as np\n'), ((1966, 1986), 'numpy.nanstd', 'np.nanstd', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (1975, 1986), True, 'import numpy as np\n'), ((2001, 2037), 'numpy.array', 'np.array', (['[y_mid - std, y_mid + std]'], {}), '([y_mid - std, y_mid + std])\n', (2009, 2037), True, 'import numpy as np\n'), ((2429, 2445), 'matplotlib.pyplot.plot', 'plt.plot', (['[]', '[]'], {}), '([], [])\n', (2437, 2445), True, 'import matplotlib.pyplot as plt\n'), ((2674, 2730), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', 'err[0]', 'err[1]'], {'alpha': '(0.2)'}), '(x, err[0], err[1], alpha=0.2, **kwargs)\n', (2690, 2730), True, 'import matplotlib.pyplot as plt\n'), ((2895, 2908), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2904, 2908), True, 'import numpy as np\n'), ((2910, 2927), 'numpy.zeros', 'np.zeros', (['[8, 10]'], {}), '([8, 10])\n', (2918, 2927), True, 'import numpy as np\n'), ((1881, 1904), 'numpy.nanmedian', 'np.nanmedian', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (1893, 1904), True, 'import numpy as np\n'), ((2077, 2117), 'numpy.nanpercentile', 'np.nanpercentile', (['y', '[2.5, 97.5]'], {'axis': '(0)'}), '(y, [2.5, 97.5], axis=0)\n', (2093, 2117), True, 'import numpy as np\n'), ((2804, 2852), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y[trial, :]'], {'linewidth': '(1)', 'alpha': '(0.2)'}), '(x, y[trial, :], linewidth=1, alpha=0.2)\n', (2812, 2852), True, 'import matplotlib.pyplot as plt\n'), ((2165, 2202), 'numpy.nanpercentile', 'np.nanpercentile', (['y', '[25, 75]'], {'axis': '(0)'}), '(y, [25, 75], axis=0)\n', (2181, 2202), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import sys
import toml
import PySimpleGUI as sg
#import PySimpleGUIQt as sg
import numpy as np
from appdirs import AppDirs
from os import path, makedirs
from pathlib import Path
from version import NAME, APPNAME, GUI, VERSION, AUTHOR
"""
"""
# Per-pack lookup tables: 'tension' holds measured pack voltages [V] and
# 'charge' the matching state of charge [%]; 'type' maps the cell layout
# (e.g. 10S3P = 10 cells in series, 3 in parallel) to the commercial model.
tension = {}
charge = {}
type = {}
type['13S2P'] = 'WILPA 2210'
type['12S2P'] = 'WILPA 2554'
type['12S2Pxlr'] = 'WILPA 3017'
type['10S3P'] = 'WILPA 2475'
# 10S3P discharge curve, sampled in ascending voltage order; each voltage
# pairs with the percentage at the same position in charge['10S3P'].
tension['10S3P'] = np.array([32, 33.76, 34.61, 35.43, 36.04, 36.47, 36.67, 36.74, 36.77, 36.80, 36.82,
                   36.84, 36.85, 36.87, 36.89, 36.95, 37.06, 37.43, 37.75, 37.90, 38.19,
                   38.89, 39.45, 40.12, 40.94, 41.90, 42])
charge['10S3P'] = np.array([0, 1.6, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4, 8.4, 9.4, 10.4, 11.4, 12.3, 13.3, 14.3,
                  15.3, 16.3, 21.1, 30.9, 40.6, 50.4, 60.2, 70.0, 79.7, 89.5, 99.3, 100])
# 12S2P discharge curve, sampled in descending voltage order (full -> empty).
tension['12S2P'] = np.array([
    50.40,
    50.28,
    49.13,
    48.14,
    47.34,
    46.67,
    45.83,
    45.48,
    45.30,
    44.92,
    44.47,
    44.34,
    44.27,
    44.24,
    44.22,
    44.21,
    44.18,
    44.16,
    44.12,
    44.09,
    44.00,
    43.76,
    43.25,
    42.52,
    41.53,
    40.51,
    38.40])
# State of charge [%] matching tension['12S2P'] point-by-point.
charge['12S2P'] = np.array([
    100.0,
    99.3,
    89.5,
    79.7,
    70.0,
    60.2,
    50.4,
    40.6,
    30.9,
    21.1,
    16.3,
    15.3,
    14.3,
    13.3,
    12.3,
    11.4,
    10.4,
    9.4,
    8.4,
    7.4,
    6.5,
    5.5,
    4.5,
    3.5,
    2.5,
    1.6,
    0.0])
# 12S2P (XLR connector) discharge curve, descending voltage order.
# NOTE(review): 40.93 appears twice (positions 14 and 17), so the curve is
# not strictly monotonic — verify against the original measurement data.
tension['12S2Pxlr'] = np.array([
    50.40,
    48.68,
    47.39,
    46.28,
    45.34,
    44.21,
    43.66,
    42.58,
    41.39,
    41.28,
    41.21,
    41.12,
    41.04,
    40.93,
    40.81,
    40.66,
    40.93,
    40.43,
    40.01,
    39.38,
    38.63])
# State of charge [%] matching tension['12S2Pxlr'] point-by-point.
charge['12S2Pxlr'] = np.array([
    100.0,
    90.1,
    80.2,
    70.4,
    60.5,
    50.6,
    40.7,
    30.9,
    21.0,
    11.1,
    10.1,
    9.1,
    8.1,
    7.1,
    6.2,
    5.2,
    4.2,
    3.2,
    2.2,
    1.2,
    0])
# 13S2P discharge curve, sampled in descending voltage order (full -> empty).
tension['13S2P'] = np.array([
    54.60,
    54.47,
    53.22,
    52.16,
    51.29,
    50.56,
    49.65,
    49.27,
    49.08,
    48.66,
    48.18,
    48.04,
    47.96,
    47.93,
    47.91,
    47.89,
    47.87,
    47.84,
    47.80,
    47.76,
    47.67,
    47.41,
    46.85,
    46.06,
    44.99,
    43.89,
    41.60])
# State of charge [%] matching tension['13S2P'] point-by-point.
charge['13S2P'] = np.array([
    100.0,
    99.3,
    89.5,
    79.7,
    70.0,
    60.2,
    50.4,
    40.6,
    30.9,
    21.1,
    16.3,
    15.3,
    14.3,
    13.3,
    12.3,
    11.4,
    10.4,
    9.4,
    8.4,
    7.4,
    6.5,
    5.5,
    4.5,
    3.5,
    2.5,
    1.6,
    0.0])
def getDefaultConfig():
    """Return the application's default configuration parsed from TOML."""
    return toml.loads("""
    type = '10S3P'
    """)
def saveDefaultConfig():
    """Write a fresh default configuration, stamped with VERSION, to cfg_file."""
    with open(cfg_file, 'w') as fid:
        default_cfg = getDefaultConfig()
        default_cfg['version'] = VERSION
        toml.dump(default_cfg, fid)
# Save current config
def saveConfig():
    """Persist the current global configuration to the config file."""
    with open(cfg_file, 'w') as f:
        toml.dump(cfg, f)
# start main program
# Resolve the platform-specific user config directory and make sure a valid
# config file exists before the GUI starts.
cfg_dir = AppDirs(APPNAME, AUTHOR).user_config_dir
# print(cfg_dir)
if not path.exists(cfg_dir):
    makedirs(cfg_dir)
cfg_file = Path(path.expandvars(
    f"{cfg_dir}/{APPNAME}")).with_suffix('.toml')
if not path.isfile(cfg_file):
    saveDefaultConfig()
cfg = toml.load(cfg_file)
# Regenerate the config when it predates the current application version.
if "version" not in cfg or \
   cfg["version"] != VERSION:
    saveDefaultConfig()
    cfg = toml.load(cfg_file)
# default value
V = tension['10S3P']
# GUI layout: model label + battery-type combo, voltage input, capacity
# output and an exit button.  change_submits makes the combo and the input
# field emit events on every change.
layout = [[sg.T(type['10S3P'], key='_MODEL_', visible=None), sg.Text('Select the batterie'),
           sg.InputCombo(['10S3P', '13S2P', '12S2P', '12S2Pxlr'], key='_TYPE_', default_value = cfg['type'],
                         change_submits=True, size=(8, 1))],
          [sg.T('Enter voltage'), sg.In(key='_INPUT_',
                                        size=(8, 1), change_submits=True)],
          # [sg.T('', key='_TENSION_', visible=False), sg.T('Capacity'), sg.In(key='_RESULT_', size=(8,1))],
          [sg.T('Capacity'), sg.In(key='_RESULT_', size=(8, 1))],
          [sg.Button('Exit', key='Exit')]]
window = sg.Window(f'Li-Ion capacity calculator v{VERSION} with {GUI}',
                   auto_size_text=False,
                   default_element_size=(22, 1),
                   text_justification='right',
                   ).Layout(layout)
while True:  # Event Loop
    event, values = window.Read()
    if event is None:
        break
    if event in ('Exit', sg.WIN_CLOSED):
        break  # leave main loop
    if event == '_TYPE_':
        # Battery type changed: remember it and reset the displayed fields
        cfg['type'] = values['_TYPE_']
        window.Element('_MODEL_').Update(value=type[values['_TYPE_']])
        window.Element('_INPUT_').Update(value='')
        window.Element('_RESULT_').Update(value='')
    if event == '_INPUT_':
        if values['_INPUT_'] == '':
            continue
        # Look up the discharge curve for the selected pack directly; the
        # combo box only offers keys present in both tables.
        V = tension[values['_TYPE_']]
        C = charge[values['_TYPE_']]
        # Accept both ',' and '.' as decimal separator
        raw = values['_INPUT_'].replace(',', '.')
        try:
            u = float(raw)
        except ValueError:
            # Ignore partially typed input (e.g. a lone '-' or '.'), which
            # previously crashed the event loop on every keystroke.
            continue
        # Index of the tabulated voltage closest to the entered value
        ind = (np.abs(V - u)).argmin()
        if u >= np.max(V):
            result_text = '{:4.1f} %'.format(100.0)
        elif u <= np.min(V):
            result_text = '{:4.1f} %'.format(0)
        else:
            try:
                # Linear interpolation between the two nearest table points
                p = (V[ind + 1] - V[ind]) / (u - V[ind])
                res = C[ind] + (C[ind + 1] - C[ind]) / p
                result_text = '{:4.1f} %'.format(res)
            except (IndexError, ZeroDivisionError):
                # ind is the last table entry, so there is no right-hand
                # neighbour to interpolate with: clamp to the matching end
                # of the curve.  Bug fix: the original compared against
                # len(U) - the length of the *input string* - instead of
                # len(V), and used a bare 'except:'.
                if ind >= len(V) - 1:
                    result_text = '{:4.1f} %'.format(100)
                else:
                    result_text = '{:4.1f} %'.format(0)
        window.Element('_RESULT_').Update(value=result_text)
# Persist the last selected battery type before leaving
saveConfig()
window.close()
| [
"PySimpleGUI.Button",
"numpy.abs",
"appdirs.AppDirs",
"os.makedirs",
"PySimpleGUI.InputCombo",
"os.path.exists",
"os.path.expandvars",
"os.path.isfile",
"PySimpleGUI.Text",
"numpy.array",
"toml.load",
"PySimpleGUI.T",
"PySimpleGUI.Window",
"numpy.max",
"numpy.min",
"toml.loads",
"tom... | [((440, 639), 'numpy.array', 'np.array', (['[32, 33.76, 34.61, 35.43, 36.04, 36.47, 36.67, 36.74, 36.77, 36.8, 36.82, \n 36.84, 36.85, 36.87, 36.89, 36.95, 37.06, 37.43, 37.75, 37.9, 38.19, \n 38.89, 39.45, 40.12, 40.94, 41.9, 42]'], {}), '([32, 33.76, 34.61, 35.43, 36.04, 36.47, 36.67, 36.74, 36.77, 36.8,\n 36.82, 36.84, 36.85, 36.87, 36.89, 36.95, 37.06, 37.43, 37.75, 37.9, \n 38.19, 38.89, 39.45, 40.12, 40.94, 41.9, 42])\n', (448, 639), True, 'import numpy as np\n'), ((710, 877), 'numpy.array', 'np.array', (['[0, 1.6, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4, 8.4, 9.4, 10.4, 11.4, 12.3, 13.3, \n 14.3, 15.3, 16.3, 21.1, 30.9, 40.6, 50.4, 60.2, 70.0, 79.7, 89.5, 99.3, 100\n ]'], {}), '([0, 1.6, 2.5, 3.5, 4.5, 5.5, 6.5, 7.4, 8.4, 9.4, 10.4, 11.4, 12.3,\n 13.3, 14.3, 15.3, 16.3, 21.1, 30.9, 40.6, 50.4, 60.2, 70.0, 79.7, 89.5,\n 99.3, 100])\n', (718, 877), True, 'import numpy as np\n'), ((918, 1123), 'numpy.array', 'np.array', (['[50.4, 50.28, 49.13, 48.14, 47.34, 46.67, 45.83, 45.48, 45.3, 44.92, 44.47,\n 44.34, 44.27, 44.24, 44.22, 44.21, 44.18, 44.16, 44.12, 44.09, 44.0, \n 43.76, 43.25, 42.52, 41.53, 40.51, 38.4]'], {}), '([50.4, 50.28, 49.13, 48.14, 47.34, 46.67, 45.83, 45.48, 45.3, \n 44.92, 44.47, 44.34, 44.27, 44.24, 44.22, 44.21, 44.18, 44.16, 44.12, \n 44.09, 44.0, 43.76, 43.25, 42.52, 41.53, 40.51, 38.4])\n', (926, 1123), True, 'import numpy as np\n'), ((1246, 1418), 'numpy.array', 'np.array', (['[100.0, 99.3, 89.5, 79.7, 70.0, 60.2, 50.4, 40.6, 30.9, 21.1, 16.3, 15.3, \n 14.3, 13.3, 12.3, 11.4, 10.4, 9.4, 8.4, 7.4, 6.5, 5.5, 4.5, 3.5, 2.5, \n 1.6, 0.0]'], {}), '([100.0, 99.3, 89.5, 79.7, 70.0, 60.2, 50.4, 40.6, 30.9, 21.1, 16.3,\n 15.3, 14.3, 13.3, 12.3, 11.4, 10.4, 9.4, 8.4, 7.4, 6.5, 5.5, 4.5, 3.5, \n 2.5, 1.6, 0.0])\n', (1254, 1418), True, 'import numpy as np\n'), ((1542, 1708), 'numpy.array', 'np.array', (['[50.4, 48.68, 47.39, 46.28, 45.34, 44.21, 43.66, 42.58, 41.39, 41.28, 41.21,\n 41.12, 41.04, 40.93, 40.81, 40.66, 40.93, 40.43, 40.01, 39.38, 
38.63]'], {}), '([50.4, 48.68, 47.39, 46.28, 45.34, 44.21, 43.66, 42.58, 41.39, \n 41.28, 41.21, 41.12, 41.04, 40.93, 40.81, 40.66, 40.93, 40.43, 40.01, \n 39.38, 38.63])\n', (1550, 1708), True, 'import numpy as np\n'), ((1809, 1938), 'numpy.array', 'np.array', (['[100.0, 90.1, 80.2, 70.4, 60.5, 50.6, 40.7, 30.9, 21.0, 11.1, 10.1, 9.1, \n 8.1, 7.1, 6.2, 5.2, 4.2, 3.2, 2.2, 1.2, 0]'], {}), '([100.0, 90.1, 80.2, 70.4, 60.5, 50.6, 40.7, 30.9, 21.0, 11.1, 10.1,\n 9.1, 8.1, 7.1, 6.2, 5.2, 4.2, 3.2, 2.2, 1.2, 0])\n', (1817, 1938), True, 'import numpy as np\n'), ((2040, 2246), 'numpy.array', 'np.array', (['[54.6, 54.47, 53.22, 52.16, 51.29, 50.56, 49.65, 49.27, 49.08, 48.66, 48.18,\n 48.04, 47.96, 47.93, 47.91, 47.89, 47.87, 47.84, 47.8, 47.76, 47.67, \n 47.41, 46.85, 46.06, 44.99, 43.89, 41.6]'], {}), '([54.6, 54.47, 53.22, 52.16, 51.29, 50.56, 49.65, 49.27, 49.08, \n 48.66, 48.18, 48.04, 47.96, 47.93, 47.91, 47.89, 47.87, 47.84, 47.8, \n 47.76, 47.67, 47.41, 46.85, 46.06, 44.99, 43.89, 41.6])\n', (2048, 2246), True, 'import numpy as np\n'), ((2372, 2544), 'numpy.array', 'np.array', (['[100.0, 99.3, 89.5, 79.7, 70.0, 60.2, 50.4, 40.6, 30.9, 21.1, 16.3, 15.3, \n 14.3, 13.3, 12.3, 11.4, 10.4, 9.4, 8.4, 7.4, 6.5, 5.5, 4.5, 3.5, 2.5, \n 1.6, 0.0]'], {}), '([100.0, 99.3, 89.5, 79.7, 70.0, 60.2, 50.4, 40.6, 30.9, 21.1, 16.3,\n 15.3, 14.3, 13.3, 12.3, 11.4, 10.4, 9.4, 8.4, 7.4, 6.5, 5.5, 4.5, 3.5, \n 2.5, 1.6, 0.0])\n', (2380, 2544), True, 'import numpy as np\n'), ((3317, 3336), 'toml.load', 'toml.load', (['cfg_file'], {}), '(cfg_file)\n', (3326, 3336), False, 'import toml\n'), ((2746, 2769), 'toml.loads', 'toml.loads', (['toml_string'], {}), '(toml_string)\n', (2756, 2769), False, 'import toml\n'), ((3065, 3089), 'appdirs.AppDirs', 'AppDirs', (['APPNAME', 'AUTHOR'], {}), '(APPNAME, AUTHOR)\n', (3072, 3089), False, 'from appdirs import AppDirs\n'), ((3130, 3150), 'os.path.exists', 'path.exists', (['cfg_dir'], {}), '(cfg_dir)\n', (3141, 3150), False, 'from os import path, 
makedirs\n'), ((3156, 3173), 'os.makedirs', 'makedirs', (['cfg_dir'], {}), '(cfg_dir)\n', (3164, 3173), False, 'from os import path, makedirs\n'), ((3264, 3285), 'os.path.isfile', 'path.isfile', (['cfg_file'], {}), '(cfg_file)\n', (3275, 3285), False, 'from os import path, makedirs\n'), ((3431, 3450), 'toml.load', 'toml.load', (['cfg_file'], {}), '(cfg_file)\n', (3440, 3450), False, 'import toml\n'), ((2907, 2926), 'toml.dump', 'toml.dump', (['cfg', 'fid'], {}), '(cfg, fid)\n', (2916, 2926), False, 'import toml\n'), ((3013, 3032), 'toml.dump', 'toml.dump', (['cfg', 'fid'], {}), '(cfg, fid)\n', (3022, 3032), False, 'import toml\n'), ((3501, 3549), 'PySimpleGUI.T', 'sg.T', (["type['10S3P']"], {'key': '"""_MODEL_"""', 'visible': 'None'}), "(type['10S3P'], key='_MODEL_', visible=None)\n", (3505, 3549), True, 'import PySimpleGUI as sg\n'), ((3551, 3581), 'PySimpleGUI.Text', 'sg.Text', (['"""Select the batterie"""'], {}), "('Select the batterie')\n", (3558, 3581), True, 'import PySimpleGUI as sg\n'), ((3594, 3727), 'PySimpleGUI.InputCombo', 'sg.InputCombo', (["['10S3P', '13S2P', '12S2P', '12S2Pxlr']"], {'key': '"""_TYPE_"""', 'default_value': "cfg['type']", 'change_submits': '(True)', 'size': '(8, 1)'}), "(['10S3P', '13S2P', '12S2P', '12S2Pxlr'], key='_TYPE_',\n default_value=cfg['type'], change_submits=True, size=(8, 1))\n", (3607, 3727), True, 'import PySimpleGUI as sg\n'), ((3780, 3801), 'PySimpleGUI.T', 'sg.T', (['"""Enter voltage"""'], {}), "('Enter voltage')\n", (3784, 3801), True, 'import PySimpleGUI as sg\n'), ((3803, 3857), 'PySimpleGUI.In', 'sg.In', ([], {'key': '"""_INPUT_"""', 'size': '(8, 1)', 'change_submits': '(True)'}), "(key='_INPUT_', size=(8, 1), change_submits=True)\n", (3808, 3857), True, 'import PySimpleGUI as sg\n'), ((4027, 4043), 'PySimpleGUI.T', 'sg.T', (['"""Capacity"""'], {}), "('Capacity')\n", (4031, 4043), True, 'import PySimpleGUI as sg\n'), ((4045, 4079), 'PySimpleGUI.In', 'sg.In', ([], {'key': '"""_RESULT_"""', 'size': '(8, 1)'}), 
"(key='_RESULT_', size=(8, 1))\n", (4050, 4079), True, 'import PySimpleGUI as sg\n'), ((4093, 4122), 'PySimpleGUI.Button', 'sg.Button', (['"""Exit"""'], {'key': '"""Exit"""'}), "('Exit', key='Exit')\n", (4102, 4122), True, 'import PySimpleGUI as sg\n'), ((4135, 4286), 'PySimpleGUI.Window', 'sg.Window', (['f"""Li-Ion capacity calculator v{VERSION} with {GUI}"""'], {'auto_size_text': '(False)', 'default_element_size': '(22, 1)', 'text_justification': '"""right"""'}), "(f'Li-Ion capacity calculator v{VERSION} with {GUI}',\n auto_size_text=False, default_element_size=(22, 1), text_justification=\n 'right')\n", (4144, 4286), True, 'import PySimpleGUI as sg\n'), ((3190, 3229), 'os.path.expandvars', 'path.expandvars', (['f"""{cfg_dir}/{APPNAME}"""'], {}), "(f'{cfg_dir}/{APPNAME}')\n", (3205, 3229), False, 'from os import path, makedirs\n'), ((5461, 5470), 'numpy.max', 'np.max', (['V'], {}), '(V)\n', (5467, 5470), True, 'import numpy as np\n'), ((5421, 5434), 'numpy.abs', 'np.abs', (['(V - u)'], {}), '(V - u)\n', (5427, 5434), True, 'import numpy as np\n'), ((5534, 5543), 'numpy.min', 'np.min', (['V'], {}), '(V)\n', (5540, 5543), True, 'import numpy as np\n')] |
import os
import logging
import datetime
import json
import click
import numpy as np
import joblib
from collections import defaultdict
from rllab.misc import tensor_utils
from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.website_env import WebsiteEnv
from rllab.envs.normalized_env import normalize
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
# Flask
from flask import Flask
from flask_cors import CORS
from flask import request
from flask import jsonify
logger = logging.getLogger(__name__)
# Global mutable state shared by the Flask handlers below.
CONFIG = {
    'num_actions': 1,          # action dimensionality of the environment
    'num_observations': 1,     # observation dimensionality of the environment
    'snapshot_dir_itr': 0,     # counter used to name data files and log rows
    'snapshot_dir': '',        # directory where snapshots/logs are written
    'agent_dict': {},          # holds 'env', 'agent' (TRPO algo) and 'policy'
    'batch_size': 20,          # an update fires once the batch exceeds this size
    'batch': [],               # paths accumulated since the last update
    'max_bad_updates_before_purge': 60,  # failed updates tolerated before dropping the batch
    'num_bad_updates': 0,      # failed-update counter
    'noise': 0.5,              # default noise level for decoy ("bad") actions
    'picked_correct': [],      # rolling record of whether users picked the predicted button
    'max_bad_buttons_to_update': 5,      # cap on penalized unpicked buttons per game
    'file_to_load_policy': ''  # optional snapshot path to warm-start from
}
home = os.path.expanduser('~')
# Template formatted with (network_name, datetime string) in init_agent()
SNAPSHOT_DIR = os.path.join(home, 'rllab/experiments_{}_{}')
def listify_dict(d):
    """
    Recursively convert numpy arrays to plain lists inside a python object.

    Walks lists and dicts in place, replacing every ``np.ndarray`` leaf
    with ``ndarray.tolist()`` so the structure is JSON-serializable.

    :param d: an arbitrary nesting of dicts, lists, arrays and scalars
    :return: the same structure with all arrays converted to lists
    """
    if type(d) is np.ndarray:
        return d.tolist()
    if isinstance(d, list):
        for idx, k in enumerate(d):
            d[idx] = listify_dict(k)
        return d
    if isinstance(d, dict):
        for k in d.keys():
            d[k] = listify_dict(d[k])
        return d
    # Scalars (ints, floats, strings, ...) pass through unchanged.  The
    # original fell off the end here and silently replaced them with None.
    return d
def arrayify_dict(d):
    """
    In-place convert every value of *d* into a numpy array.

    Assumes each value is list-like; it is replaced by ``np.array(value)``.
    Returns None (mutates the dict that was passed in).
    """
    for key, value in d.items():
        d[key] = np.array(value)
def validate_action_dim(action):
    """
    Check that *action* is non-empty and matches the configured
    action dimensionality (CONFIG['num_actions']).
    """
    return bool(action) and len(action) == CONFIG['num_actions']
def validate_obs_dim(obs):
    """
    Check that *obs* is non-empty and matches the configured
    observation dimensionality (CONFIG['num_observations']).
    """
    return bool(obs) and len(obs) == CONFIG['num_observations']
def make_path(r, obs, action, agent_info):
    """
    Bundle one rollout into the path dict format expected by rllab.

    :param r: per-step rewards
    :param obs: per-step observations
    :param action: per-step actions
    :param agent_info: list of per-step agent-info dicts
    :return: dict with rllab's path keys
    """
    return {
        "rewards": tensor_utils.stack_tensor_list(r),
        "observations": tensor_utils.stack_tensor_list(obs),
        "actions": tensor_utils.stack_tensor_list(action),
        "env_infos": {},
        "agent_infos": tensor_utils.stack_tensor_dict_list(agent_info),
    }
def update_policy(obs, action, agent_info, reward):
    """
    Normalize one sample to rllab's path format and queue it for a
    batched policy update.
    """
    # Promote 1-D obs/action lists to a (1, dim) batch dimension.
    obs_arr = np.array(obs)
    if obs_arr.ndim == 1:
        obs_arr = obs_arr[np.newaxis, :]
    action_arr = np.array(action)
    if action_arr.ndim == 1:
        action_arr = action_arr[np.newaxis, :]
    # agent_info values arrive JSON-listified; convert them back to arrays.
    if isinstance(agent_info, list):
        for info in agent_info:
            arrayify_dict(info)
    else:
        arrayify_dict(agent_info)
        agent_info = [agent_info]
    rewards = np.array(reward)
    update_in_batch(make_path(rewards, obs_arr, action_arr, agent_info))
def update_in_batch(path):
    """
    Queue *path* and run a TRPO update once CONFIG['batch_size'] is exceeded.

    On a failed update step the batch is kept for retry; after
    CONFIG['max_bad_updates_before_purge'] failures the batch is dropped.
    After a successful update the mean "picked correct" reward is logged
    to CONFIG['reward_file'] and the batch is dumped to CONFIG['data_file'].
    """
    CONFIG['batch'].append(path)
    if len(CONFIG['batch']) > CONFIG['batch_size']:
        algo = CONFIG['agent_dict']['agent']
        good_update = algo.train_from_single_sample(
            CONFIG['batch'], log_dir=CONFIG['snapshot_dir'])
        if not good_update:
            CONFIG['num_bad_updates'] += 1
            if CONFIG['num_bad_updates'] > CONFIG['max_bad_updates_before_purge']:
                print('Did max bad updates, purging batch data')
                CONFIG['batch'] = []
                return
            # keep the batch so the next sample triggers another attempt
            print('Update step was bad, skipping, but saving data')
            return
        # successful update: log the rolling accuracy metric
        mean_reward = np.mean(CONFIG['picked_correct'])
        print('Mean reward at itr {} is {}'.format(
            CONFIG['snapshot_dir_itr'], mean_reward))
        f_str = '{}\t{}\n'.format(CONFIG['snapshot_dir_itr'], mean_reward)
        with open(CONFIG['reward_file'], 'a') as f:
            f.write(f_str)
        CONFIG['snapshot_dir_itr'] += 1
        # keep only the most recent window of pick outcomes
        if len(CONFIG['picked_correct']) >= 5 * CONFIG['batch_size']:
            CONFIG['picked_correct'] = CONFIG['picked_correct'][-4 * CONFIG['batch_size']:]
        # persist the batch as one JSON object per line, then clear it
        print('Saving data to ', CONFIG['data_file'])
        f_str = ''
        for path in CONFIG['batch']:
            path = listify_dict(path)
            f_str += json.dumps(path) + '\n'
        with open(CONFIG['data_file'], 'a') as f:
            f.write(f_str)
        CONFIG['batch'] = []
def init_agent(num_obs=1, num_actions=1, network_name='', hidden_sizes=(32, 32)):
    """
    Build a fresh env/policy/TRPO agent and set up the snapshot directory.

    Creates a timestamped snapshot dir, an empty per-iteration data file
    and a reward-monitoring file, stores the new env/agent/policy into
    CONFIG['agent_dict'], then runs one dummy training step so the
    computation graph is fully constructed before serving requests.

    :param num_obs: observation dimensionality
    :param num_actions: action dimensionality
    :param network_name: label used in the snapshot directory name
    :param hidden_sizes: hidden layer sizes for the Gaussian MLP policy
    """
    msg = 'initing network with num_actions={}, num_obs={}, network_name={}, hidden_sizes={}'
    print(msg.format(num_actions, num_obs, network_name, hidden_sizes))
    # set the snapshot dir stuff
    datetime_str = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
    CONFIG['snapshot_dir'] = SNAPSHOT_DIR.format(
        network_name, datetime_str)
    os.makedirs(CONFIG['snapshot_dir'], exist_ok=True)
    print('Making new network on ', datetime_str,
          'with {} actions and {} obs'.format(num_actions, num_obs))
    # create (truncate) the JSON-lines data file for this iteration
    file = 'data{}_act{}_obs{}.json'.format(
        CONFIG['snapshot_dir_itr'], num_actions, num_obs)
    CONFIG['data_file'] = os.path.join(CONFIG['snapshot_dir'], file)
    file = open(CONFIG['data_file'], 'w')
    file.close()
    print('Made data file at ', CONFIG['data_file'])
    CONFIG['snapshot_dir_itr'] += 1
    # make reward monitoring file
    CONFIG['reward_file'] = os.path.join(CONFIG['snapshot_dir'], 'reward_monitoring.txt')
    CONFIG['num_actions'] = num_actions
    CONFIG['num_observations'] = num_obs
    # Create the env and agent
    env = WebsiteEnv(num_actions=num_actions, action_bounds=[1.] * num_actions,
                     num_observations=num_obs,
                     observation_bound=1.)
    env = normalize(env)
    policy = GaussianMLPPolicy(
        env_spec=env.spec,
        hidden_sizes=hidden_sizes,
        adaptive_std=True
    )
    baseline = LinearFeatureBaseline(env_spec=env.spec)
    algo = TRPO(
        env=env,
        policy=policy,
        baseline=baseline,
        discount=0.99,
        step_size=0.01,
        optimizer_args={"accept_violation": False, "cg_iters": 20},
        # center_adv=False, # we will scale rewards appropriately
    )
    algo.init_opt()
    CONFIG['agent_dict']['env'] = env
    CONFIG['agent_dict']['agent'] = algo
    CONFIG['agent_dict']['policy'] = algo.policy
    # train from a single sample so the underlying graph is built eagerly
    obs = env.observation_space.sample()
    action, agent_info = algo.policy.get_action(obs)
    path = make_path(np.array([1.]), [obs], [action], [agent_info])
    algo.train_from_single_sample([path])
def init_agent_from_file():
    """
    Restore a previously-trained agent from CONFIG['file_to_load_policy'].

    Rebuilds the env/agent/policy entries of CONFIG['agent_dict'] from the
    joblib snapshot, points the snapshot/reward paths at the snapshot's
    directory and creates a fresh (empty) data file for the next iteration.
    """
    filename = CONFIG['file_to_load_policy']
    print('loading model from file', filename)
    params = joblib.load(filename)
    for cfg_key, param_key in (('env', 'env'), ('agent', 'algo'), ('policy', 'policy')):
        CONFIG['agent_dict'][cfg_key] = params[param_key]
    CONFIG['snapshot_dir_itr'] = params['itr'] + 1
    snapshot_dir = os.path.dirname(filename)
    CONFIG['snapshot_dir'] = snapshot_dir
    CONFIG['reward_file'] = os.path.join(snapshot_dir, 'reward_monitoring.txt')
    env = params['env']
    CONFIG['num_actions'] = env.action_dim
    CONFIG['num_observations'] = env.observation_space.shape[0]
    # create (truncate) the data file for this snapshot iteration
    data_name = 'data{}_act{}_obs{}.json'.format(
        CONFIG['snapshot_dir_itr'], CONFIG['num_actions'],
        CONFIG['num_observations'])
    CONFIG['data_file'] = os.path.join(snapshot_dir, data_name)
    open(CONFIG['data_file'], 'w').close()
def mse(x, y):
    """Mean squared error between arrays *x* and *y*."""
    diff = x - y
    return np.mean(diff * diff)
def add_noise_to_action_array(x, noise):
    """
    Generate a noisy "decoy" action near *x* (all values kept in [-1, 1]).

    Each coordinate of a copy of *x* is shifted by ``noise`` (direction
    chosen randomly when both directions stay in range), then a random
    point is drawn around that shifted mean and clipped into the action
    space, nudging any exactly-clipped values back off the boundary.
    This function is stochastic (uses numpy's global RNG).
    """
    # orig_x = np.copy(x)
    x = np.copy(x)
    # clamp noise into (0, 1]
    noise = abs(noise)
    noise = min(1., noise)
    noise = max(1e-8, noise)
    # get the new mean button to sample from
    # for each dimension, go left or right, whichever is valid
    for idx, xx in enumerate(x):
        if xx + noise > 1:
            x[idx] = xx - noise
        elif xx - noise < -1:
            x[idx] = xx + noise
        else:
            if np.random.rand(1) > .5:
                x[idx] = xx - noise
            else:
                x[idx] = xx + noise
    # give the mean point, generate a random point close to it, with the std being `noise` / 2.
    # NOTE(review): np.diag([noise / 2.] * len(x)) supplies noise/2 as the
    # *variance*, so the std is actually sqrt(noise/2) -- confirm intent.
    x_prime = np.random.multivariate_normal(x, np.diag([noise / 2.] * len(x)))
    x_prime = np.clip(x_prime, -1, 1)
    # any -1, or 1 values should be moved back into the space by some random value
    edge_interval = noise * 0.1
    x_prime[x_prime == 1] = np.random.uniform(1 - edge_interval, 1., len(x_prime[x_prime == 1]))
    x_prime[x_prime == -1] = np.random.uniform(-1, -1 + edge_interval, len(x_prime[x_prime == -1]))
    # d = np.mean((x_prime - orig_x)**2)
    return x_prime
"""
FLASK APP
"""
def configure_app(flask_app):
    """
    Enable CORS on *flask_app* and initialise the RL agent, either from
    scratch or from the snapshot named in CONFIG['file_to_load_policy'].
    """
    CORS(flask_app)
    if CONFIG['file_to_load_policy'] != '':
        init_agent_from_file()
    else:
        init_agent()
# The app object must live at module level so gunicorn can import it
# directly (e.g. ``gunicorn module:app``).
app = Flask(__name__)
configure_app(app)
@app.route('/')
def health():
    """Liveness probe; always reports ok."""
    payload = {'status': 'ok'}
    return jsonify(payload), 200
@app.route('/init_networks', methods=['POST'])
def init_networks():
    """
    (Re)build the agent from POSTed hyper-parameters.

    JSON body: ``num_actions`` and ``num_observations`` (positive ints),
    optional ``network_name`` (str, default 'default') and
    ``hidden_sizes`` (list/tuple of layer sizes, default (32, 32)).
    Returns 400 on malformed input, 200 once the new agent is ready.
    """
    req = request.get_json()
    num_actions = req.get('num_actions')
    num_obs = req.get('num_observations')
    network_name = req.get('network_name', 'default')
    hidden_sizes = req.get('hidden_sizes', (32, 32))
    # The original passed missing/garbage dims straight to init_agent(),
    # turning a bad request into a 500.  Reject them up front.
    # (bool is a subclass of int, so exclude it explicitly.)
    for label, dim in (('num_actions', num_actions),
                       ('num_observations', num_obs)):
        if not isinstance(dim, int) or isinstance(dim, bool) or dim < 1:
            msg = '{} is not a positive integer: {}'.format(label, dim)
            return jsonify({'status': msg}), 400
    if not isinstance(network_name, str):
        msg = 'network_name is not a string: {}'.format(network_name)
        return jsonify({'status': msg}), 400
    if not isinstance(hidden_sizes, list) and\
            not isinstance(hidden_sizes, tuple):
        msg = 'hidden_sizes is not a tuple: {}'.format(hidden_sizes)
        return jsonify({'status': msg}), 400
    init_agent(num_obs, num_actions, network_name, hidden_sizes)
    return jsonify({'status': 'ok'}), 200
@app.route('/get_multi_action', methods=['POST'])
def get_multiple_actions():
    """
    Get ``n_actions`` chained actions by feeding each action back in as
    the next observation, plus ``n_bads`` noisy decoy sequences.

    JSON body: ``observation`` (list matching CONFIG['num_observations']),
    optional ``noise``, ``n_bads`` and ``n_actions``.
    Returns {'good': ..., 'bad': [...]} or 400 on an invalid observation.
    """
    req = request.get_json()
    obs = req.get('observation')
    noise = req.get('noise', CONFIG['noise'])
    n_bads = req.get('n_bads', 1)
    n_actions = req.get('n_actions', 1)
    if not validate_obs_dim(obs):
        return jsonify({'status': 'invalid input obs'}), 400
    # get n_actions by feeding in each subsequent action as the next observation
    # NOTE(review): this chaining assumes the action and observation spaces
    # have the same dimensionality -- confirm against the env config.
    observations = []
    agent_infos = []
    next_obs = obs.copy()
    good_dict = defaultdict(list)
    for n in range(n_actions):
        observations.append(list(next_obs))
        action, agent_info = CONFIG['agent_dict']['policy'].get_action(next_obs)
        agent_info = listify_dict(agent_info)
        agent_infos.append(agent_info)
        action = np.clip(action, -1, 1)
        next_obs = np.copy(action)
        good_dict['actions'].append(list(action))
    good_dict['agent_info'] = agent_infos
    # 'action' is the whole flattened sequence; 'actions' keeps per-step lists
    good_dict['action'] = list(np.concatenate(good_dict['actions']))
    good_dict['observations'] = observations
    # format bad response: each decoy is the good sequence plus noise
    action = good_dict['action']
    bad_button_list = []
    for i in range(n_bads):
        bad_action = add_noise_to_action_array(action, noise)
        bad_dict = dict()
        bad_dict['action'] = list(bad_action)
        bad_dict['agent_info'] = agent_infos
        bad_dict['observations'] = observations
        bad_dict['actions'] = bad_action.reshape(n_actions, len(obs)).tolist()
        bad_button_list.append(bad_dict)
    resp = {
        'good': good_dict,
        'bad': bad_button_list
    }
    return jsonify(resp), 200
@app.route('/get_action', methods=['POST'])
def get_action():
    """
    Return one policy action for the posted observation, plus ``n_bads``
    noisy decoy actions derived from it.

    JSON body: ``observation`` (list matching CONFIG['num_observations']),
    optional ``noise`` and ``n_bads``.
    Returns {'good': ..., 'bad': [...]} or 400 on an invalid observation.
    """
    req = request.get_json()
    obs = req.get('observation')
    noise = req.get('noise', CONFIG['noise'])
    n_bads = req.get('n_bads', 1)
    if not validate_obs_dim(obs):
        return jsonify({'status': 'invalid input obs'}), 400
    action, agent_info = CONFIG['agent_dict']['policy'].get_action(obs)
    agent_info = listify_dict(agent_info)
    action = np.clip(action, -1, 1)
    # the predicted ("good") button
    good_dict = {
        'action': list(action),
        'actions': list(action),
        'agent_info': agent_info,
        'observations': [obs],
    }
    # decoy ("bad") buttons: the good action with noise added
    bad_button_list = []
    for _ in range(n_bads):
        noisy_action = add_noise_to_action_array(action, noise)
        bad_button_list.append({
            'action': list(noisy_action),
            'actions': list(noisy_action),
            'agent_info': agent_info,
            'observations': [obs],
        })
    return jsonify({'good': good_dict, 'bad': bad_button_list}), 200
@app.route('/get_random_obs', methods=['GET'])
def get_random_obs():
    """Sample a random observation from the environment's observation space."""
    sample = CONFIG['agent_dict']['env'].observation_space.sample()
    return jsonify({'observation': list(sample)}), 200
@app.route('/update_policy_from_game', methods=['POST'])
def update_policy_from_game():
    """
    Update the policy from the outcome of one button-picking game.

    The request carries ``button_list``: one "good" (predicted) button and
    several decoys, each flagged with ``picked``.  If the user picked the
    good button it is rewarded by its mean distance (MSE) to the decoys.
    Otherwise the picked decoy is rewarded, the good button is penalized,
    and the farthest unpicked decoys are penalized as well.
    Returns 400 when no button was picked.
    """
    req = request.get_json()
    button_list = req.get('button_list')
    # get the button that was picked
    good_button = list(filter(lambda x: x['isGood'], button_list))[0]
    other_buttons = filter(lambda x: not x['isGood'], button_list)
    picked_correct = 0
    if good_button['picked']:
        picked_correct = 1
    # record whether the user picked the button we predicted given the context
    CONFIG['picked_correct'].append(picked_correct)
    picked_button = None
    unpicked_buttons = []
    for b in other_buttons:
        if b['picked'] is True:
            picked_button = b
        else:
            unpicked_buttons.append(b)
    if good_button['picked']:
        picked_button = good_button
    if picked_button is None:
        return 'no button was picked', 400
    num_steps = len(picked_button['observations'])
    if good_button['picked']:
        # reward the good button since it was picked: reward = mean MSE
        # distance from the good action to every decoy action
        mses = map(
            lambda x: mse(np.array(x['action']), good_button['action']),
            unpicked_buttons)
        r = np.mean(list(mses))
        update_policy(
            good_button['observations'], good_button['actions'],
            good_button['agent_info'], [r] * num_steps)
    elif not good_button['picked']:
        # reward the picked decoy by its mean distance to all other buttons
        mses = map(
            lambda x: mse(np.array(x['action']), picked_button['action']),
            unpicked_buttons + [good_button])
        r = np.mean(list(mses))
        print('reward for picked button is ', r)
        update_policy(
            picked_button['observations'], picked_button['actions'],
            picked_button['agent_info'], [r] * num_steps)
        # penalize the good button (negative of its distance to the pick)
        mses = mse(np.array(picked_button['action']), good_button['action'])
        mses *= -1
        update_policy(
            good_button['observations'], good_button['actions'],
            good_button['agent_info'], [mses] * num_steps)
        # penalize button that were unpicked and that are maximally far from the button
        # that was picked
        mses = map(
            lambda x: mse(np.array(x['action']), picked_button['action']),
            unpicked_buttons)
        # sort buttons (and their distances) farthest-first
        unpicked_buttons = sorted(
            zip(unpicked_buttons, mses),
            key=lambda x: x[1],
            reverse=True)
        mses = list(map(lambda x: x[1], unpicked_buttons))
        unpicked_buttons = list(map(lambda x: x[0], unpicked_buttons))
        num_unpicked = len(unpicked_buttons)
        # only penalize up to max_bad_buttons_to_update of them
        num_to_update = min(num_unpicked, CONFIG['max_bad_buttons_to_update'])
        for idx, u in enumerate(unpicked_buttons[:num_to_update]):
            update_policy(
                u['observations'], u['actions'],
                u['agent_info'], [-mses[idx]] * num_steps)
    return jsonify({'status': 'ok'}), 200
@click.command()
@click.option('--server-port', default=8080, help='server port')
def start_app(server_port):
    """Run the Flask development server on *server_port*."""
    # logging uses %-style formatting: the original passed server_port as a
    # spare argument to a placeholder-free message, which triggers a
    # "not all arguments converted" error inside the logging machinery.
    logger.info('Starting on %s', server_port)
    app.run(port=server_port)
if __name__ == '__main__':
    start_app()
| [
"flask_cors.CORS",
"rllab.policies.gaussian_mlp_policy.GaussianMLPPolicy",
"click.option",
"rllab.baselines.linear_feature_baseline.LinearFeatureBaseline",
"rllab.algos.trpo.TRPO",
"numpy.clip",
"collections.defaultdict",
"json.dumps",
"flask.jsonify",
"numpy.mean",
"os.path.join",
"rllab.misc... | [((563, 590), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (580, 590), False, 'import logging\n'), ((949, 972), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (967, 972), False, 'import os\n'), ((988, 1033), 'os.path.join', 'os.path.join', (['home', '"""rllab/experiments_{}_{}"""'], {}), "(home, 'rllab/experiments_{}_{}')\n", (1000, 1033), False, 'import os\n'), ((9158, 9173), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (9163, 9173), False, 'from flask import Flask\n'), ((16066, 16081), 'click.command', 'click.command', ([], {}), '()\n', (16079, 16081), False, 'import click\n'), ((16083, 16146), 'click.option', 'click.option', (['"""--server-port"""'], {'default': '(8080)', 'help': '"""server port"""'}), "('--server-port', default=8080, help='server port')\n", (16095, 16146), False, 'import click\n'), ((2141, 2174), 'rllab.misc.tensor_utils.stack_tensor_list', 'tensor_utils.stack_tensor_list', (['r'], {}), '(r)\n', (2171, 2174), False, 'from rllab.misc import tensor_utils\n'), ((2202, 2237), 'rllab.misc.tensor_utils.stack_tensor_list', 'tensor_utils.stack_tensor_list', (['obs'], {}), '(obs)\n', (2232, 2237), False, 'from rllab.misc import tensor_utils\n'), ((2260, 2298), 'rllab.misc.tensor_utils.stack_tensor_list', 'tensor_utils.stack_tensor_list', (['action'], {}), '(action)\n', (2290, 2298), False, 'from rllab.misc import tensor_utils\n'), ((2352, 2399), 'rllab.misc.tensor_utils.stack_tensor_dict_list', 'tensor_utils.stack_tensor_dict_list', (['agent_info'], {}), '(agent_info)\n', (2387, 2399), False, 'from rllab.misc import tensor_utils\n'), ((2907, 2923), 'numpy.array', 'np.array', (['reward'], {}), '(reward)\n', (2915, 2923), True, 'import numpy as np\n'), ((4989, 5039), 'os.makedirs', 'os.makedirs', (["CONFIG['snapshot_dir']"], {'exist_ok': '(True)'}), "(CONFIG['snapshot_dir'], exist_ok=True)\n", (5000, 5039), False, 'import os\n'), ((5312, 5354), 'os.path.join', 
'os.path.join', (["CONFIG['snapshot_dir']", 'file'], {}), "(CONFIG['snapshot_dir'], file)\n", (5324, 5354), False, 'import os\n'), ((5566, 5627), 'os.path.join', 'os.path.join', (["CONFIG['snapshot_dir']", '"""reward_monitoring.txt"""'], {}), "(CONFIG['snapshot_dir'], 'reward_monitoring.txt')\n", (5578, 5627), False, 'import os\n'), ((5752, 5875), 'rllab.envs.website_env.WebsiteEnv', 'WebsiteEnv', ([], {'num_actions': 'num_actions', 'action_bounds': '([1.0] * num_actions)', 'num_observations': 'num_obs', 'observation_bound': '(1.0)'}), '(num_actions=num_actions, action_bounds=[1.0] * num_actions,\n num_observations=num_obs, observation_bound=1.0)\n', (5762, 5875), False, 'from rllab.envs.website_env import WebsiteEnv\n'), ((5922, 5936), 'rllab.envs.normalized_env.normalize', 'normalize', (['env'], {}), '(env)\n', (5931, 5936), False, 'from rllab.envs.normalized_env import normalize\n'), ((5951, 6037), 'rllab.policies.gaussian_mlp_policy.GaussianMLPPolicy', 'GaussianMLPPolicy', ([], {'env_spec': 'env.spec', 'hidden_sizes': 'hidden_sizes', 'adaptive_std': '(True)'}), '(env_spec=env.spec, hidden_sizes=hidden_sizes,\n adaptive_std=True)\n', (5968, 6037), False, 'from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy\n'), ((6080, 6120), 'rllab.baselines.linear_feature_baseline.LinearFeatureBaseline', 'LinearFeatureBaseline', ([], {'env_spec': 'env.spec'}), '(env_spec=env.spec)\n', (6101, 6120), False, 'from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline\n'), ((6133, 6276), 'rllab.algos.trpo.TRPO', 'TRPO', ([], {'env': 'env', 'policy': 'policy', 'baseline': 'baseline', 'discount': '(0.99)', 'step_size': '(0.01)', 'optimizer_args': "{'accept_violation': False, 'cg_iters': 20}"}), "(env=env, policy=policy, baseline=baseline, discount=0.99, step_size=\n 0.01, optimizer_args={'accept_violation': False, 'cg_iters': 20})\n", (6137, 6276), False, 'from rllab.algos.trpo import TRPO\n'), ((6935, 6956), 'joblib.load', 'joblib.load', (['filename'], 
{}), '(filename)\n', (6946, 6956), False, 'import joblib\n'), ((7190, 7215), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (7205, 7215), False, 'import os\n'), ((7244, 7305), 'os.path.join', 'os.path.join', (["CONFIG['snapshot_dir']", '"""reward_monitoring.txt"""'], {}), "(CONFIG['snapshot_dir'], 'reward_monitoring.txt')\n", (7256, 7305), False, 'import os\n'), ((7623, 7665), 'os.path.join', 'os.path.join', (["CONFIG['snapshot_dir']", 'file'], {}), "(CONFIG['snapshot_dir'], file)\n", (7635, 7665), False, 'import os\n'), ((7753, 7774), 'numpy.mean', 'np.mean', (['((x - y) ** 2)'], {}), '((x - y) ** 2)\n', (7760, 7774), True, 'import numpy as np\n'), ((7850, 7860), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (7857, 7860), True, 'import numpy as np\n'), ((8536, 8559), 'numpy.clip', 'np.clip', (['x_prime', '(-1)', '(1)'], {}), '(x_prime, -1, 1)\n', (8543, 8559), True, 'import numpy as np\n'), ((8991, 9006), 'flask_cors.CORS', 'CORS', (['flask_app'], {}), '(flask_app)\n', (8995, 9006), False, 'from flask_cors import CORS\n'), ((9347, 9365), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (9363, 9365), False, 'from flask import request\n'), ((10211, 10229), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (10227, 10229), False, 'from flask import request\n'), ((10650, 10667), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10661, 10667), False, 'from collections import defaultdict\n'), ((11872, 11890), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (11888, 11890), False, 'from flask import request\n'), ((12232, 12254), 'numpy.clip', 'np.clip', (['action', '(-1)', '(1)'], {}), '(action, -1, 1)\n', (12239, 12254), True, 'import numpy as np\n'), ((13317, 13335), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (13333, 13335), False, 'from flask import request\n'), ((1584, 1598), 'numpy.array', 'np.array', (['d[k]'], {}), '(d[k])\n', (1592, 1598), True, 
'import numpy as np\n'), ((2560, 2575), 'numpy.array', 'np.array', (['[obs]'], {}), '([obs])\n', (2568, 2575), True, 'import numpy as np\n'), ((2600, 2613), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (2608, 2613), True, 'import numpy as np\n'), ((2673, 2691), 'numpy.array', 'np.array', (['[action]'], {}), '([action])\n', (2681, 2691), True, 'import numpy as np\n'), ((2719, 2735), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (2727, 2735), True, 'import numpy as np\n'), ((3742, 3775), 'numpy.mean', 'np.mean', (["CONFIG['picked_correct']"], {}), "(CONFIG['picked_correct'])\n", (3749, 3775), True, 'import numpy as np\n'), ((6711, 6726), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (6719, 6726), True, 'import numpy as np\n'), ((9236, 9261), 'flask.jsonify', 'jsonify', (["{'status': 'ok'}"], {}), "({'status': 'ok'})\n", (9243, 9261), False, 'from flask import jsonify\n'), ((10002, 10027), 'flask.jsonify', 'jsonify', (["{'status': 'ok'}"], {}), "({'status': 'ok'})\n", (10009, 10027), False, 'from flask import jsonify\n'), ((10926, 10948), 'numpy.clip', 'np.clip', (['action', '(-1)', '(1)'], {}), '(action, -1, 1)\n', (10933, 10948), True, 'import numpy as np\n'), ((10968, 10983), 'numpy.copy', 'np.copy', (['action'], {}), '(action)\n', (10975, 10983), True, 'import numpy as np\n'), ((11107, 11143), 'numpy.concatenate', 'np.concatenate', (["good_dict['actions']"], {}), "(good_dict['actions'])\n", (11121, 11143), True, 'import numpy as np\n'), ((11739, 11752), 'flask.jsonify', 'jsonify', (['resp'], {}), '(resp)\n', (11746, 11752), False, 'from flask import jsonify\n'), ((12939, 12952), 'flask.jsonify', 'jsonify', (['resp'], {}), '(resp)\n', (12946, 12952), False, 'from flask import jsonify\n'), ((16032, 16057), 'flask.jsonify', 'jsonify', (["{'status': 'ok'}"], {}), "({'status': 'ok'})\n", (16039, 16057), False, 'from flask import jsonify\n'), ((4844, 4867), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4865, 4867), 
False, 'import datetime\n'), ((9684, 9708), 'flask.jsonify', 'jsonify', (["{'status': msg}"], {}), "({'status': msg})\n", (9691, 9708), False, 'from flask import jsonify\n'), ((9895, 9919), 'flask.jsonify', 'jsonify', (["{'status': msg}"], {}), "({'status': msg})\n", (9902, 9919), False, 'from flask import jsonify\n'), ((10437, 10477), 'flask.jsonify', 'jsonify', (["{'status': 'invalid input obs'}"], {}), "({'status': 'invalid input obs'})\n", (10444, 10477), False, 'from flask import jsonify\n'), ((12058, 12098), 'flask.jsonify', 'jsonify', (["{'status': 'invalid input obs'}"], {}), "({'status': 'invalid input obs'})\n", (12065, 12098), False, 'from flask import jsonify\n'), ((2519, 2532), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (2527, 2532), True, 'import numpy as np\n'), ((2626, 2642), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (2634, 2642), True, 'import numpy as np\n'), ((4408, 4424), 'json.dumps', 'json.dumps', (['path'], {}), '(path)\n', (4418, 4424), False, 'import json\n'), ((15033, 15066), 'numpy.array', 'np.array', (["picked_button['action']"], {}), "(picked_button['action'])\n", (15041, 15066), True, 'import numpy as np\n'), ((15402, 15423), 'numpy.array', 'np.array', (["x['action']"], {}), "(x['action'])\n", (15410, 15423), True, 'import numpy as np\n'), ((8232, 8249), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (8246, 8249), True, 'import numpy as np\n'), ((14281, 14302), 'numpy.array', 'np.array', (["x['action']"], {}), "(x['action'])\n", (14289, 14302), True, 'import numpy as np\n'), ((14652, 14673), 'numpy.array', 'np.array', (["x['action']"], {}), "(x['action'])\n", (14660, 14673), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from hotspots.hs_io import HotspotReader
from hotspots.grid_extension import Grid
from hotspots.protein_extension import Protein, centroid
from hotspots.wrapper_pymol import PyMOLCommands, PyMOLFile
from ccdc import io
class TestBindingSiteFromGrid(unittest.TestCase):
    """Tests for deriving a protein binding site from a hotspot grid."""

    def setUp(self):
        self.prot = Protein.from_file("testdata/result/binding_site.pdb")
        self.grid = Grid.from_file("testdata/result/molA.grd")

    def test_centroid(self):
        coords = np.array([a.coordinates for a in self.prot.residues[0].atoms])
        cent = centroid(coords)
        expected = (1.889772727272726, 2.9687272727272713, -21.237954545454546)
        for got, want in zip(cent, expected):
            self.assertAlmostEqual(got, want)

    def test_detect_from_grid(self):
        bs = Protein.BindingSiteFromGrid._detect_from_grid(self.prot, self.grid, 4)
        self.assertEqual(18, len(bs))
        # write a PyMOL script for visual inspection
        pymol = PyMOLFile()
        pymol.commands += PyMOLCommands.load("binding_site.pdb", "abc")
        for res in bs:
            pymol.commands += PyMOLCommands.select("sele", f'resi {res.identifier.split(":")[1][3:]}')
            pymol.commands += PyMOLCommands.show("sticks", "sele")
        pymol.write("testdata/protein_extension/test_bindingsitefromgrid.py")

    def test_BindingSiteFromGrid(self):
        bs = Protein.BindingSiteFromGrid(self.prot, self.grid, within=6)
        print(len(bs.residues))
        self.assertEqual(28, len(bs.residues))
        # write the binding site and a PyMOL script for visual inspection
        pymol = PyMOLFile()
        pymol.commands += PyMOLCommands.load("binding_site.pdb", "abc")
        with io.MoleculeWriter("testdata/protein_extension/binding_site.pdb") as w:
            w.write(bs.protein)
        for res in bs.residues:
            pymol.commands += PyMOLCommands.select("sele", f'resi {res.identifier.split(":")[1][3:]}')
            pymol.commands += PyMOLCommands.show("sticks", "sele")
        pymol.write("testdata/protein_extension/test_bindingsitefromgrid.py")
| [
"hotspots.wrapper_pymol.PyMOLFile",
"hotspots.grid_extension.Grid.from_file",
"hotspots.protein_extension.Protein.BindingSiteFromGrid._detect_from_grid",
"ccdc.io.MoleculeWriter",
"hotspots.wrapper_pymol.PyMOLCommands.load",
"hotspots.protein_extension.Protein.from_file",
"numpy.array",
"hotspots.wrap... | [((356, 409), 'hotspots.protein_extension.Protein.from_file', 'Protein.from_file', (['"""testdata/result/binding_site.pdb"""'], {}), "('testdata/result/binding_site.pdb')\n", (373, 409), False, 'from hotspots.protein_extension import Protein, centroid\n'), ((431, 473), 'hotspots.grid_extension.Grid.from_file', 'Grid.from_file', (['"""testdata/result/molA.grd"""'], {}), "('testdata/result/molA.grd')\n", (445, 473), False, 'from hotspots.grid_extension import Grid\n'), ((894, 964), 'hotspots.protein_extension.Protein.BindingSiteFromGrid._detect_from_grid', 'Protein.BindingSiteFromGrid._detect_from_grid', (['self.prot', 'self.grid', '(4)'], {}), '(self.prot, self.grid, 4)\n', (939, 964), False, 'from hotspots.protein_extension import Protein, centroid\n'), ((1052, 1063), 'hotspots.wrapper_pymol.PyMOLFile', 'PyMOLFile', ([], {}), '()\n', (1061, 1063), False, 'from hotspots.wrapper_pymol import PyMOLCommands, PyMOLFile\n'), ((1087, 1132), 'hotspots.wrapper_pymol.PyMOLCommands.load', 'PyMOLCommands.load', (['"""binding_site.pdb"""', '"""abc"""'], {}), "('binding_site.pdb', 'abc')\n", (1105, 1132), False, 'from hotspots.wrapper_pymol import PyMOLCommands, PyMOLFile\n'), ((1459, 1518), 'hotspots.protein_extension.Protein.BindingSiteFromGrid', 'Protein.BindingSiteFromGrid', (['self.prot', 'self.grid'], {'within': '(6)'}), '(self.prot, self.grid, within=6)\n', (1486, 1518), False, 'from hotspots.protein_extension import Protein, centroid\n'), ((1615, 1626), 'hotspots.wrapper_pymol.PyMOLFile', 'PyMOLFile', ([], {}), '()\n', (1624, 1626), False, 'from hotspots.wrapper_pymol import PyMOLCommands, PyMOLFile\n'), ((1650, 1695), 'hotspots.wrapper_pymol.PyMOLCommands.load', 'PyMOLCommands.load', (['"""binding_site.pdb"""', '"""abc"""'], {}), "('binding_site.pdb', 'abc')\n", (1668, 1695), False, 'from hotspots.wrapper_pymol import PyMOLCommands, PyMOLFile\n'), ((531, 593), 'numpy.array', 'np.array', (['[a.coordinates for a in self.prot.residues[0].atoms]'], {}), 
'([a.coordinates for a in self.prot.residues[0].atoms])\n', (539, 593), True, 'import numpy as np\n'), ((1286, 1322), 'hotspots.wrapper_pymol.PyMOLCommands.show', 'PyMOLCommands.show', (['"""sticks"""', '"""sele"""'], {}), "('sticks', 'sele')\n", (1304, 1322), False, 'from hotspots.wrapper_pymol import PyMOLCommands, PyMOLFile\n'), ((1712, 1776), 'ccdc.io.MoleculeWriter', 'io.MoleculeWriter', (['"""testdata/protein_extension/binding_site.pdb"""'], {}), "('testdata/protein_extension/binding_site.pdb')\n", (1729, 1776), False, 'from ccdc import io\n'), ((1978, 2014), 'hotspots.wrapper_pymol.PyMOLCommands.show', 'PyMOLCommands.show', (['"""sticks"""', '"""sele"""'], {}), "('sticks', 'sele')\n", (1996, 2014), False, 'from hotspots.wrapper_pymol import PyMOLCommands, PyMOLFile\n')] |
import re
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# Module-wide bar palette: seaborn 'deep' colors followed by 'bright';
# the plotting functions below index it by group number.
color = sns.color_palette('deep') + sns.color_palette('bright')
def FeatureSort(feature_name, group=None, group_name=None, value=None, store_path='',
                is_sort=True, is_show=True, fig=None):
    '''
    Draw a horizontal bar plot of the (optionally sorted) features, coloring bars by group.
    :param feature_name: The name of the features
    :param group: the array to map the feature name to the group name; default maps all to one group
    :param group_name: the group name list to denote the group; default is ['']
    :param value: The value of each feature_name; default is descending ranks
    :param store_path: The store path, supporting .jpg and .eps
    :param is_sort: Boolean, to sort the features according to value. Default is True
    :param is_show: Boolean, to call fig.show() after drawing. Default is True
    :param fig: matplotlib Figure to draw on; a fresh one is created when omitted
    :return: the Axes that was drawn on
    Apr-29-18, <NAME> [<EMAIL>]
    '''
    # None sentinels replace the original defaults (np.array(()), [], plt.figure()),
    # which were evaluated once at import time: a figure was created as an import
    # side effect and state was shared between calls.
    if group is None:
        group = np.array(())
    if group_name is None:
        group_name = []
    if value is None:
        value = []
    if fig is None:
        fig = plt.figure()
    if group.size == 0 and group_name == []:
        # No grouping requested: put everything in one unnamed group
        group = np.zeros((len(feature_name), ), dtype=np.uint8)
        group_name = ['']
    if value == []:
        # Default values are descending ranks so the original order is kept
        value = [len(feature_name) - index for index in range(len(feature_name))]
    else:
        value = np.abs(np.squeeze(value))
    if is_sort:
        sort_index = sorted(range(len(value)), key=lambda k: value[k], reverse=True)
        value = [value[index] for index in sort_index]
        feature_name = [feature_name[index] for index in sort_index]
        group = [group[index] for index in sort_index]
    assert(np.max(group) + 1 == len(group_name))
    # One row per group; each column carries the value only in its group's row,
    # so each group can be drawn in its own color.
    sub_group = np.zeros((len(group_name), len(feature_name)))
    for index in range(len(feature_name)):
        sub_group[group[index], index] = value[index]
    y = range(len(feature_name))
    fig.clear()
    ax = fig.add_subplot(111)
    for index in range(sub_group.shape[0]):
        ax.barh(y, sub_group[index, :], color=color[index])
    ax.set_yticks(range(len(feature_name)))
    ax.set_yticklabels(feature_name)
    ax.set_xticks([])
    if len(group_name) > 1:
        ax.legend(group_name)
    if store_path:
        fig.set_tight_layout(True)
        if store_path[-3:] == 'jpg':
            fig.savefig(store_path, dpi=300, format='jpeg')
        elif store_path[-3:] == 'eps':
            fig.savefig(store_path, dpi=1200, format='eps')
    if is_show:
        fig.show()
    return ax
def ShortFeatureFullName(feature_full_name):
    '''
    Abbreviate a feature name for compact axis labels.
    Names of at most 5 characters are returned unchanged. Longer names are
    abbreviated to their upper-case letters (e.g. 'ZonePercentage' -> 'ZP');
    if the name contains fewer than 2 upper-case letters, the first 5
    characters are used instead.
    :param feature_full_name: the feature name to abbreviate
    :return: the abbreviated name
    '''
    if len(feature_full_name) <= 5:
        return feature_full_name
    capitals = re.findall("[A-Z]", feature_full_name)
    # The original code handled the 0-capital and 1-capital cases in two
    # identical branches; they collapse into one truncation rule.
    if len(capitals) < 2:
        return feature_full_name[:5]
    return ''.join(capitals)
def SeperateRadiomicsFeatures(feature_name):
    '''
    Generate the short feature names, group indices, and group names for radiomics features.
    Each feature name is expected to look like
    sequence_imageclass_featureclass_featurename (e.g. T2_origin_glszm_ZonePercentage);
    a two-part 'sequence_featurename' is also accepted. Features are grouped by the
    'sequence-imageclass-featureclass' combination; a component that is identical
    across all features is blanked out of the group label.
    :param feature_name: list of radiomics feature names
    :return: (short feature names, uint8 group index array, group name list)
    Apr-29-18 <NAME> [<EMAIL>]
    '''
    sub_feature_name = []
    # Per-feature component lists. BUG FIX: the original implementation stored
    # only the *unique* components here and then indexed those short lists per
    # feature, which raised IndexError (or mis-grouped features) whenever more
    # than one sequence / image class / feature class was present.
    seq_per_feature = []
    image_class_per_feature = []
    feature_class_per_feature = []
    for feature in feature_name:
        sep = feature.split('_')
        if len(sep) == 2:
            # Two-part names carry no image class or feature class
            sep = [sep[0], '', '', sep[1]]
        seq_per_feature.append(sep[0])
        image_class_per_feature.append(sep[1])
        feature_class_per_feature.append(sep[-2])
        sub_feature_name.append(ShortFeatureFullName(sep[-1]))
    # Blank out components that do not vary, so they do not clutter the label
    if len(set(seq_per_feature)) == 1:
        seq_per_feature = ['' for _ in feature_name]
    if len(set(image_class_per_feature)) == 1:
        image_class_per_feature = ['' for _ in feature_name]
    if len(set(feature_class_per_feature)) == 1:
        feature_class_per_feature = ['' for _ in feature_name]
    group = []
    group_name = []
    for index in range(len(feature_name)):
        temp_name = seq_per_feature[index] + '-' + image_class_per_feature[index] + '-' + feature_class_per_feature[index]
        if temp_name in group_name:
            group.append(group_name.index(temp_name))
        else:
            # New combination: its group index is its position in group_name
            group.append(len(group_name))
            group_name.append(temp_name)
    return sub_feature_name, np.asarray(group, dtype=np.uint8), group_name
def SortRadiomicsFeature(feature_name, value=[], store_path='', is_show=False, fig=None):
    '''
    Plot sorted radiomics features, grouped by sequence/image-class/feature-class.
    :param feature_name: list of radiomics feature names
    :param value: The value of each feature_name. Default is []
    :param store_path: The store path, supporting .jpg and .eps
    :param is_show: Boolean, to show the figure. Default is False
    :param fig: matplotlib Figure to draw on; a fresh one is created when omitted
    '''
    # The original default fig=plt.figure() was evaluated once at import time,
    # creating a figure as a side effect and sharing it across calls.
    if fig is None:
        fig = plt.figure()
    sub_feature_name, group, group_name = SeperateRadiomicsFeatures(feature_name)
    FeatureSort(sub_feature_name, group, group_name, value, store_path, is_show=is_show, fig=fig)
def GeneralFeatureSort(feature_name, value=[], store_path='', is_sort=True, max_num=-1, is_show=True, fig=None):
    '''
    Draw a horizontal bar plot of features without any grouping.
    :param feature_name: The name of the features
    :param value: The value of each feature_name; default is descending ranks
    :param store_path: The store path, supporting .jpg and .eps
    :param is_sort: Boolean, to sort the features according to value. Default is True
    :param max_num: keep only the top max_num features when positive. Default is -1 (keep all)
    :param is_show: Boolean, to call fig.show() after drawing. Default is True
    :param fig: matplotlib Figure to draw on; a fresh one is created when omitted
    :return: the Axes that was drawn on
    '''
    # The original default fig=plt.figure() was evaluated once at import time,
    # creating a figure as a side effect and sharing it across calls.
    if fig is None:
        fig = plt.figure()
    if not isinstance(value, list):
        value = list(value)
    if value == []:
        value = [len(feature_name) - index for index in range(len(feature_name))]
    if is_sort:
        sort_index = sorted(range(len(value)), key=lambda k: value[k], reverse=True)
        value = [value[index] for index in sort_index]
        feature_name = [feature_name[index] for index in sort_index]
    if max_num > 0:
        value = value[:max_num]
        feature_name = feature_name[:max_num]
    fig.clear()
    # Axes start at x=0.75: the left 75% of the canvas is reserved for the
    # (potentially long) feature-name labels.
    left, bottom, width, height = 0.75, 0.1, 0.2, 0.8
    ax = fig.add_axes([left, bottom, width, height])
    ax.barh(range(len(feature_name)), value, color=color[0])
    ax.set_yticks(range(len(feature_name)))
    ax.set_yticklabels(feature_name)
    ax.set_xticks([])
    if store_path:
        fig.set_tight_layout(True)
        # BUG FIX: save through the figure object (not plt.savefig) so the
        # correct figure is written even when `fig` is not pyplot's current
        # figure; this also matches FeatureSort's behavior.
        if store_path[-3:] == 'jpg':
            fig.savefig(store_path, dpi=300, format='jpeg')
        elif store_path[-3:] == 'eps':
            fig.savefig(store_path, dpi=1200, format='eps')
    if is_show:
        fig.show()
    return ax
if __name__ == '__main__':
    # Demo: sorted, grouped bar plot for four example radiomics features.
    # feature_name = ['DE', 'SAE', 'SZNUM', 'JE', 'Id']
    feature_name = ['10Per', 'Autoc', 'IR', 'GLV']
    value = [72.9, 45.4, 45.2, 41]
    # group[k] is the index into group_name for feature k
    group = np.array([1, 3, 2, 0])
    group_name = ['ADC--firstorder', 'DWI--firstorder', 'DWI--glcm', 'DWI--glszm']
    # group = [0, 1, 1, 0, 0]
    # group_name = ['GLCM', 'GLSZM']
    # value = 0.1, 0.5, 0.9, 0.2, 0.1
    FeatureSort(feature_name, group, group_name, value, is_show=True)
    # import pandas as pd
    # df = pd.read_csv(r'C:\Users\yangs\Desktop\anova_sort.csv', index_col=0)
    # feature_name = list(df.index)
    # value = list(df['F'])
    # new_feature_name = [ShortFeatureFullName(index) for index in feature_name]
    # GeneralFeatureSort(new_feature_name, value, max_num=4, is_show=True, store_path=r'D:\MyDocs\Document\研究生\毕业\毕业论文\图\组学\ANOVA_sort.jpg')
    # SortRadiomicsFeature(new_feature_name, value, is_show=True)
| [
"numpy.asarray",
"matplotlib.pyplot.figure",
"re.findall",
"numpy.array",
"numpy.max",
"seaborn.color_palette",
"numpy.squeeze",
"matplotlib.pyplot.savefig"
] | [((92, 117), 'seaborn.color_palette', 'sns.color_palette', (['"""deep"""'], {}), "('deep')\n", (109, 117), True, 'import seaborn as sns\n'), ((120, 147), 'seaborn.color_palette', 'sns.color_palette', (['"""bright"""'], {}), "('bright')\n", (137, 147), True, 'import seaborn as sns\n'), ((186, 198), 'numpy.array', 'np.array', (['()'], {}), '(())\n', (194, 198), True, 'import numpy as np\n'), ((288, 300), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (298, 300), True, 'import matplotlib.pyplot as plt\n'), ((2479, 2517), 're.findall', 're.findall', (['"""[A-Z]"""', 'feature_full_name'], {}), "('[A-Z]', feature_full_name)\n", (2489, 2517), False, 'import re\n'), ((4749, 4761), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4759, 4761), True, 'import matplotlib.pyplot as plt\n'), ((5051, 5063), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5061, 5063), True, 'import matplotlib.pyplot as plt\n'), ((6386, 6408), 'numpy.array', 'np.array', (['[1, 3, 2, 0]'], {}), '([1, 3, 2, 0])\n', (6394, 6408), True, 'import numpy as np\n'), ((4619, 4652), 'numpy.asarray', 'np.asarray', (['group'], {'dtype': 'np.uint8'}), '(group, dtype=np.uint8)\n', (4629, 4652), True, 'import numpy as np\n'), ((1185, 1202), 'numpy.squeeze', 'np.squeeze', (['value'], {}), '(value)\n', (1195, 1202), True, 'import numpy as np\n'), ((1506, 1519), 'numpy.max', 'np.max', (['group'], {}), '(group)\n', (1512, 1519), True, 'import numpy as np\n'), ((6006, 6053), 'matplotlib.pyplot.savefig', 'plt.savefig', (['store_path'], {'dpi': '(300)', 'format': '"""jpeg"""'}), "(store_path, dpi=300, format='jpeg')\n", (6017, 6053), True, 'import matplotlib.pyplot as plt\n'), ((6105, 6152), 'matplotlib.pyplot.savefig', 'plt.savefig', (['store_path'], {'dpi': '(1200)', 'format': '"""eps"""'}), "(store_path, dpi=1200, format='eps')\n", (6116, 6152), True, 'import matplotlib.pyplot as plt\n')] |
'''
Defines class FEEstimator, which uses multigrid and partialing out to solve two way fixed effect models. This includes AKM, the Andrews et al. homoskedastic correction, and the Kline et al. heteroskedastic correction.
'''
import warnings
from pathlib import Path
import pyamg
import numpy as np
import pandas as pd
from bipartitepandas import ParamsDict, logger_init
from scipy.sparse import csc_matrix, coo_matrix, diags, linalg, hstack, eye
import time
# import pyreadr
import os
from multiprocessing import Pool, TimeoutError, Value, set_start_method
from timeit import default_timer as timer
import pickle
import time
import json
import glob, sys
# Try to use tqdm
try:
from tqdm import tqdm, trange
except ImportError:
trange = range
# def pipe_qcov(df, e1, e2): # FIXME I moved this from above, also this is used only in commented out code
# v1 = df.eval(e1)
# v2 = df.eval(e2)
# return np.cov(v1, v2)[0][1]
# NOTE: multiprocessing isn't compatible with lambda functions
def _gteq1(a):
return a >= 1
def _0to1(a):
return 0 <= a <= 1
# Define default parameter dictionary
_fe_params_default = ParamsDict({
'ncore': (1, 'type_constrained', (int, _gteq1),
'''
(default=1) Number of cores to use.
''', '>= 1'),
'weighted': (True, 'type', bool,
'''
(default=True) If True, use weighted estimators.
''', None),
'statsonly': (False, 'type', bool,
'''
(default=False) If True, return only basic statistics.
''', None),
'feonly': (False, 'type', bool,
'''
(default=False) If True, estimate only fixed effects and not variances.
''', None),
'Q': ('cov(alpha, psi)', 'set', ['cov(alpha, psi)', 'cov(psi_t, psi_{t+1})'],
'''
(default='cov(alpha, psi)') Which Q matrix to consider. Options include 'cov(alpha, psi)' and 'cov(psi_t, psi_{t+1})'.
''', None),
'ndraw_trace': (5, 'type_constrained', (int, _gteq1),
'''
(default=5) Number of draws to use in trace approximations.
''', '>= 1'),
# 'trace_analytical': (False, 'type', bool, # FIXME not used
# '''
# (default=False) If True, estimate trace analytically.
# ''', None)
'he': (False, 'type', bool,
'''
(default=False) If True, estimate heteroskedastic correction.
''', None),
'he_analytical': (False, 'type', bool,
'''
(default=False) If True, estimate heteroskedastic correction using analytical formula; if False, use JL approxmation.
''', None),
'lev_batchsize': (50, 'type_constrained', (int, _gteq1),
'''
(default=50) Number of draws to use for each batch in approximation of leverages for heteroskedastic correction.
''', '>= 1'),
'lev_batchsize_multiprocessing': (10, 'type_constrained', (int, _gteq1),
'''
(default=10) Batch size to send in parallel. Should evenly divide 'lev_batchsize'.
''', '>= 1'),
'lev_nbatches': (5, 'type_constrained', (int, _gteq1),
'''
(default=5) Maximum number of batches to run in approximation of leverages for heteroskedastic correction.
''', '>= 1'),
'lev_threshold_obs': (100, 'type_constrained', (int, _gteq1),
'''
(default=100) Minimum number of observations with Pii >= threshold where batches will keep running in approximation of leverages for heteroskedastic correction. Once this threshold is met, remaining Pii above threshold will be recomputed analytically.
''', '>= 1'),
'lev_threshold_pii': (0.98, 'type_constrained', (float, _0to1),
'''
(default=0.98) Threshold Pii value for computing threshold number of Pii observations in approximation of leverages for heteroskedastic correction.
''', 'in [0, 1]'),
'levfile': ('', 'type', str,
'''
(default='') File to load precomputed leverages for heteroskedastic correction.
''', None),
# 'con': (False, 'type', bool, # FIXME not used
# '''
# (default=False) Computes the smallest eigen values, this is the filepath where these results are saved.
# ''', None),
'out': ('res_fe.json', 'type', str,
'''
(default='res_fe.json') Outputfile where results are saved.
''', None)
})
def fe_params(update_dict=None):
    '''
    Dictionary of default fe_params.
    Arguments:
        update_dict (dict or None): user parameter values; None means no overrides
    Returns:
        (ParamsDict) dictionary of fe_params
    '''
    # A None sentinel replaces the original mutable default argument ({}),
    # which is shared between all calls of the function.
    new_dict = _fe_params_default.copy()
    if update_dict is not None:
        new_dict.update(update_dict)
    return new_dict
def _weighted_quantile(values, quantiles, sample_weight=None, values_sorted=False, old_style=False):
'''
Very close to numpy.percentile, but supports weights.
NOTE: quantiles should be in [0, 1]!
Arguments:
values (NumPy Array): data
quantiles (array-like): quantiles to compute
sample_weight (array-like): weighting, must be same length as `array` (is `array` supposed to be quantiles?)
values_sorted (bool): if True, skips sorting of initial array
old_style (bool): if True, changes output to be consistent with numpy.percentile
Returns:
(NumPy Array): computed quantiles
'''
values = np.array(values)
quantiles = np.array(quantiles)
if sample_weight is None:
sample_weight = np.ones(len(values))
sample_weight = np.array(sample_weight)
assert np.all(quantiles >= 0) and np.all(quantiles <= 1), \
'quantiles should be in [0, 1]'
if not values_sorted:
sorter = np.argsort(values)
values = values[sorter]
sample_weight = sample_weight[sorter]
weighted_quantiles = np.cumsum(sample_weight) - 0.5 * sample_weight
if old_style:
# To be convenient with numpy.percentile
weighted_quantiles -= weighted_quantiles[0]
weighted_quantiles /= weighted_quantiles[-1]
else:
weighted_quantiles /= np.sum(sample_weight)
return np.interp(quantiles, weighted_quantiles, values)
def _weighted_var(v, w):
'''
Compute weighted variance.
Arguments:
v (NumPy Array): vector to weight
w (NumPy Array): weights
Returns:
v0 (NumPy Array): weighted variance
'''
m0 = np.sum(w * v) / np.sum(w)
v0 = np.sum(w * (v - m0) ** 2) / np.sum(w)
return v0
def _weighted_cov(v1, v2, w):
'''
Compute weighted covariance.
Arguments:
v1 (NumPy Array): vector to weight
v2 (NumPy Array): vector to weight
w (NumPy Array): weights
Returns:
v0 (NumPy Array): weighted variance
'''
m1 = np.sum(w * v1) / np.sum(w)
m2 = np.sum(w * v2) / np.sum(w)
v0 = np.sum(w * (v1 - m1) * (v2 - m2)) / np.sum(w)
return v0
class FEEstimator:
'''
Uses multigrid and partialing out to solve two way fixed effect models. This includes AKM, the Andrews et al. homoskedastic correction, and the Kline et al. heteroskedastic correction.
'''
def __init__(self, data, params=fe_params()):
'''
Arguments:
data (BipartitePandas DataFrame): (collapsed) long format labor data. Data contains the following columns:
i (worker id)
j (firm id)
y (compensation)
t (period) if long
t1 (first period of observation) if collapsed long
t2 (last period of observation) if collapsed long
w (weight)
m (0 if stayer, 1 if mover)
params (ParamsDict): dictionary of parameters for FE estimation. Run tw.fe_params().describe_all() for descriptions of all valid parameters.
'''
# Start logger
logger_init(self)
# self.logger.info('initializing FEEstimator object')
self.adata = data
self.params = params
# Results dictionary
self.res = {}
# Summary results dictionary
self.summary = {}
## Save some commonly used parameters as attributes
# Number of cores to use
self.ncore = self.params['ncore']
# Number of draws to compute leverage for heteroskedastic correction
self.lev_batchsize = self.params['lev_batchsize']
# Number of draws to use in trace approximations
self.ndraw_trace = self.params['ndraw_trace']
self.compute_he = self.params['he']
## Store some parameters in results dictionary
self.res['cores'] = self.ncore
self.res['ndp'] = self.lev_batchsize
self.res['ndt'] = self.ndraw_trace
# self.logger.info('FEEstimator object initialized')
def __getstate__(self):
'''
Defines how the model is pickled.
'''
odict = {k: self.__dict__[k] for k in self.__dict__.keys() - {'ml'}}
return odict
    def __setstate__(self, d):
        '''
        Defines how the model is unpickled.
        Arguments:
            d (dict): attribute dictionary (without the 'ml' solver, which is
                excluded when pickling)
        '''
        # Need to recreate the simple model and the search representation
        # Make d the attribute dictionary
        self.__dict__ = d
        # Rebuild the AMG solver from the stored M matrix
        self.ml = pyamg.ruge_stuben_solver(self.M)
@staticmethod
def __load(filename):
'''
Load files for heteroskedastic correction.
Arguments:
filename (string): file to load
Returns:
fes: loaded file
'''
fes = None
with open(filename, 'rb') as infile:
fes = pickle.load(infile)
return fes
def __save(self, filename):
'''
Save FEEstimator class to filename as pickle.
Arguments:
filename (string): filename to save to
'''
with open(filename, 'wb') as outfile:
pickle.dump(self, outfile)
def fit(self, rng=np.random.default_rng(None)):
'''
Run FE solver.
Arguments:
rng (np.random.Generator): NumPy random number generator
'''
self.fit_1()
self.construct_Q()
self.fit_2(rng)
    def fit_1(self):
        '''
        Run FE solver, part 1. Before fit_2(), modify adata to allow creation of Q matrix.
        '''
        # Record start time; fit_2() stores the total elapsed time in the results
        self.start_time = time.time()
        # Begin cleaning and analysis
        self._prep_vars() # Prepare data
        self._prep_JWM() # Use cleaned adata to generate some attributes
        self._compute_early_stats() # Use cleaned data to compute some statistics
def fit_2(self, rng=np.random.default_rng(None)):
'''
Run FE solver, part 2.
Arguments:
rng (np.random.Generator): NumPy random number generator
'''
if self.params['statsonly']:
# If only returning early statistics
self._save_early_stats()
else:
## If running analysis
# Solve FE model
self._create_fe_solver(rng)
# Add fixed effect columns
self._get_fe_estimates()
if not self.params['feonly']:
## If running full model
# Compute trace approximation
self._compute_trace_approximation_ho(rng)
if self.compute_he:
## If computing heteroskedastic correction
# Solve heteroskedastic model
self._compute_leverages_Pii(rng)
# Compute trace approximation
self._compute_trace_approximation_he(rng)
# Collect all results
self._collect_res()
end_time = time.time()
self.res['total_time'] = end_time - self.start_time
del self.start_time
# Save results to json
self._save_res()
# Drop irrelevant columns
self._drop_cols()
self.logger.info('------ DONE -------')
    def _prep_vars(self):
        '''
        Generate some initial class attributes and results.
        '''
        self.logger.info('preparing the data')
        # self.adata.sort_values(['i', to_list(self.adata.reference_dict['t'])[0]], inplace=True)
        # Number of firms
        self.nf = self.adata.n_firms()
        # Number of workers
        self.nw = self.adata.n_workers()
        # Number of observations
        self.nn = len(self.adata)
        self.logger.info('data firms={} workers={} observations={}'.format(self.nf, self.nw, self.nn))
        self.res['n_firms'] = self.nf
        self.res['n_workers'] = self.nw
        # Movers: unique workers among the observations with m > 0
        self.res['n_movers'] = self.adata.loc[self.adata['m'].to_numpy() > 0, :].n_workers()
        self.res['n_stayers'] = self.res['n_workers'] - self.res['n_movers']
        self.logger.info('data movers={} stayers={}'.format(self.res['n_movers'], self.res['n_stayers']))
        # Generate 'worker_m' indicating whether a worker is a mover or a stayer
        # (1 for every observation of a worker who has any m > 0 observation)
        self.adata.loc[:, 'worker_m'] = (self.adata.groupby('i')['m'].transform('max') > 0).astype(int, copy=False)
        # # Prepare 'cs' column (0 if observation is first for a worker, 1 if intermediate, 2 if last for a worker)
        # worker_first_obs = (self.adata['i'].to_numpy() != np.roll(self.adata['i'].to_numpy(), 1))
        # worker_last_obs = (self.adata['i'].to_numpy() != np.roll(self.adata['i'].to_numpy(), -1))
        # self.adata['cs'] = 1
        # self.adata.loc[(worker_first_obs) & ~(worker_last_obs), 'cs'] = 0
        # self.adata.loc[(worker_last_obs) & ~(worker_first_obs), 'cs'] = 2
        #res['year_max'] = int(sdata['year'].max())
        #res['year_min'] = int(sdata['year'].min())
    def _prep_JWM(self):
        '''
        Generate J, W, and M matrices.
        '''
        ### Matrices for the cross-section
        ## Firms
        # J: (nn x nf) indicator matrix with a single 1 per row, in the column
        # of the observation's firm j
        J = csc_matrix((np.ones(self.nn), (self.adata.index.to_numpy(), self.adata['j'].to_numpy())), shape=(self.nn, self.nf))
        # Normalize one firm to 0
        J = J[:, range(self.nf - 1)]
        self.J = J
        ## Workers
        # W: (nn x nw) indicator matrix with a single 1 per row, in the column
        # of the observation's worker i
        W = csc_matrix((np.ones(self.nn), (self.adata.index.to_numpy(), self.adata['i'].to_numpy())), shape=(self.nn, self.nw))
        self.W = W
        if self.params['weighted'] and ('w' in self.adata.columns):
            # Diagonal weight matrix
            Dp = diags(self.adata['w'].to_numpy())
            # Dwinv = diags(1.0 / ((W.T @ Dp @ W).diagonal())) # linalg.inv(csc_matrix(W.T @ Dp @ W))
        else:
            # Diagonal weight matrix - all weight one
            Dp = diags(np.ones(len(self.adata)))
        # Inverse of the (diagonal) worker block W' Dp W
        Dwinv = diags(1.0 / ((W.T @ Dp @ W).diagonal()))
        self.Dp = Dp
        self.Dp_sqrt = np.sqrt(Dp)
        self.Dwinv = Dwinv
        self.logger.info('Prepare linear solver')
        # Finally create M
        # M = J'DpJ - J'DpW (W'DpW)^{-1} W'DpJ: the firm block of the normal
        # equations after partialing out worker effects
        M = J.T @ Dp @ J - J.T @ Dp @ W @ Dwinv @ W.T @ Dp @ J
        self.M = M
        # Algebraic-multigrid solver for systems in M
        self.ml = pyamg.ruge_stuben_solver(M)
        # Save time variable
        self.last_invert_time = 0
    def _compute_early_stats(self):
        '''
        Compute some early statistics.
        '''
        # Firm-level aggregates: count of mover observations, mean wage, observation count
        fdata = self.adata.groupby('j').agg({'worker_m': 'sum', 'y': 'mean', 'i': 'count'})
        fm, fy, fi = fdata.loc[:, 'worker_m'].to_numpy(), fdata.loc[:, 'y'].to_numpy(), fdata.loc[:, 'i'].to_numpy()
        # Deciles 0%, 10%, ..., 100%, weighted by firm observation counts
        ls = np.linspace(0, 1, 11)
        self.res['mover_quantiles'] = _weighted_quantile(fm, ls, fi).tolist()
        self.res['size_quantiles'] = _weighted_quantile(fi, ls, fi).tolist()
        # self.res['movers_per_firm'] = self.adata.loc[self.adata.loc[:, 'm'] > 0, :].groupby('j')['i'].nunique().mean()
        self.res['between_firm_var'] = _weighted_var(fy, fi)
        self.res['var_y'] = _weighted_var(self.adata.loc[:, 'y'].to_numpy(), self.Dp)
        self.logger.info('total variance: {:0.4f}'.format(self.res['var_y']))
        # extract woodcock moments using sdata and jdata
        # get averages by firms for stayers
        #dsf = adata.query('cs==1').groupby('j1').agg(y1sj=('y1','mean'), nsj=('y1','count'))
        #ds = pd.merge(adata.query('cs==1'), dsf, on="j1")
        #ds.eval("y1s_lo = (nsj * y1sj - y1) / (nsj - 1)",inplace=True)
        #res['woodcock_var_psi'] = ds.query('nsj > 1').pipe(pipe_qcov, 'y1', 'y1s_lo')
        #res['woodcock_var_alpha'] = np.minimum( jdata.pipe(pipe_qcov, 'y1','y2'), adata.query('cs==1')['y1'].var() - res['woodcock_var_psi'] )
        #res['woodcock_var_eps'] = adata.query('cs==1')['y1'].var() - res['woodcock_var_alpha'] - res['woodcock_var_psi']
        #self.logger.info("[woodcock] var psi = {}", res['woodcock_var_psi'])
        #self.logger.info("[woodcock] var alpha = {}", res['woodcock_var_alpha'])
        #self.logger.info("[woodcock] var eps = {}", res['woodcock_var_eps'])
def _save_early_stats(self):
'''
Save the early statistics computed in compute_early_stats().
'''
with open(self.params['out'], 'w') as outfile:
json.dump(self.res, outfile)
self.logger.info('saved results to {}'.format(self.params['out']))
self.logger.info('--statsonly was passed as argument, so we skip all estimation.')
self.logger.info('------ DONE -------')
# sys.exit() # FIXME I don't think this is necessary (does it even work?) since this is now a class object
    def construct_Q(self):
        '''
        Generate columns in adata necessary to construct Q.
        '''
        if self.params['Q'] == 'cov(alpha, psi)':
            # Which rows to select
            # self.adata['Jq'] = 1
            # self.adata['Wq'] = 1
            # Rows for csc_matrix: one row per observation
            self.adata.loc[:, 'Jq_row'] = np.arange(self.nn) # self.adata['Jq'].cumsum() - 1
            self.adata.loc[:, 'Wq_row'] = np.arange(self.nn) # self.adata['Wq'].cumsum() - 1
            # Columns for csc_matrix: firm id for Jq, worker id for Wq
            self.adata.loc[:, 'Jq_col'] = self.adata.loc[:, 'j']
            self.adata.loc[:, 'Wq_col'] = self.adata.loc[:, 'i']
        elif self.params['Q'] in ['cov(psi_t, psi_{t+1})', 'cov(psi_i, psi_j)']:
            warnings.warn('These Q options are not yet implemented.')
        # elif self.params['Q'] == 'cov(psi_t, psi_{t+1})':
        #     self.adata['Jq'] = (self.adata['worker_m'] > 0) & ((self.adata['cs'] == 0) | (self.adata['cs'] == 1))
        #     self.adata['Jq_row'] = self.adata['Jq'].cumsum() - 1
        #     self.adata['Jq_col'] = self.adata['j']
        #     self.adata['Wq'] = (self.adata['worker_m'] > 0) & ((self.adata['cs'] == 1) | (self.adata['cs'] == 2))
        #     self.adata['Wq_row'] = self.adata['Wq'].cumsum() - 1
        #     self.adata['Wq_col'] = self.adata['j']
        # elif self.params['Q'] == 'cov(psi_i, psi_j)': # Code doesn't work
        #     self.adata['Jq'] = (self.adata['worker_m'] > 0) & (self.adata['cs'] == 1)
        #     self.adata['Jq_row'] = self.adata['j1']
        #     self.adata['Jq_col'] = self.adata['j1']
        #     self.adata['Wq'] = (self.adata['worker_m'] > 0) & (self.adata['cs'] == 0)
        #     # Recall j1, j2 swapped for m==1 and cs==0
        #     self.adata['Wq_row'] = self.adata['j2']
        #     self.adata['Wq_col'] = self.adata['j1']
    def __construct_Jq_Wq(self):
        '''
        Construct Jq and Wq matrices.
        Returns:
            Jq (Pandas DataFrame): left matrix for computing Q
            Wq (Pandas DataFrame): right matrix for computing Q
        '''
        # FIXME this method is irrelevant at the moment
        # NOTE(review): the early return below makes everything after it
        # unreachable dead code; it is kept for future reference only.
        return self.J, self.W
        # Construct Jq, Wq matrices
        Jq = self.adata[self.adata['Jq'] == 1].reset_index(drop=True)
        self.Yq = Jq['y']
        nJ = len(Jq)
        nJ_row = Jq['Jq_row'].max() + 1 # FIXME len(Jq['Jq_row'].unique())
        nJ_col = Jq['Jq_col'].max() + 1 # FIXME len(Jq['Jq_col'].unique())
        Jq = csc_matrix((np.ones(nJ), (Jq['Jq_row'], Jq['Jq_col'])), shape=(nJ_row, nJ_col))
        if nJ_col == self.nf: # If looking at firms, normalize one to 0
            Jq = Jq[:, range(self.nf - 1)]
        Wq = self.adata[self.adata['Wq'] == 1].reset_index(drop=True)
        nW = len(Wq)
        nW_row = Wq['Wq_row'].max() + 1 # FIXME len(Wq['Wq_row'].unique())
        nW_col = Wq['Wq_col'].max() + 1 # FIXME len(Wq['Wq_col'].unique())
        Wq = csc_matrix((np.ones(nW), (Wq['Wq_row'], Wq['Wq_col'])), shape=(nW_row, nW_col)) # FIXME Should we use nJ because require Jq, Wq to have the same size?
        # if nW_col == self.nf: # If looking at firms, normalize one to 0
        #     Wq = Wq[:, range(self.nf - 1)]
        return Jq, Wq
def _create_fe_solver(self, rng=np.random.default_rng(None)):
'''
Solve FE model.
Arguments:
rng (np.random.Generator): NumPy random number generator
'''
self.Y = self.adata.loc[:, 'y'].to_numpy()
# try to pickle the object to see its size
# self.save('tmp.pkl') # FIXME should we delete these 2 lines?
self.logger.info('extract firm effects')
self.psi_hat, self.alpha_hat = self.__solve(self.Y)
self.logger.info('solver time {:2.4f} seconds'.format(self.last_invert_time))
self.logger.info('expected total time {:2.4f} minutes'.format( (self.ndraw_trace * (1 + self.compute_he) + self.lev_batchsize * self.params['lev_nbatches'] * self.compute_he) * self.last_invert_time / 60))
self.E = self.Y - self.__mult_A(self.psi_hat, self.alpha_hat)
self.res['solver_time'] = self.last_invert_time
fe_rsq = 1 - np.power(self.E, 2).mean() / np.power(self.Y, 2).mean()
self.logger.info('fixed effect R-square {:2.4f}'.format(fe_rsq))
# Plug-in variance
self.var_e_pi = np.var(self.E)
if self.params['weighted'] and ('w' in self.adata.columns):
self._compute_trace_approximation_sigma_2(rng)
trace_approximation = np.mean(self.tr_sigma_ho_all)
self.var_e = (self.nn * self.var_e_pi) / (np.sum(1 / self.Dp.data[0]) - trace_approximation)
else:
self.var_e = (self.nn * self.var_e_pi) / (self.nn - (self.nw + self.nf - 1))
self.logger.info('[ho] variance of residuals {:2.4f}'.format(self.var_e))
def _compute_trace_approximation_ho(self, rng=np.random.default_rng(None)):
'''
Compute weighted HO trace approximation for arbitrary Q.
Arguments:
rng (np.random.Generator): NumPy random number generator
'''
self.logger.info('Starting plug-in estimation')
Jq, Wq = self.__construct_Jq_Wq()
# Compute plug-in (biased) estimators
self.tot_var = np.var(self.Y)
if 'w' in self.adata.columns:
self.logger.info('[weighted fe]')
self.var_fe = _weighted_var(Jq * self.psi_hat, self.Dp)
self.cov_fe = _weighted_cov(Jq * self.psi_hat, Wq * self.alpha_hat, self.Dp)
else:
self.logger.info('[fe]')
# Set ddof=0 is necessary, otherwise takes 1 / (N - 1) by default instead of 1 / N
vcv = np.cov(Jq * self.psi_hat, Wq * self.alpha_hat, ddof=0)
self.var_fe = vcv[0, 0]
self.cov_fe = vcv[0, 1]
self.logger.info('var_psi={:2.4f}'.format(self.var_fe))
self.logger.info('cov={:2.4f} tot={:2.4f}'.format(self.cov_fe, self.tot_var))
##### Start full Trace without collapse operator ######
# # Begin trace approximation
# self.tr_var_ho_all = np.zeros(self.ndraw_trace)
# self.tr_cov_ho_all = np.zeros(self.ndraw_trace)
# for r in trange(self.ndraw_trace):
# # Generate -1 or 1 - in this case length nn
# Z = 2 * rng.binomial(1, 0.5, self.nn) - 1
# # Compute either side of the Trace
# R_psi, R_alpha = self.__solve(Z)
# # Applying the Qcov and Qpsi implied by Jq and Wq
# Rq_psi = Jq @ R_psi
# Rq_alpha = Wq @ R_alpha
# self.tr_var_ho_all[r] = np.cov(Rq_psi, Rq_psi)[0][1]
# self.tr_cov_ho_all[r] = np.cov(Rq_psi, Rq_alpha)[0][1]
# self.logger.debug('FE [traces] step {}/{} done.'.format(r, self.ndraw_trace))
##### End full Trace without collapse operator ######
self.logger.info('Starting weighted homoskedastic trace correction ndraws={}, using {} cores'.format(self.ndraw_trace, self.ncore))
# Begin trace approximation
self.tr_var_ho_all = np.zeros(self.ndraw_trace)
self.tr_cov_ho_all = np.zeros(self.ndraw_trace)
for r in trange(self.ndraw_trace):
# Generate -1 or 1
Zpsi = 2 * rng.binomial(1, 0.5, self.nf - 1) - 1
Zalpha = 2 * rng.binomial(1, 0.5, self.nw) - 1
R1 = Jq @ Zpsi
psi1, alpha1 = self.__mult_AAinv(Zpsi, Zalpha)
# Trace correction - var(psi)
R2_psi = Jq @ psi1
self.tr_var_ho_all[r] = np.cov(R1, R2_psi)[0][1]
# Trace correction - cov(psi, alpha)
R2_alpha = Wq @ alpha1
self.tr_cov_ho_all[r] = np.cov(R1, R2_alpha)[0][1]
self.logger.debug('homoskedastic [traces] step {}/{} done.'.format(r, self.ndraw_trace))
# def __compute_trace_approximation_fe(self, rng=np.random.default_rng(None)):
# '''
# Compute FE trace approximation for arbitrary Q.
# Arguments:
# rng (np.random.Generator): NumPy random number generator
# '''
# self.logger.info('Starting FE trace correction ndraws={}, using {} cores'.format(self.ndraw_trace, self.ncore))
# Jq, Wq = self.__construct_Jq_Wq()
# # Compute some stats
# # FIXME Need to figure out when this section can be run
# self.tot_var = np.var(self.Y)
# self.logger.info('[fe]')
# try:
# # print('psi', self.psi_hat)
# self.var_fe = np.var(Jq * self.psi_hat)
# self.logger.info('var_psi={:2.4f}'.format(self.var_fe))
# except ValueError: # If dimension mismatch
# pass
# try:
# self.cov_fe = np.cov(Jq * self.psi_hat, Wq * self.alpha_hat)[0][1]
# self.logger.info('cov={:2.4f} tot={:2.4f}'.format(self.cov_fe, self.tot_var))
# except ValueError: # If dimension mismatch
# pass
# # FIXME Section ends here
# # Begin trace approximation
# self.tr_var_ho_all = np.zeros(self.ndraw_trace)
# self.tr_cov_ho_all = np.zeros(self.ndraw_trace)
# for r in trange(self.ndraw_trace):
# # Generate -1 or 1
# Zpsi = 2 * rng.binomial(1, 0.5, self.nf - 1) - 1
# Zalpha = 2 * rng.binomial(1, 0.5, self.nw) - 1
# R1 = Jq * Zpsi
# psi1, alpha1 = self.__mult_AAinv(Zpsi, Zalpha)
# try:
# R2_psi = Jq * psi1
# # Trace correction
# self.tr_var_ho_all[r] = np.cov(R1, R2_psi)[0][1]
# except ValueError: # If dimension mismatch
# try:
# del self.tr_var_ho_all
# except AttributeError: # Once deleted
# pass
# try:
# R2_alpha = Wq * alpha1
# # Trace correction
# self.tr_cov_ho_all[r] = np.cov(R1, R2_alpha)[0][1]
# except ValueError: # If dimension mismatch
# try:
# del self.tr_cov_ho_all
# except AttributeError: # Once deleted
# pass
# self.logger.debug('FE [traces] step {}/{} done.'.format(r, self.ndraw_trace))
# def compute_trace_approximation_fe(self, rng=np.random.default_rng(None)):
# '''
# Purpose:
# Compute FE trace approximation.
# Arguments:
# rng (np.random.Generator): NumPy random number generator
# '''
# self.logger.info('Starting FE trace correction ndraws={}, using {} cores'.format(self.ndraw_trace, self.ncore))
# self.tr_var_ho_all = np.zeros(self.ndraw_trace)
# self.tr_cov_ho_all = np.zeros(self.ndraw_trace)
# for r in trange(self.ndraw_trace):
# # Generate -1 or 1
# Zpsi = 2 * rng.binomial(1, 0.5, self.nf - 1) - 1
# Zalpha = 2 * rng.binomial(1, 0.5, self.nw) - 1
# R1 = self.Jq * Zpsi
# psi1, alpha1 = self.__mult_AAinv(Zpsi, Zalpha)
# R2_psi = self.Jq * psi1
# R2_alpha = self.Wq * alpha1
# # Trace corrections
# self.tr_var_ho_all[r] = np.cov(R1, R2_psi)[0][1]
# self.tr_cov_ho_all[r] = np.cov(R1, R2_alpha)[0][1]
# self.logger.debug('FE [traces] step {}/{} done.'.format(r, self.ndraw_trace))
# def compute_trace_approximation_j1j2(self):
# '''
# Purpose:
# covariance between psi before and after the move among movers
# '''
# self.logger.info('Starting FE trace correction ndraws={}, using {} cores'.format(self.ndraw_trace, self.ncore))
# self.tr_var_ho_all = np.zeros(self.ndraw_trace)
# for r in trange(self.ndraw_trace):
# # Generate -1 or 1
# Zpsi = 2 * rng.binomial(1, 0.5, self.nf - 1) - 1
# Zalpha = 2 * rng.binomial(1, 0.5, self.nw) - 1
# R1 = self.J1 * Zpsi
# psi1, _ = self.__mult_AAinv(Zpsi, Zalpha)
# R2_psi = self.J2 * psi1
# # Trace corrections
# self.tr_var_ho_all[r] = np.cov(R1, R2_psi)[0][1]
# self.logger.debug('FE [traces] step {}/{} done.'.format(r, self.ndraw_trace))
    def _compute_trace_approximation_he(self, rng=np.random.default_rng(None)):
        '''
        Compute heteroskedastic trace approximation.
        Fills self.tr_var_he_all and self.tr_cov_he_all with one estimate per random
        draw; the draws are averaged later (in _collect_res).
        Arguments:
            rng (np.random.Generator): NumPy random number generator
        NOTE(review): the default rng is created once at definition time, so calls
        relying on the default share a single generator state — confirm intended.
        '''
        self.logger.info('Starting heteroskedastic trace correction ndraws={}, using {} cores'.format(self.ndraw_trace, self.ncore))
        # One slot per draw
        self.tr_var_he_all = np.zeros(self.ndraw_trace)
        self.tr_cov_he_all = np.zeros(self.ndraw_trace)
        Jq, Wq = self.__construct_Jq_Wq()
        for r in trange(self.ndraw_trace):
            # Generate -1 or 1 (Rademacher entries via 2 * Bernoulli(0.5) - 1)
            Zpsi = 2 * rng.binomial(1, 0.5, self.nf - 1) - 1
            Zalpha = 2 * rng.binomial(1, 0.5, self.nw) - 1
            # Push the random vector through (A' * Dp * A)^{-1}
            psi1, alpha1 = self.__mult_AAinv(Zpsi, Zalpha)
            R2_psi = Jq * psi1
            R2_alpha = Wq * alpha1
            # Same projection, but with the per-observation variances Sii applied
            # in the middle (alpha2 is not used below)
            psi2, alpha2 = self.__mult_AAinv(*self.__mult_Atranspose(self.Sii * self.__mult_A(Zpsi, Zalpha, weighted=True)))
            R3_psi = Jq * psi2
            # Trace corrections
            self.tr_var_he_all[r] = np.cov(R2_psi, R3_psi)[0][1]
            self.tr_cov_he_all[r] = np.cov(R2_alpha, R3_psi)[0][1]
            self.logger.debug('heteroskedastic [traces] step {}/{} done.'.format(r, self.ndraw_trace))
def _compute_trace_approximation_sigma_2(self, rng=np.random.default_rng(None)):
'''
Compute weighted sigma^2 trace approximation.
Solving Tr[A'A(A'DA)^{-1}] = Tr[A(A'DA)^{-1}A']. This is for the case where E[epsilon epsilon'|A] = sigma^2 * D^{-1}.
Commented out, complex case: solving Tr[A(A'DA)^{-1}A'DD'A(A'DA)^{-1}A'] = Tr[D'A(A'DA)^{-1}A'A(A'DA)^{-1}A'D] by multiplying the right half by Z, then transposing that to get the left half.
Arguments:
rng (np.random.Generator): NumPy random number generator
'''
self.logger.info('Starting weighted sigma^2 trace correction ndraws={}, using {} cores'.format(self.ndraw_trace, self.ncore))
# Begin trace approximation
self.tr_sigma_ho_all = np.zeros(self.ndraw_trace)
for r in trange(self.ndraw_trace):
# Generate -1 or 1 - in this case length nn
Z = 2 * rng.binomial(1, 0.5, self.nn) - 1
# Compute Trace
R_psi, R_alpha = self.__solve(Z, Dp2=False)
R_y = self.__mult_A(R_psi, R_alpha)
self.tr_sigma_ho_all[r] = Z.T @ R_y
# Trace when not using collapse operator:
# # Compute either side of the Trace
# R_y = self.__proj(Z)
# self.tr_sigma_ho_all[r] = np.sum(R_y ** 2)
self.logger.debug('sigma^2 [traces] step {}/{} done.'.format(r, self.ndraw_trace))
    def _collect_res(self):
        '''
        Collect all results.
        Populates self.res with plug-in (fe), homoskedastic-corrected (ho) and —
        when self.compute_he is set — heteroskedastic-corrected (he) variance and
        covariance estimates, and copies the headline numbers into self.summary.
        '''
        self.res['tot_var'] = self.tot_var
        self.res['eps_var_ho'] = self.var_e
        self.res['eps_var_fe'] = np.var(self.E)
        # self.res['var_y'] = _weighted_var(self.Yq, self.Dp)
        # NOTE(review): 'var_y' is read in the summary loop below but not set in
        # this method — it must have been stored earlier in the pipeline; verify.
        ## FE results ##
        # Plug-in variance
        self.res['var_fe'] = self.var_fe
        self.logger.info('[ho] VAR fe={:2.4f}'.format(self.var_fe))
        # Plug-in covariance
        self.logger.info('[ho] COV fe={:2.4f}'.format(self.cov_fe))
        self.res['cov_fe'] = self.cov_fe
        ## Homoskedastic results ##
        # Trace approximation: variance
        self.res['tr_var_ho'] = np.mean(self.tr_var_ho_all)
        self.logger.info('[ho] VAR tr={:2.4f} (sd={:2.4e})'.format(self.res['tr_var_ho'], np.std(self.tr_var_ho_all)))
        # Trace approximation: covariance
        self.res['tr_cov_ho'] = np.mean(self.tr_cov_ho_all)
        self.logger.info('[ho] COV tr={:2.4f} (sd={:2.4e})'.format(self.res['tr_cov_ho'], np.std(self.tr_cov_ho_all)))
        # Bias-corrected variance: plug-in minus (residual variance) * (trace term)
        self.res['var_ho'] = self.var_fe - self.var_e * self.res['tr_var_ho']
        self.logger.info('[ho] VAR bc={:2.4f}'.format(self.res['var_ho']))
        # Bias-corrected covariance
        self.res['cov_ho'] = self.cov_fe - self.var_e * self.res['tr_cov_ho']
        self.logger.info('[ho] COV bc={:2.4f}'.format(self.res['cov_ho']))
        for res in ['var_y', 'var_fe', 'cov_fe', 'var_ho', 'cov_ho']:
            self.summary[res] = self.res[res]
        ## Heteroskedastic results ##
        if self.compute_he:
            ## Already computed, this just reorders the dictionary
            self.res['eps_var_he'] = self.res['eps_var_he']
            self.res['min_lev'] = self.res['min_lev']
            self.res['max_lev'] = self.res['max_lev']
            ## New results
            self.res['tr_var_he'] = np.mean(self.tr_var_he_all)
            self.res['tr_cov_he'] = np.mean(self.tr_cov_he_all)
            self.res['tr_var_ho_sd'] = np.std(self.tr_var_ho_all)
            self.res['tr_cov_ho_sd'] = np.std(self.tr_cov_ho_all)
            self.res['tr_var_he_sd'] = np.std(self.tr_var_he_all)
            self.res['tr_cov_he_sd'] = np.std(self.tr_cov_he_all)
            self.logger.info('[he] VAR tr={:2.4f} (sd={:2.4e})'.format(self.res['tr_var_he'], np.std(self.tr_var_he_all)))
            self.logger.info('[he] COV tr={:2.4f} (sd={:2.4e})'.format(self.res['tr_cov_he'], np.std(self.tr_cov_he_all)))
            # ----- FINAL ------
            # NOTE: unlike the ho case, the he trace terms are subtracted directly
            # (they already incorporate the per-observation variances)
            self.res['var_he'] = self.var_fe - self.res['tr_var_he']
            self.logger.info('[he] VAR fe={:2.4f} bc={:2.4f}'.format(self.var_fe, self.res['var_he']))
            self.res['cov_he'] = self.cov_fe - self.res['tr_cov_he']
            self.logger.info('[he] COV fe={:2.4f} bc={:2.4f}'.format(self.cov_fe, self.res['cov_he']))
            for res in ['var_he', 'cov_he']:
                self.summary[res] = self.res[res]
def _save_res(self):
'''
Save results as json.
'''
# Convert results into strings to prevent JSON errors
for key, val in self.res.items():
self.res[key] = str(val)
with open(self.params['out'], 'w') as outfile:
json.dump(self.res, outfile)
self.logger.info('saved results to {}'.format(self.params['out']))
def _get_fe_estimates(self):
'''
Add the estimated psi_hats and alpha_hats to the dataframe.
'''
j_vals = np.arange(self.nf)
i_vals = np.arange(self.nw)
# Add 0 for normalized firm
psi_hat_dict = dict(zip(j_vals, np.concatenate([self.psi_hat, np.array([0])])))
alpha_hat_dict = dict(zip(i_vals, self.alpha_hat))
# Attach columns
self.adata.loc[:, 'psi_hat'] = self.adata.loc[:, 'j'].map(psi_hat_dict)
self.adata.loc[:, 'alpha_hat'] = self.adata.loc[:, 'i'].map(alpha_hat_dict)
def __solve(self, Y, Dp1=True, Dp2=True):
'''
Compute (A' * Dp1 * A)^{-1} * A' * Dp2 * Y, the least squares estimate of Y = A * [psi_hat' alpha_hat']', where A = [J W] (J is firm indicators and W is worker indicators) and Dp gives weights.
Arguments:
Y (NumPy Array): wage data
Dp1 (bool): if True, include first weight
Dp2 (bool or str): if True, include second weight; if 'sqrt', use square root of weights
Returns:
(tuple of CSC Matrices): (estimated firm fixed effects, estimated worker fixed effects)
'''
# This gives A' * Dp2 * Y
J_transpose_Y, W_transpose_Y = self.__mult_Atranspose(Y, Dp2)
# This gives (A' * Dp1 * A)^{-1} * A' * Dp2 * Y
psi_hat, alpha_hat = self.__mult_AAinv(J_transpose_Y, W_transpose_Y, Dp1)
return psi_hat, alpha_hat
def __mult_A(self, psi, alpha, weighted=False):
'''
Computes Dp * A * [psi' alpha']', where A = [J W] (J is firm indicators and W is worker indicators) and Dp gives weights (used, for example, to compute estimated outcomes and sample errors).
Arguments:
psi (NumPy Array): firm part to multiply
alpha (NumPy Array): worker part to multiply
weighted (bool or str): if True, include weights; if 'sqrt', use square root of weights
Returns:
(CSC Matrix): result of Dp * A * [psi' alpha']'
'''
if weighted:
if weighted == 'sqrt':
return self.Dp_sqrt @ (self.J @ psi + self.W @ alpha)
return self.Dp @ (self.J @ psi + self.W @ alpha)
return self.J @ psi + self.W @ alpha
def __mult_Atranspose(self, v, weighted=True):
'''
Computes A' * Dp * v, where A = [J W] (J is firm indicators and W is worker indicators) and Dp gives weights.
Arguments:
v (NumPy Array): what to multiply by
weighted (bool or str): if True, include weights; if 'sqrt', use square root of weights
Returns:
(tuple of CSC Matrices): (firm part of result, worker part of result)
'''
if weighted:
if weighted == 'sqrt':
return self.J.T @ self.Dp_sqrt @ v, self.W.T @ self.Dp_sqrt @ v
return self.J.T @ self.Dp @ v, self.W.T @ self.Dp @ v
return self.J.T @ v, self.W.T @ v
def __mult_AAinv(self, psi, alpha, weighted=True):
'''
Computes (A' * Dp * A)^{-1} * [psi' alpha']', where A = [J W] (J is firm indicators and W is worker indicators) and Dp gives weights.
Arguments:
psi (NumPy Array): firm part to multiply
alpha (NumPy Array): worker part to multiply
weighted (bool): if True, include weights
Returns:
(tuple of NumPy Arrays): (firm part of result, worker part of result)
'''
start = timer()
if weighted:
psi_out = self.ml.solve(psi - self.J.T * (self.Dp * (self.W * (self.Dwinv * alpha))), tol=1e-10)
self.last_invert_time = timer() - start
alpha_out = - self.Dwinv * (self.W.T * (self.Dp * (self.J * psi_out))) + self.Dwinv * alpha
else:
psi_out = self.ml.solve(psi - self.J.T * (self.W * (self.Dwinv * alpha)), tol=1e-10)
self.last_invert_time = timer() - start
alpha_out = - self.Dwinv * (self.W.T * (self.J * psi_out)) + self.Dwinv * alpha
return psi_out, alpha_out
def __proj(self, Y, Dp0=False, Dp1=True, Dp2=True):
'''
Compute Dp0 * A * (A' * Dp1 * A)^{-1} * A' * Dp2 * Y, where A = [J W] (J is firm indicators and W is worker indicators) and Dp gives weights (essentially projects Y onto A space).
Solve Y, then project onto X space of data stored in the object. Essentially solves A(A'A)^{-1}A'Y
Arguments:
Y (NumPy Array): wage data
Dp0 (bool or str): if True, include weights in __mult_A(); if 'sqrt', use square root of weights
Dp1 (bool): if True, include first weight in __solve()
Dp2 (bool or str): if True, include second weight in __solve(); if 'sqrt', use square root of weights
Returns:
(CSC Matrix): result of Dp0 * A * (A' * Dp1 * A)^{-1} * A' * Dp2 * Y (essentially the projection of Y onto A space)
'''
return self.__mult_A(*self.__solve(Y, Dp1, Dp2), Dp0)
def __construct_M_inv(self):
'''
Construct (A' * Dp * A)^{-1} block matrix components where M^{-1} is computed explicitly.
'''
# Define variables
J = self.J
W = self.W
Minv = np.linalg.inv(self.M.todense())
Dp = self.Dp
Dwinv = self.Dwinv
# Construct blocks
self.AA_inv_A = Minv
self.AA_inv_B = - Minv @ J.T @ Dp @ W @ Dwinv
self.AA_inv_C = - Dwinv @ W.T @ Dp @ J @ Minv
self.AA_inv_D = Dwinv + self.AA_inv_C @ self.M @ self.AA_inv_B # Dwinv @ (eye(nw) + W.T @ Dp @ J @ M @ J.T @ Dp @ W @ Dwinv)
def __construct_AAinv_components(self):
'''
Construct (A' * Dp * A)^{-1} block matrix components. Use this for computing a small number of individual Pii.
'''
# Define variables
J = self.J
W = self.W
Dp = self.Dp
Dwinv = self.Dwinv
# Construct blocks
self.AA_inv_A = None
self.AA_inv_B = J.T @ Dp @ W @ Dwinv
self.AA_inv_C = - Dwinv @ W.T @ Dp @ J
self.AA_inv_D = None
def __compute_Pii(self, DpJ_i, DpW_i):
'''
Compute Pii for a single observation for heteroskedastic correction.
Arguments:
DpJ_i (NumPy Array): weighted J matrix
DpW_i (NumPy Array): weighted W matrix
Returns:
(float): estimate for Pii
'''
if self.AA_inv_A is not None:
# M^{-1} has been explicitly computed
A = DpJ_i @ self.AA_inv_A @ DpJ_i
B = DpJ_i @ self.AA_inv_B @ DpW_i
C = DpW_i @ self.AA_inv_C @ DpJ_i
D = DpW_i @ self.AA_inv_D @ DpW_i
else:
# M^{-1} has not been explicitly computed
M_DpJ_i = self.ml.solve(DpJ_i)
M_B = self.ml.solve(self.AA_inv_B @ DpW_i)
# Construct blocks
A = DpJ_i @ M_DpJ_i
B = - DpJ_i @ M_B
C = DpW_i @ self.AA_inv_C @ M_DpJ_i
D = DpW_i @ (self.Dwinv @ DpW_i - self.AA_inv_C @ M_B)
return A + B + C + D
    def _compute_leverages_Pii(self, rng=np.random.default_rng(None)):
        '''
        Compute leverages for heteroskedastic correction.
        Leverages may come from (in order of precedence): the analytical formula
        for all movers ('he_analytical'), precomputed files ('levfile'), or batched
        random-projection draws (optionally multiprocessed). Draws whose Pii exceed
        the threshold are recomputed analytically. Fills self.Sii and the
        'min_lev'/'max_lev'/'eps_var_he' entries of self.res.
        Arguments:
            rng (np.random.Generator): NumPy random number generator
        Raises:
            ValueError: if max Pii >= 1 (data not leave-one-observation-out connected)
        '''
        Pii = np.zeros(self.nn)
        # Indices to compute Pii analytically
        analytical_indices = []
        # Boolean mask of mover observations; stayers get firm-level values later
        worker_m = (self.adata.loc[:, 'worker_m'].to_numpy() > 0)
        if self.params['he_analytical']:
            # Every mover gets the exact Pii via explicit M^{-1} blocks
            analytical_indices = self.adata.loc[worker_m, :].index
            self.__construct_M_inv()
        else:
            if len(self.params['levfile']) > 1:
                # Leverages were precomputed and saved to disk; average over all files
                self.logger.info('[he] starting heteroskedastic correction, loading precomputed files')
                files = glob.glob('{}*'.format(self.params['levfile']))
                self.logger.info('[he] found {} files to get leverages from'.format(len(files)))
                self.res['lev_file_count'] = len(files)
                assert len(files) > 0, "Didn't find any leverage files!"
                for f in files:
                    pp = np.load(f)
                    Pii += pp / len(files)
            else:
                self.logger.info('[he] starting heteroskedastic correction lev_batchsize={}, lev_nbatches={}, using {} cores'.format(self.params['lev_batchsize'], self.params['lev_nbatches'], self.ncore))
                for batch_i in range(self.params['lev_nbatches']):
                    if self.ncore > 1:
                        # Multiprocessing
                        ndraw_seeds = self.lev_batchsize // self.params['lev_batchsize_multiprocessing']
                        if np.round(ndraw_seeds * self.params['lev_batchsize_multiprocessing']) != self.lev_batchsize:
                            # 'lev_batchsize_multiprocessing' must evenly divide 'lev_batchsize'
                            raise ValueError("'lev_batchsize_multiprocessing' (currently {}) should evenly divide 'lev_batchsize' (currently {}).".format(self.params['lev_batchsize_multiprocessing'], self.lev_batchsize))
                        # Multiprocessing rng source: https://albertcthomas.github.io/good-practices-random-number-generators/
                        seeds = rng.bit_generator._seed_seq.spawn(ndraw_seeds)
                        set_start_method('spawn')
                        with Pool(processes=self.ncore) as pool:
                            Pii_all = pool.starmap(self._leverage_approx, [(self.params['lev_batchsize_multiprocessing'], np.random.default_rng(seed)) for seed in seeds])
                        # Take mean over draws
                        Pii_i = sum(Pii_all) / len(Pii_all)
                    else:
                        # Single core
                        Pii_i = self._leverage_approx(self.lev_batchsize, rng)
                    # Take weighted average over all Pii draws
                    Pii = (batch_i * Pii + Pii_i) / (batch_i + 1)
                    # Compute number of bad draws (movers whose approximation is above threshold)
                    n_bad_draws = sum(worker_m & (Pii >= self.params['lev_threshold_pii']))
                    # If few enough bad draws, compute them analytically
                    if n_bad_draws < self.params['lev_threshold_obs']:
                        leverage_warning = 'Threshold for max Pii is {}, with {} draw(s) per batch and a maximum of {} batch(es) being drawn. There is/are {} observation(s) with Pii above this threshold. These will be recomputed analytically. It took {} batch(es) to get below the threshold of {} bad observations.'.format(self.params['lev_threshold_pii'], self.lev_batchsize, self.params['lev_nbatches'], n_bad_draws, batch_i + 1, self.params['lev_threshold_obs'])
                        warnings.warn(leverage_warning)
                        break
                    elif batch_i == self.params['lev_nbatches'] - 1:
                        leverage_warning = 'Threshold for max Pii is {}, with {} draw(s) per batch and a maximum of {} batch(es) being drawn. After exhausting the maximum number of batches, there is/are still {} draw(s) with Pii above this threshold. These will be recomputed analytically.'.format(self.params['lev_threshold_pii'], self.lev_batchsize, self.params['lev_nbatches'], n_bad_draws)
                        warnings.warn(leverage_warning)
                # Compute Pii analytically for observations with Pii approximation above threshold value
                analytical_indices = self.adata.loc[worker_m & (Pii >= self.params['lev_threshold_pii']), :].index
                if len(analytical_indices) > 0:
                    self.__construct_AAinv_components()
        # Compute analytical Pii
        if len(analytical_indices) > 0:
            # Construct weighted J and W
            DpJ = np.asarray((self.Dp_sqrt @ self.J).todense())
            DpW = np.asarray((self.Dp_sqrt @ self.W).todense())
            for i in analytical_indices:
                DpJ_i = DpJ[i, :]
                DpW_i = DpW[i, :]
                Pii[i] = self.__compute_Pii(DpJ_i, DpW_i)
        self.res['min_lev'] = Pii[worker_m].min()
        self.res['max_lev'] = Pii[worker_m].max()
        if self.res['max_lev'] >= 1:
            # A leverage of 1 makes 1 - Pii zero below; fail loudly with guidance
            leverage_warning = "Max P_ii is {} which is >= 1. This means your data is not leave-one-observation-out connected. The HE estimator requires leave-one-observation-out connected data to work properly. When cleaning your data, please set clean_params['connectedness'] = 'leave_one_observation_out' to correct this.".format(self.res['max_lev'])
            self.logger.info(leverage_warning)
            raise ValueError(leverage_warning)
        # self.adata['Pii'] = Pii
        # self.adata.to_feather('pii_data.ftr')
        # raise NotImplementedError
        self.logger.info('[he] Leverage range {:2.4f} to {:2.4f}'.format(self.res['min_lev'], self.res['max_lev']))
        # print('Observation with max leverage:', self.adata[self.adata['Pii'] == self.res['max_lev']])
        ## Give stayers the variance estimate at the firm level ##
        # Temporarily set Pii = 0 for stayers to avoid divide-by-zero warning
        Pii[~worker_m] = 0
        # Compute Sii for movers
        self.adata.loc[:, 'Sii'] = self.Y * self.E / (1 - Pii)
        # Link firms to average Sii of movers
        S_j = self.adata.loc[worker_m, :].groupby('j')['Sii'].mean().to_dict()
        Sii_j = self.adata.loc[:, 'j'].map(S_j)
        # Movers keep their own Sii; stayers inherit their firm's average
        self.Sii = np.where(worker_m, self.adata.loc[:, 'Sii'], Sii_j)
        # No longer need Sii column
        self.adata.drop('Sii', axis=1, inplace=True)
        self.res['eps_var_he'] = self.Sii.mean()
        self.logger.info('[he] variance of residuals in heteroskedastic case: {:2.4f}'.format(self.res['eps_var_he']))
def _leverage_approx(self, ndraw_pii, rng=np.random.default_rng(None)):
'''
Draw Pii estimates for use in JL approximation of leverage.
Arguments:
ndraw_pii (int): number of Pii draws
rng (np.random.Generator): NumPy random number generator
Returns:
Pii (NumPy Array): Pii array
'''
Pii = np.zeros(self.nn)
# Compute the different draws
for _ in range(ndraw_pii):
R2 = 2 * rng.binomial(1, 0.5, self.nn) - 1
Pii += np.power(self.__proj(R2, Dp0='sqrt', Dp2='sqrt'), 2.0)
# Take mean over draws
Pii /= ndraw_pii
self.logger.info('done with batch')
return Pii
def _drop_cols(self):
'''
Drop irrelevant columns (['worker_m', 'Jq', 'Wq', 'Jq_row', 'Wq_row', 'Jq_col', 'Wq_col']).
'''
for col in ['worker_m', 'Jq', 'Wq', 'Jq_row', 'Wq_row', 'Jq_col', 'Wq_col']:
if col in self.adata.columns:
self.adata.drop(col, axis=1, inplace=True)
| [
"pickle.dump",
"numpy.load",
"numpy.sum",
"multiprocessing.set_start_method",
"numpy.ones",
"pyamg.ruge_stuben_solver",
"numpy.argsort",
"numpy.random.default_rng",
"numpy.mean",
"numpy.arange",
"pickle.load",
"numpy.interp",
"numpy.round",
"bipartitepandas.logger_init",
"numpy.std",
"... | [((1134, 3990), 'bipartitepandas.ParamsDict', 'ParamsDict', (['{\'ncore\': (1, \'type_constrained\', (int, _gteq1),\n """\n (default=1) Number of cores to use.\n """, \'>= 1\'\n ), \'weighted\': (True, \'type\', bool,\n """\n (default=True) If True, use weighted estimators.\n """\n , None), \'statsonly\': (False, \'type\', bool,\n """\n (default=False) If True, return only basic statistics.\n """\n , None), \'feonly\': (False, \'type\', bool,\n """\n (default=False) If True, estimate only fixed effects and not variances.\n """\n , None), \'Q\': (\'cov(alpha, psi)\', \'set\', [\'cov(alpha, psi)\',\n \'cov(psi_t, psi_{t+1})\'],\n """\n (default=\'cov(alpha, psi)\') Which Q matrix to consider. Options include \'cov(alpha, psi)\' and \'cov(psi_t, psi_{t+1})\'.\n """\n , None), \'ndraw_trace\': (5, \'type_constrained\', (int, _gteq1),\n """\n (default=5) Number of draws to use in trace approximations.\n """\n , \'>= 1\'), \'he\': (False, \'type\', bool,\n """\n (default=False) If True, estimate heteroskedastic correction.\n """\n , None), \'he_analytical\': (False, \'type\', bool,\n """\n (default=False) If True, estimate heteroskedastic correction using analytical formula; if False, use JL approxmation.\n """\n , None), \'lev_batchsize\': (50, \'type_constrained\', (int, _gteq1),\n """\n (default=50) Number of draws to use for each batch in approximation of leverages for heteroskedastic correction.\n """\n , \'>= 1\'), \'lev_batchsize_multiprocessing\': (10, \'type_constrained\', (\n int, _gteq1),\n """\n (default=10) Batch size to send in parallel. 
Should evenly divide \'lev_batchsize\'.\n """\n , \'>= 1\'), \'lev_nbatches\': (5, \'type_constrained\', (int, _gteq1),\n """\n (default=5) Maximum number of batches to run in approximation of leverages for heteroskedastic correction.\n """\n , \'>= 1\'), \'lev_threshold_obs\': (100, \'type_constrained\', (int, _gteq1),\n """\n (default=100) Minimum number of observations with Pii >= threshold where batches will keep running in approximation of leverages for heteroskedastic correction. Once this threshold is met, remaining Pii above threshold will be recomputed analytically.\n """\n , \'>= 1\'), \'lev_threshold_pii\': (0.98, \'type_constrained\', (float,\n _0to1),\n """\n (default=0.98) Threshold Pii value for computing threshold number of Pii observations in approximation of leverages for heteroskedastic correction.\n """\n , \'in [0, 1]\'), \'levfile\': (\'\', \'type\', str,\n """\n (default=\'\') File to load precomputed leverages for heteroskedastic correction.\n """\n , None), \'out\': (\'res_fe.json\', \'type\', str,\n """\n (default=\'res_fe.json\') Outputfile where results are saved.\n """\n , None)}'], {}), '({\'ncore\': (1, \'type_constrained\', (int, _gteq1),\n """\n (default=1) Number of cores to use.\n """, \'>= 1\'\n ), \'weighted\': (True, \'type\', bool,\n """\n (default=True) If True, use weighted estimators.\n """\n , None), \'statsonly\': (False, \'type\', bool,\n """\n (default=False) If True, return only basic statistics.\n """\n , None), \'feonly\': (False, \'type\', bool,\n """\n (default=False) If True, estimate only fixed effects and not variances.\n """\n , None), \'Q\': (\'cov(alpha, psi)\', \'set\', [\'cov(alpha, psi)\',\n \'cov(psi_t, psi_{t+1})\'],\n """\n (default=\'cov(alpha, psi)\') Which Q matrix to consider. 
Options include \'cov(alpha, psi)\' and \'cov(psi_t, psi_{t+1})\'.\n """\n , None), \'ndraw_trace\': (5, \'type_constrained\', (int, _gteq1),\n """\n (default=5) Number of draws to use in trace approximations.\n """\n , \'>= 1\'), \'he\': (False, \'type\', bool,\n """\n (default=False) If True, estimate heteroskedastic correction.\n """\n , None), \'he_analytical\': (False, \'type\', bool,\n """\n (default=False) If True, estimate heteroskedastic correction using analytical formula; if False, use JL approxmation.\n """\n , None), \'lev_batchsize\': (50, \'type_constrained\', (int, _gteq1),\n """\n (default=50) Number of draws to use for each batch in approximation of leverages for heteroskedastic correction.\n """\n , \'>= 1\'), \'lev_batchsize_multiprocessing\': (10, \'type_constrained\', (\n int, _gteq1),\n """\n (default=10) Batch size to send in parallel. Should evenly divide \'lev_batchsize\'.\n """\n , \'>= 1\'), \'lev_nbatches\': (5, \'type_constrained\', (int, _gteq1),\n """\n (default=5) Maximum number of batches to run in approximation of leverages for heteroskedastic correction.\n """\n , \'>= 1\'), \'lev_threshold_obs\': (100, \'type_constrained\', (int, _gteq1),\n """\n (default=100) Minimum number of observations with Pii >= threshold where batches will keep running in approximation of leverages for heteroskedastic correction. 
Once this threshold is met, remaining Pii above threshold will be recomputed analytically.\n """\n , \'>= 1\'), \'lev_threshold_pii\': (0.98, \'type_constrained\', (float,\n _0to1),\n """\n (default=0.98) Threshold Pii value for computing threshold number of Pii observations in approximation of leverages for heteroskedastic correction.\n """\n , \'in [0, 1]\'), \'levfile\': (\'\', \'type\', str,\n """\n (default=\'\') File to load precomputed leverages for heteroskedastic correction.\n """\n , None), \'out\': (\'res_fe.json\', \'type\', str,\n """\n (default=\'res_fe.json\') Outputfile where results are saved.\n """\n , None)})\n', (1144, 3990), False, 'from bipartitepandas import ParamsDict, logger_init\n'), ((5369, 5385), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (5377, 5385), True, 'import numpy as np\n'), ((5402, 5421), 'numpy.array', 'np.array', (['quantiles'], {}), '(quantiles)\n', (5410, 5421), True, 'import numpy as np\n'), ((5517, 5540), 'numpy.array', 'np.array', (['sample_weight'], {}), '(sample_weight)\n', (5525, 5540), True, 'import numpy as np\n'), ((6105, 6153), 'numpy.interp', 'np.interp', (['quantiles', 'weighted_quantiles', 'values'], {}), '(quantiles, weighted_quantiles, values)\n', (6114, 6153), True, 'import numpy as np\n'), ((5552, 5574), 'numpy.all', 'np.all', (['(quantiles >= 0)'], {}), '(quantiles >= 0)\n', (5558, 5574), True, 'import numpy as np\n'), ((5579, 5601), 'numpy.all', 'np.all', (['(quantiles <= 1)'], {}), '(quantiles <= 1)\n', (5585, 5601), True, 'import numpy as np\n'), ((5689, 5707), 'numpy.argsort', 'np.argsort', (['values'], {}), '(values)\n', (5699, 5707), True, 'import numpy as np\n'), ((5812, 5836), 'numpy.cumsum', 'np.cumsum', (['sample_weight'], {}), '(sample_weight)\n', (5821, 5836), True, 'import numpy as np\n'), ((6071, 6092), 'numpy.sum', 'np.sum', (['sample_weight'], {}), '(sample_weight)\n', (6077, 6092), True, 'import numpy as np\n'), ((6385, 6398), 'numpy.sum', 'np.sum', (['(w * v)'], {}), '(w * 
v)\n', (6391, 6398), True, 'import numpy as np\n'), ((6401, 6410), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (6407, 6410), True, 'import numpy as np\n'), ((6420, 6445), 'numpy.sum', 'np.sum', (['(w * (v - m0) ** 2)'], {}), '(w * (v - m0) ** 2)\n', (6426, 6445), True, 'import numpy as np\n'), ((6448, 6457), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (6454, 6457), True, 'import numpy as np\n'), ((6755, 6769), 'numpy.sum', 'np.sum', (['(w * v1)'], {}), '(w * v1)\n', (6761, 6769), True, 'import numpy as np\n'), ((6772, 6781), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (6778, 6781), True, 'import numpy as np\n'), ((6791, 6805), 'numpy.sum', 'np.sum', (['(w * v2)'], {}), '(w * v2)\n', (6797, 6805), True, 'import numpy as np\n'), ((6808, 6817), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (6814, 6817), True, 'import numpy as np\n'), ((6827, 6860), 'numpy.sum', 'np.sum', (['(w * (v1 - m1) * (v2 - m2))'], {}), '(w * (v1 - m1) * (v2 - m2))\n', (6833, 6860), True, 'import numpy as np\n'), ((6863, 6872), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (6869, 6872), True, 'import numpy as np\n'), ((7848, 7865), 'bipartitepandas.logger_init', 'logger_init', (['self'], {}), '(self)\n', (7859, 7865), False, 'from bipartitepandas import ParamsDict, logger_init\n'), ((9286, 9318), 'pyamg.ruge_stuben_solver', 'pyamg.ruge_stuben_solver', (['self.M'], {}), '(self.M)\n', (9310, 9318), False, 'import pyamg\n'), ((9961, 9988), 'numpy.random.default_rng', 'np.random.default_rng', (['None'], {}), '(None)\n', (9982, 9988), True, 'import numpy as np\n'), ((10362, 10373), 'time.time', 'time.time', ([], {}), '()\n', (10371, 10373), False, 'import time\n'), ((10634, 10661), 'numpy.random.default_rng', 'np.random.default_rng', (['None'], {}), '(None)\n', (10655, 10661), True, 'import numpy as np\n'), ((11724, 11735), 'time.time', 'time.time', ([], {}), '()\n', (11733, 11735), False, 'import time\n'), ((14720, 14731), 'numpy.sqrt', 'np.sqrt', (['Dp'], {}), '(Dp)\n', (14727, 14731), True, 
'import numpy as np\n'), ((14938, 14965), 'pyamg.ruge_stuben_solver', 'pyamg.ruge_stuben_solver', (['M'], {}), '(M)\n', (14962, 14965), False, 'import pyamg\n'), ((15352, 15373), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(11)'], {}), '(0, 1, 11)\n', (15363, 15373), True, 'import numpy as np\n'), ((20643, 20670), 'numpy.random.default_rng', 'np.random.default_rng', (['None'], {}), '(None)\n', (20664, 20670), True, 'import numpy as np\n'), ((21727, 21741), 'numpy.var', 'np.var', (['self.E'], {}), '(self.E)\n', (21733, 21741), True, 'import numpy as np\n'), ((22274, 22301), 'numpy.random.default_rng', 'np.random.default_rng', (['None'], {}), '(None)\n', (22295, 22301), True, 'import numpy as np\n'), ((22651, 22665), 'numpy.var', 'np.var', (['self.Y'], {}), '(self.Y)\n', (22657, 22665), True, 'import numpy as np\n'), ((24460, 24486), 'numpy.zeros', 'np.zeros', (['self.ndraw_trace'], {}), '(self.ndraw_trace)\n', (24468, 24486), True, 'import numpy as np\n'), ((24516, 24542), 'numpy.zeros', 'np.zeros', (['self.ndraw_trace'], {}), '(self.ndraw_trace)\n', (24524, 24542), True, 'import numpy as np\n'), ((24561, 24585), 'tqdm.trange', 'trange', (['self.ndraw_trace'], {}), '(self.ndraw_trace)\n', (24567, 24585), False, 'from tqdm import tqdm, trange\n'), ((29713, 29740), 'numpy.random.default_rng', 'np.random.default_rng', (['None'], {}), '(None)\n', (29734, 29740), True, 'import numpy as np\n'), ((30071, 30097), 'numpy.zeros', 'np.zeros', (['self.ndraw_trace'], {}), '(self.ndraw_trace)\n', (30079, 30097), True, 'import numpy as np\n'), ((30127, 30153), 'numpy.zeros', 'np.zeros', (['self.ndraw_trace'], {}), '(self.ndraw_trace)\n', (30135, 30153), True, 'import numpy as np\n'), ((30215, 30239), 'tqdm.trange', 'trange', (['self.ndraw_trace'], {}), '(self.ndraw_trace)\n', (30221, 30239), False, 'from tqdm import tqdm, trange\n'), ((31000, 31027), 'numpy.random.default_rng', 'np.random.default_rng', (['None'], {}), '(None)\n', (31021, 31027), True, 'import numpy as 
np\n'), ((31734, 31760), 'numpy.zeros', 'np.zeros', (['self.ndraw_trace'], {}), '(self.ndraw_trace)\n', (31742, 31760), True, 'import numpy as np\n'), ((31779, 31803), 'tqdm.trange', 'trange', (['self.ndraw_trace'], {}), '(self.ndraw_trace)\n', (31785, 31803), False, 'from tqdm import tqdm, trange\n'), ((32592, 32606), 'numpy.var', 'np.var', (['self.E'], {}), '(self.E)\n', (32598, 32606), True, 'import numpy as np\n'), ((33078, 33105), 'numpy.mean', 'np.mean', (['self.tr_var_ho_all'], {}), '(self.tr_var_ho_all)\n', (33085, 33105), True, 'import numpy as np\n'), ((33299, 33326), 'numpy.mean', 'np.mean', (['self.tr_cov_ho_all'], {}), '(self.tr_cov_ho_all)\n', (33306, 33326), True, 'import numpy as np\n'), ((35916, 35934), 'numpy.arange', 'np.arange', (['self.nf'], {}), '(self.nf)\n', (35925, 35934), True, 'import numpy as np\n'), ((35952, 35970), 'numpy.arange', 'np.arange', (['self.nw'], {}), '(self.nw)\n', (35961, 35970), True, 'import numpy as np\n'), ((39273, 39280), 'timeit.default_timer', 'timer', ([], {}), '()\n', (39278, 39280), True, 'from timeit import default_timer as timer\n'), ((42928, 42955), 'numpy.random.default_rng', 'np.random.default_rng', (['None'], {}), '(None)\n', (42949, 42955), True, 'import numpy as np\n'), ((43143, 43160), 'numpy.zeros', 'np.zeros', (['self.nn'], {}), '(self.nn)\n', (43151, 43160), True, 'import numpy as np\n'), ((49294, 49345), 'numpy.where', 'np.where', (['worker_m', "self.adata.loc[:, 'Sii']", 'Sii_j'], {}), "(worker_m, self.adata.loc[:, 'Sii'], Sii_j)\n", (49302, 49345), True, 'import numpy as np\n'), ((49651, 49678), 'numpy.random.default_rng', 'np.random.default_rng', (['None'], {}), '(None)\n', (49672, 49678), True, 'import numpy as np\n'), ((49984, 50001), 'numpy.zeros', 'np.zeros', (['self.nn'], {}), '(self.nn)\n', (49992, 50001), True, 'import numpy as np\n'), ((9632, 9651), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (9643, 9651), False, 'import pickle\n'), ((9911, 9937), 'pickle.dump', 
'pickle.dump', (['self', 'outfile'], {}), '(self, outfile)\n', (9922, 9937), False, 'import pickle\n'), ((16996, 17024), 'json.dump', 'json.dump', (['self.res', 'outfile'], {}), '(self.res, outfile)\n', (17005, 17024), False, 'import json\n'), ((17697, 17715), 'numpy.arange', 'np.arange', (['self.nn'], {}), '(self.nn)\n', (17706, 17715), True, 'import numpy as np\n'), ((17790, 17808), 'numpy.arange', 'np.arange', (['self.nn'], {}), '(self.nn)\n', (17799, 17808), True, 'import numpy as np\n'), ((21903, 21932), 'numpy.mean', 'np.mean', (['self.tr_sigma_ho_all'], {}), '(self.tr_sigma_ho_all)\n', (21910, 21932), True, 'import numpy as np\n'), ((23071, 23125), 'numpy.cov', 'np.cov', (['(Jq * self.psi_hat)', '(Wq * self.alpha_hat)'], {'ddof': '(0)'}), '(Jq * self.psi_hat, Wq * self.alpha_hat, ddof=0)\n', (23077, 23125), True, 'import numpy as np\n'), ((34304, 34331), 'numpy.mean', 'np.mean', (['self.tr_var_he_all'], {}), '(self.tr_var_he_all)\n', (34311, 34331), True, 'import numpy as np\n'), ((34368, 34395), 'numpy.mean', 'np.mean', (['self.tr_cov_he_all'], {}), '(self.tr_cov_he_all)\n', (34375, 34395), True, 'import numpy as np\n'), ((34435, 34461), 'numpy.std', 'np.std', (['self.tr_var_ho_all'], {}), '(self.tr_var_ho_all)\n', (34441, 34461), True, 'import numpy as np\n'), ((34501, 34527), 'numpy.std', 'np.std', (['self.tr_cov_ho_all'], {}), '(self.tr_cov_ho_all)\n', (34507, 34527), True, 'import numpy as np\n'), ((34567, 34593), 'numpy.std', 'np.std', (['self.tr_var_he_all'], {}), '(self.tr_var_he_all)\n', (34573, 34593), True, 'import numpy as np\n'), ((34633, 34659), 'numpy.std', 'np.std', (['self.tr_cov_he_all'], {}), '(self.tr_cov_he_all)\n', (34639, 34659), True, 'import numpy as np\n'), ((35668, 35696), 'json.dump', 'json.dump', (['self.res', 'outfile'], {}), '(self.res, outfile)\n', (35677, 35696), False, 'import json\n'), ((13884, 13900), 'numpy.ones', 'np.ones', (['self.nn'], {}), '(self.nn)\n', (13891, 13900), True, 'import numpy as np\n'), ((14121, 14137), 
'numpy.ones', 'np.ones', (['self.nn'], {}), '(self.nn)\n', (14128, 14137), True, 'import numpy as np\n'), ((18102, 18159), 'warnings.warn', 'warnings.warn', (['"""These Q options are not yet implemented."""'], {}), "('These Q options are not yet implemented.')\n", (18115, 18159), False, 'import warnings\n'), ((19875, 19886), 'numpy.ones', 'np.ones', (['nJ'], {}), '(nJ)\n', (19882, 19886), True, 'import numpy as np\n'), ((20325, 20336), 'numpy.ones', 'np.ones', (['nW'], {}), '(nW)\n', (20332, 20336), True, 'import numpy as np\n'), ((33196, 33222), 'numpy.std', 'np.std', (['self.tr_var_ho_all'], {}), '(self.tr_var_ho_all)\n', (33202, 33222), True, 'import numpy as np\n'), ((33417, 33443), 'numpy.std', 'np.std', (['self.tr_cov_ho_all'], {}), '(self.tr_cov_ho_all)\n', (33423, 33443), True, 'import numpy as np\n'), ((39447, 39454), 'timeit.default_timer', 'timer', ([], {}), '()\n', (39452, 39454), True, 'from timeit import default_timer as timer\n'), ((39715, 39722), 'timeit.default_timer', 'timer', ([], {}), '()\n', (39720, 39722), True, 'from timeit import default_timer as timer\n'), ((21987, 22014), 'numpy.sum', 'np.sum', (['(1 / self.Dp.data[0])'], {}), '(1 / self.Dp.data[0])\n', (21993, 22014), True, 'import numpy as np\n'), ((24934, 24952), 'numpy.cov', 'np.cov', (['R1', 'R2_psi'], {}), '(R1, R2_psi)\n', (24940, 24952), True, 'import numpy as np\n'), ((25079, 25099), 'numpy.cov', 'np.cov', (['R1', 'R2_alpha'], {}), '(R1, R2_alpha)\n', (25085, 25099), True, 'import numpy as np\n'), ((30744, 30766), 'numpy.cov', 'np.cov', (['R2_psi', 'R3_psi'], {}), '(R2_psi, R3_psi)\n', (30750, 30766), True, 'import numpy as np\n'), ((30809, 30833), 'numpy.cov', 'np.cov', (['R2_alpha', 'R3_psi'], {}), '(R2_alpha, R3_psi)\n', (30815, 30833), True, 'import numpy as np\n'), ((34754, 34780), 'numpy.std', 'np.std', (['self.tr_var_he_all'], {}), '(self.tr_var_he_all)\n', (34760, 34780), True, 'import numpy as np\n'), ((34877, 34903), 'numpy.std', 'np.std', (['self.tr_cov_he_all'], {}), 
'(self.tr_cov_he_all)\n', (34883, 34903), True, 'import numpy as np\n'), ((43975, 43985), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (43982, 43985), True, 'import numpy as np\n'), ((21546, 21565), 'numpy.power', 'np.power', (['self.E', '(2)'], {}), '(self.E, 2)\n', (21554, 21565), True, 'import numpy as np\n'), ((21575, 21594), 'numpy.power', 'np.power', (['self.Y', '(2)'], {}), '(self.Y, 2)\n', (21583, 21594), True, 'import numpy as np\n'), ((36078, 36091), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (36086, 36091), True, 'import numpy as np\n'), ((45173, 45198), 'multiprocessing.set_start_method', 'set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (45189, 45198), False, 'from multiprocessing import Pool, TimeoutError, Value, set_start_method\n'), ((46594, 46625), 'warnings.warn', 'warnings.warn', (['leverage_warning'], {}), '(leverage_warning)\n', (46607, 46625), False, 'import warnings\n'), ((44533, 44601), 'numpy.round', 'np.round', (["(ndraw_seeds * self.params['lev_batchsize_multiprocessing'])"], {}), "(ndraw_seeds * self.params['lev_batchsize_multiprocessing'])\n", (44541, 44601), True, 'import numpy as np\n'), ((45228, 45254), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.ncore'}), '(processes=self.ncore)\n', (45232, 45254), False, 'from multiprocessing import Pool, TimeoutError, Value, set_start_method\n'), ((47143, 47174), 'warnings.warn', 'warnings.warn', (['leverage_warning'], {}), '(leverage_warning)\n', (47156, 47174), False, 'import warnings\n'), ((45386, 45413), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (45407, 45413), True, 'import numpy as np\n')] |
import time
from collections import OrderedDict
import numpy as np
import theano
import theano.tensor as T
import lasagne
import custom_ops
class QuantizedDenseLayer(lasagne.layers.DenseLayer):
    """Dense layer whose forward pass uses a quantized GEMM kernel
    (custom_ops.quantized_gemm) instead of Theano's standard dot product."""
    def get_output_for(self, input, deterministic=False, **kwargs):
        """Compute the layer activation for `input`.

        Bug fix: removed the unreachable `return rvalue` statement that
        followed the real return (`rvalue` was never defined and would have
        raised NameError had it ever been reached).
        """
        if input.ndim > 2:
            # if the input has more than two dimensions, flatten it into a
            # batch of feature vectors.
            input = input.flatten(2)
        activation = custom_ops.quantized_gemm(x = input, w = self.W)
        if self.b is not None:
            # broadcast the bias across the batch dimension
            activation = activation + self.b.dimshuffle('x', 0)
        return self.nonlinearity(activation)
# Given a dataset and a model, this function trains the model on the dataset for several epochs
# (There is no default trainer function in Lasagne yet)
def train(train_fn,val_fn,
        model,
        batch_size,
        LR_start,LR_decay,LR_patience,patience,
        X_train,y_train,
        X_val,y_val,
        X_test,y_test,
        save_path=None,
        shuffle_parts=1):
    """Train `model` with early stopping on the validation error.

    Parameters
    ----------
    train_fn : callable(X_batch, y_batch, LR) -> loss
    val_fn : callable(X_batch, y_batch) -> (loss, error)
    model : the network whose parameters are saved on improvement
    batch_size : minibatch size (len(X) is processed in len(X)//batch_size batches)
    LR_start, LR_decay, LR_patience : learning rate, its decay factor, and the
        number of non-improving epochs before LR is decayed
    patience : training stops after this many consecutive non-improving epochs
    X_train, y_train, X_val, y_val, X_test, y_test : dataset splits
    save_path : if not None, best parameters are written here with np.savez
    shuffle_parts : shuffle the training set in this many chunks (memory bound)

    Bug fixes versus the original (Python-2 era) code:
    * integer division (`//`) so `range()` receives an int under Python 3;
    * `shuffled_range` is materialised as a list, since `np.random.shuffle`
      cannot shuffle an immutable `range` object;
    * `test_err`/`test_loss` are initialised so the final prints cannot raise
      NameError if validation never improves in the first epoch.
    """
    def shuffle(X,y):
        # Shuffle in `shuffle_parts` chunks so only one chunk is copied at a time.
        chunk_size = len(X) // shuffle_parts          # int division for Python 3
        shuffled_range = list(range(chunk_size))      # list: np.random.shuffle needs a mutable sequence
        X_buffer = np.copy(X[0:chunk_size])
        y_buffer = np.copy(y[0:chunk_size])
        for k in range(shuffle_parts):
            np.random.shuffle(shuffled_range)
            for i in range(chunk_size):
                X_buffer[i] = X[k*chunk_size+shuffled_range[i]]
                y_buffer[i] = y[k*chunk_size+shuffled_range[i]]
            X[k*chunk_size:(k+1)*chunk_size] = X_buffer
            y[k*chunk_size:(k+1)*chunk_size] = y_buffer
        return X,y
    # This function trains the model a full epoch (on the whole dataset)
    def train_epoch(X,y,LR):
        loss = 0
        batches = len(X) // batch_size
        for i in range(batches):
            loss += train_fn(X[i*batch_size:(i+1)*batch_size],y[i*batch_size:(i+1)*batch_size],LR)
        loss/=batches
        return loss
    # This function tests the model a full epoch (on the whole dataset)
    def val_epoch(X,y):
        err = 0
        loss = 0
        batches = len(X) // batch_size
        for i in range(batches):
            new_loss, new_err = val_fn(X[i*batch_size:(i+1)*batch_size], y[i*batch_size:(i+1)*batch_size])
            err += new_err
            loss += new_loss
        err = err / batches * 100
        loss /= batches
        return err, loss
    # shuffle the train set
    X_train,y_train = shuffle(X_train,y_train)
    best_val_err = 100
    best_epoch = 0
    LR = LR_start
    epoch=0
    LR_no_improv = 0
    no_improv = 0
    # defined even if validation never improves (avoids NameError in the prints)
    test_err, test_loss = None, None
    # We iterate over epochs until `patience` consecutive non-improving epochs:
    while no_improv <= patience:
        start_time = time.time()
        train_loss = train_epoch(X_train,y_train,LR)
        X_train,y_train = shuffle(X_train,y_train)
        val_err, val_loss = val_epoch(X_val,y_val)
        # test if validation error went down
        if val_err <= best_val_err:
            best_val_err = val_err
            best_epoch = epoch
            test_err, test_loss = val_epoch(X_test,y_test)
            if save_path is not None:
                np.savez(save_path, *lasagne.layers.get_all_param_values(model))
            LR_no_improv = 0
            no_improv = 0
        else:
            LR_no_improv+=1
            no_improv+=1
        # decay the learning rate after LR_patience stagnant epochs
        if LR_no_improv >= LR_patience:
            LR *= LR_decay
            LR_no_improv=0
        epoch +=1
        epoch_duration = time.time() - start_time
        # Then we print the results for this epoch:
        print("Epoch "+str(epoch)+" took "+str(epoch_duration)+"s")
        print("  LR:                            "+str(LR))
        print("  training loss:                 "+str(train_loss))
        print("  validation loss:               "+str(val_loss))
        print("  validation error rate:         "+str(val_err)+"%")
        print("  best epoch:                    "+str(best_epoch))
        print("  best validation error rate:    "+str(best_val_err)+"%")
        print("  test loss:                     "+str(test_loss))
        print("  test error rate:               "+str(test_err)+"%")
| [
"lasagne.layers.get_all_param_values",
"custom_ops.quantized_gemm",
"numpy.copy",
"time.time",
"numpy.random.shuffle"
] | [((511, 555), 'custom_ops.quantized_gemm', 'custom_ops.quantized_gemm', ([], {'x': 'input', 'w': 'self.W'}), '(x=input, w=self.W)\n', (536, 555), False, 'import custom_ops\n'), ((1414, 1438), 'numpy.copy', 'np.copy', (['X[0:chunk_size]'], {}), '(X[0:chunk_size])\n', (1421, 1438), True, 'import numpy as np\n'), ((1459, 1483), 'numpy.copy', 'np.copy', (['y[0:chunk_size]'], {}), '(y[0:chunk_size])\n', (1466, 1483), True, 'import numpy as np\n'), ((3163, 3174), 'time.time', 'time.time', ([], {}), '()\n', (3172, 3174), False, 'import time\n'), ((1561, 1594), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffled_range'], {}), '(shuffled_range)\n', (1578, 1594), True, 'import numpy as np\n'), ((4056, 4067), 'time.time', 'time.time', ([], {}), '()\n', (4065, 4067), False, 'import time\n'), ((3693, 3735), 'lasagne.layers.get_all_param_values', 'lasagne.layers.get_all_param_values', (['model'], {}), '(model)\n', (3728, 3735), False, 'import lasagne\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from ase.build import molecule
from graphdot import Graph
from graphdot.kernel.molecular import Tang2019MolecularKernel
def test_molecular_kernel():
    """The normalized kernel matrix has unit diagonal, both graph- and node-wise."""
    mols = [molecule(name) for name in ('H2', 'O2', 'CH4')]
    graphs = [Graph.from_ase(m) for m in mols]
    kernel = Tang2019MolecularKernel()
    # graph-level kernel matrix
    R = kernel(graphs)
    norm = np.diag(np.diag(R)**-0.5)
    K = norm.dot(R).dot(norm)
    assert R.shape == (3, 3)
    for i in range(len(mols)):
        assert K[i, i] == pytest.approx(1, 1e-6)
    # node-level kernel matrix
    R_nodal = kernel(graphs, nodal=True)
    norm_nodal = np.diag(np.diag(R_nodal)**-0.5)
    K_nodal = norm_nodal.dot(R_nodal).dot(norm_nodal)
    natoms = np.sum([len(m) for m in mols])
    assert R_nodal.shape == (natoms, natoms)
    for i in range(natoms):
        assert K_nodal[i, i] == pytest.approx(1, 1e-6)
def test_molecular_kernel_custom_pstart():
    """Zero starting probability on carbon zeroes the carbon rows/columns."""
    mols = [molecule(name) for name in ('H2', 'O2', 'CH4')]
    graphs = [Graph.from_ase(m) for m in mols]
    kernel_nocarbon = Tang2019MolecularKernel(
        starting_probability=(
            lambda ns: np.where(ns.element == 6, 0.0, 1.0),
            'n.element == 6 ? 0.f : 1.f'
        )
    )
    R_nocarbon_nodal = kernel_nocarbon(graphs, nodal=True)
    atom_index = 0
    for m in mols:
        for a in m:
            if a.symbol == 'C':
                # every kernel entry touching a carbon atom must vanish
                assert R_nocarbon_nodal[atom_index, :].sum() == 0
                assert R_nocarbon_nodal[:, atom_index].sum() == 0
            atom_index += 1
| [
"graphdot.kernel.molecular.Tang2019MolecularKernel",
"ase.build.molecule",
"graphdot.Graph.from_ase",
"numpy.where",
"numpy.diag",
"pytest.approx"
] | [((363, 388), 'graphdot.kernel.molecular.Tang2019MolecularKernel', 'Tang2019MolecularKernel', ([], {}), '()\n', (386, 388), False, 'from graphdot.kernel.molecular import Tang2019MolecularKernel\n'), ((247, 261), 'ase.build.molecule', 'molecule', (['"""H2"""'], {}), "('H2')\n", (255, 261), False, 'from ase.build import molecule\n'), ((263, 277), 'ase.build.molecule', 'molecule', (['"""O2"""'], {}), "('O2')\n", (271, 277), False, 'from ase.build import molecule\n'), ((279, 294), 'ase.build.molecule', 'molecule', (['"""CH4"""'], {}), "('CH4')\n", (287, 294), False, 'from ase.build import molecule\n'), ((311, 328), 'graphdot.Graph.from_ase', 'Graph.from_ase', (['m'], {}), '(m)\n', (325, 328), False, 'from graphdot import Graph\n'), ((966, 980), 'ase.build.molecule', 'molecule', (['"""H2"""'], {}), "('H2')\n", (974, 980), False, 'from ase.build import molecule\n'), ((982, 996), 'ase.build.molecule', 'molecule', (['"""O2"""'], {}), "('O2')\n", (990, 996), False, 'from ase.build import molecule\n'), ((998, 1013), 'ase.build.molecule', 'molecule', (['"""CH4"""'], {}), "('CH4')\n", (1006, 1013), False, 'from ase.build import molecule\n'), ((1030, 1047), 'graphdot.Graph.from_ase', 'Graph.from_ase', (['m'], {}), '(m)\n', (1044, 1047), False, 'from graphdot import Graph\n'), ((429, 439), 'numpy.diag', 'np.diag', (['R'], {}), '(R)\n', (436, 439), True, 'import numpy as np\n'), ((564, 587), 'pytest.approx', 'pytest.approx', (['(1)', '(1e-06)'], {}), '(1, 1e-06)\n', (577, 587), False, 'import pytest\n'), ((652, 668), 'numpy.diag', 'np.diag', (['R_nodal'], {}), '(R_nodal)\n', (659, 668), True, 'import numpy as np\n'), ((880, 903), 'pytest.approx', 'pytest.approx', (['(1)', '(1e-06)'], {}), '(1, 1e-06)\n', (893, 903), False, 'import pytest\n'), ((1170, 1205), 'numpy.where', 'np.where', (['(ns.element == 6)', '(0.0)', '(1.0)'], {}), '(ns.element == 6, 0.0, 1.0)\n', (1178, 1205), True, 'import numpy as np\n')] |
from __future__ import division
import math
import numpy as np
# --- person-detection / distance-estimation tuning constants ---
CONF_THRESHOLD = 10 # 48 # pct 68
# Aspect-ratio (pixel height / pixel width) bounds for accepting a detection
# as a person at all.
MIN_LIKELY_PERSON_ASPECT_RATIO = 1.0
MAX_LIKELY_PERSON_ASPECT_RATIO = 4.0
# Tighter aspect-ratio bounds used when deciding a person is standing.
MIN_STANDING_PERSON_ASPECT_RATIO = 1.6 # 1.9
MAX_STANDING_PERSON_ASPECT_RATIO = MAX_LIKELY_PERSON_ASPECT_RATIO
MASK_GRANULARITY_FACTOR = 2
# Steep-segment adjustment (see adjusted_distance_between_two_points):
# segments steeper than HEIGHT_ADJ_MIN_ABS_SLOPE are inflated by HEIGHT_ADJ_FACTOR.
HEIGHT_ADJ_FACTOR = 0.5
HEIGHT_ADJ_MIN_ABS_SLOPE = 0.5
MAX_DISTANCE = 999999.0  # sentinel meaning "no neighbour found"
# Minimum bounding-box area for a plausible person (image fraction / pixels).
MIN_LIKELY_PERSON_AREA = 0.0015
MIN_LIKELY_PERSON_AREA_PX = 615
CAM_FROM_TOP = 1 # always bottom edge of pic
MIN_DIFF_THRESHOLD = 0.03
NORMALIZE_NUM_FEET = 6 # each unit in the size mask represents this many feet
def slope_between_two_points(point1, point2):
    """Absolute slope |delta_top / delta_left| between two (top, left) points.

    Returns 0 when the two points share the same `left` coordinate, to
    avoid division by zero.
    """
    rise = abs(point2[0] - point1[0])
    run = abs(point2[1] - point1[1])
    if run == 0:
        return 0
    return rise / run
def likely_standing(conf, pxHeight, pxWidth):
    """Return True when a detection looks like a *standing* person.

    Requires confidence above CONF_THRESHOLD, a bounding box bigger than
    MIN_LIKELY_PERSON_AREA_PX pixels, and an aspect ratio inside the
    standing-person band.
    """
    ratio = pxHeight / pxWidth
    tall_enough_shape = (MIN_STANDING_PERSON_ASPECT_RATIO < ratio <
                         MAX_STANDING_PERSON_ASPECT_RATIO)
    covers_min_area = pxHeight * pxWidth > MIN_LIKELY_PERSON_AREA_PX
    return conf > CONF_THRESHOLD and covers_min_area and tall_enough_shape
def likely_a_person(conf, h, w, image_shape,
                    min_aspect_ratio=MIN_LIKELY_PERSON_ASPECT_RATIO,
                    max_aspect_ratio=MAX_LIKELY_PERSON_ASPECT_RATIO):
    """Decide whether a detection (confidence + normalized bbox) is a person.

    `h` and `w` are image fractions; `image_shape` is (width_px, height_px).
    Accepts when the confidence, the pixel area, and the aspect ratio all
    pass their thresholds.
    """
    px_h = h * image_shape[1]
    px_w = w * image_shape[0]
    ratio = px_h / px_w
    shape_ok = min_aspect_ratio < ratio < max_aspect_ratio
    area_ok = px_h * px_w > MIN_LIKELY_PERSON_AREA_PX
    if conf > CONF_THRESHOLD and area_ok and shape_ok:
        return True
    return False
def keep_likely_people(p, image_shape):
    """Filter a list of detections down to those that pass likely_a_person."""
    return [person for person in p
            if likely_a_person(person['Confidence'],
                              person['BoundingBox']['Height'],
                              person['BoundingBox']['Width'],
                              image_shape)]
def adjusted_distance_between_two_points(pos1, pos2, size_mask, cam_height, verbose):
    """Estimate the real-world distance (feet) between two image positions.

    Converts the raw image-fraction distance to feet using the size mask
    (which already folds in camera position/height), then inflates steep
    segments.  Returns (distance_ft, image_fraction_distance, abs(slope)).
    """
    if verbose:
        print('start {}, end {}'.format(pos1, pos2))
    raw_dist = euclidian_distance_between_two_points(pos1, pos2)
    # Locate the size-mask cell of each endpoint.  The mask factors account
    # for the camera left position as well as the camera height.
    cell_h = 1 / size_mask.shape[0]
    cell_w = 1 / size_mask.shape[1]
    size1 = size_mask[int(pos1[0] / cell_h), int(pos1[1] / cell_w)]
    size2 = size_mask[int(pos2[0] / cell_h), int(pos2[1] / cell_w)]
    # The smaller of the two mask values is the conversion baseline.
    baseline = min(size1, size2)
    dist_ft = raw_dist / baseline * NORMALIZE_NUM_FEET
    # Inflate steep segments: the size-mask model does not fully account for
    # the extra separation when one person is above another in the frame.
    slope = slope_between_two_points(pos1, pos2)
    if abs(slope) > HEIGHT_ADJ_MIN_ABS_SLOPE:
        dist_ft = dist_ft * (1 + HEIGHT_ADJ_FACTOR)
    if verbose:
        print('This is verbose')
    return dist_ft, raw_dist, abs(slope)
def distance_from_closest_person(which_person, people, size_mask, cam_height, verbose):
    """Find the nearest other person to `people[which_person]`.

    Returns (distance_ft, image_fraction_distance, closest_index, slope);
    the distance is MAX_DISTANCE and the index -1 when there is nobody else.
    """
    src = [people[which_person]['BoundingBox']['Top'],
           people[which_person]['BoundingBox']['Left']]
    best_ft = MAX_DISTANCE
    best_img = MAX_DISTANCE
    best_idx = -1
    best_slope = 0
    for idx, other in enumerate(people):
        if idx == which_person:
            continue
        dst = [other['BoundingBox']['Top'], other['BoundingBox']['Left']]
        ft, img, sl = adjusted_distance_between_two_points(
            src, dst, size_mask, cam_height, verbose)
        if verbose:
            print('This is verbose from distance_from_closest_person')
        if ft < best_ft:
            best_ft, best_img, best_idx, best_slope = ft, img, idx, sl
    return best_ft, best_img, best_idx, best_slope
def detect_distances(people, size_mask, image_shape, cam_height, verbose):
    """Filter detections to likely people and compute nearest-neighbour distances.

    Returns (likely_people, proximity_list) where each proximity entry is
    [person_idx, closest_idx, distance_ft, bbox_height, grid_row, grid_col,
     mask_value, top, left, confidence, pixel_aspect_ratio,
     image_fraction_distance, bbox_width, slope].

    Fix: removed the dead `which_person += 1` at the end of the loop body —
    the for loop rebinds `which_person` on every iteration, so the increment
    had no effect.
    """
    likely_people = keep_likely_people(people, image_shape)
    row_height = 1 / (size_mask.shape[0])
    col_width = 1 / (size_mask.shape[1])
    proximity_list = []
    for which_person in range(len(likely_people)):
        person = likely_people[which_person]
        top_pos = person['BoundingBox']['Top']
        left_pos = person['BoundingBox']['Left']
        height = person['BoundingBox']['Height']
        width = person['BoundingBox']['Width']
        conf = person['Confidence']
        # size-mask grid cell containing the detection's top-left corner
        row = int(top_pos / row_height)
        col = int(left_pos / col_width)
        pxHeight = height * image_shape[1]
        pxWidth = width * image_shape[0]
        pxAsp = pxHeight / pxWidth
        if len(likely_people) >= 2:
            distance, img_portion_dist, closest_person, slope = \
                distance_from_closest_person(which_person, likely_people,
                                             size_mask, cam_height, verbose)
        else:
            # a lone person has no neighbour to be close to
            distance = MAX_DISTANCE ; img_portion_dist = 0 ; closest_person = 0; slope = 0
        proximity_list.append([which_person, closest_person, distance, height,
                               row, col, size_mask[row, col],
                               top_pos, left_pos, conf, pxAsp,
                               img_portion_dist, width, slope])
    return likely_people, proximity_list
def dist_from_camera(cam_from_left, cam_height, grid_shape, r, c):
    """Distance from the camera to the top-left corner of grid cell (r, c).

    `r` and `c` are zero-based; the corner is expressed as image fractions
    before delegating to dist_from_camera_by_exact_coords.
    """
    corner = [(r / (grid_shape[0] - 1)),
              (c / (grid_shape[1] - 1))]
    return dist_from_camera_by_exact_coords(cam_from_left, cam_height, corner)
def dist_from_camera_by_exact_coords(cam_from_left, cam_height, top_left):
    """Euclidean distance from the camera (bottom edge, CAM_FROM_TOP) to a point."""
    cam_pos = [CAM_FROM_TOP, cam_from_left]
    return euclidian_distance_between_two_points(cam_pos, top_left)
def height_adjusted_distance_between_two_points(cam_height, point1, point2):
    """Euclidean distance between two points, discounted for steep segments.

    Bug fix: the original multiplied by (1 - HEIGHT_ADJ_DISCOUNT_FACTOR),
    but HEIGHT_ADJ_DISCOUNT_FACTOR is defined nowhere in this module, so any
    call with |slope| > HEIGHT_ADJ_MIN_ABS_SLOPE raised NameError.  The
    module-level HEIGHT_ADJ_FACTOR is used as the discount instead.
    NOTE(review): this function is only referenced from commented-out code;
    confirm the intended discount factor before relying on it.
    """
    base_dist = euclidian_distance_between_two_points(point1, point2)
    # calculate the slope, being sure not to divide by zero
    slope = slope_between_two_points(point1, point2)
    # discount steep segments: the base distance over-estimates separation
    # when one point is nearly above the other
    if abs(slope) > HEIGHT_ADJ_MIN_ABS_SLOPE:
        base_dist = base_dist * (1 - HEIGHT_ADJ_FACTOR)
    return base_dist
def euclidian_distance_between_two_points(point1, point2):
    """Plain 2-D Euclidean distance between two (top, left) points."""
    d_top = point1[0] - point2[0]
    d_left = point1[1] - point2[1]
    return math.sqrt(d_top**2 + d_left**2)
def find_ampl_beta(cam_from_left, cam_height, grid_shape, pos1, val1, pos2, val2):
    """Solve for the amplitude and beta of a radial Gaussian fixed by two samples.

    The Gaussian is val(r) = ampl * exp(-beta * r**2), with r the distance
    from the camera; (pos1, val1) and (pos2, val2) pin down both unknowns.
    """
    r1 = dist_from_camera(cam_from_left, cam_height, grid_shape, pos1[0], pos1[1])
    r2 = dist_from_camera(cam_from_left, cam_height, grid_shape, pos2[0], pos2[1])
    # avoid divide by 0 warning at runtime
    denom = (r1**2 - r2**2)
    if denom == 0:
        denom = 0.001
    beta = -np.log(val1 / val2) / denom
    ampl = val1 / np.exp(-beta * r1**2)
    return ampl, beta
def make_gaussian_mask(cam_from_left, cam_height, grid_shape, pos1, val1, pos2, val2):
    """Build a grid of Gaussian fall-off values centred on the camera position.

    Fits the Gaussian to the two (position, value) samples, then evaluates it
    at every grid cell, rounding to 3 decimals.
    """
    print('grid shape is {}'.format(grid_shape))
    print('grid shape type is {}'.format(type(grid_shape)))
    mask = np.zeros([int(grid_shape[0]), int(grid_shape[1])], dtype=float)
    print('make_gaussian_mask mask is {}'.format(mask))
    ampl, beta = find_ampl_beta(cam_from_left, cam_height, grid_shape, pos1, val1, pos2, val2)
    print('make_gaussian_mask ampl is {}'.format(ampl))
    print('make_gaussian_mask beta is {}'.format(beta))
    for row in range(grid_shape[0]):
        for col in range(grid_shape[1]):
            radius = dist_from_camera(cam_from_left, cam_height, grid_shape, row, col)
            mask[row, col] = np.around(ampl * np.exp(-beta * (radius**2)), 3)
    return mask
| [
"numpy.around",
"numpy.log",
"numpy.exp",
"math.sqrt"
] | [((8844, 8914), 'math.sqrt', 'math.sqrt', (['((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2)'], {}), '((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2)\n', (8853, 8914), False, 'import math\n'), ((9410, 9438), 'numpy.exp', 'np.exp', (['(-beta * radius1 ** 2)'], {}), '(-beta * radius1 ** 2)\n', (9416, 9438), True, 'import numpy as np\n'), ((9355, 9374), 'numpy.log', 'np.log', (['(val1 / val2)'], {}), '(val1 / val2)\n', (9361, 9374), True, 'import numpy as np\n'), ((10256, 10282), 'numpy.around', 'np.around', (['gaussian_val', '(3)'], {}), '(gaussian_val, 3)\n', (10265, 10282), True, 'import numpy as np\n'), ((10205, 10230), 'numpy.exp', 'np.exp', (['(-beta * dist ** 2)'], {}), '(-beta * dist ** 2)\n', (10211, 10230), True, 'import numpy as np\n')] |
#! /usr/bin/env python
import unittest
import numpy as np
from sdf_tools import utils_2d
class TestSDFTools(unittest.TestCase):
    def test_sdf_tools(self):
        """A lone occupied cell gives -res inside, +res at its 4-neighbours."""
        resolution = 0.05
        grid_w, grid_h = 20, 40
        occupancy = np.zeros([grid_h, grid_w], dtype=np.uint8)
        occupancy[1, 3] = 1
        # grid centred at the origin
        origin = [0 - grid_w / 2, 0 - grid_h / 2]
        sdf, sdf_gradient = utils_2d.compute_sdf_and_gradient(occupancy, resolution, origin)
        self.assertAlmostEqual(sdf[1, 3], -resolution)
        for row, col in ((2, 3), (0, 3), (1, 2), (1, 4)):
            self.assertAlmostEqual(sdf[row, col], resolution)
        self.assertGreater(sdf[3, 6], 3 * resolution)
        self.assertEqual(sdf.shape, (grid_h, grid_w))
        self.assertEqual(sdf_gradient.shape, (grid_h, grid_w, 2))
        np.testing.assert_allclose(sdf_gradient[1, 4], [1.5, 0])
np.testing.assert_allclose(sdf_gradient[1, 4], [1.5, 0])
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| [
"unittest.main",
"numpy.testing.assert_allclose",
"numpy.zeros",
"sdf_tools.utils_2d.compute_sdf_and_gradient"
] | [((1036, 1051), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1049, 1051), False, 'import unittest\n'), ((247, 292), 'numpy.zeros', 'np.zeros', (['[y_height, x_width]'], {'dtype': 'np.uint8'}), '([y_height, x_width], dtype=np.uint8)\n', (255, 292), True, 'import numpy as np\n'), ((464, 526), 'sdf_tools.utils_2d.compute_sdf_and_gradient', 'utils_2d.compute_sdf_and_gradient', (['grid_world', 'res', 'sdf_origin'], {}), '(grid_world, res, sdf_origin)\n', (497, 526), False, 'from sdf_tools import utils_2d\n'), ((946, 1002), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sdf_gradient[1, 4]', '[1.5, 0]'], {}), '(sdf_gradient[1, 4], [1.5, 0])\n', (972, 1002), True, 'import numpy as np\n')] |
import datetime
import os
import re
import requests
import urllib.parse
import time
from bs4 import BeautifulSoup
import html2text
import numpy as np
import pandas
# --- search / crawl configuration ---
search_key_word = 'climate'  # term submitted to the site search
search_key = 's'  # query-string parameter name used by the site search
url = r'https://thebfd.co.nz/'  # site root used to build the search URL
link_list_data_file_path = 'url-data.csv'  # where the scraped link list is checkpointed
delay_time_min = 0.  # lower bound (seconds) of the random inter-request delay
delay_time_max = 0.1  # upper bound (seconds) of the random inter-request delay
quick_save_period = 10  # checkpoint the link list every N result pages
def read_page_entries(page_soup, time_stamp, page_index, page_lists, do_print = True):
    """Append title/link records for every article entry on one result page.

    Each entry contributes one row across the parallel lists in `page_lists`
    (time_stamp, page, index_in_page, title, link).
    """
    entries = page_soup.find_all('div', 'td_module_16')
    if do_print:
        print(f'Page {page_index} has {len(entries)} entries')
    for position, entry in enumerate(entries):
        anchor = entry.find('h3', 'entry-title').find('a')
        page_lists['time_stamp'].append(time_stamp)
        page_lists['page'].append(page_index)
        page_lists['index_in_page'].append(position)
        page_lists['title'].append(anchor.attrs['title'])
        page_lists['link'].append(anchor.attrs['href'])
def quick_save(page_lists, data_file_path):
print(f'saving...')
data = pandas.DataFrame(page_lists)
data.to_csv(data_file_path, index = False)
# ------------------------------------------------------------------
# Stage 1: crawl the site search results and collect article links.
# ------------------------------------------------------------------
page_lists = {
    'time_stamp' : [],
    'page' : [],
    'index_in_page' : [],
    'title' : [],
    'link' : [],
}
# First request: fetch page 1 and discover the total number of result pages
# from the pagination widget.
request_code = urllib.parse.urlencode({ search_key : search_key_word })
search_url = url + '?' + request_code
scrap_time = time.time()
res = requests.get(search_url)
result_page = res.text
soup = BeautifulSoup(result_page, 'html.parser')
nb_result_pages = int(soup.find('div', 'page-nav').find('a', 'last').attrs['title'])
print(f'Found {nb_result_pages} pages')
read_page_entries(soup, scrap_time, 1, page_lists)
# Walk the remaining result pages, checkpointing the link list periodically.
for page_index in range(2, nb_result_pages + 1):
    url = r'https://thebfd.co.nz/page/' + f'{page_index}/?' + request_code
    time_stamp = time.time()
    res = requests.get(url)
    if(not res.ok):
        # Bug fix: the original message interpolated an undefined name `page`,
        # which would itself raise NameError instead of the intended error.
        raise Exception(f'*** request failed: {res} - page = {page_index} - url = {url} ***')
    read_page_entries(BeautifulSoup(res.text, 'html.parser'), time_stamp, page_index, page_lists)
    if((page_index % quick_save_period) == 0):
        quick_save(page_lists, link_list_data_file_path)
    # small randomized delay to be polite to the server
    delay_time = np.random.uniform(delay_time_min, delay_time_max)
    time.sleep(delay_time)
quick_save(page_lists, link_list_data_file_path)
"""
# repair page numbers
nb_items = len(page_lists['title'])
current_time_stamp = page_lists['time_stamp'][0]
current_page = 1
for item_index in range(1, nb_items):
next_time_stamp = page_lists['time_stamp'][item_index]
if(next_time_stamp > current_time_stamp):
current_page += 1
current_time_stamp = next_time_stamp
page_lists['page'] = current_page
quick_save(page_lists, link_list_data_file_path)
"""
"""
ENTRIES:
div, td_module_16
> h3, entry-title
>> a (link)
NEXT ENTRIES:
div, page-nav
> span, current: current page
> a, page: link, title=page
format:
https://thebfd.co.nz/page/{page_number}/?s=climate
> a, last: link, title=page (=total number of pages)
PAGE:
paywall: https://thebfd.co.nz/2020/10/17/the-truth-about-climate-change-part-ii/
span: "Subscriber only content"
open: https://thebfd.co.nz/2020/10/17/the-truth-about-climate-change-part-ii/
categories
title
subtitle
type of author (such as "Guest Post")
date-time
nb viewers (=0 ?)
nb comments ('leave a comment = 0)
author
author role
article (first letter is fancy)
>> content has bold, ita, links, videos, images
curated list of links to related articles
(other stuff and comments)
"""
# --- article-scraping output configuration ---
data_dir = 'the-bfd'  # root directory for scraped article data
data_html_path = os.path.join(data_dir, 'html')  # raw page HTML is stored here
data_text_path = os.path.join(data_dir, 'text')  # extracted plain-text bodies are stored here
save_every = 50  # checkpoint per-article metadata every N articles
page_content_data_file_path = 'pages.csv'  # CSV holding per-article metadata
def check_if_page_is_paywalled(page_soup):
    """Return True when the page shows the 'Subscriber only content' banner."""
    for h1 in page_soup.find_all('h1'):
        span = h1.find('span')
        if span is not None and span.text == 'Subscriber only content':
            return True
    return False
def make_page_path(page_index):
    """Zero-padded, 4-digit file stem for an article index."""
    return format(page_index, '04d')
def save_html(data_html_path, page_path, page_html):
    """Write the raw page HTML under the html data directory.

    Fix: use a context manager so the file handle is closed even if the
    write raises (the original left the file open on error).
    """
    html_file_path = os.path.join(data_html_path, page_path)
    with open(html_file_path, 'w') as f:
        f.write(page_html)
def save_text(data_text_path, page_path, content):
    """Write the extracted article text under the text data directory.

    Fix: use a context manager so the file handle is closed even if the
    write raises (the original left the file open on error).
    """
    text_file_path = os.path.join(data_text_path, page_path)
    with open(text_file_path, 'w') as f:
        f.write(content)
def process_link(page_index, page_url, page_data):
    """Fetch one article URL, parse its metadata, and append a row to `page_data`.

    Open (non-paywalled) articles also have their raw HTML and extracted
    body text saved to disk; paywalled articles get None for those fields.
    Raises when the HTTP request fails.
    """
    res = requests.get(page_url)
    if(not res.ok):
        # NOTE(review): the message interpolates the module-global `url`, not
        # `page_url` — looks like a copy/paste slip; confirm intent.
        raise Exception(f'*** Failed request for {url} : {res} ***')
    page_soup = BeautifulSoup(res.text, 'html.parser')
    header_soup = page_soup.find('header', 'td-post-title')
    # article categories (may be absent)
    categories = []
    categories_soup = header_soup.find('ul', 'td-category')
    if(categories_soup is not None):
        category_entries = categories_soup.find_all('li', 'entry-category')
        nb_categories = len(category_entries)
        for category_entry in category_entries:
            categories.append(category_entry.find('a').text)
    title = header_soup.find('h1', 'entry-title').text
    # subtitle is optional; find() returns None and .text raises AttributeError
    try:
        subtitle = header_soup.find('p', 'td-post-sub-title').text
    except(AttributeError):
        subtitle = ''
    meta_info_soup = header_soup.find('div', 'td-module-meta-info')
    author = meta_info_soup.find('div', 'td-post-author-name').find('a').text
    date_time = meta_info_soup.find('span', 'td-post-date').find('time').attrs['datetime']
    nb_views = int(meta_info_soup.find('div', 'td-post-views').find('span').text)
    # a page has 2 comments but appear as 0???
    nb_comments_text = meta_info_soup.find('div', 'td-post-comments').find('a').text
    if(nb_comments_text == 'Leave a Comment'):
        nb_comments = 0
    else:
        nb_comments = int(nb_comments_text.split()[0])
    is_paywalled = check_if_page_is_paywalled(page_soup)
    if(is_paywalled):
        # paywalled: no body to save, record placeholders only
        page_type = 'subscription only'
        page_html_path = None
        page_text_path = None
        introduction = None
    else:
        # open article: save the raw HTML and the extracted body text
        page_type = 'open'
        page_path = make_page_path(page_index)
        page_html_path = page_path + '.html'
        save_html(data_html_path, page_html_path, res.text)
        article_content = page_soup.find('div', 'td-post-content')
        article_text = '\n'.join([ html_to_text_converter.handle(str(p)) for p in article_content.find_all('p') ])
        page_text_path = page_path + '.txt'
        save_text(data_text_path, page_text_path, article_text)
        # optional highlighted intro paragraph (also part of the saved text)
        introduction = ''
        introduction_soup = article_content.find('p', 'wp-block-advanced-gutenberg-blocks-intro__content')
        if(introduction_soup is not None):
            introduction = html2text.html2text(str(introduction_soup))
    # append one row across the parallel lists of page_data
    page_data['id'].append(page_index)
    page_data['url'].append(page_url)
    page_data['categories'].append(categories)
    page_data['title'].append(title)
    page_data['subtitle'].append(subtitle)
    page_data['author'].append(author)
    page_data['date_time'].append(date_time)
    page_data['nb_views'].append(nb_views)
    page_data['nb_comments'].append(nb_comments)
    page_data['type'].append(page_type)
    page_data['introduction'].append(introduction)
    page_data['html_path'].append(page_html_path)
    page_data['text_path'].append(page_text_path)
# ------------------------------------------------------------------
# Stage 2: visit every collected link and scrape the article content.
# ------------------------------------------------------------------
html_to_text_converter = html2text.HTML2Text()
html_to_text_converter.body_width = 0  # disable line wrapping in extracted text
page_list = pandas.read_csv(link_list_data_file_path)
nb_links = len(page_list)
page_data = {
    'id' : [],
    'url' : [],
    'categories' : [],
    'title' : [],
    'subtitle' : [],
    'author' : [], # author or contribution
    'date_time' : [],
    'nb_views' : [],
    'nb_comments' : [],
    'type' : [], # open or subscription-only,
    # below apply only to 'open'
    'introduction' : [], # note: introduction is part of the text saved in text_path file
    'html_path' : [], # path to html content of the bare page
    'text_path' : [], # path to file containing the body of the article in pure text form (all the 'p' from div'td-post-content')
}
print(f'Scrapping through {nb_links} links...')
for page_index in range(nb_links):
    page_url = page_list.loc[page_index]['link']
    process_link(page_index, page_url, page_data)
    # periodic checkpoint so a crash does not lose everything
    if(((page_index + 1) % save_every) == 0):
        print('saving...')
        page_data_df = pandas.DataFrame(page_data)
        page_data_df.to_csv(page_content_data_file_path, index = False)
    #if(page_index >= 10):
    #    break
# final save of whatever was collected
page_data_df = pandas.DataFrame(page_data)
page_data_df.to_csv(page_content_data_file_path, index = False)
| [
"pandas.DataFrame",
"numpy.random.uniform",
"html2text.HTML2Text",
"pandas.read_csv",
"time.time",
"time.sleep",
"requests.get",
"bs4.BeautifulSoup",
"os.path.join"
] | [((1347, 1358), 'time.time', 'time.time', ([], {}), '()\n', (1356, 1358), False, 'import time\n'), ((1365, 1389), 'requests.get', 'requests.get', (['search_url'], {}), '(search_url)\n', (1377, 1389), False, 'import requests\n'), ((1421, 1462), 'bs4.BeautifulSoup', 'BeautifulSoup', (['result_page', '"""html.parser"""'], {}), "(result_page, 'html.parser')\n", (1434, 1462), False, 'from bs4 import BeautifulSoup\n'), ((3487, 3517), 'os.path.join', 'os.path.join', (['data_dir', '"""html"""'], {}), "(data_dir, 'html')\n", (3499, 3517), False, 'import os\n'), ((3535, 3565), 'os.path.join', 'os.path.join', (['data_dir', '"""text"""'], {}), "(data_dir, 'text')\n", (3547, 3565), False, 'import os\n'), ((7097, 7118), 'html2text.HTML2Text', 'html2text.HTML2Text', ([], {}), '()\n', (7116, 7118), False, 'import html2text\n'), ((7170, 7211), 'pandas.read_csv', 'pandas.read_csv', (['link_list_data_file_path'], {}), '(link_list_data_file_path)\n', (7185, 7211), False, 'import pandas\n'), ((8196, 8223), 'pandas.DataFrame', 'pandas.DataFrame', (['page_data'], {}), '(page_data)\n', (8212, 8223), False, 'import pandas\n'), ((1035, 1063), 'pandas.DataFrame', 'pandas.DataFrame', (['page_lists'], {}), '(page_lists)\n', (1051, 1063), False, 'import pandas\n'), ((1782, 1793), 'time.time', 'time.time', ([], {}), '()\n', (1791, 1793), False, 'import time\n'), ((1802, 1819), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1814, 1819), False, 'import requests\n'), ((2131, 2180), 'numpy.random.uniform', 'np.random.uniform', (['delay_time_min', 'delay_time_max'], {}), '(delay_time_min, delay_time_max)\n', (2148, 2180), True, 'import numpy as np\n'), ((2183, 2205), 'time.sleep', 'time.sleep', (['delay_time'], {}), '(delay_time)\n', (2193, 2205), False, 'import time\n'), ((4005, 4044), 'os.path.join', 'os.path.join', (['data_html_path', 'page_path'], {}), '(data_html_path, page_path)\n', (4017, 4044), False, 'import os\n'), ((4181, 4220), 'os.path.join', 'os.path.join', 
(['data_text_path', 'page_path'], {}), '(data_text_path, page_path)\n', (4193, 4220), False, 'import os\n'), ((4345, 4367), 'requests.get', 'requests.get', (['page_url'], {}), '(page_url)\n', (4357, 4367), False, 'import requests\n'), ((4470, 4508), 'bs4.BeautifulSoup', 'BeautifulSoup', (['res.text', '"""html.parser"""'], {}), "(res.text, 'html.parser')\n", (4483, 4508), False, 'from bs4 import BeautifulSoup\n'), ((1942, 1980), 'bs4.BeautifulSoup', 'BeautifulSoup', (['res.text', '"""html.parser"""'], {}), "(res.text, 'html.parser')\n", (1955, 1980), False, 'from bs4 import BeautifulSoup\n'), ((8047, 8074), 'pandas.DataFrame', 'pandas.DataFrame', (['page_data'], {}), '(page_data)\n', (8063, 8074), False, 'import pandas\n')] |
import numpy as np
from scipy import interpolate as interp
from scipy.fftpack import dct
from numpy import linalg as LA
import scipy.optimize as opt
import matplotlib.pyplot as plt
def smoothn(yin, w=None, s=None, robust=True, tolZ=1.0e-5, maxIter=100):
"""Perfom the penalized least-squares smoothing of data of Garcia, D. (2010)
http://www.biomecardio.com/matlab/smoothn.html
The smoothing allows for iterative robust smoothing with missing data.
The smoothing parameter can be automatically determined using a
generalized cross-validation score method.
Originally implemented in MATLAB by
AUTHOR <NAME>
Ported to python by
AUTHOR: <NAME>
***Currently limited to the 1D case***
For missing, corrupted, or in-transit data that you dont want
to influence the fit it, is not sufficient to set the weight
value to 0 for the bad data points. In addition to setting
the weight=0, you MUST also do one of the two following choices.
1) Also set the bad data to NaN in the data vector (y), and this
function will use linear interpolation across the gap
to fill in approximate values OR
2) Interpolate across the gap to fill in the bad data in the data vector
before calling this function
INPUT:
yin - data vector one wants to find a smoothing function for
w - [0-1] data weights
s - smoothing parameter if not specified it is determined with GCVS
robust - Perform iterative reweighting for outliers.
tolZ - relative tolerance for change between iterations
maxIter - maximum number of iterations for convergence
OUTPUT:
z - smoothed model for data vector
w - final weighting vector
s - final smoothing parameter
exitflag - flag if solution converged before maxIter
"""
# Force y to be numpy double array and a copy
y = np.array(yin, dtype=np.double, copy=True)
sizy = y.size
noe = sizy
if noe < 2: # Too few elements return and do nothging
z = y
return z
# Check for weights
# weighted fit is performed if w vector is an argument OR
# non finite values appear in data vector
isWeighted = False
if w is None:
w = np.full_like(y, 1.0)
else:
isWeighted = True
isFinite = np.isfinite(y)
nof = isFinite.sum()
if not isFinite.all():
isWeighted = True
w = np.where(isFinite, w, 0.0)
w = w / w.max()
# autosmoothing
isAuto = False
if s is None:
isAuto = True
# Creation of the Lambda tensor
lam = np.zeros_like(y)
lam = -2.0 + 2.0 * np.cos((np.linspace(1.0,sizy,sizy)-1.0)*np.pi/sizy)
if not isAuto:
gamma = 1.0 / (1.0 + s * lam**2)
#Upper and lower bounds of smoothness parameter
hMin = 5.0e-3
hMax = 0.99
usePow = 2.0
tmp = 1.0 + np.sqrt(1.0 + 8.0 * hMax**usePow)
sMinBnd = ((tmp / 4.0 / hMax**usePow)**2 - 1.0) / 16.0
tmp = 1.0 + np.sqrt(1.0 + 8.0 * hMin**usePow)
sMaxBnd = ((tmp / 4.0 / hMin**usePow)**2 - 1.0) / 16.0
# Initialize a rough guess at the smooth function if weighting is involved
wTot = w
if isWeighted:
z = initialGuess(y, np.isfinite(y))
else:
z = np.zeros_like(y)
z0 = z
# Do linear interpolation for nans in data vector
if not isFinite.all():
fullx = np.arange(len(y))
gdIdx = np.where(isFinite)[0]
tmpx = fullx[gdIdx]
tmpy = y[gdIdx]
funcinterp = interp.interp1d(tmpx, tmpy, kind='linear', bounds_error=False, fill_value=(tmpy[0], tmpy[-1]))
y = funcinterp(fullx)
tol = 1.0
robustIterativeProcess = True
robustStep = 1
nit = 0
# Relaxation Factor
RF = 1.0
if isWeighted:
RF = RF + 0.75
# Main iterative Loop
while robustIterativeProcess:
# amount of weights
aow = wTot.sum() / noe
while tol > tolZ and nit < maxIter:
nit = nit + 1
dcty = dct(wTot * (y - z) + z, type=2, norm='ortho')
if isAuto and np.remainder(np.log2(nit),1) == 0:
allOutput = opt.minimize_scalar(gcv, \
bounds=[np.log10(sMinBnd),np.log10(sMaxBnd)], \
args=(y, lam, dcty, wTot, isFinite, aow, noe, nof), \
method='bounded', tol=None, \
options={'xatol':1.0e-1})
p = allOutput['x']
s = 10.0**p
gamma = 1.0 / (1.0 + s * lam**2)
z = RF * dct(gamma * dcty, type=3, norm='ortho') + (1.0 - RF) * z
tol = LA.norm(z0 - z) / LA.norm(z)
if not isWeighted: # if no weighted/missing data tol=0.0 (no iter)
tol = 0.0
z0 = z # save last output
exitFlag = nit < maxIter
if robust: # robust smoothing iteratively re-weight outliers
# average leverage
h = np.sqrt(1.0 + 16.0 * s)
h = np.sqrt(1.0 + h) / np.sqrt(2.0) / h
# take robust weights into account
wTot = w * robustWeights(y-z, isFinite, h)
# reinitialize for another iteration
isWeighted = True
tol = 1.0
nit = 0
robustStep = robustStep +1
robustIterativeProcess = robustStep < 4 # Max of 3 robust steps
else:
robustIterativeProcess = False # No iterations needed
return z, w, s, exitFlag
def initialGuess(y, iFin):
    """Build a rough initial smooth estimate of the data vector.

    Missing (non-finite) samples are filled by linear interpolation, then
    the signal is low-pass filtered by keeping only the lowest ~10% of its
    DCT coefficients.

    :param y: 1D numpy data vector (may contain NaN/inf where iFin is False)
    :param iFin: boolean mask, True where y is finite
    :return: smoothed initial guess, same shape as y
    """
    z = y
    if not iFin.all():
        # Linearly interpolate across the bad samples; hold the first/last
        # finite value flat beyond the ends.
        fullx = np.arange(len(y))
        gdIdx = np.where(iFin)[0]
        tmpx = fullx[gdIdx]
        tmpy = y[gdIdx]
        funcinterp = interp.interp1d(tmpx, tmpy, kind='linear',
                                    bounds_error=False,
                                    fill_value=(tmpy[0], tmpy[-1]))
        z = funcinterp(fullx)
    # Crude low-pass: zero out all but the first ~10% of DCT modes.
    z = dct(z, type=2, norm='ortho')
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the correct replacement.
    zeroIdx = int(np.ceil(len(z) / 10))
    z[zeroIdx:] = 0.0
    z = dct(z, type=3, norm='ortho')
    return z
def gcv(p, y, lam, dcty, wTot, iFin, aow, noe, nof):
    """Generalized cross-validation score for the smoothing parameter 10**p.

    :param p: log10 of the candidate smoothing parameter
    :param y: data vector (only used in the weighted/missing-data branch)
    :param lam: Lambda eigenvalue tensor of the smoothing operator
    :param dcty: DCT of the current weighted residual-corrected signal
    :param wTot: total weight vector
    :param iFin: boolean mask of finite samples
    :param aow: fraction of active weights (sum(w)/noe)
    :param noe: number of elements in the data vector
    :param nof: number of finite elements
    :return: GCV score (lower is better)
    """
    smooth = 10.0 ** p
    shrink = 1.0 / (1.0 + smooth * lam ** 2)
    if aow > 0.9:
        # Nearly all weights are active: use the cheap spectral-domain
        # residual instead of reconstructing the signal.
        residual = dcty * (shrink - 1.0)
        rss = LA.norm(residual) ** 2
    else:
        # Reconstruct the smoothed signal and compare in the data domain,
        # restricted to finite samples and weighted by sqrt(w).
        yhat = dct(shrink * dcty, type=3, norm='ortho')
        good = np.where(iFin)[0]
        weighted_err = np.sqrt(wTot[good]) * (y[good] - yhat[good])
        rss = LA.norm(weighted_err) ** 2
    trace_h = shrink.sum()
    return rss / nof / (1.0 - trace_h / noe) ** 2
def robustWeights(r, iFin, h):
    """Tukey bisquare robust weights for residuals r with average leverage h.

    :param r: residual vector (data minus current smooth)
    :param iFin: boolean mask of finite samples
    :param h: average leverage of the smoother
    :return: weight vector in [0, 1]; outliers, missing samples and
        non-finite results get weight 0
    """
    finite_idx = np.where(iFin)[0]
    finite_res = r[finite_idx]
    # Robust scale estimate: median absolute deviation of the residuals.
    mad = np.median(np.abs(finite_res - np.median(finite_res)))
    # Studentized residuals (1.4826 makes MAD consistent with sigma),
    # scaled by the bisquare tuning constant 4.685.
    stud = np.abs(r / (1.4826 * mad) / np.sqrt(1. - h))
    u = stud / 4.685
    w = (1.0 - u * u) ** 2
    # Zero weight for outliers (u > 1), masked samples, and NaN/inf results.
    w = np.where(u > 1.0, 0.0, w)
    w = np.where(np.logical_not(iFin), 0.0, w)
    w = np.where(np.logical_not(np.isfinite(w)), 0.0, w)
    return w
# Demo: exercise smoothn on a noisy signal with outliers and missing data.
if __name__ == "__main__":
    x = np.linspace(0, 100, 2 ** 8)
    # Noisy test signal: cosine plus quadratic trend plus Gaussian noise.
    y = np.cos(x / 10.0) + (x / 50.0) ** 2 + np.random.randn(len(x)) / 10.0
    # Inject three artificial outliers.
    y[[69, 74, 79]] = np.array([5.5, 5, 6])

    # 1) Regular (non-robust) smoothing.
    plt.plot(x, y, '.')
    fit = smoothn(y, robust=False)
    plt.plot(x, fit[0], '-r')
    plt.show()

    # 2) Robust smoothing down-weights the outliers.
    fit_robust = smoothn(y, robust=True)
    plt.plot(x, y, '.')
    plt.plot(x, fit_robust[0], '-r')
    plt.show()

    # 3) Robust smoothing with a block of missing (NaN) samples.
    y_missing = np.array(y, copy=True)
    y_missing[100:110] = np.nan
    fit_missing = smoothn(y_missing, robust=True)
    plt.plot(x, y_missing, '.')
    plt.plot(x, fit_missing[0], '-r')
    plt.show()
| [
"numpy.full_like",
"numpy.zeros_like",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.median",
"numpy.log2",
"scipy.fftpack.dct",
"numpy.logical_not",
"numpy.isfinite",
"numpy.where",
"numpy.array",
"numpy.linalg.norm",
"numpy.linspace",
"numpy.cos",
"scipy.interpolate.interp... | [((1955, 1996), 'numpy.array', 'np.array', (['yin'], {'dtype': 'np.double', 'copy': '(True)'}), '(yin, dtype=np.double, copy=True)\n', (1963, 1996), True, 'import numpy as np\n'), ((2381, 2395), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (2392, 2395), True, 'import numpy as np\n'), ((2491, 2517), 'numpy.where', 'np.where', (['isFinite', 'w', '(0.0)'], {}), '(isFinite, w, 0.0)\n', (2499, 2517), True, 'import numpy as np\n'), ((2668, 2684), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (2681, 2684), True, 'import numpy as np\n'), ((5977, 6005), 'scipy.fftpack.dct', 'dct', (['z'], {'type': '(2)', 'norm': '"""ortho"""'}), "(z, type=2, norm='ortho')\n", (5980, 6005), False, 'from scipy.fftpack import dct\n'), ((6077, 6105), 'scipy.fftpack.dct', 'dct', (['z'], {'type': '(3)', 'norm': '"""ortho"""'}), "(z, type=3, norm='ortho')\n", (6080, 6105), False, 'from scipy.fftpack import dct\n'), ((6836, 6861), 'numpy.where', 'np.where', (['(u > 1.0)', '(0.0)', 'w'], {}), '(u > 1.0, 0.0, w)\n', (6844, 6861), True, 'import numpy as np\n'), ((7049, 7076), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(2 ** 8)'], {}), '(0, 100, 2 ** 8)\n', (7060, 7076), True, 'import numpy as np\n'), ((7162, 7183), 'numpy.array', 'np.array', (['[5.5, 5, 6]'], {}), '([5.5, 5, 6])\n', (7170, 7183), True, 'import numpy as np\n'), ((7188, 7207), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""."""'], {}), "(x, y, '.')\n", (7196, 7207), True, 'import matplotlib.pyplot as plt\n'), ((7263, 7286), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'z[0]', '"""-r"""'], {}), "(x, z[0], '-r')\n", (7271, 7286), True, 'import matplotlib.pyplot as plt\n'), ((7293, 7303), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7301, 7303), True, 'import matplotlib.pyplot as plt\n'), ((7360, 7379), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""."""'], {}), "(x, y, '.')\n", (7368, 7379), True, 'import matplotlib.pyplot as plt\n'), ((7382, 7406), 
'matplotlib.pyplot.plot', 'plt.plot', (['x', 'zr[0]', '"""-r"""'], {}), "(x, zr[0], '-r')\n", (7390, 7406), True, 'import matplotlib.pyplot as plt\n'), ((7413, 7423), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7421, 7423), True, 'import matplotlib.pyplot as plt\n'), ((7435, 7457), 'numpy.array', 'np.array', (['y'], {'copy': '(True)'}), '(y, copy=True)\n', (7443, 7457), True, 'import numpy as np\n'), ((7526, 7548), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'ynew', '"""."""'], {}), "(x, ynew, '.')\n", (7534, 7548), True, 'import matplotlib.pyplot as plt\n'), ((7551, 7576), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'zmr[0]', '"""-r"""'], {}), "(x, zmr[0], '-r')\n", (7559, 7576), True, 'import matplotlib.pyplot as plt\n'), ((7583, 7593), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7591, 7593), True, 'import matplotlib.pyplot as plt\n'), ((2309, 2329), 'numpy.full_like', 'np.full_like', (['y', '(1.0)'], {}), '(y, 1.0)\n', (2321, 2329), True, 'import numpy as np\n'), ((2950, 2985), 'numpy.sqrt', 'np.sqrt', (['(1.0 + 8.0 * hMax ** usePow)'], {}), '(1.0 + 8.0 * hMax ** usePow)\n', (2957, 2985), True, 'import numpy as np\n'), ((3059, 3094), 'numpy.sqrt', 'np.sqrt', (['(1.0 + 8.0 * hMin ** usePow)'], {}), '(1.0 + 8.0 * hMin ** usePow)\n', (3066, 3094), True, 'import numpy as np\n'), ((3334, 3350), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (3347, 3350), True, 'import numpy as np\n'), ((3592, 3691), 'scipy.interpolate.interp1d', 'interp.interp1d', (['tmpx', 'tmpy'], {'kind': '"""linear"""', 'bounds_error': '(False)', 'fill_value': '(tmpy[0], tmpy[-1])'}), "(tmpx, tmpy, kind='linear', bounds_error=False, fill_value=(\n tmpy[0], tmpy[-1]))\n", (3607, 3691), True, 'from scipy import interpolate as interp\n'), ((5845, 5944), 'scipy.interpolate.interp1d', 'interp.interp1d', (['tmpx', 'tmpy'], {'kind': '"""linear"""', 'bounds_error': '(False)', 'fill_value': '(tmpy[0], tmpy[-1])'}), "(tmpx, tmpy, kind='linear', bounds_error=False, 
fill_value=(\n tmpy[0], tmpy[-1]))\n", (5860, 5944), True, 'from scipy import interpolate as interp\n'), ((6320, 6359), 'scipy.fftpack.dct', 'dct', (['(gamma * dcty)'], {'type': '(3)', 'norm': '"""ortho"""'}), "(gamma * dcty, type=3, norm='ortho')\n", (6323, 6359), False, 'from scipy.fftpack import dct\n'), ((6591, 6605), 'numpy.where', 'np.where', (['iFin'], {}), '(iFin)\n', (6599, 6605), True, 'import numpy as np\n'), ((6879, 6899), 'numpy.logical_not', 'np.logical_not', (['iFin'], {}), '(iFin)\n', (6893, 6899), True, 'import numpy as np\n'), ((3296, 3310), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (3307, 3310), True, 'import numpy as np\n'), ((3497, 3515), 'numpy.where', 'np.where', (['isFinite'], {}), '(isFinite)\n', (3505, 3515), True, 'import numpy as np\n'), ((4088, 4133), 'scipy.fftpack.dct', 'dct', (['(wTot * (y - z) + z)'], {'type': '(2)', 'norm': '"""ortho"""'}), "(wTot * (y - z) + z, type=2, norm='ortho')\n", (4091, 4133), False, 'from scipy.fftpack import dct\n'), ((5060, 5083), 'numpy.sqrt', 'np.sqrt', (['(1.0 + 16.0 * s)'], {}), '(1.0 + 16.0 * s)\n', (5067, 5083), True, 'import numpy as np\n'), ((5754, 5768), 'numpy.where', 'np.where', (['iFin'], {}), '(iFin)\n', (5762, 5768), True, 'import numpy as np\n'), ((6262, 6291), 'numpy.linalg.norm', 'LA.norm', (['(dcty * (gamma - 1.0))'], {}), '(dcty * (gamma - 1.0))\n', (6269, 6291), True, 'from numpy import linalg as LA\n'), ((6376, 6390), 'numpy.where', 'np.where', (['iFin'], {}), '(iFin)\n', (6384, 6390), True, 'import numpy as np\n'), ((6724, 6740), 'numpy.sqrt', 'np.sqrt', (['(1.0 - h)'], {}), '(1.0 - h)\n', (6731, 6740), True, 'import numpy as np\n'), ((6941, 6955), 'numpy.isfinite', 'np.isfinite', (['w'], {}), '(w)\n', (6952, 6955), True, 'import numpy as np\n'), ((7081, 7097), 'numpy.cos', 'np.cos', (['(x / 10.0)'], {}), '(x / 10.0)\n', (7087, 7097), True, 'import numpy as np\n'), ((4730, 4745), 'numpy.linalg.norm', 'LA.norm', (['(z0 - z)'], {}), '(z0 - z)\n', (4737, 4745), True, 'from 
numpy import linalg as LA\n'), ((4748, 4758), 'numpy.linalg.norm', 'LA.norm', (['z'], {}), '(z)\n', (4755, 4758), True, 'from numpy import linalg as LA\n'), ((6644, 6663), 'numpy.median', 'np.median', (['r[gdIdx]'], {}), '(r[gdIdx])\n', (6653, 6663), True, 'import numpy as np\n'), ((4655, 4694), 'scipy.fftpack.dct', 'dct', (['(gamma * dcty)'], {'type': '(3)', 'norm': '"""ortho"""'}), "(gamma * dcty, type=3, norm='ortho')\n", (4658, 4694), False, 'from scipy.fftpack import dct\n'), ((5100, 5116), 'numpy.sqrt', 'np.sqrt', (['(1.0 + h)'], {}), '(1.0 + h)\n', (5107, 5116), True, 'import numpy as np\n'), ((5119, 5131), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (5126, 5131), True, 'import numpy as np\n'), ((6416, 6436), 'numpy.sqrt', 'np.sqrt', (['wTot[gdIdx]'], {}), '(wTot[gdIdx])\n', (6423, 6436), True, 'import numpy as np\n'), ((4173, 4185), 'numpy.log2', 'np.log2', (['nit'], {}), '(nit)\n', (4180, 4185), True, 'import numpy as np\n'), ((2716, 2744), 'numpy.linspace', 'np.linspace', (['(1.0)', 'sizy', 'sizy'], {}), '(1.0, sizy, sizy)\n', (2727, 2744), True, 'import numpy as np\n'), ((4282, 4299), 'numpy.log10', 'np.log10', (['sMinBnd'], {}), '(sMinBnd)\n', (4290, 4299), True, 'import numpy as np\n'), ((4300, 4317), 'numpy.log10', 'np.log10', (['sMaxBnd'], {}), '(sMaxBnd)\n', (4308, 4317), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
from functools import reduce
from operator import mul
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
from scipy.interpolate import interp1d
from scipy.signal import convolve
from scipy.special import hyp2f1
# Wavelet polynomial coefficients, indexed as COEFFS[q-1][p-1].
# Each inner list holds the coefficients of one wavelet polynomial with the
# constant term first (ARWavelet reverses the list before handing it to
# np.poly1d, which expects highest order first).  Row q provides the q
# variants admissible for order q (p ranges over 1..q).
COEFFS = [
    [[1]],
    [[-1, 2],
     [-2, 3]],
    [[1, -24, 30],
     [3, -16, 15],
     [4, -15, 12]],
    [[1, 4, -30, 28],
     [-4, 105, -300, 210],
     [-5, 48, -105, 64],
     [-16, 105, -192, 105]],
    [[1, 30, 210, -840, 630],
     [-5, -144, 1155, -2240, 1260],
     [22, -735, 3504, -5460, 2700],
     [35, -512, 1890, -2560, 1155],
     [32, -315, 960, -1155, 480]],
]
def binom(a, k):
    """Generalized binomial coefficient "a choose k" (a may be non-integer)."""
    result = 1.
    for i in range(1, k + 1):
        result *= (a + 1 - i) / i
    return result
class ARWavelet:
    """Piecewise-polynomial (anti)symmetric wavelet on the interval [0, 1].

    The wavelet of order (q, p) is built from the polynomial coefficients in
    ``COEFFS[q-1][p-1]``, mirrored about x = 1/2 with sign ``(-1)**(q+p-1)``,
    and normalized so the underlying polynomial has unit L2 norm on [0, 1].
    """

    def __init__(self, q, p):
        """Store coefficients, symmetry sign and L2 normalization for (q, p)."""
        assert q<6, 'q > 5 not implemented'
        assert p<=q, 'p must be smaller than q'
        self.q = q
        # +1: symmetric about x = 1/2; -1: antisymmetric.
        self.symmetry = (-1)**(q+p-1)
        self.coeffs = COEFFS[q-1][p-1]
        # L2 norm of the generating polynomial on [0, 1].
        self.norm = math.sqrt(quad(np.poly1d(self.coeffs[::-1])**2, 0, 1)[0])

    def as_piecewise_poly(self):
        """Return [left, right] polynomials in x for [0, 1/2] and [1/2, 1].

        Note: without normalization!
        """
        shift = np.poly1d([2, -1])  # maps x to 2x - 1
        w_poly = np.poly1d(self.coeffs[::-1])
        return [self.symmetry*w_poly(-shift), w_poly(shift)]

    def moment(self, m):
        """m-th moment of the wavelet via the closed-form coefficient sum."""
        return 2.**(-m-1.) / self.norm * np.sum([
            self.coeffs[i] * np.sum([
                binom(i, j) * (-1.)**j / (m+j+1.) \
                    * (self.symmetry + (-1.)**i * (2.**(j+m+1.) - 1.))
                for j in range(i+1)
            ])
            for i in range(self.q)
        ])

    def moment_quad(self, m):
        """m-th moment via adaptive quadrature (reference implementation)."""
        mom_coeffs = np.zeros(m+1)
        mom_coeffs[0] = 1.
        mom_poly = np.poly1d(mom_coeffs)
        w_poly = np.poly1d(self.coeffs[::-1])
        return quad(lambda x: self.symmetry*w_poly(1-2*x)*mom_poly(x)/self.norm, 0, .5)[0] \
            + quad(lambda x: w_poly(2*x-1)*mom_poly(x)/self.norm, .5, 1)[0]

    def moment_polyint(self, m):
        """m-th moment via exact piecewise polynomial integration.

        Should agree with moment() and moment_quad().
        """
        w_left, w_right = self.as_piecewise_poly()
        mom_coeffs = np.zeros(m+1)
        mom_coeffs[0] = 1.
        # piecewise polynomial scalar product with weight 1.
        P_left = np.polyint(w_left*mom_coeffs)
        P_right = np.polyint(w_right*mom_coeffs)
        # BUG FIX: the 1/norm factor must apply to the whole integral; the
        # original divided only the left-half contribution (precedence of /).
        return ((P_right(1.) - P_right(.5)) + (P_left(.5) - P_left(0.))) / self.norm

    def conv(self, t, n=0., k=0., H=1./3.):
        """Convolution of the wavelet with the |.|^(H-1/2) kernel at scale n.

        Uses the closed-form hypergeometric expressions in _conv_exact and
        exploits the wavelet's (anti)symmetry for t <= 0.
        """
        # t = 2.**n * t - k
        return 2.**(-n*H) * np.piecewise(
            t, [t>0, t<=0],
            [lambda t: self._conv_exact(t, H),
             lambda t: self.symmetry * self._conv_exact(1-t, H)]
        )

    def _conv_exact(self, t, H):
        """Closed-form convolution value for t > 0 (mirror handles t <= 0)."""
        # _conv_exact(t, H) for t>0
        # _conv_exact(1-t, H) for t<0
        h = .5 - H
        # h = 3/2 - H
        return np.sum([
            binom(i, j) * self.coeffs[i] * (-2.)**j * (
                self.symmetry * self._mono_conv_int(t, j, h, 0, .5)
                + (-1)**i * self._mono_conv_int(t, j, h, .5, 1)
            )
            for i in range(self.q)
            for j in range(i+1)
        ], axis=0) / self.norm

    def _mono_conv_int(self, t, n, h, a, b):
        """int_a^b |t-s|^(-h) s^n ds, split by the position of t vs [a, b]."""
        return np.piecewise(
            t, [(a<=t)&(t<=b), t<a, t>b],
            [lambda t: self._mci_larger(t, n, h, a, t) + self._mci_smaller(t, n, h, t, b),
             lambda t: self._mci_smaller(t, n, h, a, b),
             lambda t: self._mci_larger(t, n, h, a, b)]
        )

    def _mci_larger(self, t, j, h, a, b):
        """t>b>a: int_a^b (t-s)^(-h) s^j ds via the Gauss hypergeometric 2F1."""
        res = b**(j+1.) * hyp2f1(h, j+1., j+2., b/t)
        if isinstance(a, np.ndarray) or a != 0:
            res -= a**(j+1.) * hyp2f1(h, j+1., j+2., a/t)
        return t**(-h) * res / (j+1.)

    def _mci_smaller(self, t, j, h, a, b):
        """t<a<b: int_a^b (s-t)^(-h) s^j ds via the Gauss hypergeometric 2F1."""
        res = b**(j+1.-h) * hyp2f1(h, h-j-1., h-j, t/b)
        if isinstance(a, np.ndarray) or a != 0:
            res -= a**(j+1.-h) * hyp2f1(h, h-j-1., h-j, t/a)
        return res / (j+1.-h)

    def _conv_exact_naive(self, t, H):
        """Direct expansion of the convolution integral (kept for reference).

        NOTE(review): looks right, but does not agree with numerical results
        -- do not use without verifying against conv()/_conv_exact().
        """
        return np.sum([
            binom(i, j) * binom(j, k) * self.coeffs[i] * (-2.)**j * t**(j-k) / (k+H+.5) \
                * (self.symmetry * ((.5-t)**(k+1) * np.abs(.5-t)**(H-.5)
                                    - np.sign(.5-t) * (-t)**k * np.abs(t)**(H+.5))
                   + (-1.)**i * ((1.-t)**(k+1) * np.abs(1.-t)**(H-.5)
                                 - np.sign(1.-t) * (.5-t)**k * np.abs(.5-t)**(H+.5)))
            for i in range(self.q)
            for j in range(i+1)
            for k in range(j+1)
        ], axis=0) / self.norm
class ARWaveletNumerical(ARWavelet):
    """Sampled version of ARWavelet with numerical convolution helpers."""

    def __init__(self, q, p, num):
        """Sample the (q, p) wavelet on ``num`` points (num a power of two)."""
        self.p = p
        super().__init__(q, p)
        # BUG FIX: the original check was `assert ~(num & (num-1))`, which is
        # truthy for every num >= 0 (~x is 0 only when x == -1), so the
        # power-of-two assertion could never fire.
        assert num & (num - 1) == 0, 'num must be a power of two'
        self.num = num
        # self.t = np.linspace(-1, 1, 2*self.num+1)
        self.t = np.linspace(-2, 2, 4*self.num-1)
        t_half = np.linspace(0, 1, num//2)
        # Evaluate the wavelet polynomial on the right half via Horner's rule.
        f = self.coeffs[-1] + t_half*0
        for i in range(2, len(self.coeffs)+1):
            f = self.coeffs[-i] + f*t_half
        self.wavelet = np.zeros(num)
        # Left half is the (anti)symmetric mirror of the right half.
        self.wavelet[:num//2] = self.symmetry * f[::-1] / self.norm
        self.wavelet[num//2:] = f / self.norm

    def __call__(self, n=0., k=0.):
        """Return (sample positions, values) of the wavelet at scale n, shift k."""
        return (2.**(-n)*(np.linspace(0, 1, self.num)+k), 2.**(-n/2.)*self.wavelet)

    def _norm(self, t):
        """Regularized |t|: avoids the singularity at t = 0 by adding 1/num^2."""
        return np.sqrt(t**2+1./self.num**2)

    def conv_num(self, t, weight, H=1./3.):
        """Numerical convolution of the weighted wavelet with the |.|^(H-1/2) kernel.

        The weight vector is resampled onto the wavelet grid by linear
        interpolation; the result is offset so both tails decay to zero.
        """
        iw = interp1d(t[::t.size//(weight.size-1)], weight)(t[:-1])
        ker = self._norm(self.t)**(H-.5)
        w = self.wavelet*iw
        c = convolve(ker, w, mode='valid') / self.num
        c[:c.size//2] -= c[0]
        c[c.size//2:] -= c[-1]
        return c

    def conv_num_strain(self, H=1./3., cutoff=3.):
        """Numerical strain convolution with clipping of the singular peaks.

        The slices around the wavelet's endpoints (and, for p == 4, its
        midpoint) are presumed to contain the near-singular spikes; their
        magnitude is clipped to max(|rest|)/cutoff while keeping the sign of
        the dominant lobe.
        """
        ker = np.sign(self.t) * self._norm(self.t)**(H-1.5)
        conv = convolve(ker, self.wavelet, mode='valid') / self.num
        conv[:conv.size//2] -= conv[0]
        conv[conv.size//2:] -= conv[-1]
        abs_conv = np.abs(conv)
        # Windows around the endpoint singularities (views into conv, so the
        # clipping below mutates conv in place).
        l_slice = slice(self.num-32,self.num+12)
        r_slice = slice(2*self.num-12,2*self.num+32)
        l_view = conv[l_slice]
        r_view = conv[r_slice]
        l_sign = np.sign(l_view[np.argmax(abs_conv[l_slice])])
        r_sign = np.sign(r_view[np.argmax(abs_conv[r_slice])])
        abs_conv[l_slice] = 0.
        abs_conv[r_slice] = 0.
        if self.p == 4:
            # p == 4 wavelets also spike at the midpoint -- TODO confirm.
            m_slice = slice(int(1.5*self.num-32),int(1.5*self.num+32))
            m_view = conv[m_slice]
            abs_conv[m_slice] = 0.
            m_sign = +1
        # Clip spikes to a fraction of the largest non-singular magnitude.
        hh = np.max(abs_conv) / cutoff
        l_view[np.abs(l_view)>hh] = l_sign*hh
        r_view[np.abs(r_view)>hh] = r_sign*hh
        if self.p == 4:
            m_view[np.abs(m_view)>hh] = m_sign*hh
        return conv
# vim: set ff=unix tw=79 sw=4 ts=8 et ic ai :
| [
"numpy.poly1d",
"numpy.abs",
"numpy.argmax",
"scipy.special.hyp2f1",
"numpy.zeros",
"numpy.polyint",
"numpy.max",
"numpy.linspace",
"numpy.sign",
"scipy.interpolate.interp1d",
"scipy.signal.convolve",
"numpy.sqrt"
] | [((1178, 1196), 'numpy.poly1d', 'np.poly1d', (['[2, -1]'], {}), '([2, -1])\n', (1187, 1196), True, 'import numpy as np\n'), ((1214, 1242), 'numpy.poly1d', 'np.poly1d', (['self.coeffs[::-1]'], {}), '(self.coeffs[::-1])\n', (1223, 1242), True, 'import numpy as np\n'), ((1686, 1701), 'numpy.zeros', 'np.zeros', (['(m + 1)'], {}), '(m + 1)\n', (1694, 1701), True, 'import numpy as np\n'), ((1746, 1767), 'numpy.poly1d', 'np.poly1d', (['mom_coeffs'], {}), '(mom_coeffs)\n', (1755, 1767), True, 'import numpy as np\n'), ((1785, 1813), 'numpy.poly1d', 'np.poly1d', (['self.coeffs[::-1]'], {}), '(self.coeffs[::-1])\n', (1794, 1813), True, 'import numpy as np\n'), ((2093, 2108), 'numpy.zeros', 'np.zeros', (['(m + 1)'], {}), '(m + 1)\n', (2101, 2108), True, 'import numpy as np\n'), ((2214, 2245), 'numpy.polyint', 'np.polyint', (['(w_left * mom_coeffs)'], {}), '(w_left * mom_coeffs)\n', (2224, 2245), True, 'import numpy as np\n'), ((2262, 2294), 'numpy.polyint', 'np.polyint', (['(w_right * mom_coeffs)'], {}), '(w_right * mom_coeffs)\n', (2272, 2294), True, 'import numpy as np\n'), ((4940, 4976), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(4 * self.num - 1)'], {}), '(-2, 2, 4 * self.num - 1)\n', (4951, 4976), True, 'import numpy as np\n'), ((4990, 5017), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(num // 2)'], {}), '(0, 1, num // 2)\n', (5001, 5017), True, 'import numpy as np\n'), ((5170, 5183), 'numpy.zeros', 'np.zeros', (['num'], {}), '(num)\n', (5178, 5183), True, 'import numpy as np\n'), ((5461, 5498), 'numpy.sqrt', 'np.sqrt', (['(t ** 2 + 1.0 / self.num ** 2)'], {}), '(t ** 2 + 1.0 / self.num ** 2)\n', (5468, 5498), True, 'import numpy as np\n'), ((6085, 6097), 'numpy.abs', 'np.abs', (['conv'], {}), '(conv)\n', (6091, 6097), True, 'import numpy as np\n'), ((3560, 3594), 'scipy.special.hyp2f1', 'hyp2f1', (['h', '(j + 1.0)', '(j + 2.0)', '(b / t)'], {}), '(h, j + 1.0, j + 2.0, b / t)\n', (3566, 3594), False, 'from scipy.special import hyp2f1\n'), ((3846, 3882), 
'scipy.special.hyp2f1', 'hyp2f1', (['h', '(h - j - 1.0)', '(h - j)', '(t / b)'], {}), '(h, h - j - 1.0, h - j, t / b)\n', (3852, 3882), False, 'from scipy.special import hyp2f1\n'), ((5549, 5599), 'scipy.interpolate.interp1d', 'interp1d', (['t[::t.size // (weight.size - 1)]', 'weight'], {}), '(t[::t.size // (weight.size - 1)], weight)\n', (5557, 5599), False, 'from scipy.interpolate import interp1d\n'), ((5685, 5715), 'scipy.signal.convolve', 'convolve', (['ker', 'w'], {'mode': '"""valid"""'}), "(ker, w, mode='valid')\n", (5693, 5715), False, 'from scipy.signal import convolve\n'), ((5872, 5887), 'numpy.sign', 'np.sign', (['self.t'], {}), '(self.t)\n', (5879, 5887), True, 'import numpy as np\n'), ((5933, 5974), 'scipy.signal.convolve', 'convolve', (['ker', 'self.wavelet'], {'mode': '"""valid"""'}), "(ker, self.wavelet, mode='valid')\n", (5941, 5974), False, 'from scipy.signal import convolve\n'), ((6652, 6668), 'numpy.max', 'np.max', (['abs_conv'], {}), '(abs_conv)\n', (6658, 6668), True, 'import numpy as np\n'), ((3666, 3700), 'scipy.special.hyp2f1', 'hyp2f1', (['h', '(j + 1.0)', '(j + 2.0)', '(a / t)'], {}), '(h, j + 1.0, j + 2.0, a / t)\n', (3672, 3700), False, 'from scipy.special import hyp2f1\n'), ((3955, 3991), 'scipy.special.hyp2f1', 'hyp2f1', (['h', '(h - j - 1.0)', '(h - j)', '(t / a)'], {}), '(h, h - j - 1.0, h - j, t / a)\n', (3961, 3991), False, 'from scipy.special import hyp2f1\n'), ((6294, 6322), 'numpy.argmax', 'np.argmax', (['abs_conv[l_slice]'], {}), '(abs_conv[l_slice])\n', (6303, 6322), True, 'import numpy as np\n'), ((6357, 6385), 'numpy.argmax', 'np.argmax', (['abs_conv[r_slice]'], {}), '(abs_conv[r_slice])\n', (6366, 6385), True, 'import numpy as np\n'), ((6693, 6707), 'numpy.abs', 'np.abs', (['l_view'], {}), '(l_view)\n', (6699, 6707), True, 'import numpy as np\n'), ((6739, 6753), 'numpy.abs', 'np.abs', (['r_view'], {}), '(r_view)\n', (6745, 6753), True, 'import numpy as np\n'), ((5362, 5389), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 
'self.num'], {}), '(0, 1, self.num)\n', (5373, 5389), True, 'import numpy as np\n'), ((6813, 6827), 'numpy.abs', 'np.abs', (['m_view'], {}), '(m_view)\n', (6819, 6827), True, 'import numpy as np\n'), ((1051, 1079), 'numpy.poly1d', 'np.poly1d', (['self.coeffs[::-1]'], {}), '(self.coeffs[::-1])\n', (1060, 1079), True, 'import numpy as np\n'), ((4284, 4299), 'numpy.abs', 'np.abs', (['(0.5 - t)'], {}), '(0.5 - t)\n', (4290, 4299), True, 'import numpy as np\n'), ((4338, 4354), 'numpy.sign', 'np.sign', (['(0.5 - t)'], {}), '(0.5 - t)\n', (4345, 4354), True, 'import numpy as np\n'), ((4364, 4373), 'numpy.abs', 'np.abs', (['t'], {}), '(t)\n', (4370, 4373), True, 'import numpy as np\n'), ((4427, 4442), 'numpy.abs', 'np.abs', (['(1.0 - t)'], {}), '(1.0 - t)\n', (4433, 4442), True, 'import numpy as np\n'), ((4477, 4493), 'numpy.sign', 'np.sign', (['(1.0 - t)'], {}), '(1.0 - t)\n', (4484, 4493), True, 'import numpy as np\n'), ((4505, 4520), 'numpy.abs', 'np.abs', (['(0.5 - t)'], {}), '(0.5 - t)\n', (4511, 4520), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
NutritionEngine: This script is used to determine ideal weights
of suitable foods, So that our nutritional requirement is
satisfied Uses: Linear Regression, Gradient Descent for
cost optimization. Try the below example and follow the
console.
$ python example_google.py
Todo:
* Lower price selection of foods to get all nutrition in
optimal price
* Improving the performance of existing cost reduction function.
http://www.nutremo.com
http://www.nutremo.in
"""
import sys
import numpy as np
import utils.console as console
import utils.constants as constants
import utils.regression as regression
import utils.utils as utils
# Print floats without scientific notation for readable nutrient tables.
np.set_printoptions(suppress=True)
# Fix the RNG seed so the random theta initialization is reproducible.
np.random.seed(10)
def food_engine(food_list, theta_init, iterations):
    """Run gradient descent to find food weights meeting nutrient targets.

    :param food_list: list of food names to optimize over; if empty, foods
        come from the console (first run) or the cached input file.
    :param theta_init: initial weight matrix (numpy array); if empty, weights
        are randomized (first run) or restored from the previous-theta file.
    :param iterations: number of gradient-descent iterations; non-positive
        values fall back to constants.ITERATIONS.

    May recurse once with an adjusted food list chosen interactively.
    """
    previous_cost = constants.MAX_COST
    if iterations <= 0:
        iterations = constants.ITERATIONS
    # Resolve the food list: explicit argument > console choice > cached file.
    if food_list:
        final_foods = food_list
    elif constants.FIRST_RUN:
        final_foods = console.show_food_groups()
        open(constants.INPUT_FILE, 'w').write(','.join(final_foods))
    else:
        final_foods = open(constants.INPUT_FILE, 'r') \
            .read().split('\n')[0].split(',')
    daily_nutrients_limit_y = [
        constants.DAILY_NUTRIENTS_LIMIT[x]
        for x in constants.REQUIRED_NUTRIENT_LIST
    ]
    x, y, normalize_vector = utils.build_x_and_y(
        final_foods,
        constants.SAMPLE_SIZE,
        daily_nutrients_limit_y,
        constants.REQUIRED_NUTRIENT_LIST
    )
    foods_count = x.shape[1]
    nutrients_count = x.shape[2]
    # Resolve theta: explicit argument > random init > previous run's state.
    if theta_init.tolist():
        theta = theta_init
    elif constants.FIRST_RUN or food_list:
        # One random weight per food, replicated across nutrients.
        # (Comprehension variable renamed from `x` to avoid shadowing the
        # data tensor above.)
        theta = np.array(
            [[r] * nutrients_count for r in np.random.rand(foods_count)]
        )
    else:
        # NOTE(security): eval() on local cache files is acceptable only
        # because this script writes them itself; consider ast.literal_eval.
        theta = np.array(eval(open(constants.PREVIOUS_THETA_FILE, 'r').read()))
        previous_cost = eval(open(constants.PREVIOUS_COST_FILE, 'r').read())
    for loop in range(iterations):
        regression.gradient_descent(
            x,
            y,
            theta,
            alpha=constants.ALPHA,
            beta1=constants.BETA1,
            beta2=constants.BETA2,
            epsilon=constants.EPSILON,
            iteration=loop + 1
        )
        # Compute the cost once per iteration (the original evaluated
        # regression.compute_cost up to four times with identical arguments).
        current_cost = regression.compute_cost(x, theta, y)
        print(
            str(loop) + " Normalized cost",
            current_cost,
            "||||", "Original Cost",
            regression.compute_cost(x / normalize_vector,
                                    theta, y / normalize_vector)
        )
        if previous_cost >= current_cost:
            previous_cost = current_cost
        if loop % constants.SAVE_FREQUENCY == 0 and \
                current_cost <= previous_cost:
            # Persist the best-so-far state so later runs can resume.
            open(constants.PREVIOUS_THETA_FILE, 'w') \
                .write(str(theta.tolist()))
            open(constants.PREVIOUS_COST_FILE, 'w') \
                .write(str(previous_cost))
            open(constants.INPUT_FILE, 'w') \
                .write(','.join(final_foods))
    console.display_output_normalized(
        x,
        y,
        theta,
        final_foods,
        constants.REQUIRED_NUTRIENT_LIST,
        normalize_vector
    )
    console.display_output(
        x / normalize_vector,
        y / normalize_vector,
        theta,
        final_foods,
        constants.REQUIRED_NUTRIENT_LIST
    )
    output = utils.add_or_remove_foods(x, y, theta, final_foods)
    if output:
        # User chose to adjust the food list: re-run with the new setup.
        food_engine(output[0], output[1], output[2])
if constants.FIRST_RUN:
    # Interactive first-run setup: ask for body weight and macronutrient
    # targets, derive daily nutrient limits, persist them, start the engine.
    weight = int(input("enter your weight\n"))
    calories = int(input("Select required calories.\n"))
    carb_percentage = int(input(
        "select required Carbohydrates. Ideal percentage -> 45% to 60%"
    ))
    protein_percentage = int(input(
        "select required Protein percentage. Ideal percentage -> 10% to 35%"
    ))  # type: int
    fat_percentage = 100 - carb_percentage - protein_percentage
    # Calories contributed by each macronutrient.
    calories_by_carbs = (carb_percentage / 100.0) * calories
    calories_by_proteins = (protein_percentage / 100.0) * calories
    calories_by_fat = (
        (100 - carb_percentage - protein_percentage)
        / 100.0
    ) * calories
    if carb_percentage + protein_percentage > 95.0:
        print("Please enter correct percentages next time you use the App.")
        sys.exit(1)
    # Atwater factors: 4 kcal/g for carbs and protein, 9 kcal/g for fat.
    print(calories_by_carbs / 4.0, calories_by_proteins / 4.0,
          calories_by_fat / 9.0)
    constants.DAILY_NUTRIENTS_LIMIT[71] = calories
    constants.DAILY_NUTRIENTS_LIMIT[110] = calories_by_proteins / 4.0
    constants.DAILY_NUTRIENTS_LIMIT[61] = calories_by_carbs / 4.0
    # BUG FIX: fat supplies 9 kcal per gram, not 4 -- this now matches the
    # print above and the saturated-fat conversion below.
    constants.DAILY_NUTRIENTS_LIMIT[130] = calories_by_fat / 9.0
    # Saturated fat capped at min(fat%, 8%) of calories, converted to grams.
    constants.DAILY_NUTRIENTS_LIMIT[74] = (((min(fat_percentage, 8)) / 100.0)
                                           * calories) / 9.0
    # Limits scaled per kg of body weight (mg -> g); index meanings are
    # defined by constants.NUTRIENT_LIST -- TODO confirm against that table.
    constants.DAILY_NUTRIENTS_LIMIT[89] = (weight * 14) / 1000.0
    constants.DAILY_NUTRIENTS_LIMIT[92] = (weight * 19) / 1000.0
    constants.DAILY_NUTRIENTS_LIMIT[94] = (weight * 42) / 1000.0
    constants.DAILY_NUTRIENTS_LIMIT[97] = (weight * 38) / 1000.0
    constants.DAILY_NUTRIENTS_LIMIT[102] = (weight * 19) / 1000.0
    constants.DAILY_NUTRIENTS_LIMIT[105] = (weight * 33) / 1000.0
    constants.DAILY_NUTRIENTS_LIMIT[122] = (weight * 20) / 1000.0
    constants.DAILY_NUTRIENTS_LIMIT[131] = (weight * 5) / 1000.0
    constants.DAILY_NUTRIENTS_LIMIT[133] = (weight * 24) / 1000.0
    print(calories_by_proteins / 4.0)
    # Echo the configured limits for the nutrients the engine optimizes.
    for i in constants.REQUIRED_NUTRIENT_LIST:
        print("{0:<50}".format(constants.NUTRIENT_LIST[i][:35])
              + "{0:<20}".format(constants.DAILY_NUTRIENTS_LIMIT[i]))
    print("-" * 75)
    # Persist all limits so subsequent runs skip the interactive setup.
    dailyRequirementLimitNutrientsOut = [str(x)
                                         for x in
                                         constants.DAILY_NUTRIENTS_LIMIT]
    open(constants.DAILY_NUTRIENTS_LIMIT_FILE, 'w') \
        .write(','.join(dailyRequirementLimitNutrientsOut))
    food_engine(
        constants.INPUT_FOOD_LIST,
        np.array([[i] * 0 for i in np.random.rand(0)]),
        0
    )
else:
    # Subsequent runs: restore cached limits and resume the engine.
    constants.DAILY_NUTRIENTS_LIMIT = [
        float(x) for x in
        open(constants.DAILY_NUTRIENTS_LIMIT_FILE, 'r').read().split(',')
    ]
    food_engine([], np.array([[i] * 0 for i in np.random.rand(0)]), 0)
| [
"numpy.set_printoptions",
"numpy.random.seed",
"utils.regression.gradient_descent",
"utils.regression.compute_cost",
"utils.utils.add_or_remove_foods",
"utils.console.show_food_groups",
"utils.console.display_output",
"utils.console.display_output_normalized",
"utils.utils.build_x_and_y",
"numpy.r... | [((750, 784), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (769, 784), True, 'import numpy as np\n'), ((785, 803), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (799, 803), True, 'import numpy as np\n'), ((1433, 1551), 'utils.utils.build_x_and_y', 'utils.build_x_and_y', (['final_foods', 'constants.SAMPLE_SIZE', 'daily_nutrients_limit_y', 'constants.REQUIRED_NUTRIENT_LIST'], {}), '(final_foods, constants.SAMPLE_SIZE,\n daily_nutrients_limit_y, constants.REQUIRED_NUTRIENT_LIST)\n', (1452, 1551), True, 'import utils.utils as utils\n'), ((3225, 3341), 'utils.console.display_output_normalized', 'console.display_output_normalized', (['x', 'y', 'theta', 'final_foods', 'constants.REQUIRED_NUTRIENT_LIST', 'normalize_vector'], {}), '(x, y, theta, final_foods, constants.\n REQUIRED_NUTRIENT_LIST, normalize_vector)\n', (3258, 3341), True, 'import utils.console as console\n'), ((3395, 3519), 'utils.console.display_output', 'console.display_output', (['(x / normalize_vector)', '(y / normalize_vector)', 'theta', 'final_foods', 'constants.REQUIRED_NUTRIENT_LIST'], {}), '(x / normalize_vector, y / normalize_vector, theta,\n final_foods, constants.REQUIRED_NUTRIENT_LIST)\n', (3417, 3519), True, 'import utils.console as console\n'), ((3575, 3626), 'utils.utils.add_or_remove_foods', 'utils.add_or_remove_foods', (['x', 'y', 'theta', 'final_foods'], {}), '(x, y, theta, final_foods)\n', (3600, 3626), True, 'import utils.utils as utils\n'), ((2103, 2268), 'utils.regression.gradient_descent', 'regression.gradient_descent', (['x', 'y', 'theta'], {'alpha': 'constants.ALPHA', 'beta1': 'constants.BETA1', 'beta2': 'constants.BETA2', 'epsilon': 'constants.EPSILON', 'iteration': '(loop + 1)'}), '(x, y, theta, alpha=constants.ALPHA, beta1=\n constants.BETA1, beta2=constants.BETA2, epsilon=constants.EPSILON,\n iteration=loop + 1)\n', (2130, 2268), True, 'import utils.regression as regression\n'), ((4573, 4584), 
'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4581, 4584), False, 'import sys\n'), ((1065, 1091), 'utils.console.show_food_groups', 'console.show_food_groups', ([], {}), '()\n', (1089, 1091), True, 'import utils.console as console\n'), ((2437, 2473), 'utils.regression.compute_cost', 'regression.compute_cost', (['x', 'theta', 'y'], {}), '(x, theta, y)\n', (2460, 2473), True, 'import utils.regression as regression\n'), ((2524, 2598), 'utils.regression.compute_cost', 'regression.compute_cost', (['(x / normalize_vector)', 'theta', '(y / normalize_vector)'], {}), '(x / normalize_vector, theta, y / normalize_vector)\n', (2547, 2598), True, 'import utils.regression as regression\n'), ((2673, 2709), 'utils.regression.compute_cost', 'regression.compute_cost', (['x', 'theta', 'y'], {}), '(x, theta, y)\n', (2696, 2709), True, 'import utils.regression as regression\n'), ((2739, 2775), 'utils.regression.compute_cost', 'regression.compute_cost', (['x', 'theta', 'y'], {}), '(x, theta, y)\n', (2762, 2775), True, 'import utils.regression as regression\n'), ((2854, 2890), 'utils.regression.compute_cost', 'regression.compute_cost', (['x', 'theta', 'y'], {}), '(x, theta, y)\n', (2877, 2890), True, 'import utils.regression as regression\n'), ((6277, 6294), 'numpy.random.rand', 'np.random.rand', (['(0)'], {}), '(0)\n', (6291, 6294), True, 'import numpy as np\n'), ((6513, 6530), 'numpy.random.rand', 'np.random.rand', (['(0)'], {}), '(0)\n', (6527, 6530), True, 'import numpy as np\n'), ((1853, 1880), 'numpy.random.rand', 'np.random.rand', (['foods_count'], {}), '(foods_count)\n', (1867, 1880), True, 'import numpy as np\n')] |
"""some utility functions for geometry calculation."""
import numpy as np
import numba
def surface_equ_3d(polygon_surfaces):
""" compute the normal vector and d (3d surface equation parameters) of a set of polygons
Given a series of polygon points x, y, z, compute the corresponding plane parameters a, b, c and d, which
satisfies ax+by+cz+d=0, and the normal vector (a, b, c) should point to the inner part of the object.
# polygon_surfaces: [num_polygon, num_surfaces, num_points_of_polygon, 3]
:param polygon_surfaces: array [N_poly, N_surfaces, N_points_of_a_surface, 3]
:return: normal vector and d (surface equation parameters): array [N_poly, N_num_surface, 3/1]
"""
# compute the edge vector v0->v1 and v1->v2, [num_polygon, num_surfaces, 2, 3]
surface_vec = polygon_surfaces[:, :, :2, :] - polygon_surfaces[:, :, 1:3, :]
# normal_vec: [..., 3], [num_polygon, num_surfaces, 3], the normal vec points to the inner space of the object
normal_vec = np.cross(surface_vec[:, :, 0, :], surface_vec[:, :, 1, :])
# print(normal_vec.shape, points[..., 0, :].shape)
# d = -np.inner(normal_vec, points[..., 0, :]), pick a random point to compute the offset d
d = np.einsum('aij, aij->ai', normal_vec, polygon_surfaces[:, :, 0, :])
return normal_vec, -d
@numba.njit
def _points_in_convex_polygon_3d_jit(points, polygon_surfaces, normal_vec, d, num_surfaces):
"""check points is in 3d convex polygons.
:param points: [N_points, 3]
:param polygon_surfaces: [N_poly, max_num_surfaces, max_num_points_of_surface, 3]
:param normal_vec: [N_poly, max_num_surfaces, 3]
:param d: [N_poly, max_num_surfaces]
:param num_surfaces: [N_poly], may not be used, just set to a large number like 99999
:return: bool array: [N_point, N_poly]
"""
max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3]
num_points = points.shape[0]
num_poly = polygon_surfaces.shape[0]
ret = np.ones((num_points, num_poly), dtype=np.bool_)
sign = 0.0
for i in range(num_points): # for each point
for j in range(num_poly): # for each polyhedron
for k in range(max_num_surfaces):
if k > num_surfaces[j]:
break
sign = points[i, 0] * normal_vec[j, k, 0] \
+ points[i, 1] * normal_vec[j, k, 1] \
+ points[i, 2] * normal_vec[j, k, 2] + d[j, k]
if sign >= 0:
ret[i, j] = False
break
return ret
def points_in_convex_polygon_3d_jit(points,
polygon_surfaces,
num_surfaces=None):
"""
Args:
points: [num_points, 3] array.
polygon_surfaces: [num_polygon, max_num_surfaces, max_num_points_of_surface, 3] array.
all surfaces' normal vector must direct to internal.
max_num_points_of_surface must at least 3.
num_surfaces: [num_polygon] array. indicate how many surfaces
a polygon contain
Returns:
[num_points, num_polygon] bool array.
"""
max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3]
num_points = points.shape[0]
num_polygons = polygon_surfaces.shape[0] # actually num of polyhedron
if num_surfaces is None:
num_surfaces = np.full((num_polygons,), 9999999, dtype=np.int64)
normal_vec, d = surface_equ_3d(polygon_surfaces[:, :, :3, :]) # normal vec can be computed with only 3 points
# normal_vec: [num_polygon, max_num_surfaces, 3]
# d: [num_polygon, max_num_surfaces]
return _points_in_convex_polygon_3d_jit(points, polygon_surfaces, normal_vec, d, num_surfaces)
@numba.jit
def points_in_convex_polygon_jit(points, polygon, clockwise=True):
"""check points is in 2d convex polygons. True when point in polygon
Args:
points: [num_points, 2] array.
polygon: [num_polygon, num_points_of_polygon, 2] array.
clockwise: bool. indicate polygon is clockwise.
Returns:
[num_points, num_polygon] bool array.
"""
# first convert polygon to directed lines
num_points_of_polygon = polygon.shape[1]
num_points = points.shape[0]
num_polygons = polygon.shape[0]
if clockwise:
vec1 = polygon - polygon[:, [num_points_of_polygon - 1] +
list(range(num_points_of_polygon - 1)), :]
else:
vec1 = polygon[:, [num_points_of_polygon - 1] +
list(range(num_points_of_polygon - 1)), :] - polygon
# vec1: [num_polygon, num_points_of_polygon, 2]
ret = np.zeros((num_points, num_polygons), dtype=np.bool_)
success = True
cross = 0.0
for i in range(num_points):
for j in range(num_polygons):
success = True
for k in range(num_points_of_polygon):
cross = vec1[j, k, 1] * (polygon[j, k, 0] - points[i, 0])
cross -= vec1[j, k, 0] * (polygon[j, k, 1] - points[i, 1])
if cross >= 0:
success = False
break
ret[i, j] = success
return ret
| [
"numpy.full",
"numpy.zeros",
"numpy.ones",
"numpy.cross",
"numpy.einsum"
] | [((1002, 1060), 'numpy.cross', 'np.cross', (['surface_vec[:, :, 0, :]', 'surface_vec[:, :, 1, :]'], {}), '(surface_vec[:, :, 0, :], surface_vec[:, :, 1, :])\n', (1010, 1060), True, 'import numpy as np\n'), ((1220, 1287), 'numpy.einsum', 'np.einsum', (['"""aij, aij->ai"""', 'normal_vec', 'polygon_surfaces[:, :, 0, :]'], {}), "('aij, aij->ai', normal_vec, polygon_surfaces[:, :, 0, :])\n", (1229, 1287), True, 'import numpy as np\n'), ((1985, 2032), 'numpy.ones', 'np.ones', (['(num_points, num_poly)'], {'dtype': 'np.bool_'}), '((num_points, num_poly), dtype=np.bool_)\n', (1992, 2032), True, 'import numpy as np\n'), ((4668, 4720), 'numpy.zeros', 'np.zeros', (['(num_points, num_polygons)'], {'dtype': 'np.bool_'}), '((num_points, num_polygons), dtype=np.bool_)\n', (4676, 4720), True, 'import numpy as np\n'), ((3397, 3446), 'numpy.full', 'np.full', (['(num_polygons,)', '(9999999)'], {'dtype': 'np.int64'}), '((num_polygons,), 9999999, dtype=np.int64)\n', (3404, 3446), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from ..hardware import *
from qupulse.hardware.dacs.alazar import AlazarCard, AlazarProgram
class AlazarProgramTest(unittest.TestCase):
def test_iter(self):
args = ([], [], 0)
program = AlazarProgram(*args)
for x, y in zip(program, args):
self.assertIs(x, y)
class AlazarTest(unittest.TestCase):
def test_add_mask_prototype(self):
card = AlazarCard(None)
card.register_mask_for_channel('M', 3, 'auto')
self.assertEqual(card._mask_prototypes, dict(M=(3, 'auto')))
with self.assertRaises(ValueError):
card.register_mask_for_channel('M', 'A', 'auto')
with self.assertRaises(NotImplementedError):
card.register_mask_for_channel('M', 1, 'periodic')
def test_make_mask(self):
card = AlazarCard(None)
card.register_mask_for_channel('M', 3, 'auto')
begins = np.arange(15, dtype=np.uint64)*16
lengths = 1+np.arange(15, dtype=np.uint64)
with self.assertRaises(KeyError):
card._make_mask('N', begins, lengths)
with self.assertRaises(ValueError):
card._make_mask('M', begins, lengths*3)
mask = card._make_mask('M', begins, lengths)
self.assertEqual(mask.identifier, 'M')
np.testing.assert_equal(mask.begin, begins)
np.testing.assert_equal(mask.length, lengths)
self.assertEqual(mask.channel, 3)
def test_register_measurement_windows(self):
raw_card = dummy_modules.dummy_atsaverage.core.AlazarCard()
card = AlazarCard(raw_card)
self.assertIs(card.card, raw_card)
card.register_mask_for_channel('A', 3, 'auto')
card.register_mask_for_channel('B', 1, 'auto')
card.config = dummy_modules.dummy_atsaverage.config.ScanlineConfiguration()
card.register_measurement_windows('empty', dict())
begins = np.arange(100)*176.5
lengths = np.ones(100)*10*np.pi
card.register_measurement_windows('otto', dict(A=(begins, lengths)))
self.assertEqual(set(card._registered_programs.keys()), {'empty', 'otto'})
self.assertEqual(card._registered_programs['empty'].masks, [])
expected_begins = np.rint(begins / 10).astype(dtype=np.uint64)
np.testing.assert_equal(card._registered_programs['otto'].masks[0].begin, expected_begins)
# pi ist genau 3
length = card._registered_programs['otto'].masks[0].length
np.testing.assert_equal(length if isinstance(length, np.ndarray) else length.as_ndarray(), 3)
self.assertEqual(card._registered_programs['otto'].masks[0].channel, 3)
self.assertEqual(card._registered_programs['otto'].masks[0].identifier, 'A')
def test_register_operations(self):
card = AlazarCard(None)
operations = 'this is no operatoin but a string'
card.register_operations('test', operations)
self.assertEqual(len(card._registered_programs), 1)
self.assertIs(card._registered_programs['test'].operations, operations)
def test_mask_prototypes(self):
card = AlazarCard(None)
self.assertIs(card.mask_prototypes, card._mask_prototypes)
def test_arm_operation(self):
raw_card = dummy_modules.dummy_atsaverage.core.AlazarCard()
card = AlazarCard(raw_card)
card.register_mask_for_channel('A', 3, 'auto')
card.register_mask_for_channel('B', 1, 'auto')
card.register_operations('otto', [])
card.config = dummy_modules.dummy_atsaverage.config.ScanlineConfiguration()
with self.assertRaises(RuntimeError):
card.arm_program('otto')
card.register_operations('otto', ['asd'])
with self.assertRaises(RuntimeError):
card.arm_program('otto')
begins = np.arange(100) * 176.5
lengths = np.ones(100) * 10 * np.pi
card.register_measurement_windows('otto', dict(A=(begins, lengths)))
card.config.totalRecordSize = 17
with self.assertRaises(ValueError):
card.arm_program('otto')
card.config.totalRecordSize = 0
card.arm_program('otto')
self.assertEqual(card.config._apply_calls, [(raw_card, True)])
self.assertEqual(card.card._startAcquisition_calls, [1])
card.arm_program('otto')
self.assertEqual(card.config._apply_calls, [(raw_card, True)])
self.assertEqual(card.card._startAcquisition_calls, [1, 1])
| [
"numpy.ones",
"qupulse.hardware.dacs.alazar.AlazarProgram",
"numpy.rint",
"numpy.arange",
"qupulse.hardware.dacs.alazar.AlazarCard",
"numpy.testing.assert_equal"
] | [((246, 266), 'qupulse.hardware.dacs.alazar.AlazarProgram', 'AlazarProgram', (['*args'], {}), '(*args)\n', (259, 266), False, 'from qupulse.hardware.dacs.alazar import AlazarCard, AlazarProgram\n'), ((433, 449), 'qupulse.hardware.dacs.alazar.AlazarCard', 'AlazarCard', (['None'], {}), '(None)\n', (443, 449), False, 'from qupulse.hardware.dacs.alazar import AlazarCard, AlazarProgram\n'), ((844, 860), 'qupulse.hardware.dacs.alazar.AlazarCard', 'AlazarCard', (['None'], {}), '(None)\n', (854, 860), False, 'from qupulse.hardware.dacs.alazar import AlazarCard, AlazarProgram\n'), ((1318, 1361), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['mask.begin', 'begins'], {}), '(mask.begin, begins)\n', (1341, 1361), True, 'import numpy as np\n'), ((1370, 1415), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['mask.length', 'lengths'], {}), '(mask.length, lengths)\n', (1393, 1415), True, 'import numpy as np\n'), ((1591, 1611), 'qupulse.hardware.dacs.alazar.AlazarCard', 'AlazarCard', (['raw_card'], {}), '(raw_card)\n', (1601, 1611), False, 'from qupulse.hardware.dacs.alazar import AlazarCard, AlazarProgram\n'), ((2303, 2397), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["card._registered_programs['otto'].masks[0].begin", 'expected_begins'], {}), "(card._registered_programs['otto'].masks[0].begin,\n expected_begins)\n", (2326, 2397), True, 'import numpy as np\n'), ((2811, 2827), 'qupulse.hardware.dacs.alazar.AlazarCard', 'AlazarCard', (['None'], {}), '(None)\n', (2821, 2827), False, 'from qupulse.hardware.dacs.alazar import AlazarCard, AlazarProgram\n'), ((3131, 3147), 'qupulse.hardware.dacs.alazar.AlazarCard', 'AlazarCard', (['None'], {}), '(None)\n', (3141, 3147), False, 'from qupulse.hardware.dacs.alazar import AlazarCard, AlazarProgram\n'), ((3333, 3353), 'qupulse.hardware.dacs.alazar.AlazarCard', 'AlazarCard', (['raw_card'], {}), '(raw_card)\n', (3343, 3353), False, 'from qupulse.hardware.dacs.alazar import AlazarCard, AlazarProgram\n'), 
((934, 964), 'numpy.arange', 'np.arange', (['(15)'], {'dtype': 'np.uint64'}), '(15, dtype=np.uint64)\n', (943, 964), True, 'import numpy as np\n'), ((988, 1018), 'numpy.arange', 'np.arange', (['(15)'], {'dtype': 'np.uint64'}), '(15, dtype=np.uint64)\n', (997, 1018), True, 'import numpy as np\n'), ((1930, 1944), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (1939, 1944), True, 'import numpy as np\n'), ((3833, 3847), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (3842, 3847), True, 'import numpy as np\n'), ((1969, 1981), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (1976, 1981), True, 'import numpy as np\n'), ((2250, 2270), 'numpy.rint', 'np.rint', (['(begins / 10)'], {}), '(begins / 10)\n', (2257, 2270), True, 'import numpy as np\n'), ((3874, 3886), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (3881, 3886), True, 'import numpy as np\n')] |
import numpy as np
from PIL import Image
# create a 400 x 400 grid
dim = np.linspace(-10, 10, 400)
x, y, _ = np.meshgrid(dim, dim, [1]) # the [1] adds an extra dimension
# positions and widths of the 'hills'
position_x = np.array([-3.0, 7.0, 9.0])
position_y = np.array([0.0, 8.0, -9.0])
width_x = np.array([5.3, 8.3, 4.0])
width_y = np.array([6.3, 5.7, 4.0])
# calculate height as a combination of Gaussians
d = np.sqrt(((x - position_x) / width_x) ** 2 + ((y - position_y) / width_y) ** 2)
z = np.exp(-d ** 2) # shape is (400, 400, 3) because we have 3 hills
z = z.sum(axis=2) # add the hills to get a single landscape
znorm = (z - z.min()) / (z.max() - z.min()) # normalize to range (0.0 .. 1.0)
# contour lines
contour = (znorm * 8).astype(np.uint8) * 32
im = Image.fromarray(contour, mode='L')
im.save('contour_steps.png')
# isolines
isolines = ((znorm * 100).round() % 16) == 0
isolines = (isolines * 255).astype(np.uint8)
im = Image.fromarray(isolines, mode='L')
im.save('contour_isolines.png')
| [
"numpy.meshgrid",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"PIL.Image.fromarray",
"numpy.sqrt"
] | [((76, 101), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(400)'], {}), '(-10, 10, 400)\n', (87, 101), True, 'import numpy as np\n'), ((112, 138), 'numpy.meshgrid', 'np.meshgrid', (['dim', 'dim', '[1]'], {}), '(dim, dim, [1])\n', (123, 138), True, 'import numpy as np\n'), ((226, 252), 'numpy.array', 'np.array', (['[-3.0, 7.0, 9.0]'], {}), '([-3.0, 7.0, 9.0])\n', (234, 252), True, 'import numpy as np\n'), ((271, 297), 'numpy.array', 'np.array', (['[0.0, 8.0, -9.0]'], {}), '([0.0, 8.0, -9.0])\n', (279, 297), True, 'import numpy as np\n'), ((309, 334), 'numpy.array', 'np.array', (['[5.3, 8.3, 4.0]'], {}), '([5.3, 8.3, 4.0])\n', (317, 334), True, 'import numpy as np\n'), ((346, 371), 'numpy.array', 'np.array', (['[6.3, 5.7, 4.0]'], {}), '([6.3, 5.7, 4.0])\n', (354, 371), True, 'import numpy as np\n'), ((426, 504), 'numpy.sqrt', 'np.sqrt', (['(((x - position_x) / width_x) ** 2 + ((y - position_y) / width_y) ** 2)'], {}), '(((x - position_x) / width_x) ** 2 + ((y - position_y) / width_y) ** 2)\n', (433, 504), True, 'import numpy as np\n'), ((509, 524), 'numpy.exp', 'np.exp', (['(-d ** 2)'], {}), '(-d ** 2)\n', (515, 524), True, 'import numpy as np\n'), ((783, 817), 'PIL.Image.fromarray', 'Image.fromarray', (['contour'], {'mode': '"""L"""'}), "(contour, mode='L')\n", (798, 817), False, 'from PIL import Image\n'), ((954, 989), 'PIL.Image.fromarray', 'Image.fromarray', (['isolines'], {'mode': '"""L"""'}), "(isolines, mode='L')\n", (969, 989), False, 'from PIL import Image\n')] |
"""
This script is used to evaluate the embeddings on the target tasks.
You'll need to download the datasets, compute the embeddings and set the path accordingly.
"""
from tqdm import tqdm
import pandas as pd
import numpy as np
from pathlib import Path
from collections import defaultdict
from itertools import chain
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import roc_auc_score
import json
US8K_MEDATADA_FILE = './data/UrbanSound8K/metadata/UrbanSound8K.csv'
GTZAN_TRAIN_FILE = './data/GTZAN/train_filtered.txt'
GTZAN_TEST_FILE = './data/GTZAN/test_filtered.txt'
EMBEDDING_FOLDERS_US8K = {
'mfcc': './data/embeddings/us8k/mfcc/',
'tag_w2v_128_self_1h': './data/tag_embeddings/us8k/embeddings_ae_w2v_128_selfatt_c_1h_200/',
'tag_w2v_128_self_4h': './data/tag_embeddings/us8k/embeddings_ae_w2v_128_selfatt_c_4h_200/',
'tag_w2v_1152_self_1h': './data/tag_embeddings/us8k/embeddings_ae_w2v_selfatt_c_1h_200/',
'tag_w2v_1152_self_4h': './data/tag_embeddings/us8k/embeddings_ae_w2v_selfatt_c_4h_200/',
'tag_w2v_128_mean': './data/tag_embeddings/us8k/embeddings_ae_w2v_128_mean_c_200/',
'tag_w2v_1152_mean': './data/tag_embeddings/us8k/embeddings_ae_w2v_mean_c_200/',
}
EMBEDDING_FOLDERS_GTZAN = {
'mfcc': './data/embeddings/gtzan/mfcc/',
'ae_w2v_128_self_1h' : './data/embeddings/gtzan/embeddings_ae_w2v_128_selfatt_c_1h_200/',
'ae_w2v_128_self_4h' : './data/embeddings/gtzan/embeddings_ae_w2v_128_selfatt_c_4h_200/',
'ae_w2v_1152_self_1h': './data/embeddings/gtzan/embeddings_ae_w2v_selfatt_c_1h_200/',
'ae_w2v_1152_self_4h': './data/embeddings/gtzan/embeddings_ae_w2v_selfatt_c_4h_200/',
'ae_w2v_128_mean': './data/embeddings/gtzan/embeddings_ae_w2v_128_mean_c_200/',
'ae_w2v_1152_mean': './data/embeddings/gtzan/embeddings_ae_w2v_mean_c_200/',
}
EMBEDDING_FOLDERS_NSYNTH = {
'mfcc': ('./data/embeddings/nsynth/train/mfcc/',
'./data/embeddings/nsynth/test/mfcc/'),
'ae_w2v_128_self_1h': ('./data/embeddings/nsynth/train/embeddings_ae_w2v_128_selfatt_c_1h_200/',
'./data/embeddings/nsynth/test/embeddings_ae_w2v_128_selfatt_c_1h_200/'),
'ae_w2v_128_self_4h': ('./data/embeddings/nsynth/train/embeddings_ae_w2v_128_selfatt_c_4h_200/',
'./data/embeddings/nsynth/test/embeddings_ae_w2v_128_selfatt_c_4h_200/'),
'ae_w2v_1152_self_1h': ('./data/embeddings/nsynth/train/embeddings_ae_w2v_selfatt_c_1h_200/',
'./data/embeddings/nsynth/test/embeddings_ae_w2v_selfatt_c_1h_200/'),
'ae_w2v_1152_self_4h': ('./data/embeddings/nsynth/train/embeddings_ae_w2v_selfatt_c_4h_200/',
'./data/embeddings/nsynth/test/embeddings_ae_w2v_selfatt_c_4h_200/'),
'ae_w2v_128_mean': ('./data/embeddings/nsynth/train/embeddings_ae_w2v_128_mean_c_200/',
'./data/embeddings/nsynth/test/embeddings_ae_w2v_128_mean_c_200/'),
'ae_w2v_1152_mean': ('./data/embeddings/nsynth/train/embeddings_ae_w2v_mean_c_200/',
'./data/embeddings/nsynth/test/embeddings_ae_w2v_mean_c_200/'),
}
GTZAN_CLASS_MAPPING = {
'blues': 0,
'classical': 1,
'country': 2,
'disco': 3,
'hiphop': 4,
'jazz': 5,
'metal': 6,
'pop': 7,
'reggae': 8,
'rock': 9
}
NSYNTH_CLASS_MAPPING = {
'brass': 0,
'guitar': 1,
'string': 2,
'vocal': 3,
'flute': 4,
'keyboard': 5,
'reed': 6,
'organ': 7,
'mallet': 8,
'bass': 9
}
def compute_roc_aucs(y_test, y_prob):
macro_roc_auc_ovo = roc_auc_score(y_test, y_prob, multi_class="ovo",
average="macro")
weighted_roc_auc_ovo = roc_auc_score(y_test, y_prob, multi_class="ovo",
average="weighted")
macro_roc_auc_ovr = roc_auc_score(y_test, y_prob, multi_class="ovr",
average="macro")
weighted_roc_auc_ovr = roc_auc_score(y_test, y_prob, multi_class="ovr",
average="weighted")
return macro_roc_auc_ovo, weighted_roc_auc_ovo, macro_roc_auc_ovr, weighted_roc_auc_ovr
# --------------- US8K ---------------
def create_folds(embedding_folder):
# slice_file_name fsID start end salience fold classID class
data = pd.read_csv(US8K_MEDATADA_FILE, error_bad_lines=False).values.tolist()
folds = [defaultdict(list) for _ in range(10)]
for d in data:
try:
fold_idx = d[5]-1
class_idx = d[6]
file_name = d[0]
folds[fold_idx]['X'].append(np.load(Path(embedding_folder, f'{file_name.split(".")[0]}.npy')))
folds[fold_idx]['y'].append(class_idx)
except:
pass
return folds
def return_other_fold_indexes(test_fold_idx):
return [i for i in range(10) if i != test_fold_idx]
def eval_US8K(embedding_folder):
folds = create_folds(embedding_folder)
scores = []
roc_auc_scores = []
for fold_idx, test_fold in enumerate(folds):
other_fold_indexes = return_other_fold_indexes(fold_idx)
X = np.array(list(chain(*[folds[idx]['X'] for idx in other_fold_indexes]))).squeeze()
y = np.array(list(chain(*[folds[idx]['y'] for idx in other_fold_indexes])))
X_test = np.array(test_fold['X']).squeeze()
y_test = np.array(test_fold['y'])
if len(X_test.shape) > 2:
X = X.mean(axis=1)
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
clf = MLPClassifier(hidden_layer_sizes=(256,))
clf.fit(X, y)
if len(X_test.shape) > 2:
X_test = X_test.mean(axis=1)
X_test = scaler.transform(X_test)
scores.append(clf.score(X_test, y_test))
y_prob = clf.predict_proba(X_test)
roc_auc_scores.append(compute_roc_aucs(y_test, y_prob))
print(f'\nScores: {roc_auc_scores}, mean: {[np.mean(s) for s in zip(*roc_auc_scores)]}\n')
return np.mean(scores), [np.mean(s) for s in zip(*roc_auc_scores)]
# --------------- GTZAN ---------------
def create_dataset_gtzan(embedding_folder):
train_files = pd.read_csv(GTZAN_TRAIN_FILE, error_bad_lines=False).values.tolist()
test_files = pd.read_csv(GTZAN_TEST_FILE, error_bad_lines=False).values.tolist()
X_train = []
y_train = []
X_test = []
y_test = []
for f_name in train_files:
f_name = Path(f_name[0]).stem
f = Path(embedding_folder, f'{f_name}.npy')
label_idx = GTZAN_CLASS_MAPPING[f.stem.split('.')[0]]
X_train.append(np.load(f))
y_train.append(label_idx)
for f_name in test_files:
f_name = Path(f_name[0]).stem
f = Path(embedding_folder, f'{f_name}.npy')
label_idx = GTZAN_CLASS_MAPPING[f.stem.split('.')[0]]
X_test.append(np.load(f))
y_test.append(label_idx)
X_train = np.array(X_train)
y_train = np.array(y_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
return X_train, X_test, y_train, y_test
def eval_gtzan_fault_filtered(embedding_folder):
X_train, X_test, y_train, y_test = create_dataset_gtzan(embedding_folder)
print("aggregate and scale...")
if len(X_train.shape) > 2:
X_train = X_train.mean(axis=1)
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
print("train...")
clf = MLPClassifier(hidden_layer_sizes=(256,))
clf.fit(X_train, y_train)
print("eval...")
if len(X_test.shape) > 2:
X_test = X_test.mean(axis=1)
X_test = scaler.transform(X_test)
score = clf.score(X_test, y_test)
y_prob = clf.predict_proba(X_test)
roc_auc_score = compute_roc_aucs(y_test, y_prob)
print(f'\MLP score: {roc_auc_score}\n')
return score, roc_auc_score
# --------------- NSYNTH ---------------
def create_dataset_nsynth(embedding_folder_train, embedding_folder_test):
print("loading train data...")
p = Path(embedding_folder_train)
X_train = []
y_train = []
for f in tqdm(p.iterdir()):
try:
if '_d' not in f.stem:
label_idx = NSYNTH_CLASS_MAPPING[f.stem.split('_')[0]]
X_train.append(np.load(f))
y_train.append(label_idx)
except:
pass
X_train = np.array(X_train)
y_train = np.array(y_train)
print("loading test data...")
p = Path(embedding_folder_test)
X_test = []
y_test = []
for f in tqdm(p.iterdir()):
try:
if '_d' not in f.stem:
label_idx = NSYNTH_CLASS_MAPPING[f.stem.split('_')[0]]
X_test.append(np.load(f))
y_test.append(label_idx)
except:
pass
X_test = np.array(X_test)
y_test = np.array(y_test)
return X_train, X_test, y_train, y_test
def eval_nsynth(embedding_folder_train, embedding_folder_test):
X_train, X_test, y_train, y_test = create_dataset_nsynth(embedding_folder_train, embedding_folder_test)
print("aggregate and scale...")
if len(X_train.shape) > 2:
X_train = X_train.mean(axis=1)
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
print("train...")
clf = MLPClassifier(hidden_layer_sizes=(256,))
clf.fit(X_train, y_train)
print("eval...")
if len(X_test.shape) > 2:
X_test = X_test.mean(axis=1)
X_test = scaler.transform(X_test)
score = clf.score(X_test, y_test)
y_prob = clf.predict_proba(X_test)
roc_auc_score = compute_roc_aucs(y_test, y_prob)
print(f'\MLP score: {roc_auc_score}\n')
return score, roc_auc_score
if __name__ == "__main__":
performances = {
'NSynth': defaultdict(list),
'US8K': defaultdict(list),
'GTZAN': defaultdict(list),
}
roc_aucs = {
'NSynth': defaultdict(list),
'US8K': defaultdict(list),
'GTZAN': defaultdict(list),
}
for run_idx in range(10):
# NSYNTH
print('--------------- NSYNTH ---------------')
for embedding_name, (embedding_folder, embedding_folder_test) in EMBEDDING_FOLDERS_NSYNTH.items():
print(f'\nEmbedding: {embedding_name}')
score, roc_auc = eval_nsynth(embedding_folder, embedding_folder_test)
performances['NSynth'][embedding_name].append(score)
roc_aucs['NSynth'][embedding_name].append(roc_auc)
print('\n\n')
# US8K
print('--------------- US8K ---------------')
for embedding_name, embedding_folder in EMBEDDING_FOLDERS_US8K.items():
print(f'\nEmbedding: {embedding_name}')
score, roc_auc = eval_US8K(embedding_folder)
performances['US8K'][embedding_name].append(score)
roc_aucs['US8K'][embedding_name].append(roc_auc)
print('\n\n')
# GTZAN
print('--------------- GTZAN ---------------')
for embedding_name, embedding_folder in EMBEDDING_FOLDERS_GTZAN.items():
print(f'\nEmbedding: {embedding_name}')
score, roc_auc = eval_gtzan_fault_filtered(embedding_folder)
performances['GTZAN'][embedding_name].append(score)
roc_aucs['GTZAN'][embedding_name].append(roc_auc)
print('\n\n')
json.dump(performances, open('results/performances_self.json', 'w'))
| [
"numpy.load",
"sklearn.preprocessing.StandardScaler",
"pandas.read_csv",
"sklearn.metrics.roc_auc_score",
"collections.defaultdict",
"pathlib.Path",
"numpy.mean",
"numpy.array",
"sklearn.neural_network.MLPClassifier",
"itertools.chain"
] | [((3874, 3939), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_prob'], {'multi_class': '"""ovo"""', 'average': '"""macro"""'}), "(y_test, y_prob, multi_class='ovo', average='macro')\n", (3887, 3939), False, 'from sklearn.metrics import roc_auc_score\n'), ((4005, 4073), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_prob'], {'multi_class': '"""ovo"""', 'average': '"""weighted"""'}), "(y_test, y_prob, multi_class='ovo', average='weighted')\n", (4018, 4073), False, 'from sklearn.metrics import roc_auc_score\n'), ((4139, 4204), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_prob'], {'multi_class': '"""ovr"""', 'average': '"""macro"""'}), "(y_test, y_prob, multi_class='ovr', average='macro')\n", (4152, 4204), False, 'from sklearn.metrics import roc_auc_score\n'), ((4270, 4338), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_prob'], {'multi_class': '"""ovr"""', 'average': '"""weighted"""'}), "(y_test, y_prob, multi_class='ovr', average='weighted')\n", (4283, 4338), False, 'from sklearn.metrics import roc_auc_score\n'), ((7213, 7230), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (7221, 7230), True, 'import numpy as np\n'), ((7245, 7262), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (7253, 7262), True, 'import numpy as np\n'), ((7276, 7292), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (7284, 7292), True, 'import numpy as np\n'), ((7306, 7322), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (7314, 7322), True, 'import numpy as np\n'), ((7616, 7632), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (7630, 7632), False, 'from sklearn.preprocessing import StandardScaler\n'), ((7730, 7770), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': '(256,)'}), '(hidden_layer_sizes=(256,))\n', (7743, 7770), False, 'from sklearn.neural_network import MLPClassifier\n'), ((8295, 8323), 
'pathlib.Path', 'Path', (['embedding_folder_train'], {}), '(embedding_folder_train)\n', (8299, 8323), False, 'from pathlib import Path\n'), ((8642, 8659), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (8650, 8659), True, 'import numpy as np\n'), ((8674, 8691), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (8682, 8691), True, 'import numpy as np\n'), ((8735, 8762), 'pathlib.Path', 'Path', (['embedding_folder_test'], {}), '(embedding_folder_test)\n', (8739, 8762), False, 'from pathlib import Path\n'), ((9076, 9092), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (9084, 9092), True, 'import numpy as np\n'), ((9106, 9122), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (9114, 9122), True, 'import numpy as np\n'), ((9465, 9481), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (9479, 9481), False, 'from sklearn.preprocessing import StandardScaler\n'), ((9579, 9619), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': '(256,)'}), '(hidden_layer_sizes=(256,))\n', (9592, 9619), False, 'from sklearn.neural_network import MLPClassifier\n'), ((4718, 4735), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4729, 4735), False, 'from collections import defaultdict\n'), ((5667, 5691), 'numpy.array', 'np.array', (["test_fold['y']"], {}), "(test_fold['y'])\n", (5675, 5691), True, 'import numpy as np\n'), ((5775, 5791), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5789, 5791), False, 'from sklearn.preprocessing import StandardScaler\n'), ((5861, 5901), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': '(256,)'}), '(hidden_layer_sizes=(256,))\n', (5874, 5901), False, 'from sklearn.neural_network import MLPClassifier\n'), ((6306, 6321), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (6313, 6321), True, 'import numpy as np\n'), ((6773, 6812), 'pathlib.Path', 'Path', 
(['embedding_folder', 'f"""{f_name}.npy"""'], {}), "(embedding_folder, f'{f_name}.npy')\n", (6777, 6812), False, 'from pathlib import Path\n'), ((7029, 7068), 'pathlib.Path', 'Path', (['embedding_folder', 'f"""{f_name}.npy"""'], {}), "(embedding_folder, f'{f_name}.npy')\n", (7033, 7068), False, 'from pathlib import Path\n'), ((10052, 10069), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10063, 10069), False, 'from collections import defaultdict\n'), ((10087, 10104), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10098, 10104), False, 'from collections import defaultdict\n'), ((10123, 10140), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10134, 10140), False, 'from collections import defaultdict\n'), ((10183, 10200), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10194, 10200), False, 'from collections import defaultdict\n'), ((10218, 10235), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10229, 10235), False, 'from collections import defaultdict\n'), ((10254, 10271), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10265, 10271), False, 'from collections import defaultdict\n'), ((6324, 6334), 'numpy.mean', 'np.mean', (['s'], {}), '(s)\n', (6331, 6334), True, 'import numpy as np\n'), ((6740, 6755), 'pathlib.Path', 'Path', (['f_name[0]'], {}), '(f_name[0])\n', (6744, 6755), False, 'from pathlib import Path\n'), ((6898, 6908), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (6905, 6908), True, 'import numpy as np\n'), ((6996, 7011), 'pathlib.Path', 'Path', (['f_name[0]'], {}), '(f_name[0])\n', (7000, 7011), False, 'from pathlib import Path\n'), ((7153, 7163), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (7160, 7163), True, 'import numpy as np\n'), ((4634, 4688), 'pandas.read_csv', 'pd.read_csv', (['US8K_MEDATADA_FILE'], {'error_bad_lines': '(False)'}), '(US8K_MEDATADA_FILE, error_bad_lines=False)\n', (4645, 4688), True, 
'import pandas as pd\n'), ((5540, 5595), 'itertools.chain', 'chain', (["*[folds[idx]['y'] for idx in other_fold_indexes]"], {}), "(*[folds[idx]['y'] for idx in other_fold_indexes])\n", (5545, 5595), False, 'from itertools import chain\n'), ((5615, 5639), 'numpy.array', 'np.array', (["test_fold['X']"], {}), "(test_fold['X'])\n", (5623, 5639), True, 'import numpy as np\n'), ((6470, 6522), 'pandas.read_csv', 'pd.read_csv', (['GTZAN_TRAIN_FILE'], {'error_bad_lines': '(False)'}), '(GTZAN_TRAIN_FILE, error_bad_lines=False)\n', (6481, 6522), True, 'import pandas as pd\n'), ((6556, 6607), 'pandas.read_csv', 'pd.read_csv', (['GTZAN_TEST_FILE'], {'error_bad_lines': '(False)'}), '(GTZAN_TEST_FILE, error_bad_lines=False)\n', (6567, 6607), True, 'import pandas as pd\n'), ((6247, 6257), 'numpy.mean', 'np.mean', (['s'], {}), '(s)\n', (6254, 6257), True, 'import numpy as np\n'), ((8540, 8550), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (8547, 8550), True, 'import numpy as np\n'), ((8976, 8986), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (8983, 8986), True, 'import numpy as np\n'), ((5446, 5501), 'itertools.chain', 'chain', (["*[folds[idx]['X'] for idx in other_fold_indexes]"], {}), "(*[folds[idx]['X'] for idx in other_fold_indexes])\n", (5451, 5501), False, 'from itertools import chain\n')] |
import unittest
import numpy as np
from numpy.testing import assert_array_almost_equal
from .test_base import BaseSparrayTest, dense2d
class TestTrueDivision(BaseSparrayTest):
  """Checks that sparse-array true division agrees with dense numpy."""

  def test_truediv(self):
    # Divide by a scalar on the right-hand side.
    divisor = 3
    assert_array_almost_equal(dense2d / divisor, (self.sp2d / divisor).toarray())
    # Scalar on the left; suppress divide warnings from any zero entries.
    with np.errstate(divide='ignore'):
      assert_array_almost_equal(divisor / dense2d, divisor / self.sp2d)

  def test_itruediv(self):
    # In-place division by one must be a no-op.
    self.sp2d /= 1
    assert_array_almost_equal(dense2d, self.sp2d.toarray())
    # In-place elementwise division by a random dense divisor.
    scale = np.random.random(dense2d.shape)
    self.sp2d /= scale
    assert_array_almost_equal(dense2d / scale, self.sp2d.toarray())


if __name__ == '__main__':
  unittest.main()
| [
"unittest.main",
"numpy.testing.assert_array_almost_equal",
"numpy.random.random",
"numpy.errstate"
] | [((646, 661), 'unittest.main', 'unittest.main', ([], {}), '()\n', (659, 661), False, 'import unittest\n'), ((500, 531), 'numpy.random.random', 'np.random.random', (['dense2d.shape'], {}), '(dense2d.shape)\n', (516, 531), True, 'import numpy as np\n'), ((295, 323), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (306, 323), True, 'import numpy as np\n'), ((331, 384), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['(c / dense2d)', '(c / self.sp2d)'], {}), '(c / dense2d, c / self.sp2d)\n', (356, 384), False, 'from numpy.testing import assert_array_almost_equal\n')] |
from srxraylib.plot.gol import plot, set_qt
import matplotlib.pylab as plt
import numpy
labelsize=25
figsize=(12,8)
import matplotlib
import matplotlib.pylab as plt
# Global styling: enlarge tick labels and all other text to `labelsize`.
matplotlib.rc('xtick', labelsize=labelsize)
matplotlib.rc('ytick', labelsize=labelsize)
matplotlib.rcParams.update({'font.size': labelsize})
set_qt()
#
# Strehl ratio vs positive mirror radius: compare the uncorrected scan
# against three corrected variants (ideal / cropped / extrapolated).
#
am = numpy.loadtxt("scan_peak_vs_positive_radius.txt", skiprows=0)
amC = numpy.loadtxt("scan_peak_vs_positive_radius_corrected.txt", skiprows=0)
amR = numpy.loadtxt("scan_peak_vs_positive_radius_cropped.txt", skiprows=0)
amE = numpy.loadtxt("scan_peak_vs_positive_radius_extrapolated.txt", skiprows=0)
# Column 0 is the radius (plotted as |R| on a log x-axis), column 1 the peak
# intensity; dividing by am[-1,1] normalizes every curve to the last
# (largest-radius) uncorrected value, giving a Strehl-like ratio.
fm = plot(numpy.abs(am[:,0]), am[:,1] / am[-1,1],
     numpy.abs(amC[:,0]), amC[:,1] / am[-1,1],
     numpy.abs(amR[:,0]), amR[:,1] / am[-1,1],
     numpy.abs(amE[:,0]), amE[:,1] / am[-1,1],
     xlog=True,figsize=figsize,
     legend=["Uncorrected",
             "Corrected (ideal)",
             "Corrected (cropped)",
             "Corrected (extrapolated)"],
     legend_position=[0.35, 0.635],
     xtitle="Radius [m]",ytitle="Strehl Ratio I/I0",xrange=[60,1e6], show=0)
matplotlib.pylab.grid() #b=True, which='major', color='#666666', linestyle='-', alpha=0.2)
filefig = "scan_peak_vs_positive_radius.pdf"
fm[0].savefig(filefig)
print("File written to disk: %s" % filefig)
plt.show()
#
# Same comparison for the negative-radius scans.
#
am = numpy.loadtxt("scan_peak_vs_negative_radius.txt", skiprows=0)
amC = numpy.loadtxt("scan_peak_vs_negative_radius_corrected.txt", skiprows=0)
amR = numpy.loadtxt("scan_peak_vs_negative_radius_cropped.txt", skiprows=0)
amE = numpy.loadtxt("scan_peak_vs_negative_radius_extrapolated.txt", skiprows=0)
# amE = numpy.loadtxt("tmp.txt", skiprows=0)
fm = plot(numpy.abs(am[:,0]), am[:,1] / am[-1,1],
     numpy.abs(amC[:,0]), amC[:,1] / am[-1,1],
     numpy.abs(amR[:,0]), amR[:,1] / am[-1,1],
     numpy.abs(amE[:,0]), amE[:,1] / am[-1,1],
     xlog=True,figsize=figsize,
     legend=["Uncorrected",
             "Corrected (ideal)",
             "Corrected (cropped)",
             "Corrected (extrapolated)"],
     legend_position=[0.5,0.635],
     xtitle="Radius [m]",ytitle="Strehl Ratio I/I0",xrange=[60,1e6], show=0)
matplotlib.pylab.grid()
filefig = "scan_peak_vs_negative_radius.pdf"
fm[0].savefig(filefig)
print("File written to disk: %s" % filefig)
plt.show()
#================================== HIGH ENERGY ============================================
# High-energy (1230.888 eV) version of the same two figures; disabled by
# default — flip the guard to True to regenerate the .png files.
if False:
    #
    #
    #
    am = numpy.loadtxt("scan_peak_vs_positive_radius_1230eV.txt", skiprows=0)
    amC = numpy.loadtxt("scan_peak_vs_positive_radius_corrected_1230eV.txt", skiprows=0)
    amR = numpy.loadtxt("scan_peak_vs_positive_radius_cropped_1230eV_new.txt", skiprows=0)
    amE = numpy.loadtxt("scan_peak_vs_positive_radius_extrapolated_1230eV_new.txt", skiprows=0)
    # plot(numpy.abs(amE[:,0]), amE[:,1] / am[-1,1], xlog=True, title="extrapolated", show=0)
    # plot(numpy.abs(amR[:,0]), amR[:,1] / am[-1,1], xlog=True, title="cropped")
    fm = plot(numpy.abs(am[:,0]), am[:,1] / am[-1,1],
         numpy.abs(amC[:,0]), amC[:,1] / am[-1,1],
         numpy.abs(amR[:,0]), amR[:,1] / am[-1,1],
         numpy.abs(amE[:,0]), amE[:,1] / am[-1,1],
         xlog=True,figsize=(12,8),
         legend=["Uncorrected",
                 "Corrected (ideal)",
                 "Corrected (cropped)",
                 "Corrected (extrapolated)"],
         xtitle="Radius [m]",ytitle="Strehl Ratio I/I0",xrange=[60,1e6], show=0,
         title="1230.888 eV")
    matplotlib.pylab.grid() #b=True, which='major', color='#666666', linestyle='-', alpha=0.2)
    filefig = "scan_peak_vs_positive_radius_1230eV.png"
    fm[0].savefig(filefig)
    print("File written to disk: %s" % filefig)
    plt.show()
    #
    #
    #
    am = numpy.loadtxt("scan_peak_vs_negative_radius_1230eV.txt", skiprows=0)
    amC = numpy.loadtxt("scan_peak_vs_negative_radius_corrected_1230eV_new.txt", skiprows=0)
    amR = numpy.loadtxt("scan_peak_vs_negative_radius_cropped_1230eV_new.txt", skiprows=0)
    amE = numpy.loadtxt("scan_peak_vs_negative_radius_extrapolated_1230eV_new.txt", skiprows=0)
    fm = plot(numpy.abs(am[:,0]), am[:,1] / am[-1,1],
         numpy.abs(amC[:,0]), amC[:,1] / am[-1,1],
         numpy.abs(amR[:,0]), amR[:,1] / am[-1,1],
         numpy.abs(amE[:,0]), amE[:,1] / am[-1,1],
         xlog=True,figsize=(12,8),
         legend=["Uncorrected",
                 "Corrected (ideal)",
                 "Corrected (cropped)",
                 "Corrected (extrapolated)"],
         xtitle="Radius [m]",ytitle="Strehl Ratio I/I0",xrange=[60,1e6], show=0,
         title="1230.888 eV")
    matplotlib.pylab.grid() #b=True, which='major', color='#666666', linestyle='-', alpha=0.2)
    filefig = "scan_peak_vs_negative_radius_1230eV.png"
    fm[0].savefig(filefig)
    print("File written to disk: %s" % filefig)
    plt.show()
| [
"matplotlib.rc",
"numpy.abs",
"matplotlib.rcParams.update",
"numpy.loadtxt",
"srxraylib.plot.gol.set_qt",
"matplotlib.pylab.grid",
"matplotlib.pylab.show"
] | [((166, 209), 'matplotlib.rc', 'matplotlib.rc', (['"""xtick"""'], {'labelsize': 'labelsize'}), "('xtick', labelsize=labelsize)\n", (179, 209), False, 'import matplotlib\n'), ((218, 261), 'matplotlib.rc', 'matplotlib.rc', (['"""ytick"""'], {'labelsize': 'labelsize'}), "('ytick', labelsize=labelsize)\n", (231, 261), False, 'import matplotlib\n'), ((270, 322), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': labelsize}"], {}), "({'font.size': labelsize})\n", (296, 322), False, 'import matplotlib\n'), ((325, 333), 'srxraylib.plot.gol.set_qt', 'set_qt', ([], {}), '()\n', (331, 333), False, 'from srxraylib.plot.gol import plot, set_qt\n'), ((347, 408), 'numpy.loadtxt', 'numpy.loadtxt', (['"""scan_peak_vs_positive_radius.txt"""'], {'skiprows': '(0)'}), "('scan_peak_vs_positive_radius.txt', skiprows=0)\n", (360, 408), False, 'import numpy\n'), ((415, 486), 'numpy.loadtxt', 'numpy.loadtxt', (['"""scan_peak_vs_positive_radius_corrected.txt"""'], {'skiprows': '(0)'}), "('scan_peak_vs_positive_radius_corrected.txt', skiprows=0)\n", (428, 486), False, 'import numpy\n'), ((493, 562), 'numpy.loadtxt', 'numpy.loadtxt', (['"""scan_peak_vs_positive_radius_cropped.txt"""'], {'skiprows': '(0)'}), "('scan_peak_vs_positive_radius_cropped.txt', skiprows=0)\n", (506, 562), False, 'import numpy\n'), ((569, 643), 'numpy.loadtxt', 'numpy.loadtxt', (['"""scan_peak_vs_positive_radius_extrapolated.txt"""'], {'skiprows': '(0)'}), "('scan_peak_vs_positive_radius_extrapolated.txt', skiprows=0)\n", (582, 643), False, 'import numpy\n'), ((1173, 1196), 'matplotlib.pylab.grid', 'matplotlib.pylab.grid', ([], {}), '()\n', (1194, 1196), False, 'import matplotlib\n'), ((1376, 1386), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (1384, 1386), True, 'import matplotlib.pylab as plt\n'), ((1399, 1460), 'numpy.loadtxt', 'numpy.loadtxt', (['"""scan_peak_vs_negative_radius.txt"""'], {'skiprows': '(0)'}), "('scan_peak_vs_negative_radius.txt', skiprows=0)\n", (1412, 1460), 
False, 'import numpy\n'), ((1467, 1538), 'numpy.loadtxt', 'numpy.loadtxt', (['"""scan_peak_vs_negative_radius_corrected.txt"""'], {'skiprows': '(0)'}), "('scan_peak_vs_negative_radius_corrected.txt', skiprows=0)\n", (1480, 1538), False, 'import numpy\n'), ((1545, 1614), 'numpy.loadtxt', 'numpy.loadtxt', (['"""scan_peak_vs_negative_radius_cropped.txt"""'], {'skiprows': '(0)'}), "('scan_peak_vs_negative_radius_cropped.txt', skiprows=0)\n", (1558, 1614), False, 'import numpy\n'), ((1621, 1695), 'numpy.loadtxt', 'numpy.loadtxt', (['"""scan_peak_vs_negative_radius_extrapolated.txt"""'], {'skiprows': '(0)'}), "('scan_peak_vs_negative_radius_extrapolated.txt', skiprows=0)\n", (1634, 1695), False, 'import numpy\n'), ((2268, 2291), 'matplotlib.pylab.grid', 'matplotlib.pylab.grid', ([], {}), '()\n', (2289, 2291), False, 'import matplotlib\n'), ((2404, 2414), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (2412, 2414), True, 'import matplotlib.pylab as plt\n'), ((654, 673), 'numpy.abs', 'numpy.abs', (['am[:, 0]'], {}), '(am[:, 0])\n', (663, 673), False, 'import numpy\n'), ((706, 726), 'numpy.abs', 'numpy.abs', (['amC[:, 0]'], {}), '(amC[:, 0])\n', (715, 726), False, 'import numpy\n'), ((758, 778), 'numpy.abs', 'numpy.abs', (['amR[:, 0]'], {}), '(amR[:, 0])\n', (767, 778), False, 'import numpy\n'), ((810, 830), 'numpy.abs', 'numpy.abs', (['amE[:, 0]'], {}), '(amE[:, 0])\n', (819, 830), False, 'import numpy\n'), ((1751, 1770), 'numpy.abs', 'numpy.abs', (['am[:, 0]'], {}), '(am[:, 0])\n', (1760, 1770), False, 'import numpy\n'), ((1803, 1823), 'numpy.abs', 'numpy.abs', (['amC[:, 0]'], {}), '(amC[:, 0])\n', (1812, 1823), False, 'import numpy\n'), ((1855, 1875), 'numpy.abs', 'numpy.abs', (['amR[:, 0]'], {}), '(amR[:, 0])\n', (1864, 1875), False, 'import numpy\n'), ((1907, 1927), 'numpy.abs', 'numpy.abs', (['amE[:, 0]'], {}), '(amE[:, 0])\n', (1916, 1927), False, 'import numpy\n'), ((2550, 2618), 'numpy.loadtxt', 'numpy.loadtxt', 
(['"""scan_peak_vs_positive_radius_1230eV.txt"""'], {'skiprows': '(0)'}), "('scan_peak_vs_positive_radius_1230eV.txt', skiprows=0)\n", (2563, 2618), False, 'import numpy\n'), ((2629, 2707), 'numpy.loadtxt', 'numpy.loadtxt', (['"""scan_peak_vs_positive_radius_corrected_1230eV.txt"""'], {'skiprows': '(0)'}), "('scan_peak_vs_positive_radius_corrected_1230eV.txt', skiprows=0)\n", (2642, 2707), False, 'import numpy\n'), ((2718, 2803), 'numpy.loadtxt', 'numpy.loadtxt', (['"""scan_peak_vs_positive_radius_cropped_1230eV_new.txt"""'], {'skiprows': '(0)'}), "('scan_peak_vs_positive_radius_cropped_1230eV_new.txt', skiprows=0\n )\n", (2731, 2803), False, 'import numpy\n'), ((2809, 2898), 'numpy.loadtxt', 'numpy.loadtxt', (['"""scan_peak_vs_positive_radius_extrapolated_1230eV_new.txt"""'], {'skiprows': '(0)'}), "('scan_peak_vs_positive_radius_extrapolated_1230eV_new.txt',\n skiprows=0)\n", (2822, 2898), False, 'import numpy\n'), ((3636, 3659), 'matplotlib.pylab.grid', 'matplotlib.pylab.grid', ([], {}), '()\n', (3657, 3659), False, 'import matplotlib\n'), ((3862, 3872), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (3870, 3872), True, 'import matplotlib.pylab as plt\n'), ((3902, 3970), 'numpy.loadtxt', 'numpy.loadtxt', (['"""scan_peak_vs_negative_radius_1230eV.txt"""'], {'skiprows': '(0)'}), "('scan_peak_vs_negative_radius_1230eV.txt', skiprows=0)\n", (3915, 3970), False, 'import numpy\n'), ((3981, 4067), 'numpy.loadtxt', 'numpy.loadtxt', (['"""scan_peak_vs_negative_radius_corrected_1230eV_new.txt"""'], {'skiprows': '(0)'}), "('scan_peak_vs_negative_radius_corrected_1230eV_new.txt',\n skiprows=0)\n", (3994, 4067), False, 'import numpy\n'), ((4074, 4159), 'numpy.loadtxt', 'numpy.loadtxt', (['"""scan_peak_vs_negative_radius_cropped_1230eV_new.txt"""'], {'skiprows': '(0)'}), "('scan_peak_vs_negative_radius_cropped_1230eV_new.txt', skiprows=0\n )\n", (4087, 4159), False, 'import numpy\n'), ((4165, 4254), 'numpy.loadtxt', 'numpy.loadtxt', 
(['"""scan_peak_vs_negative_radius_extrapolated_1230eV_new.txt"""'], {'skiprows': '(0)'}), "('scan_peak_vs_negative_radius_extrapolated_1230eV_new.txt',\n skiprows=0)\n", (4178, 4254), False, 'import numpy\n'), ((4817, 4840), 'matplotlib.pylab.grid', 'matplotlib.pylab.grid', ([], {}), '()\n', (4838, 4840), False, 'import matplotlib\n'), ((5043, 5053), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (5051, 5053), True, 'import matplotlib.pylab as plt\n'), ((3084, 3103), 'numpy.abs', 'numpy.abs', (['am[:, 0]'], {}), '(am[:, 0])\n', (3093, 3103), False, 'import numpy\n'), ((3140, 3160), 'numpy.abs', 'numpy.abs', (['amC[:, 0]'], {}), '(amC[:, 0])\n', (3149, 3160), False, 'import numpy\n'), ((3196, 3216), 'numpy.abs', 'numpy.abs', (['amR[:, 0]'], {}), '(amR[:, 0])\n', (3205, 3216), False, 'import numpy\n'), ((3252, 3272), 'numpy.abs', 'numpy.abs', (['amE[:, 0]'], {}), '(amE[:, 0])\n', (3261, 3272), False, 'import numpy\n'), ((4265, 4284), 'numpy.abs', 'numpy.abs', (['am[:, 0]'], {}), '(am[:, 0])\n', (4274, 4284), False, 'import numpy\n'), ((4321, 4341), 'numpy.abs', 'numpy.abs', (['amC[:, 0]'], {}), '(amC[:, 0])\n', (4330, 4341), False, 'import numpy\n'), ((4377, 4397), 'numpy.abs', 'numpy.abs', (['amR[:, 0]'], {}), '(amR[:, 0])\n', (4386, 4397), False, 'import numpy\n'), ((4433, 4453), 'numpy.abs', 'numpy.abs', (['amE[:, 0]'], {}), '(amE[:, 0])\n', (4442, 4453), False, 'import numpy\n')] |
import argparse
import copy
import os
from itertools import chain
import numpy as np
import tensorboardX
import torch
import torch.nn.functional as F
import tqdm
from . import envs, nets, replay, run, utils
# Run all networks/tensors on the GPU when one is available, else CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class TD3Agent:
    """Container for the three networks TD3 trains.

    Holds one deterministic actor plus two Q critics (the "twin" in TD3),
    with device/mode helpers, checkpointing, and an exploration-free
    ``forward`` for acting in an environment.
    """

    def __init__(
        self,
        obs_space_size,
        act_space_size,
        actor_net_cls=nets.BaselineActor,
        critic_net_cls=nets.BigCritic,
        hidden_size=256,
    ):
        # The actor maps states to actions; the two critics independently
        # score (state, action) pairs so their minimum can be used as a
        # pessimistic learning target.
        self.actor = actor_net_cls(
            obs_space_size, act_space_size, hidden_size=hidden_size
        )
        self.critic1 = critic_net_cls(
            obs_space_size, act_space_size, hidden_size=hidden_size
        )
        self.critic2 = critic_net_cls(
            obs_space_size, act_space_size, hidden_size=hidden_size
        )

    def to(self, device):
        """Move all three networks onto ``device``."""
        self.actor = self.actor.to(device)
        self.critic1 = self.critic1.to(device)
        self.critic2 = self.critic2.to(device)

    def eval(self):
        """Put all networks into inference mode."""
        self.actor.eval()
        self.critic1.eval()
        self.critic2.eval()

    def train(self):
        """Put all networks into training mode."""
        self.actor.train()
        self.critic1.train()
        self.critic2.train()

    def save(self, path):
        """Write each network's state dict under directory ``path``."""
        actor_path = os.path.join(path, "actor.pt")
        critic1_path = os.path.join(path, "critic1.pt")
        critic2_path = os.path.join(path, "critic2.pt")
        torch.save(self.actor.state_dict(), actor_path)
        torch.save(self.critic1.state_dict(), critic1_path)
        torch.save(self.critic2.state_dict(), critic2_path)

    def load(self, path):
        """Restore each network's state dict from directory ``path``."""
        actor_path = os.path.join(path, "actor.pt")
        critic1_path = os.path.join(path, "critic1.pt")
        critic2_path = os.path.join(path, "critic2.pt")
        self.actor.load_state_dict(torch.load(actor_path))
        self.critic1.load_state_dict(torch.load(critic1_path))
        self.critic2.load_state_dict(torch.load(critic2_path))

    def forward(self, state, from_cpu=True):
        """Return the actor's greedy (noise-free) action for ``state``.

        When ``from_cpu`` is True, ``state`` is treated as a numpy
        observation: it is batched and moved to the training device first,
        and the resulting action is converted back to a numpy array.
        """
        if from_cpu:
            state = self.process_state(state)
        # Temporarily switch to eval mode for a deterministic forward pass.
        self.actor.eval()
        with torch.no_grad():
            action = self.actor(state)
        self.actor.train()
        if from_cpu:
            action = self.process_act(action)
        return action

    def process_state(self, state):
        # Add a leading batch dimension and move onto the global device.
        return torch.from_numpy(np.expand_dims(state, 0).astype(np.float32)).to(
            utils.device
        )

    def process_act(self, act):
        # Drop the batch dimension and return a cpu numpy array.
        return np.squeeze(act.cpu().numpy(), 0)
def td3(
    agent,
    train_env,
    test_env,
    buffer,
    num_steps=1_000_000,
    transitions_per_step=1,
    max_episode_steps=100_000,
    batch_size=256,
    tau=0.005,
    actor_lr=1e-4,
    critic_lr=1e-3,
    gamma=0.99,
    sigma_start=0.2,
    sigma_final=0.1,
    sigma_anneal=100_000,
    theta=0.15,
    eval_interval=5000,
    eval_episodes=10,
    warmup_steps=1000,
    actor_clip=None,
    critic_clip=None,
    actor_l2=0.0,
    critic_l2=0.0,
    delay=2,
    target_noise_scale=0.2,
    save_interval=100_000,
    c=0.5,
    name="td3_run",
    render=False,
    save_to_disk=True,
    log_to_disk=True,
    verbosity=0,
    gradient_updates_per_step=1,
    td_reg_coeff=0.0,
    td_reg_coeff_decay=0.9999,
    infinite_bootstrap=True,
    **_,
):
    """
    Train `agent` on `train_env` with Twin Delayed Deep Deterministic Policy
    Gradient algorithm, and evaluate on `test_env`.

    Reference: https://arxiv.org/abs/1802.09477

    Notable arguments:
        tau: polyak coefficient for target-network soft updates.
        sigma_start/sigma_final/sigma_anneal/theta: Ornstein-Uhlenbeck
            exploration-noise schedule.
        delay: actor and target networks are updated every `delay` steps.
        target_noise_scale, c: target-policy smoothing noise scale and clip.
        td_reg_coeff: weight of the TD-regularization term in the actor
            loss; multiplied by `td_reg_coeff_decay` every step.
        **_: swallows extra keyword arguments (e.g. from vars(cli_args)).

    Returns the trained `agent`.
    """
    if save_to_disk or log_to_disk:
        save_dir = utils.make_process_dirs(name)
    if log_to_disk:
        # create tb writer, save hparams
        writer = tensorboardX.SummaryWriter(save_dir)
        writer.add_hparams(locals(), {})

    agent.to(device)
    # initialize target networks
    target_agent = copy.deepcopy(agent)
    target_agent.to(device)
    utils.hard_update(target_agent.actor, agent.actor)
    utils.hard_update(target_agent.critic1, agent.critic1)
    utils.hard_update(target_agent.critic2, agent.critic2)

    # annealed exploration noise added to the actor's actions during rollout
    random_process = utils.OrnsteinUhlenbeckProcess(
        size=train_env.action_space.shape,
        sigma=sigma_start,
        sigma_min=sigma_final,
        n_steps_annealing=sigma_anneal,
        theta=theta,
    )

    # set up optimizers; both critics share one optimizer
    critic_optimizer = torch.optim.Adam(
        chain(agent.critic1.parameters(), agent.critic2.parameters(),),
        lr=critic_lr,
        weight_decay=critic_l2,
        betas=(0.9, 0.999),
    )
    actor_optimizer = torch.optim.Adam(
        agent.actor.parameters(), lr=actor_lr, weight_decay=actor_l2
    )

    # pre-fill the replay buffer before any gradient steps
    run.warmup_buffer(buffer, train_env, warmup_steps, max_episode_steps)

    done = True

    steps_iter = range(num_steps)
    if verbosity:
        steps_iter = tqdm.tqdm(steps_iter)
    for step in steps_iter:
        # ---- environment interaction ----
        for _ in range(transitions_per_step):
            if done:
                state = train_env.reset()
                random_process.reset_states()
                steps_this_ep = 0
                done = False
            action = agent.forward(state)
            noisy_action = run.exploration_noise(action, random_process)
            next_state, reward, done, info = train_env.step(noisy_action)
            if infinite_bootstrap:
                # allow infinite bootstrapping
                if steps_this_ep + 1 == max_episode_steps:
                    done = False
            buffer.push(state, noisy_action, reward, next_state, done)
            state = next_state
            steps_this_ep += 1
            if steps_this_ep >= max_episode_steps:
                done = True

        # ---- learning: critics every step, actor/targets every `delay` ----
        update_policy = step % delay == 0
        for _ in range(gradient_updates_per_step):
            learn(
                buffer=buffer,
                target_agent=target_agent,
                agent=agent,
                actor_optimizer=actor_optimizer,
                critic_optimizer=critic_optimizer,
                batch_size=batch_size,
                target_noise_scale=target_noise_scale,
                c=c,
                gamma=gamma,
                critic_clip=critic_clip,
                actor_clip=actor_clip,
                td_reg_coeff=td_reg_coeff,
                update_policy=update_policy,
            )

        # move target model towards training model
        if update_policy:
            utils.soft_update(target_agent.actor, agent.actor, tau)
            # original td3 impl only updates critic targets with the actor...
            utils.soft_update(target_agent.critic1, agent.critic1, tau)
            utils.soft_update(target_agent.critic2, agent.critic2, tau)

        # decay td regularization
        td_reg_coeff *= td_reg_coeff_decay

        if step % eval_interval == 0 or step == num_steps - 1:
            mean_return = run.evaluate_agent(
                agent, test_env, eval_episodes, max_episode_steps, render
            )
            if log_to_disk:
                writer.add_scalar("return", mean_return, step * transitions_per_step)

        if step % save_interval == 0 and save_to_disk:
            agent.save(save_dir)

    if save_to_disk:
        agent.save(save_dir)
    return agent
def learn(
    buffer,
    target_agent,
    agent,
    actor_optimizer,
    critic_optimizer,
    batch_size,
    target_noise_scale,
    c,
    gamma,
    critic_clip,
    actor_clip,
    td_reg_coeff,
    update_policy=True,
):
    """Run one TD3 gradient step.

    Both critics are always updated; the actor is only updated when
    ``update_policy`` is True (the "delayed" part of TD3). When ``buffer``
    is a PrioritizedReplayBuffer, critic losses are importance-weighted and
    the sampled transitions' priorities are refreshed from the new TD error.
    """
    per = isinstance(buffer, replay.PrioritizedReplayBuffer)
    if per:
        batch, imp_weights, priority_idxs = buffer.sample(batch_size)
        imp_weights = imp_weights.to(device)
    else:
        batch = buffer.sample(batch_size)

    # prepare transitions for models
    state_batch, action_batch, reward_batch, next_state_batch, done_batch = batch
    state_batch = state_batch.to(device)
    next_state_batch = next_state_batch.to(device)
    action_batch = action_batch.to(device)
    reward_batch = reward_batch.to(device)
    done_batch = done_batch.to(device)

    agent.train()

    with torch.no_grad():
        # create critic targets (clipped double Q learning)
        target_action_s1 = target_agent.actor(next_state_batch)
        # target-policy smoothing: gaussian noise clipped to [-c, c]
        target_noise = torch.clamp(
            target_noise_scale * torch.randn(*target_action_s1.shape).to(device), -c, c
        )
        # target smoothing
        target_action_s1 = torch.clamp(target_action_s1 + target_noise, -1.0, 1.0,)
        # pessimistic value: minimum of the two target critics
        target_action_value_s1 = torch.min(
            target_agent.critic1(next_state_batch, target_action_s1),
            target_agent.critic2(next_state_batch, target_action_s1),
        )
        # terminal transitions (done=1) do not bootstrap
        td_target = reward_batch + gamma * (1.0 - done_batch) * target_action_value_s1

    # update critics
    agent_critic1_pred = agent.critic1(state_batch, action_batch)
    td_error1 = td_target - agent_critic1_pred
    if per:
        critic1_loss = (imp_weights * 0.5 * (td_error1 ** 2)).mean()
    else:
        critic1_loss = 0.5 * (td_error1 ** 2).mean()
    agent_critic2_pred = agent.critic2(state_batch, action_batch)
    td_error2 = td_target - agent_critic2_pred
    if per:
        critic2_loss = (imp_weights * 0.5 * (td_error2 ** 2)).mean()
    else:
        critic2_loss = 0.5 * (td_error2 ** 2).mean()
    critic_loss = critic1_loss + critic2_loss
    critic_optimizer.zero_grad()
    critic_loss.backward()
    if critic_clip:
        torch.nn.utils.clip_grad_norm_(
            chain(agent.critic1.parameters(), agent.critic2.parameters()), critic_clip
        )
    critic_optimizer.step()

    if update_policy:
        # actor update: ascend critic1's value minus a TD-regularization
        # term; the critic loss is detached so only the actor is penalized
        agent_actions = agent.actor(state_batch)
        actor_loss = -(
            agent.critic1(state_batch, agent_actions).mean()
            - td_reg_coeff * critic_loss.detach()
        )
        actor_optimizer.zero_grad()
        actor_loss.backward()
        if actor_clip:
            torch.nn.utils.clip_grad_norm_(agent.actor.parameters(), actor_clip)
        actor_optimizer.step()

    if per:
        # refresh sampled priorities from the new absolute TD error
        new_priorities = (abs(td_error1) + 1e-5).cpu().detach().squeeze(1).numpy()
        buffer.update_priorities(priority_idxs, new_priorities)
def add_args(parser):
    """Register every TD3 hyperparameter flag on *parser*.

    Flag names, types, defaults, and help strings are identical to the
    original hand-written registration; they are listed as data and added
    in one loop so the table is easy to scan.
    """
    # (flag, add_argument keyword arguments), in the original registration
    # order so the generated --help output is unchanged.
    specs = [
        ("--num_steps", dict(type=int, default=10 ** 6, help="number of training steps")),
        (
            "--transitions_per_step",
            dict(
                type=int,
                default=1,
                help="env transitions collected per training step. Defaults to 1, in which case we're training for num_steps total env steps. But when looking for replay ratios < 1, this value will need to be set higher.",
            ),
        ),
        ("--max_episode_steps", dict(type=int, default=100000, help="maximum steps per episode")),
        ("--batch_size", dict(type=int, default=256, help="training batch size")),
        ("--tau", dict(type=float, default=0.005, help="for model parameter % update")),
        ("--actor_lr", dict(type=float, default=1e-4, help="actor learning rate")),
        ("--critic_lr", dict(type=float, default=1e-3, help="critic learning rate")),
        ("--gamma", dict(type=float, default=0.99, help="gamma, the discount factor")),
        ("--sigma_final", dict(type=float, default=0.1)),
        ("--sigma_anneal", dict(type=float, default=100_000, help="How many steps to anneal sigma over.")),
        ("--theta", dict(type=float, default=0.15, help="theta for Ornstein Uhlenbeck process computation")),
        ("--sigma_start", dict(type=float, default=0.2, help="sigma for Ornstein Uhlenbeck process computation")),
        ("--eval_interval", dict(type=int, default=5000, help="how often to test the agent without exploration (in episodes)")),
        ("--eval_episodes", dict(type=int, default=10, help="how many episodes to run for when testing")),
        ("--warmup_steps", dict(type=int, default=1000, help="warmup length, in steps")),
        ("--render", dict(action="store_true")),
        ("--actor_clip", dict(type=float, default=None)),
        ("--critic_clip", dict(type=float, default=None)),
        ("--name", dict(type=str, default="td3_run")),
        ("--actor_l2", dict(type=float, default=0.0)),
        ("--critic_l2", dict(type=float, default=0.0)),
        ("--delay", dict(type=int, default=2)),
        ("--target_noise_scale", dict(type=float, default=0.2)),
        ("--save_interval", dict(type=int, default=100_000)),
        ("--c", dict(type=float, default=0.5)),
        ("--verbosity", dict(type=int, default=1)),
        ("--gradient_updates_per_step", dict(type=int, default=1)),
        ("--prioritized_replay", dict(action="store_true")),
        ("--buffer_size", dict(type=int, default=1_000_000)),
        ("--skip_save_to_disk", dict(action="store_true")),
        ("--skip_log_to_disk", dict(action="store_true")),
        ("--td_reg_coeff", dict(type=float, default=0.0)),
        ("--td_reg_coeff_decay", dict(type=float, default=0.9999)),
    ]
    for flag, kwargs in specs:
        parser.add_argument(flag, **kwargs)
| [
"tqdm.tqdm",
"copy.deepcopy",
"tensorboardX.SummaryWriter",
"torch.load",
"torch.randn",
"numpy.expand_dims",
"torch.clamp",
"torch.cuda.is_available",
"torch.no_grad",
"os.path.join"
] | [((3801, 3821), 'copy.deepcopy', 'copy.deepcopy', (['agent'], {}), '(agent)\n', (3814, 3821), False, 'import copy\n'), ((242, 267), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (265, 267), False, 'import torch\n'), ((1261, 1291), 'os.path.join', 'os.path.join', (['path', '"""actor.pt"""'], {}), "(path, 'actor.pt')\n", (1273, 1291), False, 'import os\n'), ((1315, 1347), 'os.path.join', 'os.path.join', (['path', '"""critic1.pt"""'], {}), "(path, 'critic1.pt')\n", (1327, 1347), False, 'import os\n'), ((1371, 1403), 'os.path.join', 'os.path.join', (['path', '"""critic2.pt"""'], {}), "(path, 'critic2.pt')\n", (1383, 1403), False, 'import os\n'), ((1628, 1658), 'os.path.join', 'os.path.join', (['path', '"""actor.pt"""'], {}), "(path, 'actor.pt')\n", (1640, 1658), False, 'import os\n'), ((1682, 1714), 'os.path.join', 'os.path.join', (['path', '"""critic1.pt"""'], {}), "(path, 'critic1.pt')\n", (1694, 1714), False, 'import os\n'), ((1738, 1770), 'os.path.join', 'os.path.join', (['path', '"""critic2.pt"""'], {}), "(path, 'critic2.pt')\n", (1750, 1770), False, 'import os\n'), ((3648, 3684), 'tensorboardX.SummaryWriter', 'tensorboardX.SummaryWriter', (['save_dir'], {}), '(save_dir)\n', (3674, 3684), False, 'import tensorboardX\n'), ((4752, 4773), 'tqdm.tqdm', 'tqdm.tqdm', (['steps_iter'], {}), '(steps_iter)\n', (4761, 4773), False, 'import tqdm\n'), ((8017, 8032), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8030, 8032), False, 'import torch\n'), ((8346, 8401), 'torch.clamp', 'torch.clamp', (['(target_action_s1 + target_noise)', '(-1.0)', '(1.0)'], {}), '(target_action_s1 + target_noise, -1.0, 1.0)\n', (8357, 8401), False, 'import torch\n'), ((1806, 1828), 'torch.load', 'torch.load', (['actor_path'], {}), '(actor_path)\n', (1816, 1828), False, 'import torch\n'), ((1867, 1891), 'torch.load', 'torch.load', (['critic1_path'], {}), '(critic1_path)\n', (1877, 1891), False, 'import torch\n'), ((1930, 1954), 'torch.load', 'torch.load', 
(['critic2_path'], {}), '(critic2_path)\n', (1940, 1954), False, 'import torch\n'), ((2108, 2123), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2121, 2123), False, 'import torch\n'), ((8227, 8263), 'torch.randn', 'torch.randn', (['*target_action_s1.shape'], {}), '(*target_action_s1.shape)\n', (8238, 8263), False, 'import torch\n'), ((2349, 2373), 'numpy.expand_dims', 'np.expand_dims', (['state', '(0)'], {}), '(state, 0)\n', (2363, 2373), True, 'import numpy as np\n')] |
import mxnet as mx
from mxnet.gluon.model_zoo import vision
import os
import numpy as np
import glob
import pandas as pd
#from scipy.spatial.distance import cosine
#from IPython.display import Image
from PIL import Image
from tqdm import tqdm
from flask import url_for
import cv2 as cv
from gluoncv import model_zoo, data, utils
from matplotlib import pyplot as plt
# set the context on CPU, switch to GPU if there is one available
ctx = mx.cpu()  # all mxnet evaluation below runs on the CPU context
# show full (untruncated) column contents when rendering dataframes
pd.set_option('display.max_colwidth', -1)
import glob2
from tqdm import tqdm
'''def cropNormFit(fnx):
"
accepts an mx decoded image
returns an mxnet array ready for transformation image
"
image = mx.image.imdecode(open(fnx, 'rb').read()).astype(np.float32)
resized = mx.image.resize_short(image, 224) #minimum 224x224 images
cropped, crop_info = mx.image.center_crop(resized, (224, 224))
normalized = mx.image.color_normalize(cropped/255,
mean=mx.nd.array([0.485, 0.456, 0.406]),
std=mx.nd.array([0.229, 0.224, 0.225]))
# the network expect batches of the form (N,3,224,224)
flipped_axis = normalized.transpose((2,0,1)) # Flipping from (224, 224, 3) to (3, 224, 224)
batchified = flipped_axis.expand_dims(axis=0) # change the shape from (3, 224, 224) to (1, 3, 224, 224)
return batchified
def vectorize(batchified, preloaded_model):
"
accepts a preprocessed vector
returns a numpy transformation
"
return preloaded_model.features(batchified)[0].asnumpy()
def cosineSimilarity(u, v):
similarity = np.dot(u,v) / (np.linalg.norm(u) * np.linalg.norm(v))
return float(similarity)
def get_image_sims(fn_image_to_compare, trained_model, fn_df_save):
batchified_image = cropNormFit(fn_image_to_compare)
img_vec = vectorize(batchified_image ,preloaded_model=trained_model)
df_corpus = pd.read_pickle(fn_df_save).reset_index(drop=True)
df_corpus['ref_vec'] = None
df_corpus['ref_cosim'] = None
for index in tqdm(range(df_corpus.count()[0])):
try:
cos_sim = cosineSimilarity(u = df_corpus['vector'].loc[index],
v = img_vec)
df_corpus['ref_cosim'].loc[index] = cos_sim
except:
df_corpus['ref_cosim'].loc[index] = 0
continue
return df_corpus'''
def load_model():
    """Load a pretrained DenseNet-201 from the gluon model zoo.

    Returns:
        The DenseNet-201 network with pretrained ImageNet weights.
    """
    print("we're loading densenet model: \
    https://modelzoo.co/model/densely-connected-convolutional-networks-2")
    densenetX = vision.densenet201(pretrained=True)
    # BUG FIX: the bare ``type(densenetX)`` expression was a no-op; actually
    # print the loaded model's type.
    print("we just loaded: ", type(densenetX))
    return densenetX
def load_yolo_model():
    """Load a pretrained YOLOv3 (darknet53, VOC) detector from gluoncv.

    Returns:
        The pretrained YOLOv3 detection network.
    """
    print("we're loading YOLO model: \
    https://modelzoo.co/model/yolodark2mxnet")
    net = model_zoo.get_model('yolo3_darknet53_voc', pretrained=True)
    # BUG FIX: the bare ``type(net)`` expression was a no-op; actually print
    # the loaded model's type.
    print("we just loaded: ", type(net))
    return net
def yolo_extraction(fnx, trained_yolo_model):
    """Detect the first confident 'person' in an image and save a crop of it.

    Runs the YOLO detector on the image at ``fnx``; for the first detection
    whose class is ``person`` with score > 0.8, saves the cropped region to
    ``<fnx without .jpg>_humanCrop.jpg``, overwrites ``fnx`` with the
    bounding box drawn on, and returns the crop's filename.

    Args:
        fnx: path to a .jpg image on disk.
        trained_yolo_model: a gluoncv YOLO detection network.

    Returns:
        Path of the saved crop, or ``None`` when no confident person is found.
    """
    x, img = data.transforms.presets.yolo.load_test(fnx, short=512)
    class_IDs, scores, bounding_boxs = trained_yolo_model(x)
    for index, bbox in enumerate(bounding_boxs[0]):
        class_ID = int(class_IDs[0][index].asnumpy()[0])
        class_name = trained_yolo_model.classes[class_ID]
        class_score = scores[0][index].asnumpy()
        if (class_name == 'person') & (class_score > 0.8):
            # Clamp the box to the image bounds; ``x`` is NCHW, so shape[3]
            # is the width and shape[2] the height.
            xmin, ymin, xmax, ymax = [int(x) for x in bbox.asnumpy()]
            xmin = max(0, xmin)
            xmax = min(x.shape[3], xmax)
            ymin = max(0, ymin)
            ymax = min(x.shape[2], ymax)
            im_fname_save = fnx.replace('.jpg','_humanCrop.jpg')
            plt.imsave(im_fname_save, img[ymin:ymax,xmin:xmax,:])
            img_rect = cv.rectangle(img=img, pt1=(xmin, ymin), pt2=(xmax, ymax),
                                      color=10000, thickness=10)
            plt.imsave(fnx, img_rect)
            # Only the first confident person is processed; the original
            # code had an unreachable ``break`` after this return.
            return im_fname_save
    # No confident person detected (was an implicit None before).
    return None
def cropNormFit(fnx):
    '''
    Load the image at ``fnx`` and preprocess it for an ImageNet classifier:
    resize so the short side is 224, center-crop to 224x224, normalize with
    the standard ImageNet channel mean/std, and reshape to NCHW.

    Returns an mxnet NDArray of shape (1, 3, 224, 224).
    '''
    # BUG FIX: read the bytes inside a context manager so the file handle is
    # closed (the original ``open(fnx, 'rb').read()`` leaked it).
    with open(fnx, 'rb') as fh:
        raw = fh.read()
    image = mx.image.imdecode(raw).astype(np.float32)
    resized = mx.image.resize_short(image, 224)  # minimum 224x224 images
    cropped, crop_info = mx.image.center_crop(resized, (224, 224))
    # scale pixels to [0, 1], then apply the ImageNet normalization constants
    normalized = mx.image.color_normalize(cropped/255,
                                          mean=mx.nd.array([0.485, 0.456, 0.406]),
                                          std=mx.nd.array([0.229, 0.224, 0.225]))
    # the network expects batches of the form (N, 3, 224, 224)
    flipped_axis = normalized.transpose((2,0,1))  # (224, 224, 3) -> (3, 224, 224)
    batchified = flipped_axis.expand_dims(axis=0)  # (3, 224, 224) -> (1, 3, 224, 224)
    return batchified
def vectorize(batchified, preloaded_model):
    '''
    Run ``batchified`` (a preprocessed (1, 3, 224, 224) batch) through the
    feature extractor of ``preloaded_model`` and return the single feature
    vector as a numpy array.
    '''
    return preloaded_model.features(batchified)[0].asnumpy()
def cosineSimilarity(u, v):
    """Return the cosine similarity between vectors ``u`` and ``v``.

    Args:
        u, v: 1-D array-likes of equal length.

    Returns:
        A float in [-1, 1]. Returns 0.0 when either vector has zero
        magnitude (the original implementation divided by zero there).
    """
    u = np.asarray(u, dtype=float)
    v = np.asarray(v, dtype=float)
    denom = np.linalg.norm(u) * np.linalg.norm(v)
    if denom == 0.0:
        # a zero vector has no direction; define its similarity as 0
        return 0.0
    return float(np.dot(u, v) / denom)
def get_image_sims(fn_image_to_compare, trained_model, trained_yolo_model, fn_df_save):
    '''
    Rank a stored image corpus by visual similarity to an uploaded image.

    Pipeline: crop the first confident person out of ``fn_image_to_compare``
    with YOLO, vectorize the crop with ``trained_model``, then compute the
    cosine similarity of that vector against every precomputed vector in the
    HDF corpus at ``fn_df_save`` (expects a 'vector' column; key='df').

    Returns the corpus dataframe with a new 'cosim' column, sorted
    most-similar first.
    '''
    print('helper fnx pre_yolo: ', fn_image_to_compare)
    # NOTE(review): yolo_extraction returns None when no person is detected,
    # which would make cropNormFit fail — confirm callers guard against this.
    yolo_image = yolo_extraction(fn_image_to_compare, trained_yolo_model)
    print('helper fnx post_yolo: ', yolo_image)
    batchified_image = cropNormFit(yolo_image)
    img_vec = vectorize(batchified_image ,preloaded_model=trained_model)
    df_corpus = pd.read_hdf(fn_df_save, key='df').reset_index(drop=True)
    df_corpus['cosim'] = df_corpus['vector'].apply(lambda x: cosineSimilarity(x, img_vec))
    df_corpus = df_corpus.sort_values('cosim', ascending=False).reset_index(drop=True)
    return df_corpus
def createResultsHTML(df_html, upload_image, result_one, fn_to_export_template):
    '''
    Build the results page and save it as a .html file at fn_to_export_template.

    Input: dataframe of similarities, the full path of the uploaded image,
    and the relative /templates .html results page. Must have a ['cosim'] col
    Output: Saves a .html file in the /templates folder

    NOTE(review): upload_image and result_one are currently unused here; the
    template's {{ img_* }} placeholders appear to be filled in later
    (presumably by a Jinja/Flask render step) -- confirm against the caller.
    '''
    # NOTE(review): df_html_final is built but never inserted into html_string
    # below -- it was presumably meant to replace the 'YYY' placeholder in the
    # "Perfect Matches" container; confirm intent before removing.
    df_html_final = df_html.to_html().replace('<table border="1" class="dataframe">',
    '''
    <head><link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css"></head>
    <table border="1" class="dataframe">''')
    html_string = '''
    <!DOCTYPE html>
    <html lang="en">
    <head>
    <title>CCR Tweets</title>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">
    <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>
    <style>
    img {
        width: 100%;
        height: auto;
        max-width:500px;
        max-height:1000px;
    }
    </style>
    </head>
    <body>
    <div class="container">
      <h2>Your Upload: </h2>
      <img src= "{{ img_org }}" alt="Your Upload" >
    </div>
    <div class="container">
      <h2>Top Three: </h2>
      <img src= "{{ img_res1 }}" alt="Your Result 1" width="500" height="600">
      <img src= "{{ img_res2 }}" alt="Your Result 2" width="500" height="600">
      <img src= "{{ img_res3 }}" alt="Your Result 1" width="500" height="600">
    </div>
    <div class="container">
      <h2>Perfect Matches: </h2>
      YYY
    </div>
    </body>
    </html>
    '''.encode('utf-8', errors='replace').decode('utf-8', errors='replace')
    # NOTE(review): the encode/decode round-trip above is effectively a no-op
    # for a valid str; it only scrubs unencodable surrogate characters.
    print('Helper Saving: ',fn_to_export_template)
    with open(fn_to_export_template, "w") as f:
        f.write(html_string)
| [
"gluoncv.model_zoo.get_model",
"pandas.read_hdf",
"mxnet.image.center_crop",
"mxnet.gluon.model_zoo.vision.densenet201",
"mxnet.image.resize_short",
"matplotlib.pyplot.imsave",
"mxnet.cpu",
"cv2.rectangle",
"mxnet.nd.array",
"numpy.dot",
"numpy.linalg.norm",
"pandas.set_option",
"gluoncv.dat... | [((444, 452), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (450, 452), True, 'import mxnet as mx\n'), ((453, 494), 'pandas.set_option', 'pd.set_option', (['"""display.max_colwidth"""', '(-1)'], {}), "('display.max_colwidth', -1)\n", (466, 494), True, 'import pandas as pd\n'), ((2543, 2578), 'mxnet.gluon.model_zoo.vision.densenet201', 'vision.densenet201', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2561, 2578), False, 'from mxnet.gluon.model_zoo import vision\n'), ((2774, 2833), 'gluoncv.model_zoo.get_model', 'model_zoo.get_model', (['"""yolo3_darknet53_voc"""'], {'pretrained': '(True)'}), "('yolo3_darknet53_voc', pretrained=True)\n", (2793, 2833), False, 'from gluoncv import model_zoo, data, utils\n'), ((2958, 3012), 'gluoncv.data.transforms.presets.yolo.load_test', 'data.transforms.presets.yolo.load_test', (['fnx'], {'short': '(512)'}), '(fnx, short=512)\n', (2996, 3012), False, 'from gluoncv import model_zoo, data, utils\n'), ((4382, 4415), 'mxnet.image.resize_short', 'mx.image.resize_short', (['image', '(224)'], {}), '(image, 224)\n', (4403, 4415), True, 'import mxnet as mx\n'), ((4465, 4506), 'mxnet.image.center_crop', 'mx.image.center_crop', (['resized', '(224, 224)'], {}), '(resized, (224, 224))\n', (4485, 4506), True, 'import mxnet as mx\n'), ((5251, 5263), 'numpy.dot', 'np.dot', (['u', 'v'], {}), '(u, v)\n', (5257, 5263), True, 'import numpy as np\n'), ((3862, 3917), 'matplotlib.pyplot.imsave', 'plt.imsave', (['im_fname_save', 'img[ymin:ymax, xmin:xmax, :]'], {}), '(im_fname_save, img[ymin:ymax, xmin:xmax, :])\n', (3872, 3917), True, 'from matplotlib import pyplot as plt\n'), ((3939, 4027), 'cv2.rectangle', 'cv.rectangle', ([], {'img': 'img', 'pt1': '(xmin, ymin)', 'pt2': '(xmax, ymax)', 'color': '(10000)', 'thickness': '(10)'}), '(img=img, pt1=(xmin, ymin), pt2=(xmax, ymax), color=10000,\n thickness=10)\n', (3951, 4027), True, 'import cv2 as cv\n'), ((4072, 4097), 'matplotlib.pyplot.imsave', 'plt.imsave', (['fnx', 'img_rect'], 
{}), '(fnx, img_rect)\n', (4082, 4097), True, 'from matplotlib import pyplot as plt\n'), ((4609, 4643), 'mxnet.nd.array', 'mx.nd.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (4620, 4643), True, 'import mxnet as mx\n'), ((4691, 4725), 'mxnet.nd.array', 'mx.nd.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (4702, 4725), True, 'import mxnet as mx\n'), ((5266, 5283), 'numpy.linalg.norm', 'np.linalg.norm', (['u'], {}), '(u)\n', (5280, 5283), True, 'import numpy as np\n'), ((5286, 5303), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (5300, 5303), True, 'import numpy as np\n'), ((5735, 5768), 'pandas.read_hdf', 'pd.read_hdf', (['fn_df_save'], {'key': '"""df"""'}), "(fn_df_save, key='df')\n", (5746, 5768), True, 'import pandas as pd\n')] |
"""
Defining a synthetic lesion
===============================
"""
# # Defining a Lesion
# Conducting a lesion analysis in ConWhAt is extremely simple. All that is needed is a binary `.nii` format lesion mask, with ones indicating lesioned tissue, and zeros elsewhere.
#
#
# >(Note: we terms like 'lesion' and 'damage' throughout most of this documentation, as that is the most natural primary context for ConWhAt analyses. Remember however that all we are doing at the end of the day is doing a set of look-up operations between a list of standard space coordinates on the one hand (as defined by non-zero values in a `.nii` image), and the spatial locations of each 'connectome edge' - i.e. each entry in our anatomical connectivity matrix. One can envisave many alternative interpretations/applications of this procedure; for example to map the connectivity effects of magnetic field or current distributions from nonivasive brain stimulation). Still, for concreteness and simplicity, we stick with 'lesion', 'damage', etc. for the most part. )
#
#
# A common way to obtain a lesion map is to from a patient's T1-weighted MR image. Although this can be done manually, it is strongly recommended to use an automated lesion segmentation tools, followed by manual editing.
#
# An alternative way is simply to define a lesion location using standard space coordinates, and build a 'lesion' mask *de-novo*. This is what we do in the following example. On the next page we do a ConWhAt connectome-based decomposition analysis on this 'synthetic' lesion mask.
#
# ---
# sphinx_gallery_thumbnail_number = 3
###################################################################################################
# Setup
# ---------------------
# ConWhAt stuff
from conwhat import VolConnAtlas,StreamConnAtlas,VolTractAtlas,StreamTractAtlas
from conwhat.viz.volume import plot_vol_and_rois_nilearn
# Neuroimaging stuff
import nibabel as nib
from nilearn.plotting import plot_roi
from nilearn.datasets import load_mni152_template
from nipy.labs.spatial_models.mroi import subdomain_from_balls
from nipy.labs.spatial_models.discrete_domain import grid_domain_from_image
# Viz stuff
from matplotlib import pyplot as plt
# Generic stuff
import os,sys,glob,numpy as np
# (For docs only: suppress warnings)
import warnings
# Silence third-party warnings so the example output stays readable.
warnings.filterwarnings("ignore")
###################################################################################################
# Define some variables
# Locate the standard space template image
t1_mni_url = 'https://github.com/Washington-University/HCPpipelines/raw/master/global/templates/MNI152_T1_1mm_brain.nii.gz'
# NOTE(review): requires the external `wget` binary on PATH and ignores its
# exit status; urllib.request.urlretrieve would be a pure-Python alternative.
os.system('wget ' + t1_mni_url)
# The template's basename is reused as the local filename written by wget.
t1_mni_file = t1_mni_url.split('/')[-1]
t1_mni_img = nib.load(t1_mni_file)
# This is the output we will save to file and use in the next example
lesion_file = 'synthetic_lesion_20mm_sphere_-46_-60_6.nii.gz'
# Define the 'synthetic lesion' location and size using standard (MNI) space coordinates
com = [-46,-60,6] # com = centre of mass (MNI x, y, z)
rad = 20 # radius -- presumably in mm, per the output filename; confirm units
###################################################################################################
# Create the ROI
# Build a discrete domain over the template grid, then carve a spherical
# ROI (ball at `com`, radius `rad`) out of it and render it as an image.
domain = grid_domain_from_image(t1_mni_img)
lesion_img = subdomain_from_balls(domain,np.array([com]), np.array([rad])).to_image()
# Plot on brain slices
disp = plot_roi(lesion_img,bg_img=t1_mni_img,black_bg=False);
# Save to file
lesion_img.to_filename(lesion_file)
# ...now we move on to doing a lesion analysis with this file.
| [
"nibabel.load",
"warnings.filterwarnings",
"nipy.labs.spatial_models.discrete_domain.grid_domain_from_image",
"os.system",
"numpy.array",
"nilearn.plotting.plot_roi"
] | [((2326, 2359), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (2349, 2359), False, 'import warnings\n'), ((2655, 2686), 'os.system', 'os.system', (["('wget ' + t1_mni_url)"], {}), "('wget ' + t1_mni_url)\n", (2664, 2686), False, 'import os, sys, glob, numpy as np\n'), ((2740, 2761), 'nibabel.load', 'nib.load', (['t1_mni_file'], {}), '(t1_mni_file)\n', (2748, 2761), True, 'import nibabel as nib\n'), ((3185, 3219), 'nipy.labs.spatial_models.discrete_domain.grid_domain_from_image', 'grid_domain_from_image', (['t1_mni_img'], {}), '(t1_mni_img)\n', (3207, 3219), False, 'from nipy.labs.spatial_models.discrete_domain import grid_domain_from_image\n'), ((3337, 3392), 'nilearn.plotting.plot_roi', 'plot_roi', (['lesion_img'], {'bg_img': 't1_mni_img', 'black_bg': '(False)'}), '(lesion_img, bg_img=t1_mni_img, black_bg=False)\n', (3345, 3392), False, 'from nilearn.plotting import plot_roi\n'), ((3261, 3276), 'numpy.array', 'np.array', (['[com]'], {}), '([com])\n', (3269, 3276), True, 'import os, sys, glob, numpy as np\n'), ((3278, 3293), 'numpy.array', 'np.array', (['[rad]'], {}), '([rad])\n', (3286, 3293), True, 'import os, sys, glob, numpy as np\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#
from tqdm import tqdm
import numpy as np
import traceback
import sys
# Chromatic pitch-class names, indexed by semitone offset 0..11 from C.
PITCH_CLASS_NAMES = [
    'C', 'C#', 'D', 'Eb', 'E', 'F', 'F#', 'G', 'Ab', 'A', 'Bb', 'B']
# Sub-positions per beat: `pos // POS_RESOLUTION` yields the beat index
# (see the evaluation loop below).
POS_RESOLUTION = 4
# Chord-root name -> semitone pitch class (0..11).
ROOT_pitch = {
    'C': 0,
    'C#': 1,
    'D': 2,
    'Eb': 3,
    'E': 4,
    'F': 5,
    'F#': 6,
    'G': 7,
    'Ab': 8,
    'A': 9,
    'Bb': 10,
    'B': 11
}
# Chord kind -> semitone offsets of its chord tones relative to the root.
_CHORD_KIND_PITCHES = {
    '': [0, 4, 7],
    'm': [0, 3, 7],
    '+': [0, 4, 8],
    'dim': [0, 3, 6],
    '7': [0, 4, 7, 10],
    'maj7': [0, 4, 7, 11],
    'm7': [0, 3, 7, 10],
    'm7b5': [0, 3, 6, 10],
}
def get_tonality(e):
    """Guess whether the note list *e* is in a major or a minor key.

    Builds a duration-weighted pitch-class histogram over the non-drum
    notes (pitch < 128) and compares the mass on C (major tonic) with the
    mass on A (relative-minor tonic).  Returns True for major, False for
    minor, or None on a tie.
    """
    def _pitch_class_histogram(notes, use_duration=True, normalize=True):
        weights = np.ones(len(notes))
        if use_duration:
            weights *= [note[3] for note in notes]  # weight each note by its duration
        histogram, _ = np.histogram([note[2] % 12 for note in notes],
                                     bins=np.arange(13),
                                     weights=weights, density=normalize)
        if normalize:
            # Guard against dividing by zero when the histogram is empty.
            histogram /= (histogram.sum() + (histogram.sum() == 0))
        return histogram

    melodic_notes = [note for note in e if note[2] < 128]
    hist = _pitch_class_histogram(melodic_notes)
    c_mass = hist[0]   # pitch class 'C' (chromatic index 0)
    a_mass = hist[9]   # pitch class 'A' (chromatic index 9)
    if c_mass > a_mass:
        return True
    if c_mass < a_mass:
        return False
    return None
def fix(items):
    """Keep only tokens that follow the repeating Bar/Pos/Pitch/Dur pattern.

    Walks *items* expecting token prefixes to cycle through
    Bar -> Pos -> Pitch -> Dur; any token that is out of order is dropped
    and the cursor stays on the prefix it is still waiting for.
    """
    expected = ('Bar', 'Pos', 'Pitch', 'Dur')
    kept = []
    cursor = 0
    for token in items:
        if token.split('_')[0] == expected[cursor]:
            kept.append(token)
            cursor = (cursor + 1) % len(expected)
    return kept
def get_value(s):
    """Return the value field of a ``Name_value`` token (second '_'-separated part)."""
    fields = s.split('_')
    return fields[1]
def get_pitch(chord):
    """Return the pitch classes (0-11) of a ``root:kind`` chord token.

    Returns None when the token cannot be split into exactly ``root`` and
    ``kind`` (e.g. the 'N' no-chord token).  An unknown root or kind still
    raises KeyError, as before.
    """
    try:
        root, kind = chord.split(':')
    except (AttributeError, ValueError):
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit): ValueError covers a malformed
        # token, AttributeError a non-string input.
        return None
    return [(ROOT_pitch[root] + offset) % 12 for offset in _CHORD_KIND_PITCHES[kind]]
if __name__ == '__main__':
    # Accumulators for the four reported metrics:
    #   TA tonality accuracy, CA chord accuracy, RA rhythm/beat accuracy,
    #   AA structure (pause/cadence) accuracy.
    # NOTE(review): chord_sum_2 / chord_num_2 (and max_pos / note_items
    # below) are accumulated or initialised but never used.
    all_num = 0
    ok_num = 0
    note_num = 0
    beat_num = 0
    chord_num = 0
    struct_num = 0
    struct_num_2 = 0
    struct_num_3 = 0
    pause1_num = 0
    pause2_num = 0
    pause3_num = 0
    tonality_num = 0
    tonality_sum = 0
    chord_sum_2 = 0
    chord_num_2 = 0
    # Exactly one CLI argument: the directory containing test.hyp.txt /
    # test.src.txt (hypotheses and aligned source conditions).
    assert len(sys.argv) == 1 + 1
    prefix = sys.argv[1]
    with open(f'{prefix}/test.hyp.txt', 'r') as h, open(f'{prefix}/test.src.txt', 'r') as s:
        for hyp_str, src_str in tqdm(list(zip(h, s))):
            try:
                all_num += 1
                # Hypothesis: token stream -> [bar, pos, pitch, dur] quadruples.
                hyp = hyp_str.strip().split()
                hyp = fix(hyp)
                hyp = [[int(get_value(hyp[j])) for j in range(i, i+4)]
                       for i in range(0, len(hyp) // 4 * 4, 4)]
                src = src_str.strip().split()
                # Tonality accuracy: predicted major/minor vs the leading
                # MAJ/MIN flag of the source sequence.
                is_major = get_tonality(hyp)
                if is_major is not None:
                    tonality_sum += 1
                    if is_major == (src[0] == 'MAJ'):
                        tonality_num += 1
                # Source (after the key flag): [chord, struct, beat] triples.
                src = src[1:]
                src = [[get_value(src[i]), src[i+1], int(get_value(src[i+2]))]
                       for i in range(0, len(src), 3)]
                max_pos = 0
                note_items = []
                for idx in range(min(len(hyp), len(src))):
                    hyp_item = hyp[idx]
                    src_item = src[idx]
                    note_num += 1
                    bar, pos, pitch, dur = hyp_item
                    chord, struct, beat = src_item
                    # Rhythm accuracy: the note's beat matches the target beat.
                    if pos // POS_RESOLUTION == beat:
                        beat_num += 1
                    # Chord accuracy: pitch class is a chord tone (or no chord given).
                    cur_pitch = get_pitch(chord)
                    if cur_pitch is None or pitch % 12 in cur_pitch:
                        chord_num += 1
                    if idx != len(hyp) - 1:
                        # Structure accuracy: check the gap to the next note
                        # against the expected cadence type (HALF / AUT / NOT).
                        if struct == 'HALF':
                            pause1_num += 1
                        elif struct == 'AUT':
                            pause2_num += 1
                        else:
                            pause3_num += 1
                        next_item = hyp[idx + 1]
                        # Absolute positions assume 4 beats per bar.
                        cur_pos = 4 * POS_RESOLUTION * bar + pos
                        next_pos = 4 * POS_RESOLUTION * \
                            next_item[0] + next_item[1]
                        if next_pos - cur_pos >= POS_RESOLUTION * 1.5 and struct == 'HALF' and dur >= POS_RESOLUTION:
                            struct_num += 1
                        if next_pos - cur_pos >= POS_RESOLUTION * 2.0 and struct == 'AUT' and dur >= POS_RESOLUTION:
                            struct_num_2 += 1
                        if struct == 'NOT':
                            if next_pos - cur_pos < POS_RESOLUTION * 2.0 or dur < POS_RESOLUTION:
                                struct_num_3 += 1
                ok_num += 1
            except:
                # NOTE(review): bare except silently skips any malformed
                # sample line; consider logging the failure count.
                continue
    # NOTE(review): these divisions raise ZeroDivisionError if no valid
    # samples were processed (e.g. empty or fully malformed inputs).
    print('TA:', round(tonality_num/tonality_sum, 5))
    print('CA:', round(chord_num/note_num, 5))
    print('RA:', round(beat_num / note_num, 5))
    print('AA:', round((struct_num+struct_num_2+struct_num_3) /
                       (pause1_num + pause2_num+pause3_num), 5))
| [
"numpy.arange"
] | [((998, 1011), 'numpy.arange', 'np.arange', (['(13)'], {}), '(13)\n', (1007, 1011), True, 'import numpy as np\n')] |
import numpy as np
from PIL import Image
import os,glob
'''
READ DATA
'''
def fwt97(s, width=255, height=255):
    ''' Forward Cohen-Daubechies-Feauveau 9/7 wavelet transform, computed
    via lifting down every column of the 2D signal s (modified in place
    and returned).

    After the final de-interleave/transpose step the lowpass coefficients
    occupy the first half of the rows and the highpass coefficients the
    second half.

    NOTE(review): the de-interleave indexing assumes a square region
    (width == height); non-square calls can raise IndexError. '''
    # Lifting coefficients of the CDF 9/7 filter bank:
    predict1 = -1.586134342
    update1 = -0.05298011854
    predict2 = 0.8829110762
    update2 = 0.4435068522
    # Scaling constants for the low/high subbands:
    scale_low = 0.81289306611596146  # 1/1.230174104914
    scale_high = 0.61508705245700002  # 1.230174104914/2
    for c in range(width):  # 1D lifting transform on each column
        # Predict step 1: odd rows from their even neighbours.
        for r in range(1, height-1, 2):
            s[r][c] += predict1 * (s[r-1][c] + s[r+1][c])
        s[height-1][c] += 2 * predict1 * s[height-2][c]  # symmetric extension
        # Update step 1: even rows from their odd neighbours.
        for r in range(2, height, 2):
            s[r][c] += update1 * (s[r-1][c] + s[r+1][c])
        s[0][c] += 2 * update1 * s[1][c]  # symmetric extension
        # Predict step 2.
        for r in range(1, height-1, 2):
            s[r][c] += predict2 * (s[r-1][c] + s[r+1][c])
        s[height-1][c] += 2 * predict2 * s[height-2][c]
        # Update step 2.
        for r in range(2, height, 2):
            s[r][c] += update2 * (s[r-1][c] + s[r+1][c])
        s[0][c] += 2 * update2 * s[1][c]
    # De-interleave: scale and transpose, gathering even (lowpass) rows
    # into the first half and odd (highpass) rows into the second half.
    temp_bank = np.empty((height,width))
    print(s.shape)
    for r in range(height):
        for c in range(width):
            if r % 2 == 0:  # even row -> lowpass, scaled by scale_low
                temp_bank[c][r//2] = scale_low * s[r][c]
            else:           # odd row -> highpass, scaled by scale_high
                temp_bank[c][r//2 + height//2] = scale_high * s[r][c]
    # Write temp_bank back into s.
    for r in range(width):
        for c in range(height):
            s[r][c] = temp_bank[r][c]
    return s
def fwt97_simple(s, width=255, height=255):
    ''' Forward Cohen-Daubechies-Feauveau 9/7 wavelet transform (lifting
    implementation) -- see ``fwt97`` for the full description.

    This body was a byte-for-byte duplicate of ``fwt97``; it now simply
    delegates to it, keeping the public name (and defaults) for existing
    callers while removing the duplicated implementation. '''
    return fwt97(s, width, height)
def makeWT2(img, d=224):
    """Apply one 9/7 wavelet pass to *img* (width d//2, height d) and
    return the (lowpass, highpass) halves as left/right column blocks."""
    transformed = fwt97(img, d//2, d)  # args are (signal, width, height)
    lowpass = transformed[:, :d//2]
    highpass = transformed[:, d//2:]
    return lowpass, highpass
def makeWT97(path0, d=224):
    """Load the image at *path0*, run the 9/7 wavelet transform on its
    pixels reshaped to d columns, and return the rotated lowpass half."""
    im = Image.open(path0)
    # np.float (an alias for the builtin float) was removed in NumPy 1.24,
    # where the old .astype(np.float) raised AttributeError; use float.
    img = (np.array(im).reshape((-1, d))).astype(float)
    out = fwt97(img, d, d)
    out = out[:, :d//2]
    return np.rot90(out, 3)
# return out | [
"numpy.empty",
"numpy.rot90",
"numpy.array",
"PIL.Image.open"
] | [((1705, 1730), 'numpy.empty', 'np.empty', (['(height, width)'], {}), '((height, width))\n', (1713, 1730), True, 'import numpy as np\n'), ((3964, 3989), 'numpy.empty', 'np.empty', (['(height, width)'], {}), '((height, width))\n', (3972, 3989), True, 'import numpy as np\n'), ((4872, 4889), 'PIL.Image.open', 'Image.open', (['path0'], {}), '(path0)\n', (4882, 4889), False, 'from PIL import Image\n'), ((5004, 5020), 'numpy.rot90', 'np.rot90', (['out', '(3)'], {}), '(out, 3)\n', (5012, 5020), True, 'import numpy as np\n'), ((4901, 4913), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (4909, 4913), True, 'import numpy as np\n')] |
#!/export/b18/ssadhu/tools/python/bin/python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 23 14:46:38 2018
@author: <NAME>
"""
'Prepare data, train MLP and do cross validation using batch loading'
import argparse
import numpy as np
import pickle
import torch
import torch.utils.data
from torch import nn
from torch.autograd import Variable
from os import listdir
import os
from os.path import isfile, join
import sys
def get_megbatch(train_files, data_dim, meg_batch_num, outdir):
    """Assemble one 'mega batch' from pickled feature dicts and save it.

    Loads every file in *train_files*, concatenates the per-utterance
    feature matrices and label vectors, and writes them to *outdir* as
    ``data_mbatch_<k>.npy`` and ``labels_mbatch_<k>.npy``.
    """
    print('%s: Getting mega match number %d with %d files...' % (sys.argv[0],meg_batch_num,len(train_files)))
    # Collect chunks and concatenate once at the end: the old
    # np.append-per-file loop reallocated the whole array every iteration
    # (quadratic time), and open(...) without closing leaked handles.
    data_chunks = []
    label_chunks = []
    for dat_set in train_files:
        with open(dat_set, 'rb') as fid:
            data_dict = pickle.load(fid)
        data, labels = dict_2_data(data_dict, data_dim)
        data_chunks.append(data)
        label_chunks.append(labels)
    if data_chunks:
        train_data = np.concatenate(data_chunks, axis=0)
        train_labels = np.concatenate(label_chunks)
    else:
        train_data = np.empty((0, data_dim))
        train_labels = np.array([])
    print('%s: Megabatch %d compiled!' % (sys.argv[0],meg_batch_num))
    np.save(join(outdir,'data_mbatch_'+str(meg_batch_num)+'.npy'),train_data)
    np.save(join(outdir,'labels_mbatch_'+str(meg_batch_num)+'.npy'),train_labels)
    sys.stdout.flush()
def get_data_dim(data_dict):
    """Return the feature dimensionality: the column count of the first
    utterance's matrix minus the trailing label column."""
    first_key = list(data_dict)[0]
    sample = data_dict[first_key]
    return sample.shape[1] - 1
def dict_2_data(data_dict, data_dim):
    """Flatten a dict of per-utterance matrices into one dataset.

    Each value of *data_dict* is a 2-D array whose last column holds the
    frame labels and whose first *data_dim* columns hold features.

    Returns:
        (data, labels): float64 arrays of shape (N, data_dim) and (N,).
    """
    if not data_dict:
        # The old code crashed on an empty dict (it sliced a 1-D np.empty
        # seed row with two indices); return well-shaped empty arrays.
        return np.empty((0, data_dim)), np.array([])
    feature_chunks = [mat[:, 0:-1] for mat in data_dict.values()]
    label_chunks = [mat[:, -1] for mat in data_dict.values()]
    # Concatenate once: the old per-utterance np.vstack/np.append loop was
    # quadratic in the total frame count and seeded the stack with an
    # uninitialized row that then had to be stripped off.
    # astype(float64) keeps the old output dtype (the float64 seeds used
    # to promote integer inputs).
    data = np.concatenate(feature_chunks, axis=0).astype(np.float64, copy=False)
    labels = np.concatenate(label_chunks).astype(np.float64, copy=False)
    return data, labels
def tidyData(data_dir):
    """Load all train/test ``.npy`` shards under *data_dir* into four arrays.

    Files whose names contain 'test' (resp. 'train') are treated as test
    (resp. train) shards; each shard's last column holds the labels.

    Returns:
        (train_data, train_labels, test_data, test_labels)
    """
    print('%s: Checking for train and test data...' % sys.argv[0])
    allfiles = [f for f in listdir(data_dir) if isfile(join(data_dir, f))]
    print('%s: In total %d train and test data files found..' % (sys.argv[0],len(allfiles)))
    # NOTE(review): substring matching means a name containing both
    # 'train' and 'test' lands in both splits, exactly as before.
    test_files = [f for f in allfiles if 'test' in f]
    train_files = [f for f in allfiles if 'train' in f]
    # Determine the feature dimension from the first training shard.
    data = np.load(os.path.join(data_dir, train_files[0]))
    data_dim = data.shape[1] - 1

    def _load_split(files, banner):
        # Accumulate chunks and concatenate once, replacing the old
        # quadratic np.append-in-a-loop pattern.
        print(banner % sys.argv[0])
        feats, labels = [], []
        for fname in files:
            shard = np.load(os.path.join(data_dir, fname))
            feats.append(shard[:, 0:-1])
            labels.append(shard[:, -1])
        if feats:
            return np.concatenate(feats, axis=0), np.concatenate(labels)
        return np.empty((0, data_dim)), np.array([])

    train_data, train_labels = _load_split(train_files, '%s: Loading training files...')
    test_data, test_labels = _load_split(test_files, '%s: Loading test files...')
    return train_data, train_labels, test_data, test_labels
def get_sample_meanvar(train_files):
    """Compute the per-dimension mean and (population) variance over all
    training frames, streaming one pickled file at a time.

    Two passes over *train_files*: first an incrementally re-weighted
    running mean, then an accumulated sum of squared deviations divided
    by the total frame count.

    NOTE(review): the pickle files are opened without being closed and
    the whole list is read twice -- acceptable for a batch job, but worth
    tidying if this becomes a hot path.
    """
    size_acc=0
    data_dict=pickle.load(open(train_files[0],'rb'))
    data_dim=get_data_dim(data_dict)
    data,labels=dict_2_data(data_dict,data_dim)
    size_acc+=np.shape(data)[0]
    mean_acc=data.mean(axis=0)
    print('%s: Getting mean of training samples...' % sys.argv[0])
    for ind, file in enumerate(train_files):
        if ind==0:
            # First file already consumed to seed the running mean above.
            continue;
        data_dict=pickle.load(open(file,'rb'))
        data,labels=dict_2_data(data_dict,data_dim)
        size_now=np.shape(data)[0]
        mean_now=data.mean(axis=0)
        # Weighted combination of the new file's mean with the running mean.
        mean_acc=(mean_now*size_now+mean_acc*size_acc)/(size_now+size_acc)
        size_acc+=size_now
    # Second pass: accumulate squared deviations from the final mean.
    size_acc=0
    data_dict=pickle.load(open(train_files[0],'rb'))
    data,labels=dict_2_data(data_dict,data_dim)
    size_acc+=np.shape(data)[0]
    var_acc=np.sum(np.square(data-mean_acc),axis=0)
    print('%s: Getting variance of training samples...' % sys.argv[0])
    for ind,file in enumerate(train_files):
        if ind==0:
            continue;
        data_dict=pickle.load(open(file,'rb'))
        data,labels=dict_2_data(data_dict,data_dim)
        size_now=np.shape(data)[0]
        size_acc+=size_now
        var_acc+=np.sum(np.square(data-mean_acc),axis=0)
    # Population variance: divide by the total frame count (not N - 1).
    var_acc=var_acc/size_acc;
    return mean_acc, var_acc
def error_rate(model, features, labels, loss_fn):
    """Evaluate *model* on one labelled batch.

    Returns (loss, error_rate) as Python floats.  Uses ``.item()`` in
    place of the legacy ``.data[0]`` indexing, which raises on the 0-dim
    tensors returned by modern PyTorch.
    """
    outputs = model(features)
    loss_test = loss_fn(outputs, labels)
    _, predicted = torch.max(outputs, dim=1)
    hits = (labels == predicted).float().sum()
    return loss_test.item(), (1 - hits / labels.size(0)).item()
def error_rate_2(model, d_data, d_labels, r_data, r_labels, loss_fn):
    """Evaluate *model* on two labelled sets at once (dev and result/test).

    Returns (dev_loss, dev_error_rate, result_loss, result_error_rate)
    as Python floats.  Consistent with ``error_rate``, this uses
    ``.item()`` instead of the legacy ``.data[0]`` indexing that raises
    on modern PyTorch's 0-dim tensors.
    """
    d_out = model(d_data); r_out = model(r_data)
    d_loss = loss_fn(d_out, d_labels); r_loss = loss_fn(r_out, r_labels)
    _, d_pred = torch.max(d_out, dim=1); _, r_pred = torch.max(r_out, dim=1)
    d_hits = (d_labels == d_pred).float().sum(); r_hits = (r_labels == r_pred).float().sum()
    return (d_loss.item(), (1 - d_hits / d_labels.size(0)).item(),
            r_loss.item(), (1 - r_hits / r_labels.size(0)).item())
def run(train_files,test_data,test_labels,result_data,result_labels,args):
    """Build, train, cross-validate and save the MLP.

    Constructs an MLP from *args* (nlayers/nunits/activation, with an
    optional narrow 'kink' first layer), trains on the pre-compiled mega
    batches under ``<data_directory>/mega_batches``, monitors the error
    on (test_data, test_labels), reports final performance on
    (result_data, result_labels) and pickles the model to args.outmodel.

    NOTE(review): written against the legacy PyTorch Variable API; the
    ``.data[0]`` scalar reads below fail on modern 0-dim tensors.
    """
    # Optional global mean/variance normalisation statistics (precomputed).
    if args.mvnorm:
        mean=np.load(join(args.data_directory,'data_mean.npy'))
        var=np.load(join(args.data_directory,'data_var.npy'))
    if args.activation=='sigmoid':
        activ=nn.Sigmoid()
    elif args.activation=='tanh':
        activ=nn.Tanh()
    elif args.activation=='relu':
        activ=nn.ReLU()
    else:
        raise ValueError('Activation function not found!')
    # Check the data dimension
    data_dict=pickle.load(open(train_files[0],'rb'))
    data_dim=get_data_dim(data_dict)
    if args.mvnorm:
        test_data -= mean
        test_data /= np.sqrt(var)
    if args.mvnorm:
        result_data -= mean
        result_data /= np.sqrt(var)
    # Build the MLP.
    targetdim=args.ntargets
    print('%s: Building the MLP...' % sys.argv[0])
    sys.stdout.flush()
    # With kink_dim set, the first hidden layer is kink_dim wide (useful
    # for plotting filters); otherwise all hidden layers are nunits wide.
    if args.kink_dim:
        structure = [nn.Linear(data_dim, args.kink_dim), activ]
        for i in range(args.nlayers - 1):
            if i==0:
                structure += [nn.Linear(args.kink_dim, args.nunits), activ]
            else:
                structure += [nn.Linear(args.nunits, args.nunits), activ]
        structure += [nn.Linear(args.nunits, targetdim)]
        model = nn.Sequential(*structure)
    else:
        structure = [nn.Linear(data_dim, args.nunits), activ]
        for i in range(args.nlayers - 1):
            structure += [nn.Linear(args.nunits, args.nunits), activ]
        structure += [nn.Linear(args.nunits, targetdim)]
        model = nn.Sequential(*structure)
    if args.gpu is not None:
        with torch.cuda.device(args.gpu):
            model.cuda(args.gpu)
    print('%s: Defining Loss Function...' % sys.argv[0])
    sys.stdout.flush()
    # Loss function.
    loss_fn = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lrate,
                                 weight_decay=args.weight_decay)
    test_data, test_labels = torch.from_numpy(test_data).float(), \
        torch.from_numpy(test_labels).long()
    result_data, result_labels = torch.from_numpy(result_data).float(), \
        torch.from_numpy(result_labels).long()
    #v_train_data, v_train_labels = Variable(train_data), Variable(train_labels)
    if args.gpu is not None:
        with torch.cuda.device(args.gpu):
            v_test_data, v_test_labels = Variable(test_data).cuda(), Variable(test_labels).cuda()
            v_result_data, v_result_labels = Variable(result_data).cuda(), Variable(result_labels).cuda()
    else:
        v_test_data, v_test_labels = Variable(test_data), Variable(test_labels)
        v_result_data, v_result_labels = Variable(result_data), Variable(result_labels)
    print('%s: Start Training Iterations...' % sys.argv[0])
    sys.stdout.flush()
    megbatch_dir=join(args.data_directory,'mega_batches')
    # Two near-duplicate training loops follow: one pinned to a CUDA
    # device (validates once per epoch), one CPU-only (validates every
    # args.validation_rate mini-batches).
    if args.gpu is not None:
        with torch.cuda.device(args.gpu):
            #Start Each Epoch
            cv_err_old=1
            warn_time=0
            for epoch in range(args.epochs):
                t_loss = 0.0
                t_er = 0.0
                # Start Each Mega_batch
                batch_num=0
                for meg_batch in range(1,args.mega_batch_num+1):
                    train_data=np.load(join(megbatch_dir,'data_mbatch_'+str(meg_batch)+'.npy'))
                    train_labels=np.load(join(megbatch_dir,'labels_mbatch_'+str(meg_batch)+'.npy'))
                    if args.mvnorm:
                        train_data -= mean
                        train_data /= np.sqrt(var)
                    train_data, train_labels = torch.from_numpy(train_data).float(), \
                        torch.from_numpy(train_labels).long()
                    dataset = torch.utils.data.TensorDataset(train_data, train_labels)
                    trainloader = torch.utils.data.DataLoader(dataset, batch_size=args.bsize,
                                                 shuffle=True)
                    for i, data in enumerate(trainloader):
                        batch_num+=1
                        inputs, labels = Variable(data[0]).cuda(), Variable(data[1]).cuda()
                        optimizer.zero_grad()
                        outputs = model(inputs)
                        loss = loss_fn(outputs, labels)
                        # Compute the error rate on the training set.
                        _, predicted = torch.max(outputs, dim=1)
                        hits = (labels == predicted).float().sum()
                        # NOTE(review): legacy .data[0] scalar reads; use
                        # .item() on modern PyTorch.
                        t_er += (1 - hits / labels.size(0)).data[0]
                        t_loss += batch_num and loss.data[0]
                        loss.backward()
                        optimizer.step()
                t_loss /= batch_num
                t_er /= batch_num
                cv_loss, cv_er = error_rate(model, v_test_data, v_test_labels, loss_fn)
                #re_loss, re_er = error_rate(model, v_result_data, v_result_labels, loss_fn)
                #cv_loss, cv_er, re_loss, re_er = error_rate_2(model, v_test_data, v_test_labels, v_result_data, v_result_labels, loss_fn)
                logmsg = 'epoch: {epoch} loss (train): {t_loss:.3f} ' \
                    'error rate (train): {t_er:.3%} loss (cv): {cv_loss:.3f} ' \
                    'error rate (cv): {cv_er:.3%}'.format(
                        epoch=epoch+1, t_loss=t_loss, t_er=t_er,
                        cv_loss=cv_loss, cv_er=cv_er)
                sys.stdout.flush()
                print(logmsg)
                # Early stopping: count epochs where CV error increased.
                if args.cv_stop:
                    if cv_er>cv_err_old:
                        warn_time+=1
                    cv_err_old=cv_er
                    if warn_time>=args.cv_stop:
                        print('%s: Cross Validation Error found to increase in %d epochs.. exiting with present model!' % (sys.argv[0],args.cv_stop))
                        re_loss, re_er = error_rate(model, v_result_data, v_result_labels, loss_fn)
                        print('%s: The final test performance is: %.2f %%' % (sys.argv[0],re_er*100))
                        break
            # NOTE(review): this message also prints after an early-stop
            # break (the break lands here), which is misleading.
            print('%s: Maximum number of epochs exceeded!' % sys.argv[0])
            re_loss, re_er = error_rate(model, v_result_data, v_result_labels, loss_fn)
            print('%s: The final test performance is: %.2f %%' % (sys.argv[0],re_er*100))
            model=model.cpu()
            with open(args.outmodel, 'wb') as fid:
                pickle.dump(model, fid)
    else:
        #Start Each Epoch
        cv_err_old=1
        warn_time=0
        for epoch in range(args.epochs):
            t_loss = 0.0
            t_er = 0.0
            # Start Each Mega_batch
            batch_num=0
            for meg_batch in range(1,args.mega_batch_num+1):
                train_data=np.load(join(megbatch_dir,'data_mbatch_'+str(meg_batch)+'.npy'))
                train_labels=np.load(join(megbatch_dir,'labels_mbatch_'+str(meg_batch)+'.npy'))
                if args.mvnorm:
                    train_data -= mean
                    train_data /= np.sqrt(var)
                train_data, train_labels = torch.from_numpy(train_data).float(), \
                    torch.from_numpy(train_labels).long()
                dataset = torch.utils.data.TensorDataset(train_data, train_labels)
                trainloader = torch.utils.data.DataLoader(dataset, batch_size=args.bsize,
                                             shuffle=True)
                for i, data in enumerate(trainloader):
                    batch_num+=1
                    inputs, labels = Variable(data[0]), Variable(data[1])
                    optimizer.zero_grad()
                    outputs = model(inputs)
                    loss = loss_fn(outputs, labels)
                    # Compute the error rate on the training set.
                    _, predicted = torch.max(outputs, dim=1)
                    hits = (labels == predicted).float().sum()
                    t_er += (1 - hits / labels.size(0)).data[0]
                    t_loss += loss.data[0]
                    loss.backward()
                    optimizer.step()
                    if i % args.validation_rate == args.validation_rate - 1:
                        t_loss /= args.validation_rate
                        t_er /= args.validation_rate
                        cv_loss, cv_er = error_rate(model, v_test_data, v_test_labels, loss_fn)
                        logmsg = 'epoch: {epoch} mega-batch: {meg_batch} mini-batch: {mbatch} loss (train): {t_loss:.3f} ' \
                            'error rate (train): {t_er:.3%} loss (cv): {cv_loss:.3f} ' \
                            'error rate (cv): {cv_er:.3%}'.format(
                                epoch=epoch+1, meg_batch=meg_batch, mbatch=i+1, t_loss=t_loss, t_er=t_er,
                                cv_loss=cv_loss, cv_er=cv_er)
                        t_er = 0.0
                        t_loss = 0.0
                        print(logmsg)
                        sys.stdout.flush()
            # NOTE(review): cv_er is undefined here (NameError) if the
            # validation branch above never ran during the first epoch.
            if cv_er>cv_err_old:
                warn_time+=1
            cv_err_old=cv_er
            if warn_time>=2:
                print('%s: Cross Validation Error found to increase in 2 epochs.. exiting with present model!' % sys.argv[0])
                re_loss, re_er = error_rate(model, v_result_data, v_result_labels, loss_fn)
                print('%s: The final test performance is: %.2f %%' % (sys.argv[0],re_er*100))
                break
        # NOTE(review): unlike the GPU branch, no final result evaluation
        # is printed here unless early stopping fired.
        with open(args.outmodel, 'wb') as fid:
            pickle.dump(model, fid)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('data_directory', help='place to get all training and test data in .npy format')
    parser.add_argument('outmodel', help='output file')
    parser.add_argument('--ntargets', type=int, default=41, help='number of targets(41)')
    parser.add_argument('--nlayers', type=int, default=4, help='number of hidden layers(4)')
    parser.add_argument('--nunits', type=int, default=256, help='number of units per leayer(256)')
    parser.add_argument('--gpu',type=int,help='gpu device id (Ignore if you do not want to run on gpu!)')
    parser.add_argument('--bsize', type=int, default=1000,
                        help='batch size')
    parser.add_argument('--mega_batch_num', type=int, default=5,
                        help='number of big data batches to be uploaded as a whole on to RAM/GPU')
    parser.add_argument('--epochs', type=int, default=1000,
                        help='number of epochs')
    parser.add_argument('--lrate', type=float, default=1e-3,
                        help='learning rate')
    parser.add_argument('--mvnorm', action='store_true',
                        help='mean-variance normalization of the features')
    parser.add_argument('--validation-rate', type=int, default=10,
                        help='frequency of the validation')
    parser.add_argument('--weight-decay', type=float, default=0.0,
                        help='L2 regularization')
    parser.add_argument('--cv_stop', type=int,
                        help='Stop after this many increases of CV error')
    parser.add_argument('--activation', default='tanh',
                        help='tanh OR sigmoid OR relu')
    parser.add_argument('--kink_dim', type=int , help='Puts a kink_dim dimensional layer at the beginning to plot filters')
    args = parser.parse_args()
    # NOTE(review): assert is stripped under `python -O`; an explicit
    # check-and-raise would be more robust for input validation.
    assert args.nlayers > 0
    print('%s: Running MLP training...' % sys.argv[0])
    sys.stdout.flush()
    allfiles = [f for f in listdir(args.data_directory) if isfile(join(args.data_directory, f))]
    # NOTE(review): test_files is never populated or used in this block.
    train_files=[]; test_files=[]
    for i in range(len(allfiles)):
        if 'train' in allfiles[i]:
            train_files.append(os.path.join(args.data_directory,allfiles[i]))
    print('%s: In total %d train data files found..passing them for MLP training' % (sys.argv[0],len(train_files)))
    sys.stdout.flush()
    test_data_all=np.load(join(args.data_directory,'test_data.npy'))
    test_labels_all=np.load(join(args.data_directory,'test_labels.npy'))
    # Carve the test set into quarters: the second quarter is used for
    # cross-validation, the first as the held-out "result" (final test) set.
    reduce_size=int(test_data_all.shape[0]/4)
    test_data=test_data_all[reduce_size:2*reduce_size,:]
    test_labels=test_labels_all[reduce_size:2*reduce_size]
    result_data=test_data_all[0:reduce_size,:]
    result_labels=test_labels_all[0:reduce_size]
    run(train_files,test_data,test_labels,result_data,result_labels,args)
| [
"pickle.dump",
"argparse.ArgumentParser",
"numpy.empty",
"numpy.shape",
"sys.stdout.flush",
"torch.utils.data.TensorDataset",
"os.path.join",
"torch.utils.data.DataLoader",
"numpy.append",
"torch.nn.Linear",
"torch.nn.Tanh",
"torch.autograd.Variable",
"numpy.square",
"torch.max",
"torch.... | [((614, 637), 'numpy.empty', 'np.empty', (['(0, data_dim)'], {}), '((0, data_dim))\n', (622, 637), True, 'import numpy as np\n'), ((651, 663), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (659, 663), True, 'import numpy as np\n'), ((1168, 1186), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1184, 1186), False, 'import sys\n'), ((1401, 1419), 'numpy.empty', 'np.empty', (['data_dim'], {}), '(data_dim)\n', (1409, 1419), True, 'import numpy as np\n'), ((1431, 1443), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1439, 1443), True, 'import numpy as np\n'), ((2427, 2450), 'numpy.empty', 'np.empty', (['(0, data_dim)'], {}), '((0, data_dim))\n', (2435, 2450), True, 'import numpy as np\n'), ((2461, 2484), 'numpy.empty', 'np.empty', (['(0, data_dim)'], {}), '((0, data_dim))\n', (2469, 2484), True, 'import numpy as np\n'), ((2498, 2510), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2506, 2510), True, 'import numpy as np\n'), ((2524, 2536), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2532, 2536), True, 'import numpy as np\n'), ((4678, 4703), 'torch.max', 'torch.max', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (4687, 4703), False, 'import torch\n'), ((5034, 5057), 'torch.max', 'torch.max', (['d_out'], {'dim': '(1)'}), '(d_out, dim=1)\n', (5043, 5057), False, 'import torch\n'), ((5071, 5094), 'torch.max', 'torch.max', (['r_out'], {'dim': '(1)'}), '(r_out, dim=1)\n', (5080, 5094), False, 'import torch\n'), ((6235, 6253), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6251, 6253), False, 'import sys\n'), ((7165, 7183), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7181, 7183), False, 'import sys\n'), ((7219, 7240), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (7238, 7240), False, 'from torch import nn\n'), ((8247, 8265), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8263, 8265), False, 'import sys\n'), ((8288, 8329), 'os.path.join', 'join', 
(['args.data_directory', '"""mega_batches"""'], {}), "(args.data_directory, 'mega_batches')\n", (8292, 8329), False, 'from os.path import isfile, join\n'), ((15654, 15698), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (15677, 15698), False, 'import argparse\n'), ((17586, 17604), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (17602, 17604), False, 'import sys\n'), ((18038, 18056), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (18054, 18056), False, 'import sys\n'), ((844, 879), 'numpy.append', 'np.append', (['train_data', 'data'], {'axis': '(0)'}), '(train_data, data, axis=0)\n', (853, 879), True, 'import numpy as np\n'), ((903, 934), 'numpy.append', 'np.append', (['train_labels', 'labels'], {}), '(train_labels, labels)\n', (912, 934), True, 'import numpy as np\n'), ((1535, 1580), 'numpy.vstack', 'np.vstack', (['(data, data_dict[utt_id][:, 0:-1])'], {}), '((data, data_dict[utt_id][:, 0:-1]))\n', (1544, 1580), True, 'import numpy as np\n'), ((1597, 1640), 'numpy.append', 'np.append', (['labels', 'data_dict[utt_id][:, -1]'], {}), '(labels, data_dict[utt_id][:, -1])\n', (1606, 1640), True, 'import numpy as np\n'), ((2287, 2325), 'os.path.join', 'os.path.join', (['data_dir', 'train_files[0]'], {}), '(data_dir, train_files[0])\n', (2299, 2325), False, 'import os\n'), ((2717, 2761), 'numpy.append', 'np.append', (['train_data', 'data[:, 0:-1]'], {'axis': '(0)'}), '(train_data, data[:, 0:-1], axis=0)\n', (2726, 2761), True, 'import numpy as np\n'), ((2780, 2816), 'numpy.append', 'np.append', (['train_labels', 'data[:, -1]'], {}), '(train_labels, data[:, -1])\n', (2789, 2816), True, 'import numpy as np\n'), ((2988, 3031), 'numpy.append', 'np.append', (['test_data', 'data[:, 0:-1]'], {'axis': '(0)'}), '(test_data, data[:, 0:-1], axis=0)\n', (2997, 3031), True, 'import numpy as np\n'), ((3049, 3084), 'numpy.append', 'np.append', (['test_labels', 'data[:, -1]'], {}), '(test_labels, 
data[:, -1])\n', (3058, 3084), True, 'import numpy as np\n'), ((3373, 3387), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (3381, 3387), True, 'import numpy as np\n'), ((4006, 4020), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (4014, 4020), True, 'import numpy as np\n'), ((4043, 4069), 'numpy.square', 'np.square', (['(data - mean_acc)'], {}), '(data - mean_acc)\n', (4052, 4069), True, 'import numpy as np\n'), ((5604, 5616), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (5614, 5616), False, 'from torch import nn\n'), ((6016, 6028), 'numpy.sqrt', 'np.sqrt', (['var'], {}), '(var)\n', (6023, 6028), True, 'import numpy as np\n'), ((6109, 6121), 'numpy.sqrt', 'np.sqrt', (['var'], {}), '(var)\n', (6116, 6121), True, 'import numpy as np\n'), ((6653, 6678), 'torch.nn.Sequential', 'nn.Sequential', (['*structure'], {}), '(*structure)\n', (6666, 6678), False, 'from torch import nn\n'), ((6950, 6975), 'torch.nn.Sequential', 'nn.Sequential', (['*structure'], {}), '(*structure)\n', (6963, 6975), False, 'from torch import nn\n'), ((18090, 18132), 'os.path.join', 'join', (['args.data_directory', '"""test_data.npy"""'], {}), "(args.data_directory, 'test_data.npy')\n", (18094, 18132), False, 'from os.path import isfile, join\n'), ((18161, 18205), 'os.path.join', 'join', (['args.data_directory', '"""test_labels.npy"""'], {}), "(args.data_directory, 'test_labels.npy')\n", (18165, 18205), False, 'from os.path import isfile, join\n'), ((1815, 1832), 'os.listdir', 'listdir', (['data_dir'], {}), '(data_dir)\n', (1822, 1832), False, 'from os import listdir\n'), ((2658, 2696), 'os.path.join', 'os.path.join', (['data_dir', 'train_files[i]'], {}), '(data_dir, train_files[i])\n', (2670, 2696), False, 'import os\n'), ((2931, 2968), 'os.path.join', 'os.path.join', (['data_dir', 'test_files[i]'], {}), '(data_dir, test_files[i])\n', (2943, 2968), False, 'import os\n'), ((3708, 3722), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (3716, 3722), True, 'import numpy 
as np\n'), ((4362, 4376), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (4370, 4376), True, 'import numpy as np\n'), ((4431, 4457), 'numpy.square', 'np.square', (['(data - mean_acc)'], {}), '(data - mean_acc)\n', (4440, 4457), True, 'import numpy as np\n'), ((5441, 5483), 'os.path.join', 'join', (['args.data_directory', '"""data_mean.npy"""'], {}), "(args.data_directory, 'data_mean.npy')\n", (5445, 5483), False, 'from os.path import isfile, join\n'), ((5504, 5545), 'os.path.join', 'join', (['args.data_directory', '"""data_var.npy"""'], {}), "(args.data_directory, 'data_var.npy')\n", (5508, 5545), False, 'from os.path import isfile, join\n'), ((5665, 5674), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (5672, 5674), False, 'from torch import nn\n'), ((6306, 6340), 'torch.nn.Linear', 'nn.Linear', (['data_dim', 'args.kink_dim'], {}), '(data_dim, args.kink_dim)\n', (6315, 6340), False, 'from torch import nn\n'), ((6602, 6635), 'torch.nn.Linear', 'nn.Linear', (['args.nunits', 'targetdim'], {}), '(args.nunits, targetdim)\n', (6611, 6635), False, 'from torch import nn\n'), ((6724, 6756), 'torch.nn.Linear', 'nn.Linear', (['data_dim', 'args.nunits'], {}), '(data_dim, args.nunits)\n', (6733, 6756), False, 'from torch import nn\n'), ((6899, 6932), 'torch.nn.Linear', 'nn.Linear', (['args.nunits', 'targetdim'], {}), '(args.nunits, targetdim)\n', (6908, 6932), False, 'from torch import nn\n'), ((7023, 7050), 'torch.cuda.device', 'torch.cuda.device', (['args.gpu'], {}), '(args.gpu)\n', (7040, 7050), False, 'import torch\n'), ((7765, 7792), 'torch.cuda.device', 'torch.cuda.device', (['args.gpu'], {}), '(args.gpu)\n', (7782, 7792), False, 'import torch\n'), ((8045, 8064), 'torch.autograd.Variable', 'Variable', (['test_data'], {}), '(test_data)\n', (8053, 8064), False, 'from torch.autograd import Variable\n'), ((8066, 8087), 'torch.autograd.Variable', 'Variable', (['test_labels'], {}), '(test_labels)\n', (8074, 8087), False, 'from torch.autograd import Variable\n'), ((8129, 
8150), 'torch.autograd.Variable', 'Variable', (['result_data'], {}), '(result_data)\n', (8137, 8150), False, 'from torch.autograd import Variable\n'), ((8152, 8175), 'torch.autograd.Variable', 'Variable', (['result_labels'], {}), '(result_labels)\n', (8160, 8175), False, 'from torch.autograd import Variable\n'), ((8385, 8412), 'torch.cuda.device', 'torch.cuda.device', (['args.gpu'], {}), '(args.gpu)\n', (8402, 8412), False, 'import torch\n'), ((15584, 15607), 'pickle.dump', 'pickle.dump', (['model', 'fid'], {}), '(model, fid)\n', (15595, 15607), False, 'import pickle\n'), ((17632, 17660), 'os.listdir', 'listdir', (['args.data_directory'], {}), '(args.data_directory)\n', (17639, 17660), False, 'from os import listdir\n'), ((1843, 1860), 'os.path.join', 'join', (['data_dir', 'f'], {}), '(data_dir, f)\n', (1847, 1860), False, 'from os.path import isfile, join\n'), ((5723, 5732), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5730, 5732), False, 'from torch import nn\n'), ((6833, 6868), 'torch.nn.Linear', 'nn.Linear', (['args.nunits', 'args.nunits'], {}), '(args.nunits, args.nunits)\n', (6842, 6868), False, 'from torch import nn\n'), ((7414, 7441), 'torch.from_numpy', 'torch.from_numpy', (['test_data'], {}), '(test_data)\n', (7430, 7441), False, 'import torch\n'), ((7461, 7490), 'torch.from_numpy', 'torch.from_numpy', (['test_labels'], {}), '(test_labels)\n', (7477, 7490), False, 'import torch\n'), ((7540, 7569), 'torch.from_numpy', 'torch.from_numpy', (['result_data'], {}), '(result_data)\n', (7556, 7569), False, 'import torch\n'), ((7589, 7620), 'torch.from_numpy', 'torch.from_numpy', (['result_labels'], {}), '(result_labels)\n', (7605, 7620), False, 'import torch\n'), ((11123, 11141), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11139, 11141), False, 'import sys\n'), ((12169, 12192), 'pickle.dump', 'pickle.dump', (['model', 'fid'], {}), '(model, fid)\n', (12180, 12192), False, 'import pickle\n'), ((13059, 13115), 'torch.utils.data.TensorDataset', 
'torch.utils.data.TensorDataset', (['train_data', 'train_labels'], {}), '(train_data, train_labels)\n', (13089, 13115), False, 'import torch\n'), ((13146, 13219), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'args.bsize', 'shuffle': '(True)'}), '(dataset, batch_size=args.bsize, shuffle=True)\n', (13173, 13219), False, 'import torch\n'), ((17671, 17699), 'os.path.join', 'join', (['args.data_directory', 'f'], {}), '(args.data_directory, f)\n', (17675, 17699), False, 'from os.path import isfile, join\n'), ((17858, 17904), 'os.path.join', 'os.path.join', (['args.data_directory', 'allfiles[i]'], {}), '(args.data_directory, allfiles[i])\n', (17870, 17904), False, 'import os\n'), ((6442, 6479), 'torch.nn.Linear', 'nn.Linear', (['args.kink_dim', 'args.nunits'], {}), '(args.kink_dim, args.nunits)\n', (6451, 6479), False, 'from torch import nn\n'), ((6536, 6571), 'torch.nn.Linear', 'nn.Linear', (['args.nunits', 'args.nunits'], {}), '(args.nunits, args.nunits)\n', (6545, 6571), False, 'from torch import nn\n'), ((9311, 9367), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['train_data', 'train_labels'], {}), '(train_data, train_labels)\n', (9341, 9367), False, 'import torch\n'), ((9402, 9475), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'args.bsize', 'shuffle': '(True)'}), '(dataset, batch_size=args.bsize, shuffle=True)\n', (9429, 9475), False, 'import torch\n'), ((12845, 12857), 'numpy.sqrt', 'np.sqrt', (['var'], {}), '(var)\n', (12852, 12857), True, 'import numpy as np\n'), ((13689, 13714), 'torch.max', 'torch.max', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (13698, 13714), False, 'import torch\n'), ((7835, 7854), 'torch.autograd.Variable', 'Variable', (['test_data'], {}), '(test_data)\n', (7843, 7854), False, 'from torch.autograd import Variable\n'), ((7863, 7884), 'torch.autograd.Variable', 'Variable', (['test_labels'], {}), '(test_labels)\n', 
(7871, 7884), False, 'from torch.autograd import Variable\n'), ((7937, 7958), 'torch.autograd.Variable', 'Variable', (['result_data'], {}), '(result_data)\n', (7945, 7958), False, 'from torch.autograd import Variable\n'), ((7967, 7990), 'torch.autograd.Variable', 'Variable', (['result_labels'], {}), '(result_labels)\n', (7975, 7990), False, 'from torch.autograd import Variable\n'), ((9093, 9105), 'numpy.sqrt', 'np.sqrt', (['var'], {}), '(var)\n', (9100, 9105), True, 'import numpy as np\n'), ((9987, 10012), 'torch.max', 'torch.max', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (9996, 10012), False, 'import torch\n'), ((13404, 13421), 'torch.autograd.Variable', 'Variable', (['data[0]'], {}), '(data[0])\n', (13412, 13421), False, 'from torch.autograd import Variable\n'), ((13423, 13440), 'torch.autograd.Variable', 'Variable', (['data[1]'], {}), '(data[1])\n', (13431, 13440), False, 'from torch.autograd import Variable\n'), ((14892, 14910), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (14908, 14910), False, 'import sys\n'), ((12922, 12950), 'torch.from_numpy', 'torch.from_numpy', (['train_data'], {}), '(train_data)\n', (12938, 12950), False, 'import torch\n'), ((12978, 13008), 'torch.from_numpy', 'torch.from_numpy', (['train_labels'], {}), '(train_labels)\n', (12994, 13008), False, 'import torch\n'), ((9162, 9190), 'torch.from_numpy', 'torch.from_numpy', (['train_data'], {}), '(train_data)\n', (9178, 9190), False, 'import torch\n'), ((9222, 9252), 'torch.from_numpy', 'torch.from_numpy', (['train_labels'], {}), '(train_labels)\n', (9238, 9252), False, 'import torch\n'), ((9664, 9681), 'torch.autograd.Variable', 'Variable', (['data[0]'], {}), '(data[0])\n', (9672, 9681), False, 'from torch.autograd import Variable\n'), ((9690, 9707), 'torch.autograd.Variable', 'Variable', (['data[1]'], {}), '(data[1])\n', (9698, 9707), False, 'from torch.autograd import Variable\n')] |
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Quadratic Program Solver based on MOSEK.
"""
import re
from sys import stdout, stderr
from numpy import array, Inf, zeros, shape, tril, any
from numpy import flatnonzero as find
from scipy.sparse import csr_matrix as sparse
try:
from pymosek import mosekopt
except ImportError:
# print 'MOSEK not available'
pass
from pypower.mosek_options import mosek_options
def qps_mosek(H, c=None, A=None, l=None, u=None, xmin=None, xmax=None,
              x0=None, opt=None):
    """Quadratic Program Solver based on MOSEK.

    A wrapper function providing a PYPOWER standardized interface for using
    MOSEKOPT to solve the following QP (quadratic programming) problem::

        min 1/2 x'*H*x + c'*x
         x

    subject to::

        l <= A*x <= u       (linear constraints)
        xmin <= x <= xmax   (variable bounds)

    Inputs (all optional except C{H}, C{C}, C{A} and C{L}):
        - C{H} : matrix (possibly sparse) of quadratic cost coefficients
        - C{C} : vector of linear cost coefficients
        - C{A, l, u} : define the optional linear constraints. Default
        values for the elements of L and U are -Inf and Inf, respectively.
        - xmin, xmax : optional lower and upper bounds on the
        C{x} variables, defaults are -Inf and Inf, respectively.
        - C{x0} : optional starting value of optimization vector C{x}
        - C{opt} : optional options structure with the following fields,
        all of which are also optional (default values shown in parentheses)
            - C{verbose} (0) - controls level of progress output displayed
                - 0 = no progress output
                - 1 = some progress output
                - 2 = verbose progress output
            - C{max_it} (0) - maximum number of iterations allowed
                - 0 = use algorithm default
            - C{mosek_opt} - options struct for MOSEK, values in
            C{verbose} and C{max_it} override these options
        - C{problem} : The inputs can alternatively be supplied in a single
        C{problem} struct with fields corresponding to the input arguments
        described above: C{H, c, A, l, u, xmin, xmax, x0, opt}

    Outputs:
        - C{x} : solution vector
        - C{f} : final objective function value
        - C{exitflag} : exit flag
              - 1 = success
              - 0 = terminated at maximum number of iterations
              - -1 = primal or dual infeasible
              < 0 = the negative of the MOSEK return code
        - C{output} : output dict with the following fields:
            - C{r} - MOSEK return code
            - C{res} - MOSEK result dict
        - C{lmbda} : dict containing the Langrange and Kuhn-Tucker
        multipliers on the constraints, with fields:
            - C{mu_l} - lower (left-hand) limit on linear constraints
            - C{mu_u} - upper (right-hand) limit on linear constraints
            - C{lower} - lower bound on optimization variables
            - C{upper} - upper bound on optimization variables

    Raises
    ------
    ValueError
        If an LP problem is given without any constraints or variable bounds
        (the problem size cannot be determined).

    @author: <NAME> (PSERC Cornell)
    """
    def _absent(d, key):
        ## True if *key* is missing from dict *d* or maps to None/empty
        ## (mirrors MATLAB's ~isfield(p, key) || isempty(p.(key)))
        return key not in d or d[key] is None or len(d[key]) == 0

    ##----- input argument handling -----
    ## gather inputs
    if isinstance(H, dict):       ## problem struct
        p = H
    else:                                ## individual args
        p = {'H': H, 'c': c, 'A': A, 'l': l, 'u': u}
        if xmin is not None:
            p['xmin'] = xmin
        if xmax is not None:
            p['xmax'] = xmax
        if x0 is not None:
            p['x0'] = x0
        if opt is not None:
            p['opt'] = opt

    ## define nx, set default values for H and c
    ## Densify H once so that the emptiness / all-zero tests below work for
    ## both dense and sparse inputs.  (The original `len(p['H'])` test was
    ## truthy for a non-empty H and thus inverted the LP/QP detection.)
    if 'H' in p and p['H'] is not None:
        Hdense = p['H'].toarray() if hasattr(p['H'], 'toarray') else array(p['H'])
    else:
        Hdense = array([])
    if Hdense.size == 0 or not Hdense.any():    ## no quadratic term --> LP
        ## an LP needs constraints or bounds to fix the problem dimension
        if _absent(p, 'A') and _absent(p, 'xmin') and _absent(p, 'xmax'):
            err = 'qps_mosek: LP problem must include constraints or variable bounds\n'
            stderr.write(err)
            raise ValueError(err)
        if not _absent(p, 'A'):
            nx = shape(p['A'])[1]
        elif not _absent(p, 'xmin'):
            nx = len(p['xmin'])
        else:    ## 'xmax' is present and non-empty
            nx = len(p['xmax'])
        p['H'] = sparse((nx, nx))
        qp = 0
    else:
        nx = Hdense.shape[0]
        qp = 1
    if _absent(p, 'c'):
        p['c'] = zeros(nx)
    if _absent(p, 'x0'):
        p['x0'] = zeros(nx)

    ## default options
    if 'opt' not in p or p['opt'] is None:
        p['opt'] = {}
    verbose = p['opt']['verbose'] if 'verbose' in p['opt'] else 0
    max_it = p['opt']['max_it'] if 'max_it' in p['opt'] else 0
    if 'mosek_opt' in p['opt']:
        mosek_opt = mosek_options(p['opt']['mosek_opt'])
    else:
        mosek_opt = mosek_options()
    if max_it:
        mosek_opt['MSK_IPAR_INTPNT_MAX_ITERATIONS'] = max_it
    if qp:
        mosek_opt['MSK_IPAR_OPTIMIZER'] = 0    ## default solver only for QP

    ## set up problem struct for MOSEK
    prob = {'c': p['c']}
    if qp:
        ## MOSEK expects the lower-triangular part of H in coordinate form:
        ## parallel arrays of row indices, column indices and values
        Hl = tril(Hdense)
        qrows, qcols = Hl.nonzero()
        prob['qosubi'] = qrows
        prob['qosubj'] = qcols
        prob['qoval'] = Hl[qrows, qcols]
    if not _absent(p, 'A'):
        prob['a'] = sparse(p['A'])
        if not _absent(p, 'l'):
            prob['blc'] = p['l']
        if not _absent(p, 'u'):
            prob['buc'] = p['u']
    if not _absent(p, 'xmin'):
        prob['blx'] = p['xmin']
    if not _absent(p, 'xmax'):
        prob['bux'] = p['xmax']

    ## A is not allowed to be empty: insert a single dummy row with a 1 in
    ## the first column and free bounds, so it never constrains anything
    if 'a' not in prob:
        unconstrained = True
        prob['a'] = sparse(([1.0], ([0], [0])), shape=(1, nx))
        prob['blc'] = -float('inf')
        prob['buc'] = float('inf')
    else:
        unconstrained = False

    ##----- run optimization -----
    if verbose:
        methods = [
            'default',
            'interior point',
            '<default>',
            '<default>',
            'primal simplex',
            'dual simplex',
            'primal dual simplex',
            'automatic simplex',
            '<default>',
            '<default>',
            'concurrent'
        ]
        lpqp = 'QP' if qp else 'LP'
        ## NOTE(review): the original tried to parse the MOSEK version string
        ## with a broken re.compile(eval(...)) call; version detection is
        ## omitted here rather than guessed at.
        print('MOSEK Version %s -- %s %s solver\n' %
              ('<unknown>', methods[mosek_opt['MSK_IPAR_OPTIMIZER'] + 1], lpqp))
    cmd = 'minimize echo(%d)' % verbose
    r, res = mosekopt(cmd, prob, mosek_opt)

    ##----- repackage results -----
    ## prefer the basic (simplex) solution when available, else interior point
    ## NOTE(review): the 'sol.bas' / 'sol.itr' key strings are kept as in the
    ## original -- confirm against the pymosek result layout.
    if 'sol' in res:
        if 'bas' in res['sol']:
            sol = res['sol.bas']
        else:
            sol = res['sol.itr']
        x = sol['xx']
    else:
        sol = array([])
        x = array([])

    ##----- process return codes -----
    if 'symbcon' in res:
        sc = res['symbcon']
    else:
        r2, res2 = mosekopt('symbcon echo(0)')
        sc = res2['symbcon']

    eflag = -r
    msg = ''
    if r == sc['MSK_RES_OK']:
        if len(sol) > 0:
            if sol['solsta'] == 'OPTIMAL':
                msg = 'The solution is optimal.'
                eflag = 1
            else:
                eflag = -1
                if sol['prosta'] == 'PRIMAL_INFEASIBLE':
                    msg = 'The problem is primal infeasible.'
                elif sol['prosta'] == 'DUAL_INFEASIBLE':
                    msg = 'The problem is dual infeasible.'
                else:
                    msg = sol['solsta']
    elif r == sc['MSK_RES_TRM_MAX_ITERATIONS']:
        eflag = 0
        msg = 'The optimizer terminated at the maximum number of iterations.'
    else:
        if 'rmsg' in res and 'rcodestr' in res:
            msg = '%s : %s' % (res['rcodestr'], res['rmsg'])
        else:
            msg = 'MOSEK return code = %d' % r

    ## always alert user if license is expired
    ## (original tested `len(msg) < 0`, which is never true)
    if (verbose or r == 1001) and len(msg) > 0:
        stdout.write('%s\n' % msg)

    ##----- repackage results -----
    if r == 0:
        ## objective value: c'x (+ 1/2 x'Hx for QP); the original used
        ## elementwise `p['c'].T * x`, which is not a dot product for 1-D
        ## numpy arrays
        f = array(p['c']).dot(x)
        if qp:
            f = 0.5 * x.dot(Hdense.dot(x)) + f
    else:
        f = array([])

    output = {'r': r, 'res': res}

    if 'sol' in res:
        lmbda = {
            'lower': sol['slx'],
            'upper': sol['sux'],
            'mu_l': sol['slc'],
            'mu_u': sol['suc'],
        }
        if unconstrained:
            ## the dummy constraint row carries no meaningful multipliers
            lmbda['mu_l'] = array([])
            lmbda['mu_u'] = array([])
    else:
        lmbda = array([])

    return x, f, eflag, output, lmbda
| [
"sys.stdout.write",
"numpy.zeros",
"numpy.shape",
"numpy.any",
"scipy.sparse.csr_matrix",
"numpy.array",
"pypower.mosek_options.mosek_options",
"pymosek.mosekopt",
"sys.stderr.write"
] | [((7057, 7087), 'pymosek.mosekopt', 'mosekopt', (['cmd', 'prob', 'mosek_opt'], {}), '(cmd, prob, mosek_opt)\n', (7065, 7087), False, 'from pymosek import mosekopt\n'), ((4369, 4385), 'scipy.sparse.csr_matrix', 'sparse', (['(nx, nx)'], {}), '((nx, nx))\n', (4375, 4385), True, 'from scipy.sparse import csr_matrix as sparse\n'), ((4514, 4523), 'numpy.zeros', 'zeros', (['nx'], {}), '(nx)\n', (4519, 4523), False, 'from numpy import array, Inf, zeros, shape, tril, any\n'), ((4585, 4594), 'numpy.zeros', 'zeros', (['nx'], {}), '(nx)\n', (4590, 4594), False, 'from numpy import array, Inf, zeros, shape, tril, any\n'), ((4911, 4947), 'pypower.mosek_options.mosek_options', 'mosek_options', (["p['opt']['mosek_opt']"], {}), "(p['opt']['mosek_opt'])\n", (4924, 4947), False, 'from pypower.mosek_options import mosek_options\n'), ((4978, 4993), 'pypower.mosek_options.mosek_options', 'mosek_options', ([], {}), '()\n', (4991, 4993), False, 'from pypower.mosek_options import mosek_options\n'), ((5386, 5400), 'scipy.sparse.csr_matrix', 'sparse', (["p['A']"], {}), "(p['A'])\n", (5392, 5400), True, 'from scipy.sparse import csr_matrix as sparse\n'), ((5811, 5839), 'scipy.sparse.csr_matrix', 'sparse', (['(1, (1, 1))', '(1, nx)'], {}), '((1, (1, 1)), (1, nx))\n', (5817, 5839), True, 'from scipy.sparse import csr_matrix as sparse\n'), ((7306, 7315), 'numpy.array', 'array', (['[]'], {}), '([])\n', (7311, 7315), False, 'from numpy import array, Inf, zeros, shape, tril, any\n'), ((7328, 7337), 'numpy.array', 'array', (['[]'], {}), '([])\n', (7333, 7337), False, 'from numpy import array, Inf, zeros, shape, tril, any\n'), ((7462, 7489), 'pymosek.mosekopt', 'mosekopt', (['"""symbcon echo(0)"""'], {}), "('symbcon echo(0)')\n", (7470, 7489), False, 'from pymosek import mosekopt\n'), ((8685, 8711), 'sys.stdout.write', 'stdout.write', (["('%s\\n' % msg)"], {}), "('%s\\n' % msg)\n", (8697, 8711), False, 'from sys import stdout, stderr\n'), ((8884, 8893), 'numpy.array', 'array', (['[]'], {}), 
'([])\n', (8889, 8893), False, 'from numpy import array, Inf, zeros, shape, tril, any\n'), ((9270, 9279), 'numpy.array', 'array', (['[]'], {}), '([])\n', (9275, 9279), False, 'from numpy import array, Inf, zeros, shape, tril, any\n'), ((3985, 4073), 'sys.stderr.write', 'stderr.write', (['"""qps_mosek: LP problem must include constraints or variable bounds\n"""'], {}), "(\n 'qps_mosek: LP problem must include constraints or variable bounds\\n')\n", (3997, 4073), False, 'from sys import stdout, stderr\n'), ((4424, 4437), 'numpy.shape', 'shape', (["p['H']"], {}), "(p['H'])\n", (4429, 4437), False, 'from numpy import array, Inf, zeros, shape, tril, any\n'), ((9195, 9204), 'numpy.array', 'array', (['[]'], {}), '([])\n', (9200, 9204), False, 'from numpy import array, Inf, zeros, shape, tril, any\n'), ((9234, 9243), 'numpy.array', 'array', (['[]'], {}), '([])\n', (9239, 9243), False, 'from numpy import array, Inf, zeros, shape, tril, any\n'), ((3793, 3804), 'numpy.any', 'any', (["p['H']"], {}), "(p['H'])\n", (3796, 3804), False, 'from numpy import array, Inf, zeros, shape, tril, any\n'), ((5313, 5327), 'scipy.sparse.csr_matrix', 'sparse', (["p['H']"], {}), "(p['H'])\n", (5319, 5327), True, 'from scipy.sparse import csr_matrix as sparse\n'), ((4147, 4160), 'numpy.shape', 'shape', (["p['A']"], {}), "(p['A'])\n", (4152, 4160), False, 'from numpy import array, Inf, zeros, shape, tril, any\n'), ((6350, 6356), 'numpy.any', 'any', (['H'], {}), '(H)\n', (6353, 6356), False, 'from numpy import array, Inf, zeros, shape, tril, any\n')] |
"""
Module correlations provides functions to calculate 2D fields
auto-correlations.
A brief description of the algorithm can be found at:
https://yketa.github.io/UBC_2018_Wiki/#Discrete%20field%20auto-correlation
"""
import numpy as np
from active_particles.maths import Grid
def corField2D_scalar(field):
    """
    2D correlation field of a scalar field, computed with the Fast Fourier
    Transform (Wiener-Khinchin theorem).

    Parameters
    ----------
    field : 2D array like
        Scalar field to extract correlations from.
        Points are supposed to be uniformly distributed.

    Returns
    -------
    C : 2D numpy array
        Unnormalised correlation field.
        C[0, 0] is the origin, points are uniformly distributed.
    Norm : float
        Norm of correlation field.
    """

    transform = np.fft.fft2(field)                       # FFT of scalar field
    power = np.conj(transform)*transform                # power spectrum
    correlation = np.real(np.fft.ifft2(power))          # unnormalised correlation field
    norm = np.sum(field**2)                             # norm of correlation field
    return correlation, norm
def corField2D_scalar_average(field_list):
    """
    2D correlation field, averaged from a list of scalar fields. Correlations
    are calculated with use of Fast Fourier Transform.

    NOTE: the original implementation summed the (array, scalar) tuples
    returned by corField2D_scalar with np.sum, which relies on ragged-array
    creation -- deprecated since NumPy 1.20 and a ValueError in NumPy >= 1.24.
    Correlation fields and norms are now accumulated explicitly.

    Parameters
    ----------
    field_list : list of 2D array like
        List of scalar fields to extract correlations from.
        Points are supposed to be uniformly distributed.

    Returns
    -------
    C : 2D numpy array
        Normalised averaged correlation field.
        C[0, 0] is the origin, points are uniformly distributed.
    """

    corr_sum = 0    # running sum of unnormalised correlation fields
    norm_sum = 0    # running sum of their norms
    for field in field_list:
        corr, norm = corField2D_scalar(field)
        corr_sum = corr_sum + corr
        norm_sum += norm
    return corr_sum/norm_sum    # normalised averaged correlation field
def corField2D_vector(field):
    """
    2D correlation field of a vector field, computed with the Fast Fourier
    Transform, one Cartesian component at a time.

    Parameters
    ----------
    field : (n, n, 2) shaped array like
        Vector field to extract correlations from.
        Points are supposed to be uniformly distributed.

    Returns
    -------
    C : 2D numpy array
        Unnormalised correlation field.
        C[0, 0] is the origin, points are uniformly distributed.
    xCL : float
        Unnormalised longitudinal correlation of field projected on the first
        direction of space at distance equal to field grid spacing.
    yCL : float
        Unnormalised longitudinal correlation of field projected on the second
        direction of space at distance equal to field grid spacing.
    xCT : float
        Unnormalised transversal correlation of field projected on the first
        direction of space at distance equal to field grid spacing.
    yCT : float
        Unnormalised transversal correlation of field projected on the second
        direction of space at distance equal to field grid spacing.
    Norm : float
        Norm of correlation field.
    """

    def autocorrelation(component):
        # unnormalised FFT-based autocorrelation of one component, plus its norm
        transform = np.fft.fft2(component)
        return (np.real(np.fft.ifft2(np.conj(transform)*transform)),
            np.sum(component**2))

    corr_x, norm_x = autocorrelation(field[:, :, 0])    # first direction of space
    corr_y, norm_y = autocorrelation(field[:, :, 1])    # second direction of space

    return (corr_x + corr_y,                        # correlation field of field
        corr_x[0, 1], corr_y[1, 0],                 # longitudinal correlations
        corr_x[1, 0], corr_y[0, 1],                 # transversal correlations
        norm_x + norm_y)                            # norm of correlation field
def corField2D_vector_average(field_list):
    """
    2D correlation field, averaged from a list of vector fields. Correlations
    are calculated with use of Fast Fourier Transform.

    NOTE: the original implementation summed the heterogeneous 6-tuples
    returned by corField2D_vector with np.sum, which relies on ragged-array
    creation -- deprecated since NumPy 1.20 and a ValueError in NumPy >= 1.24.
    The components are now accumulated explicitly.

    Parameters
    ----------
    field_list : list of (n, n, 2) shaped array like
        List of vector fields to extract correlations from.
        Points are supposed to be uniformly distributed.

    Returns
    -------
    C : 2D numpy array
        Normalised averaged correlation field.
        C[0, 0] is the origin, points are uniformly distributed.
    CL : float
        Normalised averaged longitudinal correlation of field at distance equal
        to field grid spacing.
    CT : float
        Normalised averaged transversal correlation of field at distance equal
        to field grid spacing.
    """

    corr_sum = 0                                # running sum of correlation fields
    xCL_sum = yCL_sum = xCT_sum = yCT_sum = 0   # running sums of point correlations
    norm_sum = 0                                # running sum of norms
    for field in field_list:
        corr, xCL, yCL, xCT, yCT, norm = corField2D_vector(field)
        corr_sum = corr_sum + corr
        xCL_sum += xCL
        yCL_sum += yCL
        xCT_sum += xCT
        yCT_sum += yCT
        norm_sum += norm

    C = corr_sum/norm_sum                   # normalised averaged correlation field
    CL = (xCL_sum + yCL_sum)/(2*norm_sum)   # longitudinal correlation
    CT = (xCT_sum + yCT_sum)/(2*norm_sum)   # transversal correlation
    return C, CL, CT
def corField2D_vector_average_Cnn(field_list, Cnn):
    """
    2D correlation field, averaged from a list of vector fields. Correlations
    are calculated with use of Fast Fourier Transform.

    Compared to correlations.corField2D_vector_average, this function also
    divides values of longitudinal and transversal correlations, in each
    direction of space, by the value of the corresponding normalised
    density correlation.
    WARNING: This correction with the density correlation is not applied to the
    correlation field.

    NOTE: the original implementation summed the heterogeneous 6-tuples
    returned by corField2D_vector with np.sum, which relies on ragged-array
    creation -- deprecated since NumPy 1.20 and a ValueError in NumPy >= 1.24.
    The components are now accumulated explicitly.

    Parameters
    ----------
    field_list : list of (n, n, 2) shaped array like
        List of vector fields to extract correlations from.
        Points are supposed to be uniformly distributed.
    Cnn : (n, n) shaped array like
        Normalised density correlation.

    Returns
    -------
    C : 2D numpy array
        Normalised averaged correlation field.
        C[0, 0] is the origin, points are uniformly distributed.
    CL : float
        Normalised averaged longitudinal correlation of field at distance equal
        to field grid spacing, corrected with density correlation.
    CT : float
        Normalised averaged transversal correlation of field at distance equal
        to field grid spacing, corrected with density correlation.
    """

    corr_sum = 0                                # running sum of correlation fields
    xCL_sum = yCL_sum = xCT_sum = yCT_sum = 0   # running sums of point correlations
    norm_sum = 0                                # running sum of norms
    for field in field_list:
        corr, xCL, yCL, xCT, yCT, norm = corField2D_vector(field)
        corr_sum = corr_sum + corr
        xCL_sum += xCL
        yCL_sum += yCL
        xCT_sum += xCT
        yCT_sum += yCT
        norm_sum += norm

    C = corr_sum/norm_sum   # normalised averaged correlation field
    # point correlations, each direction corrected by the corresponding
    # density correlation before averaging
    CL = (xCL_sum/Cnn[0, 1] + yCL_sum/Cnn[1, 0])/(2*norm_sum)
    CT = (xCT_sum/Cnn[1, 0] + yCT_sum/Cnn[0, 1])/(2*norm_sum)
    return C, CL, CT
class CorGrid:
    """
    Manipulate 2D correlation grids.

    We consider [0, 0] as the origin of the grid and periodic boundary
    conditions.
    """

    def __init__(self, grid, box_size, display_size=None):
        """
        Initiates grid and display grid.

        Parameters
        ----------
        grid : array-like
            2D correlation grid.
        box_size : float or float array-like
            Length of grid in one or all dimensions.
        display_size : float
            Length of display grid in one or all dimensions. (default: None)
            NOTE: None corresponds to original grid size.
        """

        self.grid = np.array(grid)
        self.shape = np.array(self.grid.shape[:2])   # grid shape in 2 first dimensions

        self.box_size = np.array(box_size, ndmin=1)
        if display_size is None:    # fixed: identity check instead of '== None'
            self.display_size = self.box_size
        else: self.display_size = np.array(display_size, ndmin=1)

        self.middle_cases = np.array(self.shape/2, dtype=int) # number of boxes correspond to half of the grid in all directions
        self.half_display_size_cases = np.array(
            self.display_size*(np.array(self.shape)/self.box_size)/2,
            dtype=int)  # number of boxes in all dimensions corresponding to half of display_size

        # roll the grid so the origin [0, 0] sits at the centre, then crop a
        # window of half_display_size_cases boxes on each side of it
        self.display_grid = Grid(np.roll(
            np.roll(self.grid, self.middle_cases[1], axis=0),
            self.middle_cases[0], axis=1)[
            self.middle_cases[1] - self.half_display_size_cases[1]:
            self.middle_cases[1] + self.half_display_size_cases[1] + 1,
            self.middle_cases[0] - self.half_display_size_cases[0]:
            self.middle_cases[0] + self.half_display_size_cases[0] + 1],
            extent=(-self.display_size[0]/2, self.display_size[0]/2,
                -self.display_size[-1]/2, self.display_size[-1]/2))

    def integrate_over_angles(self, r, projection=lambda angle: 1,
        points_theta=100, linear_interpolation=False):
        """
        Returns integration of values of display grid over all angles,
        projected on projection, at radius r.

        Parameters
        ----------
        r : float
            Radius.
        projection : function of angle
            Projector. (default: 1)
        points_theta : int
            Number of values of angles for integration.
        linear_interpolation : bool
            Get value by linear interpolation of neighbouring grid boxes.
            (default: False)

        Returns
        -------
        integration : float
            Integration of display grid.
            NOTE: None is returned when r lies outside the display grid.
        """

        if r > np.min(self.display_size): return None    # integration over regions not in display grid

        theta = np.linspace(0, 2*np.pi, points_theta) # angles for integration
        return np.trapz(
            list(map(
                lambda angle: (self.display_grid.get_value_polar(r, angle,
                    linear_interpolation=linear_interpolation)
                    *projection(angle)),
                theta)),
            theta)

    def get_value_cartesian(self, x, y, linear_interpolation=False):
        """
        Get value of grid at position in cartesian coordinates.

        Parameters
        ----------
        x : float
            x-coordinate
        y : float
            y-coordinate
        linear_interpolation : bool
            Get value by linear interpolation of neighbouring grid boxes.
            (default: False)

        Returns
        -------
        value : *
            Value at (x, y) with or without linear interpolation.
        """

        return self.display_grid.get_value_cartesian(x, y,
            linear_interpolation=linear_interpolation)

    def get_value_polar(self, r, angle, centre=(0, 0),
        linear_interpolation=False):
        """
        Get value of grid at position in polar coordinates.

        Parameters
        ----------
        r : float
            Radius from centre.
        angle : float
            Angle from x-direction.
        centre : float tuple
            Origin for calculation. (default: (0, 0))
        linear_interpolation : bool
            Get value by linear interpolation of neighbouring grid boxes.
            (default: False)

        Returns
        -------
        value : *
            Value at (r, angle) from centre with or without linear
            interpolation.
        """

        # fixed: forward the caller's centre (the original hard-coded
        # centre=(0, 0), silently ignoring the argument)
        return self.display_grid.get_value_polar(r, angle, centre=centre,
            linear_interpolation=linear_interpolation)
| [
"numpy.conj",
"numpy.sum",
"numpy.roll",
"numpy.min",
"numpy.array",
"numpy.fft.fft2",
"numpy.linspace"
] | [((820, 838), 'numpy.fft.fft2', 'np.fft.fft2', (['field'], {}), '(field)\n', (831, 838), True, 'import numpy as np\n'), ((978, 996), 'numpy.sum', 'np.sum', (['(field ** 2)'], {}), '(field ** 2)\n', (984, 996), True, 'import numpy as np\n'), ((7293, 7307), 'numpy.array', 'np.array', (['grid'], {}), '(grid)\n', (7301, 7307), True, 'import numpy as np\n'), ((7329, 7358), 'numpy.array', 'np.array', (['self.grid.shape[:2]'], {}), '(self.grid.shape[:2])\n', (7337, 7358), True, 'import numpy as np\n'), ((7419, 7446), 'numpy.array', 'np.array', (['box_size'], {'ndmin': '(1)'}), '(box_size, ndmin=1)\n', (7427, 7446), True, 'import numpy as np\n'), ((7621, 7656), 'numpy.array', 'np.array', (['(self.shape / 2)'], {'dtype': 'int'}), '(self.shape / 2, dtype=int)\n', (7629, 7656), True, 'import numpy as np\n'), ((9412, 9451), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'points_theta'], {}), '(0, 2 * np.pi, points_theta)\n', (9423, 9451), True, 'import numpy as np\n'), ((7560, 7591), 'numpy.array', 'np.array', (['display_size'], {'ndmin': '(1)'}), '(display_size, ndmin=1)\n', (7568, 7591), True, 'import numpy as np\n'), ((9307, 9332), 'numpy.min', 'np.min', (['self.display_size'], {}), '(self.display_size)\n', (9313, 9332), True, 'import numpy as np\n'), ((912, 924), 'numpy.conj', 'np.conj', (['FFT'], {}), '(FFT)\n', (919, 924), True, 'import numpy as np\n'), ((8043, 8091), 'numpy.roll', 'np.roll', (['self.grid', 'self.middle_cases[1]'], {'axis': '(0)'}), '(self.grid, self.middle_cases[1], axis=0)\n', (8050, 8091), True, 'import numpy as np\n'), ((7804, 7824), 'numpy.array', 'np.array', (['self.shape'], {}), '(self.shape)\n', (7812, 7824), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# uses a lot of global variables, its a hack I know
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons
# Preset chord table: label -> (freq1, freq2, freq3) in Hz.
# 'Play' is a pseudo-entry handled specially by freqSet().
ftable = {
    '300':(300,0,0),
    '300-400':(300,400,0),
    'GDG':(196,294,392),
    'Gm' :(196,247,294),
    'Play':(0,0,0)
}
# vout = gain*np.arctan(offset+level*vin)
gaintype = 'linear' # linear, clipped, tube
clipping = 50
labelShow = False
# Gain presets: label -> (gain, bias, offset, level).
gtable = {
    # label        gain  bias offset level
    'linear'  : (50.0, 1, 0,    1.0),
    'clipped' : (50.0, 1, 0,    1.0),
    'bias0'   : (50.0, 1, 0,    1.0),
    'even1'   : (50.0, 1, 0.65, 0.6),
    'even20'  : (50.0, 1, 1e-1, 1.0),
    'even40'  : (50.0, 1, 1e-2, 1.0),
    'even60'  : (50.0, 1, 1e-3, 1.0),
    'odd20'   : (50.0, 1, 1e-1, 1.0),
    'odd40'   : (50.0, 1, 1e-2, 1.0),
}
freq1,freq2,freq3 = ftable['300']
gain,bias,offset,level = gtable[gaintype]
Fs = 22050.0 # sampling rate (Hz)
Ts = 1.0/Fs # sampling interval
t = np.arange(0,1,Ts) # time vector (1 second of signal)
n = len(t) # length of the signal
k = np.arange(n)
T = n/Fs
fftfrq = k/T # two sides frequency range
# one side frequency range — fix: the original `range(n/2)` is a float
# index and raises TypeError on Python 3
fftfrq = fftfrq[:n//2]
# fix: modern NumPy rejects a float `size` argument, so cast Fs to int
noise = np.random.normal(0.0,0.1,int(Fs))/100.0
ampl1 = 1.0
phase1 = 0.0
ampl2 = 0.0
phase2 = 0.0
ampl3 = 0.0
phase3 = 0.0
transfermax = 20
transferplot = None
transferallvin = transfermax*np.arange(-1.0,1.0,2*Ts)
transfervallplot= None
vin1 = ampl1*np.sin(2*np.pi*freq1*t+phase1)
vin2 = ampl2*np.sin(2*np.pi*freq2*t+phase2)
vin3 = ampl3*np.sin(2*np.pi*freq3*t+phase3)
vin = vin1 + vin2 + vin3
fig,axa = plt.subplots(2,2)
# fix: FigureCanvas.set_window_title was removed in matplotlib 3.6; the
# window title now lives on the figure manager.
fig.canvas.manager.set_window_title("ValveStudio imdExplorer")
def voutCalc():
    """Recompute the global output signal ``vout`` from ``vin`` using the
    currently selected gain model (``gaintype``)."""
    global gaintype, vout
    if gaintype == 'linear':
        vout = gain * (offset + level * vin) + noise
    elif gaintype == 'clipped':
        vout = np.clip(gain * (offset + level * vin) + noise, -clipping, clipping)
    elif gaintype == 'tube':
        # arctan soft-clipping; the added noise keeps a visible noise floor
        vout = gain * np.arctan(bias * (offset + level * vin)) + noise
voutCalc()
def updatetransfer():
    # Refresh the bottom-left transfer-curve panel: recompute the full
    # input->output curve for the current gain model and highlight the part
    # of the curve the current input signal actually drives.
    # NOTE(review): `transfervin` in the globals list is never defined or
    # used anywhere visible — likely a leftover; confirm before removing.
    global transferallvin,transferallvout,transfervin,offset,axa
    # Only one branch fires; gaintype is one of 'linear'/'clipped'/'tube'.
    if gaintype == 'linear':
        transferallvout = gain*(bias*transferallvin)
    if gaintype == 'clipped':
        transferallvout = np.clip(gain*(bias*transferallvin),-clipping,clipping)
    if gaintype == 'tube':
        transferallvout = gain*np.arctan(bias*(transferallvin))
    '''
    if linear:
        transferallvout = gain*(offset+level*transferallvin)
    else:
        transferallvout = gain*np.arctan(offset+level*transferallvin)
    '''
    if transfervallplot: # this checks if plot exists yet
        # print len(vin),len(transferallvin)
        # transfervallplot.set_xdata(vin)
        transfervallplot.set_ydata(transferallvout)
    if transferplot:
        # Boolean mask of curve samples covered by the scaled input range.
        if gaintype == 'linear' or gaintype == 'clipped':
            r = np.logical_and(transferallvin>=(offset+level*vin).min(), transferallvin<=(offset+level*vin).max())
        if gaintype == 'tube':
            r = np.logical_and(transferallvin>=offset+level*vin.min(), transferallvin<=offset+level*vin.max())
        transferplot.set_xdata(transferallvin[r])
        transferplot.set_ydata(transferallvout[r])
    # axa[1,0].relim()
    # axa[1,0].autoscale_view(False,True,True)
updatetransfer() # should do this all functions
fig.text(0.70,0.965,"vout = gain * (offset + level*vin)\nvout = gain * atan(bias*(offset + (level * vin)))")
# Top-left panel: the three input sine components (legend reversed so
# component 1 is listed first).
vin3plot, = axa[0,0].plot(t,vin3,label='%dHz'%freq3)
vin2plot, = axa[0,0].plot(t,vin2,label='%dHz'%freq2)
vin1plot, = axa[0,0].plot(t,vin1,label='%dHz'%freq1)
axa[0,0].set_xlim(0,5.0/freq1)
axa[0,0].set_ylim(-2.0,2.0)
handles, labels = axa[0,0].get_legend_handles_labels()
axa[0,0].legend(handles[::-1], labels[::-1])
# Top-right panel: scaled input (offset + level*vin) against the output.
vinplot, = axa[0,1].plot(t,(offset + level*vin),label='Vin')
voutplot, = axa[0,1].plot(t,vout,label='Vout')
axa[0,1].set_xlim(0,10.0/freq1)
handles, labels = axa[0,1].get_legend_handles_labels()
axa[0,1].legend(handles[::-1], labels[::-1])
#axa[0,1].set_ylim(-100.0,100.0)
axa[0,1].relim()
axa[0,1].autoscale_view(True,True,True)
# Bottom-left panel: full transfer curve (blue) with the segment the
# current signal actually drives overlaid in green.
transfervallplot, = axa[1,0].plot(transferallvin,transferallvout,color='blue')
transferplot, = axa[1,0].plot(vin,vout,color='green',linewidth=3)
axa[1,0].set_xlim(-transfermax,transfermax)
axa[1,0].set_ylim(-120.0,120.0)
#axa[1,0].relim()
#axa[1,0].autoscale_view(False,True,True)
def play():
    # Render 25 s of the current signal and play it through the default
    # audio device. pyaudio is imported lazily so the GUI still works when
    # the package is not installed (until 'Play' is pressed).
    import pyaudio
    # need to recalc a temp version because repeated playing clicks at end-start discontinuity
    play_t = np.arange(0,25,Ts) # time vector
    play_vin1 = ampl1*np.sin(2*np.pi*freq1*play_t+phase1)
    play_vin2 = ampl2*np.sin(2*np.pi*freq2*play_t+phase2)
    play_vin3 = ampl3*np.sin(2*np.pi*freq3*play_t+phase3)
    play_vin = play_vin1 + play_vin2 + play_vin3
    # Same gain models as voutCalc(), but without the noise floor.
    if gaintype == 'linear':
        play_vout = gain*(offset + level*play_vin)
    if gaintype == 'clipped':
        play_vout = np.clip(gain*(offset + level*play_vin),-clipping,clipping)
    if gaintype == 'tube':
        play_vout = gain*np.arctan(bias*(offset+level*play_vin))
    p = pyaudio.PyAudio()
    stream = p.open(format = pyaudio.paFloat32, channels = 1, rate = int(Fs), output = True)
    # Normalise to [-1, 1] and stream as 32-bit floats.
    data = (play_vout/np.absolute(play_vout).max()).astype(np.float32)
    stream.write(data)
    stream.stop_stream()
    stream.close()
    p.terminate()
# Initial one-sided amplitude spectrum of vout (bottom-right panel).
fftout = np.fft.fft(vout)/n # fft computing and normalization
# Keep the positive-frequency half — fix: the original `range(n/2)` is a
# float index and raises TypeError on Python 3.
fftout = fftout[:n//2]
fftmag = 20*np.log10(np.abs(fftout))
fftann = []  # annotation handles for the detected spectral peaks
fftplot, = axa[1,1].semilogx(fftfrq,fftmag,'r',label="Peaks") # plotting the spectrum
axa[1,1].set_xlabel('Freq (Hz)')
axa[1,1].set_xlim(10,10000)
axa[1,1].grid(True,'both')
axa[1,1].set_ylabel('Vout dB')
axa[1,1].set_ylim(-120,40)
axa[1,1].relim()
axa[1,1].autoscale_view(True,True,True)
handles, labels = axa[1,1].get_legend_handles_labels()
axa[1,1].legend(handles[::-1], labels[::-1])
def updatefft():
    """Recompute the one-sided spectrum of `vout`, refresh the FFT plot and
    annotate the detected peaks (only when there are few enough to read)."""
    global fftfrq,fftann
    global vout,fftplot,ax,n,noise
    fftout = np.fft.fft(vout)/n # fft computing and normalization
    # fix: `range(n/2)` is a float index and raises TypeError on Python 3
    fftout = fftout[:n//2]
    fftmag = 20*np.log10(np.abs(fftout))
    peakindices = fftmag > -90  # bins above the -90 dB noise floor
    peakfrqs = fftfrq[peakindices]
    peaks = fftmag[peakindices]
    peaksgtdc = len(peaks[peakfrqs > 10])  # ignore DC / sub-10 Hz bins
    handles, labels = axa[1,1].get_legend_handles_labels()
    fftplot.set_ydata(fftmag)
    if peaksgtdc == 1:
        plabel = "Peak"
    else:
        plabel = "Peaks"
    axa[1,1].legend(handles[::-1], ["%d %s"%(peaksgtdc,plabel)])
    # Drop the previous peak annotations before drawing the new set.
    for i,ann in enumerate(fftann):
        ann.remove()
    fftann[:] = []
    if peaksgtdc < 20:
        '''
        peakindices = fftmag > -30
        peakfrqs = fftfrq[peakindices]
        peaks = fftmag[peakindices]
        peaksgtdc = len(peaks[peakfrqs > 10])
        '''
        for i in range(len(peakfrqs)):
            ann = axa[1,1].annotate("%.0f,%.1f"%(peakfrqs[i],peaks[i]),
                xy=(peakfrqs[i],peaks[i]),
                xycoords='data',
                xytext=(-5,8),
                textcoords='offset points',
                # fix: 'left' is not a valid *vertical* alignment value and
                # raises ValueError in matplotlib; 'bottom' anchors the
                # rotated label above the peak as intended
                verticalalignment='bottom',
                rotation=90,
                bbox=dict(boxstyle="round", fc="1.0"),
                size=10)
            fftann.append(ann)
updatefft()
def updatevout():
    """Rebuild `vout` from the current parameters, remove its DC component,
    re-dither with fresh noise, and refresh every dependent plot."""
    global vin,vout,gain,offset,level,voutplot,axa
    voutCalc()
    voutmean = vout.mean()
    # AC-couple the output and add a fresh noise floor.
    # fix: modern NumPy rejects a float `size`, so cast Fs explicitly.
    vout = vout - voutmean + np.random.normal(0.0,0.1,int(Fs))/100.0
    vinplot.set_ydata(offset + level*vin)
    voutplot.set_ydata(vout)
    axa[0,1].relim()
    axa[0,1].autoscale_view(False,True,True)
    updatetransfer()
    updatefft()
def updategain(val,update=True):
    # Slider/radio callback: store the new global gain, then rebuild vout.
    # NOTE(review): `update` is accepted but never read in any of these four
    # setters — presumably meant to suppress redraws; confirm before use.
    global gain
    gain = val
    updatevout()
def updatebias(val,update=True):
    # Slider callback: store the new global bias (tube-model curvature).
    global bias
    bias = val
    updatevout()
def updateoffset(val,update=True):
    # Slider callback: store the new global DC offset applied to the input.
    global offset
    offset = val
    updatevout()
def updatelevel(val,update=True):
    # Slider callback: store the new global input level (pre-gain scaling).
    global level
    level = val
    updatevout()
def updatevin():
    # Re-sum the three sine components into the global input signal and
    # refresh the Vin trace; updatevout() immediately rescales that trace
    # to offset + level*vin, so this set_ydata is transient.
    global vin,vin1,vin2,vin3,vinplot
    vin = vin1 + vin2 + vin3
    vinplot.set_ydata(vin)
    updatevout()
def updatevin1plot():
    """Recompute input component 1 from its amplitude/frequency/phase,
    refresh its curve and legend entry, then rebuild the summed input."""
    global vin1,vin1plot,freq1,ampl1,phase1
    vin1 = ampl1 * np.sin(2 * np.pi * freq1 * t + phase1)
    vin1plot.set_ydata(vin1)
    vin1plot.set_label("%dHz" % freq1)
    hnd, lbl = axa[0, 0].get_legend_handles_labels()
    axa[0, 0].legend(hnd[::-1], lbl[::-1])
    updatevin()
def updatefreq1(val):
    """Slider callback: set the frequency of component 1 (integer Hz)."""
    global freq1
    freq1 = int(val)
    updatevin1plot()
def updateampl1(val):
    """Slider callback: set the amplitude of component 1."""
    global ampl1
    ampl1 = val
    updatevin1plot()
def updatephase1(val):
    """Slider callback: set the phase of component 1."""
    global phase1
    phase1 = val
    updatevin1plot()
def updatevin2plot():
    """Recompute input component 2 from its amplitude/frequency/phase,
    refresh its curve and legend entry, then rebuild the summed input."""
    global vin2,vin2plot,freq2,ampl2,phase2
    vin2 = ampl2 * np.sin(2 * np.pi * freq2 * t + phase2)
    vin2plot.set_ydata(vin2)
    vin2plot.set_label("%dHz" % freq2)
    hnd, lbl = axa[0, 0].get_legend_handles_labels()
    axa[0, 0].legend(hnd[::-1], lbl[::-1])
    updatevin()
def updatefreq2(val):
    """Slider callback: set the frequency of component 2 (integer Hz)."""
    global freq2
    freq2 = int(val)
    updatevin2plot()
def updateampl2(val):
    """Slider callback: set the amplitude of component 2."""
    global ampl2
    ampl2 = val
    updatevin2plot()
def updatephase2(val):
    """Slider callback: set the phase of component 2."""
    global phase2
    phase2 = val
    updatevin2plot()
def updatevin3plot():
    """Recompute input component 3 from its amplitude/frequency/phase,
    refresh its curve and legend entry, then rebuild the summed input."""
    global vin3,vin3plot,freq3,ampl3,phase3
    vin3 = ampl3 * np.sin(2 * np.pi * freq3 * t + phase3)
    vin3plot.set_ydata(vin3)
    vin3plot.set_label("%dHz" % freq3)
    hnd, lbl = axa[0, 0].get_legend_handles_labels()
    axa[0, 0].legend(hnd[::-1], lbl[::-1])
    updatevin()
def updatefreq3(val):
    """Slider callback: set the frequency of component 3 (integer Hz)."""
    global freq3
    freq3 = int(val)
    updatevin3plot()
def updateampl3(val):
    """Slider callback: set the amplitude of component 3."""
    global ampl3
    ampl3 = val
    updatevin3plot()
def updatephase3(val):
    """Slider callback: set the phase of component 3."""
    global phase3
    phase3 = val
    updatevin3plot()
# --- Control strip: one thin axes + Slider per adjustable parameter. ------
# NOTE(review): the phase sliders range over [-90, 90], which suggests
# degrees, but updatephaseN feeds the value straight into np.sin() as
# radians — confirm the intended unit.
# Component 1: frequency / amplitude / phase.
axfreq1 = plt.axes([0.25, 0.1150, 0.65, 0.01])
sfreq1 = Slider(axfreq1, 'Freq1', 10.0, 2000, valinit=freq1,valfmt='%1d')
sfreq1.on_changed(updatefreq1)
axampl1 = plt.axes([0.25, 0.1025, 0.65, 0.01])
sampl1 = Slider(axampl1, 'Ampl1', 0.0, 2.0, valinit=ampl1)
sampl1.on_changed(updateampl1)
axphase1 = plt.axes([0.25, 0.09, 0.65, 0.01])
sphase1 = Slider(axphase1, 'Phase1', -90, 90, valinit=0,valfmt='%1d')
sphase1.on_changed(updatephase1)
# Component 2: frequency / amplitude / phase.
axfreq2 = plt.axes([0.25, 0.0750, 0.65, 0.01])
sfreq2 = Slider(axfreq2, 'Freq2', 10.0, 2000, valinit=freq2,valfmt='%1d')
sfreq2.on_changed(updatefreq2)
axampl2 = plt.axes([0.25, 0.0625, 0.65, 0.01])
sampl2 = Slider(axampl2, 'Ampl2', 0.0, 2.0, valinit=ampl2)
sampl2.on_changed(updateampl2)
axphase2 = plt.axes([0.25, 0.05, 0.65, 0.01])
sphase2 = Slider(axphase2, 'Phase2', -90, 90, valinit=0,valfmt='%1d')
sphase2.on_changed(updatephase2)
# Component 3: frequency / amplitude / phase.
axfreq3 = plt.axes([0.25, 0.0350, 0.65, 0.01])
sfreq3 = Slider(axfreq3, 'Freq3', 10.0, 2000, valinit=freq3,valfmt='%1d')
sfreq3.on_changed(updatefreq3)
axampl3 = plt.axes([0.25, 0.0225, 0.65, 0.01])
sampl3 = Slider(axampl3, 'Ampl3', 0.0, 2.0, valinit=ampl3)
sampl3.on_changed(updateampl3)
axphase3 = plt.axes([0.25, 0.0100, 0.65, 0.01])
sphase3 = Slider(axphase3, 'Phase3', -90, 90, valinit=0,valfmt='%1d')
sphase3.on_changed(updatephase3)
# Gain-model parameters: gain / bias / offset / level.
axgain = plt.axes([0.25, 0.1675, 0.65, 0.01])
sgain = Slider(axgain, 'gain', 0.0, 100, valinit=gain)
sgain.on_changed(updategain)
axbias = plt.axes([0.25, 0.1550, 0.65, 0.01])
sbias = Slider(axbias, 'bias', 0.0, 4.0, valinit=bias)
sbias.on_changed(updatebias)
axoffset = plt.axes([0.25, 0.1425, 0.65, 0.01])
soffset = Slider(axoffset, 'offset', -5, 5, valinit=offset)
soffset.on_changed(updateoffset)
axlevel = plt.axes([0.25, 0.13, 0.65, 0.01])
slevel = Slider(axlevel, 'level', 0.0, 10, valinit=level)
slevel.on_changed(updatelevel)
def freqSet(label):
    # Radio-button callback for the chord preset table `ftable`.
    # 'Play' is a pseudo-entry that plays the current signal instead of
    # changing any frequencies.
    if label == 'Play':
        play()
        return
    # NOTE: these tuple-unpacks bind *locals* (no `global` statement here);
    # the global state is updated through the updater calls below.
    freq1,freq2,freq3 = ftable[label]
    ampl1,ampl2,ampl3 = [1.0,1.0,1.0]
    updatefreq1(freq1)
    updatefreq2(freq2)
    updatefreq3(freq3)
    updateampl1(ampl1)
    updateampl2(ampl2)
    updateampl3(ampl3)
    # Syncing the sliders re-triggers their on_changed callbacks, so the
    # updates above run a second time — redundant but harmless.
    sfreq1.set_val(freq1)
    sfreq2.set_val(freq2)
    sfreq3.set_val(freq3)
    sampl1.set_val(ampl1)
    sampl2.set_val(ampl2)
    sampl3.set_val(ampl3)
    fig.canvas.draw_idle()
# Radio buttons for the chord presets (left edge of the control strip).
freqradiox = plt.axes([0.025, 0.02, 0.07, 0.02*len(ftable)])
freqradio = RadioButtons(freqradiox, sorted(ftable.keys()), active=0)
# fix: `RadioButtons.circles` was removed in matplotlib 3.7 — guard so the
# radius tweak is skipped (instead of crashing) on newer versions.
for circ in getattr(freqradio, 'circles', []):
    circ.set_radius(0.01*len(ftable))
freqradio.on_clicked(freqSet)
def gainSet(label):
    """Radio-button callback: switch to the gain preset `label`.

    'linear' and 'clipped' are literal gain types; every other preset
    ('bias0', 'even*', 'odd*') uses the arctan tube model.
    """
    global gaintype
    if label == 'linear':
        gaintype = 'linear'
    elif label == 'clipped':
        gaintype = 'clipped'
    else:
        gaintype = 'tube'
    # Locals here; the globals are updated through the setter calls below.
    gain,bias,offset,level = gtable[label]
    updategain(gain,False)
    updatebias(bias,False)  # fix: bias from the preset table was never applied
    updateoffset(offset,False)
    updatelevel(level,True)
    updatevout()
    sgain.set_val(gain)
    sbias.set_val(bias)  # fix: keep the bias slider in sync with the preset
    soffset.set_val(offset)
    slevel.set_val(level)
    fig.canvas.draw_idle()
# Radio buttons for the gain presets; 'linear' and 'clipped' are pinned to
# the top, the remaining presets follow in sorted order.
gainradiox = plt.axes([0.1, 0.02, 0.08, 0.015*len(gtable)])
tempgtable = gtable.copy()
del tempgtable['linear']
del tempgtable['clipped']
tempgtable = sorted(tempgtable.keys())
tempgtable.insert(0,"clipped")
tempgtable.insert(0,"linear")
gainradio = RadioButtons(gainradiox, tempgtable, active=0)
# fix: `RadioButtons.circles` was removed in matplotlib 3.7 — guard so the
# radius tweak is skipped (instead of crashing) on newer versions.
for circ in getattr(gainradio, 'circles', []):
    circ.set_radius(0.002*len(gtable))
gainradio.on_clicked(gainSet)
mng = plt.get_current_fig_manager()
# mng.resize(*mng.window.maxsize())
# Force a large window; only effective on GUI backends that support resize.
mng.resize(1920,1080)
# Reserve the bottom ~10% of the figure for the slider/radio control strip.
plt.tight_layout(rect=[0, 0.1, 1, 1])
plt.show()
| [
"matplotlib.pyplot.tight_layout",
"numpy.absolute",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.widgets.RadioButtons",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.get_current_fig_manager",
"matplotlib.widgets.Slider",
"numpy.fft.fft",
"numpy.clip",
"numpy.sin",
"numpy.arange",
"numpy... | [((1062, 1081), 'numpy.arange', 'np.arange', (['(0)', '(1)', 'Ts'], {}), '(0, 1, Ts)\n', (1071, 1081), True, 'import numpy as np\n'), ((1139, 1151), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1148, 1151), True, 'import numpy as np\n'), ((1674, 1692), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (1686, 1692), True, 'import matplotlib.pyplot as plt\n'), ((9757, 9792), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.115, 0.65, 0.01]'], {}), '([0.25, 0.115, 0.65, 0.01])\n', (9765, 9792), True, 'import matplotlib.pyplot as plt\n'), ((9804, 9869), 'matplotlib.widgets.Slider', 'Slider', (['axfreq1', '"""Freq1"""', '(10.0)', '(2000)'], {'valinit': 'freq1', 'valfmt': '"""%1d"""'}), "(axfreq1, 'Freq1', 10.0, 2000, valinit=freq1, valfmt='%1d')\n", (9810, 9869), False, 'from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons\n'), ((9912, 9948), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.1025, 0.65, 0.01]'], {}), '([0.25, 0.1025, 0.65, 0.01])\n', (9920, 9948), True, 'import matplotlib.pyplot as plt\n'), ((9959, 10008), 'matplotlib.widgets.Slider', 'Slider', (['axampl1', '"""Ampl1"""', '(0.0)', '(2.0)'], {'valinit': 'ampl1'}), "(axampl1, 'Ampl1', 0.0, 2.0, valinit=ampl1)\n", (9965, 10008), False, 'from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons\n'), ((10054, 10088), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.09, 0.65, 0.01]'], {}), '([0.25, 0.09, 0.65, 0.01])\n', (10062, 10088), True, 'import matplotlib.pyplot as plt\n'), ((10100, 10160), 'matplotlib.widgets.Slider', 'Slider', (['axphase1', '"""Phase1"""', '(-90)', '(90)'], {'valinit': '(0)', 'valfmt': '"""%1d"""'}), "(axphase1, 'Phase1', -90, 90, valinit=0, valfmt='%1d')\n", (10106, 10160), False, 'from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons\n'), ((10204, 10239), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.075, 0.65, 0.01]'], {}), '([0.25, 0.075, 0.65, 0.01])\n', (10212, 10239), 
True, 'import matplotlib.pyplot as plt\n'), ((10251, 10316), 'matplotlib.widgets.Slider', 'Slider', (['axfreq2', '"""Freq2"""', '(10.0)', '(2000)'], {'valinit': 'freq2', 'valfmt': '"""%1d"""'}), "(axfreq2, 'Freq2', 10.0, 2000, valinit=freq2, valfmt='%1d')\n", (10257, 10316), False, 'from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons\n'), ((10360, 10396), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.0625, 0.65, 0.01]'], {}), '([0.25, 0.0625, 0.65, 0.01])\n', (10368, 10396), True, 'import matplotlib.pyplot as plt\n'), ((10407, 10456), 'matplotlib.widgets.Slider', 'Slider', (['axampl2', '"""Ampl2"""', '(0.0)', '(2.0)'], {'valinit': 'ampl2'}), "(axampl2, 'Ampl2', 0.0, 2.0, valinit=ampl2)\n", (10413, 10456), False, 'from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons\n'), ((10502, 10536), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.05, 0.65, 0.01]'], {}), '([0.25, 0.05, 0.65, 0.01])\n', (10510, 10536), True, 'import matplotlib.pyplot as plt\n'), ((10547, 10607), 'matplotlib.widgets.Slider', 'Slider', (['axphase2', '"""Phase2"""', '(-90)', '(90)'], {'valinit': '(0)', 'valfmt': '"""%1d"""'}), "(axphase2, 'Phase2', -90, 90, valinit=0, valfmt='%1d')\n", (10553, 10607), False, 'from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons\n'), ((10652, 10687), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.035, 0.65, 0.01]'], {}), '([0.25, 0.035, 0.65, 0.01])\n', (10660, 10687), True, 'import matplotlib.pyplot as plt\n'), ((10699, 10764), 'matplotlib.widgets.Slider', 'Slider', (['axfreq3', '"""Freq3"""', '(10.0)', '(2000)'], {'valinit': 'freq3', 'valfmt': '"""%1d"""'}), "(axfreq3, 'Freq3', 10.0, 2000, valinit=freq3, valfmt='%1d')\n", (10705, 10764), False, 'from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons\n'), ((10808, 10844), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.0225, 0.65, 0.01]'], {}), '([0.25, 0.0225, 0.65, 0.01])\n', (10816, 10844), True, 'import 
matplotlib.pyplot as plt\n'), ((10855, 10904), 'matplotlib.widgets.Slider', 'Slider', (['axampl3', '"""Ampl3"""', '(0.0)', '(2.0)'], {'valinit': 'ampl3'}), "(axampl3, 'Ampl3', 0.0, 2.0, valinit=ampl3)\n", (10861, 10904), False, 'from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons\n'), ((10950, 10984), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.01, 0.65, 0.01]'], {}), '([0.25, 0.01, 0.65, 0.01])\n', (10958, 10984), True, 'import matplotlib.pyplot as plt\n'), ((10997, 11057), 'matplotlib.widgets.Slider', 'Slider', (['axphase3', '"""Phase3"""', '(-90)', '(90)'], {'valinit': '(0)', 'valfmt': '"""%1d"""'}), "(axphase3, 'Phase3', -90, 90, valinit=0, valfmt='%1d')\n", (11003, 11057), False, 'from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons\n'), ((11100, 11136), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.1675, 0.65, 0.01]'], {}), '([0.25, 0.1675, 0.65, 0.01])\n', (11108, 11136), True, 'import matplotlib.pyplot as plt\n'), ((11146, 11192), 'matplotlib.widgets.Slider', 'Slider', (['axgain', '"""gain"""', '(0.0)', '(100)'], {'valinit': 'gain'}), "(axgain, 'gain', 0.0, 100, valinit=gain)\n", (11152, 11192), False, 'from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons\n'), ((11233, 11268), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.155, 0.65, 0.01]'], {}), '([0.25, 0.155, 0.65, 0.01])\n', (11241, 11268), True, 'import matplotlib.pyplot as plt\n'), ((11279, 11325), 'matplotlib.widgets.Slider', 'Slider', (['axbias', '"""bias"""', '(0.0)', '(4.0)'], {'valinit': 'bias'}), "(axbias, 'bias', 0.0, 4.0, valinit=bias)\n", (11285, 11325), False, 'from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons\n'), ((11368, 11404), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.1425, 0.65, 0.01]'], {}), '([0.25, 0.1425, 0.65, 0.01])\n', (11376, 11404), True, 'import matplotlib.pyplot as plt\n'), ((11416, 11465), 'matplotlib.widgets.Slider', 'Slider', (['axoffset', '"""offset"""', '(-5)', 
'(5)'], {'valinit': 'offset'}), "(axoffset, 'offset', -5, 5, valinit=offset)\n", (11422, 11465), False, 'from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons\n'), ((11511, 11545), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.13, 0.65, 0.01]'], {}), '([0.25, 0.13, 0.65, 0.01])\n', (11519, 11545), True, 'import matplotlib.pyplot as plt\n'), ((11556, 11604), 'matplotlib.widgets.Slider', 'Slider', (['axlevel', '"""level"""', '(0.0)', '(10)'], {'valinit': 'level'}), "(axlevel, 'level', 0.0, 10, valinit=level)\n", (11562, 11604), False, 'from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons\n'), ((13056, 13102), 'matplotlib.widgets.RadioButtons', 'RadioButtons', (['gainradiox', 'tempgtable'], {'active': '(0)'}), '(gainradiox, tempgtable, active=0)\n', (13068, 13102), False, 'from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons\n'), ((13210, 13239), 'matplotlib.pyplot.get_current_fig_manager', 'plt.get_current_fig_manager', ([], {}), '()\n', (13237, 13239), True, 'import matplotlib.pyplot as plt\n'), ((13299, 13336), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '[0, 0.1, 1, 1]'}), '(rect=[0, 0.1, 1, 1])\n', (13315, 13336), True, 'import matplotlib.pyplot as plt\n'), ((13338, 13348), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13346, 13348), True, 'import matplotlib.pyplot as plt\n'), ((1268, 1298), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(0.1)', 'Fs'], {}), '(0.0, 0.1, Fs)\n', (1284, 1298), True, 'import numpy as np\n'), ((1457, 1485), 'numpy.arange', 'np.arange', (['(-1.0)', '(1.0)', '(2 * Ts)'], {}), '(-1.0, 1.0, 2 * Ts)\n', (1466, 1485), True, 'import numpy as np\n'), ((1519, 1557), 'numpy.sin', 'np.sin', (['(2 * np.pi * freq1 * t + phase1)'], {}), '(2 * np.pi * freq1 * t + phase1)\n', (1525, 1557), True, 'import numpy as np\n'), ((1563, 1601), 'numpy.sin', 'np.sin', (['(2 * np.pi * freq2 * t + phase2)'], {}), '(2 * np.pi * freq2 * t + phase2)\n', 
(1569, 1601), True, 'import numpy as np\n'), ((1607, 1645), 'numpy.sin', 'np.sin', (['(2 * np.pi * freq3 * t + phase3)'], {}), '(2 * np.pi * freq3 * t + phase3)\n', (1613, 1645), True, 'import numpy as np\n'), ((4605, 4625), 'numpy.arange', 'np.arange', (['(0)', '(25)', 'Ts'], {}), '(0, 25, Ts)\n', (4614, 4625), True, 'import numpy as np\n'), ((5154, 5171), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (5169, 5171), False, 'import pyaudio\n'), ((5433, 5449), 'numpy.fft.fft', 'np.fft.fft', (['vout'], {}), '(vout)\n', (5443, 5449), True, 'import numpy as np\n'), ((1913, 1980), 'numpy.clip', 'np.clip', (['(gain * (offset + level * vin) + noise)', '(-clipping)', 'clipping'], {}), '(gain * (offset + level * vin) + noise, -clipping, clipping)\n', (1920, 1980), True, 'import numpy as np\n'), ((2343, 2403), 'numpy.clip', 'np.clip', (['(gain * (bias * transferallvin))', '(-clipping)', 'clipping'], {}), '(gain * (bias * transferallvin), -clipping, clipping)\n', (2350, 2403), True, 'import numpy as np\n'), ((4662, 4705), 'numpy.sin', 'np.sin', (['(2 * np.pi * freq1 * play_t + phase1)'], {}), '(2 * np.pi * freq1 * play_t + phase1)\n', (4668, 4705), True, 'import numpy as np\n'), ((4720, 4763), 'numpy.sin', 'np.sin', (['(2 * np.pi * freq2 * play_t + phase2)'], {}), '(2 * np.pi * freq2 * play_t + phase2)\n', (4726, 4763), True, 'import numpy as np\n'), ((4778, 4821), 'numpy.sin', 'np.sin', (['(2 * np.pi * freq3 * play_t + phase3)'], {}), '(2 * np.pi * freq3 * play_t + phase3)\n', (4784, 4821), True, 'import numpy as np\n'), ((4994, 5058), 'numpy.clip', 'np.clip', (['(gain * (offset + level * play_vin))', '(-clipping)', 'clipping'], {}), '(gain * (offset + level * play_vin), -clipping, clipping)\n', (5001, 5058), True, 'import numpy as np\n'), ((5539, 5553), 'numpy.abs', 'np.abs', (['fftout'], {}), '(fftout)\n', (5545, 5553), True, 'import numpy as np\n'), ((6052, 6068), 'numpy.fft.fft', 'np.fft.fft', (['vout'], {}), '(vout)\n', (6062, 6068), True, 'import numpy as 
np\n'), ((8206, 8244), 'numpy.sin', 'np.sin', (['(2 * np.pi * freq1 * t + phase1)'], {}), '(2 * np.pi * freq1 * t + phase1)\n', (8212, 8244), True, 'import numpy as np\n'), ((8747, 8785), 'numpy.sin', 'np.sin', (['(2 * np.pi * freq2 * t + phase2)'], {}), '(2 * np.pi * freq2 * t + phase2)\n', (8753, 8785), True, 'import numpy as np\n'), ((9288, 9326), 'numpy.sin', 'np.sin', (['(2 * np.pi * freq3 * t + phase3)'], {}), '(2 * np.pi * freq3 * t + phase3)\n', (9294, 9326), True, 'import numpy as np\n'), ((2456, 2488), 'numpy.arctan', 'np.arctan', (['(bias * transferallvin)'], {}), '(bias * transferallvin)\n', (2465, 2488), True, 'import numpy as np\n'), ((5105, 5150), 'numpy.arctan', 'np.arctan', (['(bias * (offset + level * play_vin))'], {}), '(bias * (offset + level * play_vin))\n', (5114, 5150), True, 'import numpy as np\n'), ((6166, 6180), 'numpy.abs', 'np.abs', (['fftout'], {}), '(fftout)\n', (6172, 6180), True, 'import numpy as np\n'), ((7450, 7480), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(0.1)', 'Fs'], {}), '(0.0, 0.1, Fs)\n', (7466, 7480), True, 'import numpy as np\n'), ((2022, 2062), 'numpy.arctan', 'np.arctan', (['(bias * (offset + level * vin))'], {}), '(bias * (offset + level * vin))\n', (2031, 2062), True, 'import numpy as np\n'), ((5287, 5309), 'numpy.absolute', 'np.absolute', (['play_vout'], {}), '(play_vout)\n', (5298, 5309), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from glob import glob
N_BLOCKS = 4  # number of experimental blocks per session file

# Calculating the reaction time means
rt_data = {block: [] for block in range(1, N_BLOCKS+1)} # Range values chosen because block numbers start at 1, range iterator at 0
rt_means = {}
files = glob('reaction_times_*.csv')
for csv_file in files:  # renamed from `file` for clarity
    df = pd.read_csv(csv_file)
    # Extracting the relevant values (RTs) for each block of each file
    for block in range(1, N_BLOCKS+1):
        df_block = df.loc[df['Block'] == block]
        # fix: escape the backslash — 'RT\ms' relied on the invalid escape
        # sequence \m (SyntaxWarning since Python 3.12); the runtime string
        # is byte-identical, so the column lookup is unchanged.
        rt_data[block].append(df_block['RT\\ms'].mean())
# NOTE(review): with no matching CSV files these means are NaN — confirm
# whether an explicit "no data" error would be preferable here.
for block, values in rt_data.items():
    rt_means[block] = np.mean(values)

# Plotting the figure
conditions = ['Ipsolateral', 'Contralateral']
# Block->condition mapping: 3/1 = right hand, 2/4 = left hand.
mean_reaction_times_right = [rt_means[3], rt_means[1]]
mean_reaction_times_left = [rt_means[2], rt_means[4]]
# Plotting factorial plot for the right hand data
plt.plot(conditions, mean_reaction_times_right)
plt.scatter(conditions, mean_reaction_times_right)
# Plotting factorial plot for the left hand data
plt.plot(conditions, mean_reaction_times_left)
plt.scatter(conditions, mean_reaction_times_left)
plt.ylabel('Reaction Times [ms]')
plt.xlabel('Conditions')
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"numpy.mean",
"glob.glob",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((301, 329), 'glob.glob', 'glob', (['"""reaction_times_*.csv"""'], {}), "('reaction_times_*.csv')\n", (305, 329), False, 'from glob import glob\n'), ((898, 945), 'matplotlib.pyplot.plot', 'plt.plot', (['conditions', 'mean_reaction_times_right'], {}), '(conditions, mean_reaction_times_right)\n', (906, 945), True, 'import matplotlib.pyplot as plt\n'), ((946, 996), 'matplotlib.pyplot.scatter', 'plt.scatter', (['conditions', 'mean_reaction_times_right'], {}), '(conditions, mean_reaction_times_right)\n', (957, 996), True, 'import matplotlib.pyplot as plt\n'), ((1047, 1093), 'matplotlib.pyplot.plot', 'plt.plot', (['conditions', 'mean_reaction_times_left'], {}), '(conditions, mean_reaction_times_left)\n', (1055, 1093), True, 'import matplotlib.pyplot as plt\n'), ((1094, 1143), 'matplotlib.pyplot.scatter', 'plt.scatter', (['conditions', 'mean_reaction_times_left'], {}), '(conditions, mean_reaction_times_left)\n', (1105, 1143), True, 'import matplotlib.pyplot as plt\n'), ((1145, 1178), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Reaction Times [ms]"""'], {}), "('Reaction Times [ms]')\n", (1155, 1178), True, 'import matplotlib.pyplot as plt\n'), ((1179, 1203), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Conditions"""'], {}), "('Conditions')\n", (1189, 1203), True, 'import matplotlib.pyplot as plt\n'), ((1205, 1215), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1213, 1215), True, 'import matplotlib.pyplot as plt\n'), ((358, 375), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (369, 375), True, 'import pandas as pd\n'), ((652, 667), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (659, 667), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import networkx as nx
import json, pytz, os
from collections import Counter
from tqdm import tqdm
from .tennis_utils import *
from .handler_utils import *
TIMEZONE = {
"rg17": pytz.timezone('Europe/Paris'),
"uo17": pytz.timezone('America/New_York')
}
QUALIFIER_START = {
"rg17": 1495576800, # 2017-05-24 0:00 Paris (2017-05-22 and 2017-05-23 is missing from data)
"uo17": 1503374400 # 2017-08-22 0:00 New York
}
TOURNAMENT_START = {
"rg17": 1495922400, # 2017-05-28 0:00 Paris
"uo17": 1503892800 # 2017-08-28 0:00 New York
}
DATES_WITH_QUALIFIERS = {
"rg17": ["2017-05-%.2i" % i for i in range(24,32)] + ["2017-06-%.2i" % i for i in range(1,12)],
"uo17": ["2017-08-%.2i" % i for i in range(22,32)] + ["2017-09-%.2i" % i for i in range(1,11)]
}
DATES_WITHOUT_QUALIFIERS = {
"rg17": ["2017-05-%.2i" % i for i in range(28,32)] + ["2017-06-%.2i" % i for i in range(1,12)],
"uo17": ["2017-08-%.2i" % i for i in range(28,32)] + ["2017-09-%.2i" % i for i in range(1,11)]
}
DATES_WITH_NO_GAMES = {
"rg17": ["2017-05-27"],
"uo17": ["2017-08-26","2017-08-27"]
}
class TennisDataHandler():
    """Loader and exporter for the RG17 / UO17 Twitter tennis mention datasets.

    On construction the raw data is downloaded if missing, the mention edge
    list is filtered to the covered tournament days, and id/name mappings are
    built. The class then exposes helpers to query daily players, export
    relevance labels and edges, and build snapshot-based JSON data for
    downstream models.
    """
    def __init__(self, data_dir, data_id, include_qualifiers=True, verbose=False):
        """Load (downloading first, if needed) the dataset identified by data_id.

        Parameters:
            data_dir (str): root folder that holds / receives the datasets.
            data_id (str): dataset identifier ("rg17" or "uo17").
            include_qualifiers (bool): include the qualifier days as well.
            verbose (bool): print progress information while loading.
        """
        self.verbose = verbose
        self.data_id = data_id
        self.data_dir = data_dir + "/" + data_id
        if not os.path.exists(self.data_dir):
            # NOTE(review): shells out to wget/unzip -- assumes both are
            # available on PATH; verify on the target platform.
            bashCommand = """mkdir -p %s; cd %s; wget https://dms.sztaki.hu/~fberes/tennis/%s.zip; unzip %s.zip""" % (data_dir, data_dir, data_id, data_id)
            print(bashCommand)
            print("Downloading data from 'https://dms.sztaki.hu/~fberes/tennis' STARTED...")
            os.system(bashCommand)
            print("Data was DOWNLOADED!")
        self.include_qualifiers = include_qualifiers
        self._load_files(self.data_id, self.data_dir)
        self._filter_data()
        self._extract_mappings()
        # prepare_edges comes from handler_utils (star import above).
        self.weighted_edges, self.weighted_edges_grouped, self.edges_grouped = prepare_edges(self.mentions, "date")
        #self._prepare_edges()
        self.daily_p_dict, self.daily_p_df = extract_daily_players(self.schedule, self.player_accounts)
    def _load_files(self, data_id, data_dir):
        """Read the mention list, the match schedule and the player-account map.

        Sets self.mentions (DataFrame sorted by epoch), self.schedule
        (DataFrame) and self.player_accounts (dict: player name -> accounts).
        """
        mention_file_path = "%s/%s_mentions_with_names.csv" % (data_dir, data_id)
        tennis_match_file_path = "%s/%s_schedule.csv" % (data_dir, data_id)
        player_assigments_path = "%s/%s_player_accounts.json" % (data_dir, data_id)
        mentions = pd.read_csv(mention_file_path, sep="|")
        mentions = mentions[["epoch","src","trg","src_screen_str", "trg_screen_str"]].sort_values("epoch")
        self.mentions = mentions
        if self.verbose:
            print("\n### Load Twitter mentions ###")
            print(self.mentions.head(3))
        # The two datasets were exported with different CSV separators.
        sep = "|" if data_id == "rg17" else ";"
        self.schedule = pd.read_csv(tennis_match_file_path, sep=sep)
        if self.verbose:
            print("\n### Load event schedule ###")
            print(self.schedule.head(3))
        with open(player_assigments_path) as f:
            self.player_accounts = json.load(f)
        if self.verbose:
            print("\n### Load player accounts ###")
            print("<NAME>al accounts:", self.player_accounts["<NAME>"])
        if self.verbose:
            print("Done")
    def _filter_data(self):
        """Restrict the mentions to the covered time window and add a date column.

        Sets self.start_time / self.end_time (epoch bounds), self.dates,
        self.dates_with_no_games, self.number_of_edges and
        self.number_of_nodes.
        """
        if self.include_qualifiers:
            self.start_time = QUALIFIER_START[self.data_id]
            self.dates = DATES_WITH_QUALIFIERS[self.data_id]
        else:
            self.start_time = TOURNAMENT_START[self.data_id]
            self.dates = DATES_WITHOUT_QUALIFIERS[self.data_id]
        # One day = 86400 seconds; the window ends after the last covered day.
        self.end_time = self.start_time + 86400 * len(self.dates)
        self.dates_with_no_games = DATES_WITH_NO_GAMES[self.data_id]
        if self.verbose:
            print("\n### Filter data ###")
            print("Start time:", self.start_time)
            print("End time:", self.end_time)
            print("Number of days:", len(self.dates))
            print("Dates:", self.dates)
            print("Dates with no games:", self.dates_with_no_games)
        mentions = self.mentions
        mentions = mentions[(mentions["epoch"] >= self.start_time) & (mentions["epoch"] <= self.end_time)]
        # epoch2date (handler_utils) converts epochs to local-date strings.
        mentions = mentions.assign(date=mentions["epoch"].apply(lambda x: epoch2date(x, TIMEZONE[self.data_id])))
        self.number_of_edges = len(mentions)
        self.number_of_nodes = len(set(mentions["src"]).union(set(mentions["trg"])))
        self.mentions = mentions
        if self.verbose:
            print("Number of mentions (edges):", self.number_of_edges)
            print("Number of accounts (nodes):", self.number_of_nodes)
        #print("Min epoch:", mentions["epoch"].min(), "Max epoch:", mentions["epoch"].max())
    def _extract_mappings(self):
        """Build account<->id maps plus a tennis-account -> player-name map.

        Sets self.account_to_id, self.id_to_account and
        self.tennis_account_to_player (normalizes player-name spelling
        differences between the two datasets).
        """
        # account to id
        mentions = self.mentions
        targets = list(zip(mentions["trg_screen_str"], mentions["trg"]))
        sources = list(zip(mentions["src_screen_str"], mentions["src"]))
        self.account_to_id = dict(sources+targets)
        #print(len(self.account_to_id))
        #self.id_to_account = dict(zip(self.account_to_id.values(), self.account_to_id.keys()))
        rev_targets = list(zip(mentions["trg"],mentions["trg_screen_str"]))
        rev_sources = list(zip(mentions["src"],mentions["src_screen_str"]))
        self.id_to_account = dict(rev_sources+rev_targets)
        nodes = list(self.account_to_id.values())
        # tennis account to player
        tennis_account_to_player = {}
        # Some players are spelled differently in the two datasets; this
        # table maps the uo17 spelling to the rg17 one.
        alternative_players = {}
        alternative_players["uo17"] = {
            "<NAME>":"<NAME>",
            "<NAME>":"Co<NAME>",
            "<NAME>":"<NAME>",
            "<NAME>":"<NAME>",
            "<NAME>":"<NAME>",
            "<NAME>":"<NAME>",
            "<NAME>":"<NAME>"
        }
        # reverse alternative name mapping for rg17
        alternative_players["rg17"] = dict(zip(alternative_players["uo17"].values(),alternative_players["uo17"].keys()))
        for p, account_names in self.player_accounts.items():
            cleaned_p = alternative_players[self.data_id].get(p, p)
            for a_name in account_names:
                tennis_account_to_player[a_name] = cleaned_p
        self.tennis_account_to_player = tennis_account_to_player
    def summary(self):
        """Show the data summary"""
        return {
            "data_id":self.data_id,
            "include_qualifiers": self.include_qualifiers,
            "dates": self.dates,
            "dates_with_no_game": self.dates_with_no_games,
            "start_time": self.start_time,
            "end_time": self.end_time,
            "number_of_edges": self.number_of_edges,
            "number_of_nodes": self.number_of_nodes
        }
    def visualize(self, kind="graph", figsize=(12,8)):
        """Visualize the data. Choose from 'graph' and 'players' options for the 'kind' argument."""
        fig = None
        if kind == "graph":
            fig = visu_graph(self, figsize)
        elif kind == "players":
            fig = visu_players(self, figsize)
        else:
            raise RuntimeError("Choose 'kind' parameter from 'players' or 'graph'!")
        return fig
    def get_daily_players(self, date_id):
        """Get daily tennis players"""
        if not date_id in self.dates:
            raise RuntimeError("Invalid date_id! Not present in collected dates:", self.dates)
        elif date_id in self.dates_with_no_games:
            raise RuntimeError("There was no game on this day!")
        else:
            return self.daily_p_dict[date_id]
    def show_daily_players(self):
        """Show daily information about tennis players"""
        return self.daily_p_df[self.daily_p_df["date"].isin(self.dates)]
    def get_daily_relevance_labels(self, binary=True):
        """Return per-day node-relevance label dicts.

        Parameters:
            binary (bool): if True a node is 1.0 only on days it plays;
                otherwise playing days get 2.0 and adjacent days 1.0.
        Returns:
            dict: date -> {node id -> label value}.
        """
        if binary:
            label_value_dict = {"current":1.0, "previous":0.0, "next":0.0}
        else:
            label_value_dict = {"current":2.0, "previous":1.0, "next":1.0}
        daily_found_player_dict = dict(zip(self.daily_p_df["date"], self.daily_p_df["found_players"]))
        for d in self.dates_with_no_games:
            daily_found_player_dict[d] = []
        mapper_dicts = (self.tennis_account_to_player, self.account_to_id, daily_found_player_dict)
        daily_label_dicts = get_daily_label_dicts(label_value_dict, self.dates, self.mentions, mapper_dicts, self.verbose)
        return daily_label_dicts
    def export_relevance_labels(self, output_dir, binary=True, only_pos_label=False):
        """Export label files for each date. Use 'only_pos_label=True' if you want to export only the relevant nodes per day."""
        daily_label_dicts = self.get_daily_relevance_labels(binary)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
            print("%s folder was created." % output_dir)
        with open("%s/summary.json" % output_dir, 'w') as f:
            json.dump(self.summary(), f, indent="   ", sort_keys=False)
        #pd.DataFrame(list(self.account_to_id.items())).sort_values(0).to_csv("%s/account_to_id.csv" % output_dir, index=False)
        #pd.DataFrame(list(self.tennis_account_to_player.items())).sort_values(0).to_csv("%s/tennis_account_to_player.csv" % output_dir, index=False)
        print("Exporting files STARTED")
        for i, date in enumerate(self.dates):
            sorted_user_labels = []
            for u in sorted(daily_label_dicts[date].keys()):
                label_value = daily_label_dicts[date][u]
                if only_pos_label:
                    # export only positive user labels
                    if label_value > 0.0:
                        sorted_user_labels.append((u, label_value))
                else:
                    sorted_user_labels.append((u, label_value))
            print(date, len(sorted_user_labels))
            # scores2file (handler_utils) writes (node, label) pairs to csv.
            scores2file(sorted_user_labels,"%s/labels_%i.csv" % (output_dir, i))
        print("Exporting files DONE")
    def export_edges(self, output_dir, sep="|"):
        """Export edges (mentions) into file. Only time and node identifiers will be expoerted!"""
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
            print("%s folder was created." % output_dir)
        with open("%s/summary.json" % output_dir, 'w') as f:
            json.dump(self.summary(), f, indent="   ", sort_keys=False)
        self.mentions[["epoch","src","trg"]].to_csv("%s/edges.csv" % output_dir, index=False, header=False, sep=sep)
    def get_account_recoder(self, k=None, src_col="src_screen_str", trg_col="trg_screen_str", exclude_final_day=True):
        """Map account names to dense indices ordered by mention activity.

        Parameters:
            k (int or None): keep only the k most active accounts
                (None keeps all).
            src_col, trg_col (str): mention columns to count activity from.
            exclude_final_day (bool): drop the last day before counting.
        Returns:
            dict: account name -> dense integer index (0 = most active).
        """
        mentions = self.mentions.copy()
        enabled_dates = self.dates.copy()
        if exclude_final_day:
            enabled_dates = enabled_dates[:-1]
        mentions = mentions[mentions["date"].isin(enabled_dates)]
        mention_activity = list(mentions[src_col]) + list(mentions[trg_col])
        cnt = Counter(mention_activity)
        if k == None:
            accounts, counts = zip(*cnt.most_common())
        else:
            accounts, counts = zip(*cnt.most_common(k))
        node_mapping = dict(zip(accounts,range(len(accounts))))
        return node_mapping
    def _get_snapshot_edges(self, snapshot_id, grouped_data, edge_type="temporal", account_to_index=None):
        """Collect the (reindexed) edges, weights and node features of one snapshot.

        edge_type: "temporal" keeps duplicate edges with unit weight,
        "weighted" collapses duplicates into weighted edges, anything else
        collapses duplicates but uses unit weights.
        Returns (edge list, weight list, node-feature container X).
        """
        edges_grouped, weighted_edges_grouped = grouped_data
        snap_edges = []
        if edge_type == "temporal":
            df = edges_grouped[snapshot_id]
            src, trg = reindex_edges(df, self.id_to_account, account_to_index)
            weights = list(np.ones(len(df)))
        else:
            df = weighted_edges_grouped[snapshot_id]
            src, trg = reindex_edges(df, self.id_to_account, account_to_index)
            if edge_type == "weighted":
                weights = list(df["weight"])
            else:
                weights = list(np.ones(len(df)))
        snap_edges = list(zip(src, trg))
        weights = weights[:len(snap_edges)]
        G = nx.Graph()
        G.add_edges_from(snap_edges)
        if account_to_index == None:
            X = calculate_node_features(G, None)
        else:
            X = calculate_node_features(G, len(account_to_index))
        return snap_edges, weights, X
    def extract_snapshots(self, delta_t):
        """Assign every mention to a fixed-length time snapshot.

        Parameters:
            delta_t (int): snapshot length in seconds.
        Returns:
            DataFrame: the mentions with an added "snapshot_id" column.
        """
        start_epoch = self.start_time
        days = len(self.dates)
        to_epoch = start_epoch+days*86400+delta_t
        splits=list(range(start_epoch,to_epoch,delta_t))
        mentions = self.mentions.copy()
        mentions = mentions[mentions["date"].isin(self.dates)]
        epochs = np.array(mentions["epoch"])
        # right=False: each bin is [split_i, split_{i+1})
        snapshots_ids = pd.cut(epochs, splits, right=False, labels=range(len(splits)-1))
        mentions["snapshot_id"] = snapshots_ids
        return mentions
    def get_data(self, binary_label=True, edge_type="weighted", max_snapshot_idx=None, top_k_nodes=None):
        """Build the JSON-ready classification data with daily snapshots."""
        snapshots = self.dates
        labels = self.get_daily_relevance_labels(binary=binary_label)
        grouped_data = (self.edges_grouped, self.weighted_edges_grouped)
        return self._prepare_json_data(snapshots, self.mentions, grouped_data, labels, edge_type, max_snapshot_idx, top_k_nodes)
    def get_regression_data(self, delta_t=3*3600, edge_type="weighted", max_snapshot_idx=None, top_k_nodes=None):
        """Build the JSON-ready regression data with delta_t-second snapshots."""
        mentions = self.extract_snapshots(delta_t)
        snapshots = sorted(list(mentions["snapshot_id"].unique()))
        labels = regression_labels(mentions, "snapshot_id")
        weighted_edges, weighted_edges_grouped, edges_grouped = prepare_edges(mentions, "snapshot_id")
        grouped_data = (edges_grouped, weighted_edges_grouped)
        return self._prepare_json_data(snapshots, mentions, grouped_data, labels, edge_type, max_snapshot_idx, top_k_nodes)
    def _prepare_json_data(self, snapshots, mentions, grouped_data, labels, edge_type, max_snapshot_idx, top_k_nodes):
        """Assemble the per-snapshot dict of edges, weights, features and labels.

        Returns a dict keyed by snapshot index (as str) plus "time_periods"
        (number of snapshots) and "node_ids" (account -> dense index map).
        """
        snaps = snapshots.copy()
        account_to_index = self.get_account_recoder(k=top_k_nodes)
        data = {}
        idx = 0
        if max_snapshot_idx != None:
            snaps = snaps[:max_snapshot_idx]
        for idx, snapshot_id in tqdm(enumerate(snaps)):
            edges, weights, X = self._get_snapshot_edges(snapshot_id, grouped_data, edge_type, account_to_index)
            X = list([X[node] for node in range(len(account_to_index))])
            X = X[:len(account_to_index)]
            y = reindex_labels(labels[snapshot_id], self.id_to_account, account_to_index)
            y = list([y.get(node,0) for node in range(len(account_to_index))])
            y = y[:len(account_to_index)]
            data[str(idx)] = {
                "index":idx,
                #"date":date,
                "edges": edges,
                "weights": weights,
                "y": y,
                "X": X,
            }
            #if self.include_qualifiers:
            #    data[str(idx)]["game_day"] = not date in self.dates_with_no_games
            # NOTE(review): idx is rebound by the for-loop each iteration,
            # so this increment has no effect.
            idx += 1
        data["time_periods"] = len(data)
        data["node_ids"] = account_to_index
        return data
    def to_json(self, path, task="classification", delta_t=3*3600, edge_type="weighted", max_snapshot_idx=None, top_k_nodes=None):
        """Serialize classification or regression data to a JSON file at *path*."""
        if task == "classification":
            print("Preparing classification data...")
            data = self.get_data(True, edge_type, max_snapshot_idx, top_k_nodes)
        else:
            print("Preparing regression data...")
            data = self.get_regression_data(delta_t, edge_type, max_snapshot_idx, top_k_nodes)
        with open(path, 'w') as f:
            json.dump(data, f)
        print("done")
| [
"json.dump",
"json.load",
"os.makedirs",
"pandas.read_csv",
"os.path.exists",
"os.system",
"networkx.Graph",
"pytz.timezone",
"numpy.array",
"collections.Counter"
] | [((220, 249), 'pytz.timezone', 'pytz.timezone', (['"""Europe/Paris"""'], {}), "('Europe/Paris')\n", (233, 249), False, 'import json, pytz, os\n'), ((263, 296), 'pytz.timezone', 'pytz.timezone', (['"""America/New_York"""'], {}), "('America/New_York')\n", (276, 296), False, 'import json, pytz, os\n'), ((2510, 2549), 'pandas.read_csv', 'pd.read_csv', (['mention_file_path'], {'sep': '"""|"""'}), "(mention_file_path, sep='|')\n", (2521, 2549), True, 'import pandas as pd\n'), ((2881, 2925), 'pandas.read_csv', 'pd.read_csv', (['tennis_match_file_path'], {'sep': 'sep'}), '(tennis_match_file_path, sep=sep)\n', (2892, 2925), True, 'import pandas as pd\n'), ((11043, 11068), 'collections.Counter', 'Counter', (['mention_activity'], {}), '(mention_activity)\n', (11050, 11068), False, 'from collections import Counter\n'), ((12104, 12114), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (12112, 12114), True, 'import networkx as nx\n'), ((12699, 12726), 'numpy.array', 'np.array', (["mentions['epoch']"], {}), "(mentions['epoch'])\n", (12707, 12726), True, 'import numpy as np\n'), ((1387, 1416), 'os.path.exists', 'os.path.exists', (['self.data_dir'], {}), '(self.data_dir)\n', (1401, 1416), False, 'import json, pytz, os\n'), ((1710, 1732), 'os.system', 'os.system', (['bashCommand'], {}), '(bashCommand)\n', (1719, 1732), False, 'import json, pytz, os\n'), ((3126, 3138), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3135, 3138), False, 'import json, pytz, os\n'), ((8829, 8855), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (8843, 8855), False, 'import json, pytz, os\n'), ((8869, 8892), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (8880, 8892), False, 'import json, pytz, os\n'), ((10228, 10254), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (10242, 10254), False, 'import json, pytz, os\n'), ((10268, 10291), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (10279, 10291), 
False, 'import json, pytz, os\n'), ((15712, 15730), 'json.dump', 'json.dump', (['data', 'f'], {}), '(data, f)\n', (15721, 15730), False, 'import json, pytz, os\n')] |
import os
from itertools import product
import rasterio as rio
from rasterio import windows
import skimage
import skimage.io
import numpy as np
from rasterio.plot import reshape_as_image
import rasterio.mask
from rasterio.features import rasterize
import pandas as pd
import geopandas as gpd
from shapely.geometry import mapping, Point, Polygon
from shapely.ops import cascaded_union
######################## load image tif ###############################
from typing import Any
def open_tif_image(input_path):
    """Open a .tif image file and return its pixel data.

    Parameters:
        input_path (str): path to the .tif image file.
    Returns:
        np.ndarray: the image contents.
    """
    # The original type comment claimed the parameter was a "function";
    # it is a path string. Read with the tifffile plugin so multi-page /
    # multi-channel tifs load correctly.
    im = skimage.io.imread(input_path, plugin="tifffile")
    return im
def get_list_file_names(folder_path):
    """Return the names of all files directly inside *folder_path*.

    Parameters:
        folder_path (str): folder containing the .tif image files.
    Returns:
        list: file names found in the folder (".DS_Store" excluded).
    """
    # os.walk yields (dirpath, dirnames, filenames) per directory; only the
    # filenames of the top-level directory are needed here.
    _, _, file_names = next(os.walk(folder_path))
    file_names = list(file_names)
    if ".DS_Store" in file_names:
        file_names.remove(".DS_Store")
    print("Number of images: {}".format(len(file_names)))
    return file_names
def get_list_of_images_and_masks_with_same_dimension(image_names, images_path, masks_path, size):
    """Load image/mask pairs and keep only those with the expected dimension.

    Parameters:
        image_names (list): list of file names.
        images_path (str): folder containing the .tif image files.
        masks_path (str): folder containing the .tif mask files.
        size (int): required height and width of both image and mask.
    Returns:
        tuple: (images array of shape (N, size, size, ...),
                masks array of shape (N, size, size, 1)), dtype uint32.
    """
    images = []
    masks = []
    # (Removed an unused loop counter from the original implementation.)
    for image in sorted(image_names):
        current_image = open_tif_image(images_path + image)
        current_mask = open_tif_image(masks_path + image)
        # Keep the pair only when BOTH image and mask are exactly size x size.
        if current_image.shape[0] == size and current_image.shape[1] == size and current_mask.shape[0] == size and current_mask.shape[1] == size:
            images.append(current_image)
            masks.append(current_mask)
    print("Images shape: {}, Mask shape: {}".format(len(images), len(masks)))
    image = np.array(images, dtype="uint32")
    # Give masks an explicit channel axis: (N, H, W) -> (N, H, W, 1).
    mask = np.expand_dims(np.array(masks, dtype="uint32"), axis=3)
    print("Images shape: {}, Mask shape: {}".format(image.shape, mask.shape))
    return image, mask
def save_array(image_array, output_file_name, outputfolder):
    """Persist a numpy array as a .npy file.

    Parameters:
        image_array (np.ndarray): array to save.
        output_file_name (str): name of the saved file (".npy" is appended
            by numpy).
        outputfolder (str): destination folder path (should end with a
            path separator, since it is concatenated directly).
    """
    target = outputfolder + output_file_name
    np.save(target, image_array)
    print("Image saved on {}".format(outputfolder))
######################## Pre Processing ###############################
def patch_images(input_path, output_path, size):
    """Function to patch the images in an specific size to a folder.
    Parameters:
        input_path(string) = path where the image file is located
        output_path(string) = path where the image tiles is located
        size (int) = crop size(width and height size will be the same during the crop)
    """
    size = size
    i = 0
    in_path = input_path
    out_path = output_path
    output_filename = 'tile_{}.tif'
    # Generator of (window, transform) pairs covering the raster on a
    # size x size grid; edge windows are clipped to the raster bounds.
    def get_tiles(ds, width=size, height=size):
        nols, nrows = ds.meta['width'], ds.meta['height']
        offsets = product(range(0, nols, width), range(0, nrows, height))
        big_window = windows.Window(col_off=0, row_off=0, width=nols, height=nrows)
        for col_off, row_off in offsets:
            # Clip the candidate window to the raster extent.
            window =windows.Window(col_off=col_off, row_off=row_off, width=width, height=height).intersection(big_window)
            transform = windows.transform(window, ds.transform)
            yield window, transform
    with rio.open(input_path) as inds:
        tile_width, tile_height = size, size
        meta = inds.meta.copy()
        for window, transform in get_tiles(inds):
            print(window)
            # Each tile gets its own georeferencing and dimensions.
            meta['transform'] = transform
            meta['width'], meta['height'] = window.width, window.height
            i += 1
            # Tiles are written as tile_1.tif, tile_2.tif, ...
            outpath = os.path.join(out_path,output_filename.format(i))
            with rio.open(outpath, 'w', **meta) as outds:
                outds.write(inds.read(window=window))
def get_nonzero_files(images_array, masks_array):
    """
    Drop every image/mask pair whose mask is entirely zero.
    Parameters:
        images_array = array of images.
        masks_array = array of masks (same leading dimension as images).
    :return (images, masks) as float32 arrays containing only the pairs
        whose mask has at least one non-zero value.
    """
    # Indices of samples whose mask contains no information at all.
    zero_idx = {idx for idx in range(masks_array.shape[0]) if masks_array[idx].max() == 0}
    print("There are: {} arrays with just 0 values, and {} arrays with non zero values ".format(len(zero_idx), (
        images_array.shape[0] - len(zero_idx))))
    kept = [idx for idx in range(images_array.shape[0]) if idx not in zero_idx]
    images = np.array([images_array[idx] for idx in kept], dtype="float32")
    masks = np.array([masks_array[idx] for idx in kept], dtype="float32")
    print("Image shape: {}, Mask shape: {}".format(images.shape, masks.shape))
    return images, masks
def generate_mask(raster_path, shape_path, output_path, file_name):
    """ Function to generate a mask from polygons with the same dimensions of an image.
    raster_path = path to .tif image;
    shape_path = path to shapefile or geojson.
    output_path = path to save the binary mask.
    file_name = name of the saved file.
    """
    # Load the raster (raster_img/raster_meta are read but not used below).
    with rio.open(raster_path, "r") as src:
        raster_img = src.read()
        raster_meta = src.meta
    # Load the shapefile or GeoJSON
    train_df = gpd.read_file(shape_path)
    # Check whether the vector CRS matches the raster CRS (warning only,
    # no reprojection is performed here).
    if train_df.crs != src.crs:
        print(
            " Raster CRS : {} Vetor CRS : {}.\n Convert to the same CRS!".format(
                src.crs, train_df.crs))
    # Helper to convert one polygon from map coordinates to pixel
    # coordinates using the inverse raster transform.
    # NOTE(review): shapely's cascaded_union is deprecated in shapely 2.x
    # in favour of unary_union -- confirm the pinned shapely version.
    def poly_from_utm(polygon, transform):
        poly_pts = []
        poly = cascaded_union(polygon)
        for i in np.array(poly.exterior.coords):
            poly_pts.append(~transform * tuple(i))
        new_poly = Polygon(poly_pts)
        return new_poly
    poly_shp = []
    im_size = (src.meta['height'], src.meta['width'])
    for num, row in train_df.iterrows():
        if row['geometry'].geom_type == 'Polygon':
            poly = poly_from_utm(row['geometry'], src.meta['transform'])
            poly_shp.append(poly)
        else:
            # Multi-part geometry: rasterize each part separately.
            for p in row['geometry']:
                poly = poly_from_utm(p, src.meta['transform'])
                poly_shp.append(poly)
    mask = rasterize(shapes=poly_shp,
                     out_shape=im_size)
    # Save: single-band uint16 raster, polygons burned in as 255.
    mask = mask.astype("uint16")
    bin_mask_meta = src.meta.copy()
    bin_mask_meta.update({'count': 1})
    os.chdir(output_path)
    with rio.open(file_name, 'w', **bin_mask_meta) as dst:
        dst.write(mask * 255, 1)
| [
"rasterio.open",
"numpy.save",
"shapely.ops.cascaded_union",
"shapely.geometry.Polygon",
"rasterio.windows.Window",
"os.walk",
"numpy.array",
"rasterio.windows.transform",
"rasterio.features.rasterize",
"os.chdir",
"skimage.io.imread",
"geopandas.read_file"
] | [((822, 870), 'skimage.io.imread', 'skimage.io.imread', (['image_path'], {'plugin': '"""tifffile"""'}), "(image_path, plugin='tifffile')\n", (839, 870), False, 'import skimage\n'), ((2530, 2562), 'numpy.array', 'np.array', (['images'], {'dtype': '"""uint32"""'}), "(images, dtype='uint32')\n", (2538, 2562), True, 'import numpy as np\n'), ((3059, 3112), 'numpy.save', 'np.save', (['(outputfolder + output_file_name)', 'image_array'], {}), '(outputfolder + output_file_name, image_array)\n', (3066, 3112), True, 'import numpy as np\n'), ((5668, 5701), 'numpy.array', 'np.array', (['images'], {'dtype': '"""float32"""'}), "(images, dtype='float32')\n", (5676, 5701), True, 'import numpy as np\n'), ((5714, 5746), 'numpy.array', 'np.array', (['masks'], {'dtype': '"""float32"""'}), "(masks, dtype='float32')\n", (5722, 5746), True, 'import numpy as np\n'), ((6383, 6408), 'geopandas.read_file', 'gpd.read_file', (['shape_path'], {}), '(shape_path)\n', (6396, 6408), True, 'import geopandas as gpd\n'), ((7354, 7399), 'rasterio.features.rasterize', 'rasterize', ([], {'shapes': 'poly_shp', 'out_shape': 'im_size'}), '(shapes=poly_shp, out_shape=im_size)\n', (7363, 7399), False, 'from rasterio.features import rasterize\n'), ((7548, 7569), 'os.chdir', 'os.chdir', (['output_path'], {}), '(output_path)\n', (7556, 7569), False, 'import os\n'), ((2589, 2620), 'numpy.array', 'np.array', (['masks'], {'dtype': '"""uint32"""'}), "(masks, dtype='uint32')\n", (2597, 2620), True, 'import numpy as np\n'), ((3947, 4009), 'rasterio.windows.Window', 'windows.Window', ([], {'col_off': '(0)', 'row_off': '(0)', 'width': 'nols', 'height': 'nrows'}), '(col_off=0, row_off=0, width=nols, height=nrows)\n', (3961, 4009), False, 'from rasterio import windows\n'), ((4285, 4305), 'rasterio.open', 'rio.open', (['input_path'], {}), '(input_path)\n', (4293, 4305), True, 'import rasterio as rio\n'), ((6231, 6257), 'rasterio.open', 'rio.open', (['raster_path', '"""r"""'], {}), "(raster_path, 'r')\n", (6239, 6257), 
True, 'import rasterio as rio\n'), ((6731, 6754), 'shapely.ops.cascaded_union', 'cascaded_union', (['polygon'], {}), '(polygon)\n', (6745, 6754), False, 'from shapely.ops import cascaded_union\n'), ((6772, 6802), 'numpy.array', 'np.array', (['poly.exterior.coords'], {}), '(poly.exterior.coords)\n', (6780, 6802), True, 'import numpy as np\n'), ((6875, 6892), 'shapely.geometry.Polygon', 'Polygon', (['poly_pts'], {}), '(poly_pts)\n', (6882, 6892), False, 'from shapely.geometry import mapping, Point, Polygon\n'), ((7579, 7620), 'rasterio.open', 'rio.open', (['file_name', '"""w"""'], {}), "(file_name, 'w', **bin_mask_meta)\n", (7587, 7620), True, 'import rasterio as rio\n'), ((1205, 1225), 'os.walk', 'os.walk', (['folder_path'], {}), '(folder_path)\n', (1212, 1225), False, 'import os\n'), ((4198, 4237), 'rasterio.windows.transform', 'windows.transform', (['window', 'ds.transform'], {}), '(window, ds.transform)\n', (4215, 4237), False, 'from rasterio import windows\n'), ((4691, 4721), 'rasterio.open', 'rio.open', (['outpath', '"""w"""'], {}), "(outpath, 'w', **meta)\n", (4699, 4721), True, 'import rasterio as rio\n'), ((4072, 4148), 'rasterio.windows.Window', 'windows.Window', ([], {'col_off': 'col_off', 'row_off': 'row_off', 'width': 'width', 'height': 'height'}), '(col_off=col_off, row_off=row_off, width=width, height=height)\n', (4086, 4148), False, 'from rasterio import windows\n')] |
import numpy as np
import pytest
import rnn
def test_sigmoid():
    """Sigmoid is 0.5 at zero, saturates to 1.0 for large positive inputs
    and stays within [0, 0.001) for large negative inputs."""
    assert rnn.sigmoid(0) == 0.5
    assert rnn.sigmoid(100) == 1.0
    low_tail = rnn.sigmoid(-100)
    assert 0 <= low_tail < 0.001
def test_logistic_loss():
    """Loss vanishes for near-perfect predictions and exceeds 1 for
    confidently wrong ones."""
    for truth, prediction in ((1.0, 0.9999999999), (0.0, 0.0000000001)):
        assert np.allclose(rnn.logistic_loss(truth, prediction), [0])
    for truth, prediction in ((1.0, 0.01), (0.0, 0.99)):
        assert rnn.logistic_loss(truth, prediction) > 1
def test_getitem_and_setitem_work():
    """Every value written through __setitem__ reads back unchanged
    through __getitem__."""
    net = rnn.RNN(n_a=4, n_x=1)
    size = len(net)
    for idx in range(size):
        net[idx] = float(idx)
    assert all(net[idx] == float(idx) for idx in range(size))
def test_getitem_raises_index_error():
    """Indexing far past the end of the parameter vector raises IndexError."""
    net = rnn.RNN(n_a=1, n_x=1)
    with pytest.raises(IndexError):
        net[50]
def test_forward_prop_works():
    """forward_prop on a single (1, 1) input yields an output of shape
    (1, 1) and a hidden state of shape (4, 1)."""
    net = rnn.RNN(4, 1)
    sample = np.array([[0.3]])
    y, a = net.forward_prop(sample)
    assert y.shape == (1, 1)
    assert a.shape == (4, 1)
| [
"rnn.logistic_loss",
"rnn.RNN",
"pytest.raises",
"numpy.array",
"rnn.sigmoid"
] | [((502, 523), 'rnn.RNN', 'rnn.RNN', ([], {'n_a': '(4)', 'n_x': '(1)'}), '(n_a=4, n_x=1)\n', (509, 523), False, 'import rnn\n'), ((798, 811), 'rnn.RNN', 'rnn.RNN', (['(4)', '(1)'], {}), '(4, 1)\n', (805, 811), False, 'import rnn\n'), ((820, 837), 'numpy.array', 'np.array', (['[[0.3]]'], {}), '([[0.3]])\n', (828, 837), True, 'import numpy as np\n'), ((78, 92), 'rnn.sigmoid', 'rnn.sigmoid', (['(0)'], {}), '(0)\n', (89, 92), False, 'import rnn\n'), ((111, 127), 'rnn.sigmoid', 'rnn.sigmoid', (['(100)'], {}), '(100)\n', (122, 127), False, 'import rnn\n'), ((146, 163), 'rnn.sigmoid', 'rnn.sigmoid', (['(-100)'], {}), '(-100)\n', (157, 163), False, 'import rnn\n'), ((183, 200), 'rnn.sigmoid', 'rnn.sigmoid', (['(-100)'], {}), '(-100)\n', (194, 200), False, 'import rnn\n'), ((257, 293), 'rnn.logistic_loss', 'rnn.logistic_loss', (['(1.0)', '(0.9999999999)'], {}), '(1.0, 0.9999999999)\n', (274, 293), False, 'import rnn\n'), ((323, 352), 'rnn.logistic_loss', 'rnn.logistic_loss', (['(0.0)', '(1e-10)'], {}), '(0.0, 1e-10)\n', (340, 352), False, 'import rnn\n'), ((377, 405), 'rnn.logistic_loss', 'rnn.logistic_loss', (['(1.0)', '(0.01)'], {}), '(1.0, 0.01)\n', (394, 405), False, 'import rnn\n'), ((421, 449), 'rnn.logistic_loss', 'rnn.logistic_loss', (['(0.0)', '(0.99)'], {}), '(0.0, 0.99)\n', (438, 449), False, 'import rnn\n'), ((695, 720), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (708, 720), False, 'import pytest\n'), ((730, 751), 'rnn.RNN', 'rnn.RNN', ([], {'n_a': '(1)', 'n_x': '(1)'}), '(n_a=1, n_x=1)\n', (737, 751), False, 'import rnn\n')] |
from numpy import ones, zeros, sort, argsort
def HFPquantile(x, conf, p=None):
    """Quantiles of a scenario distribution with Flexible Probabilities.

    Parameters
    ----------
    x : ndarray, shape (1, t_end)
        Scenarios.
    conf : ndarray, shape (1, n_ql)
        Confidence levels; the cumulative scan is shared across levels,
        so they are expected in non-decreasing order.
    p : ndarray, shape (1, t_end), optional
        Flexible Probabilities; defaults to the uniform distribution.

    Returns
    -------
    q_HFP : ndarray, shape (1, n_ql)
        The requested quantiles.
    """
    n_scen = x.shape[1]
    n_ql = conf.shape[1]
    # Default: uniform probabilities over all scenarios.
    if p is None:
        p = (1 / n_scen) * ones((1, n_scen))
    # Sort the scenarios and carry their probabilities along.
    order = argsort(x)
    x_sort = sort(x)
    p_sort = p[0, order]
    q_HFP = zeros((1, n_ql))
    cum = 0
    pos = 0
    # A single forward sweep through the sorted scenarios serves every
    # confidence level: `cum` and `pos` persist across levels.
    for level in range(n_ql):
        while cum < conf[0, level] and pos < n_scen:
            cum = cum + p_sort[0, pos]
            pos = pos + 1
        q_HFP[0, level] = x_sort[0, 0] if pos == 0 else x_sort[0, pos - 1]
    return q_HFP
"numpy.argsort",
"numpy.sort",
"numpy.zeros",
"numpy.ones"
] | [((681, 697), 'numpy.zeros', 'zeros', (['(1, n_ql)'], {}), '((1, n_ql))\n', (686, 697), False, 'from numpy import ones, zeros, sort, argsort\n'), ((627, 634), 'numpy.sort', 'sort', (['x'], {}), '(x)\n', (631, 634), False, 'from numpy import ones, zeros, sort, argsort\n'), ((636, 646), 'numpy.argsort', 'argsort', (['x'], {}), '(x)\n', (643, 646), False, 'from numpy import ones, zeros, sort, argsort\n'), ((589, 610), 'numpy.ones', 'ones', (['(1, x.shape[1])'], {}), '((1, x.shape[1]))\n', (593, 610), False, 'from numpy import ones, zeros, sort, argsort\n')] |
import numpy as np
import pandas as pd
from enum import Enum
from syntactical_analysis.sa_utils import *
__all__ = [
'Parser',
]
class Parser:
def __init__(self):
self.predictive_parser_table_data = [
[0, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27],
[1, 1, 1, 1, 27, 27, 27, 27, 27, 27, 1, 27, 2, 27, 27, 27, 27, 27],
[3, 3, 3, 3, 27, 27, 27, 27, 27, 27, 3, 27, 27, 27, 27, 27, 27, 27],
[27, 27, 27, 27, 27, 4, 5, 27, 27, 27, 27, 26, 27, 27, 27, 27, 27, 26],
[6, 6, 6, 6, 27, 27, 27, 27, 27, 27, 6, 27, 27, 27, 27, 27, 27, 27],
[27, 27, 27, 27, 27, 26, 26, 7, 8, 9, 27, 26, 27, 27, 27, 27, 27, 26],
[11, 12, 13, 14, 27, 27, 27, 27, 27, 27, 10, 27, 27, 27, 27, 27, 27, 27],
[27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 15, 27, 27, 27, 27, 27],
[27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 16, 17, 27, 26],
[27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 18, 27, 27, 27, 27, 27],
[20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 19, 27, 27, 27, 27, 27],
[21, 22, 23, 24, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27],
[27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 26, 27, 27, 25, 26],
]
self.predictive_parser_table = [[None for _ in range(len(self.predictive_parser_table_data[0]))]
for _ in range(len(self.predictive_parser_table_data))]
for i in range(len(self.predictive_parser_table_data)):
for j in range(len(self.predictive_parser_table_data[0])):
self.predictive_parser_table[i][j] = ProdRules(self.predictive_parser_table_data[i][j], None)
self.predictive_parser_table = np.array(self.predictive_parser_table)
self.list_terminals = [i for i in Terminals if i.name not in ["INVALID", "EPSILON"]]
# stack keeps track of what is remaining to be checked
self.stack = []
@staticmethod
def change_terminal(lexeme):
if lexeme == "i":
return Terminals.IDENTIFIER
elif lexeme == "n":
return Terminals.NUMBER
elif lexeme == "r":
return Terminals.REAL
elif lexeme == "g":
return Terminals.GREEK
elif lexeme == "=":
return Terminals.EQUAL
elif lexeme == "+":
return Terminals.PLUS
elif lexeme == "-":
return Terminals.MINUS
elif lexeme in ["*", r"\times", r"\ast"]:
return Terminals.MULTIPLY
elif lexeme in [r"/", r"\div"]:
return Terminals.DIVISION
elif lexeme == "%":
return Terminals.MOD
elif lexeme in ["(", r"\("]:
return Terminals.LEFT_ROUNDB
elif lexeme in [")", r"\)"]:
return Terminals.RIGHT_ROUNDB
elif lexeme in ["{", r"\{"]:
return Terminals.LEFT_CURLYB
elif lexeme in ["}", r"\}"]:
return Terminals.RIGHT_CURLYB
elif lexeme in [r"\cup"]:
return Terminals.UNION
elif lexeme in [r"\cap"]:
return Terminals.INTERSECTION
elif lexeme in [',', r'\comma']:
return Terminals.COMMA
elif lexeme == "$":
return Terminals.DOLLAR
else:
return Terminals.NONE
@staticmethod
def change_input(token, lexeme):
if token == "IDENTIFIER":
return Terminals.IDENTIFIER
elif token == "INTEGER":
return Terminals.NUMBER
elif token == "REAL":
return Terminals.REAL
elif token == "GREEK":
return Terminals.GREEK
elif token == "KEYWORD":
if lexeme in ["and", r"\&"]:
return Terminals.AND
elif lexeme in ["or", r"\|"]:
return Terminals.OR
else:
return Terminals.NONE
elif token == "OPERATOR":
if lexeme == "+":
return Terminals.PLUS
elif lexeme == "-":
return Terminals.MINUS
elif lexeme in ["*", r"\times", r"\ast"]:
return Terminals.MULTIPLY
elif lexeme in [r"/", r"\div"]:
return Terminals.DIVISION
elif lexeme in ["%", r"\%"]:
return Terminals.MOD
elif lexeme in [r"\cup"]:
return Terminals.UNION
elif lexeme in [r"\cap"]:
return Terminals.INTERSECTION
elif lexeme == "$":
return Terminals.DOLLAR
elif lexeme == "=":
return Terminals.EQUAL
else:
return Terminals.NONE
elif token == "SEPARATOR":
if lexeme in ["(", r"\("]:
return Terminals.LEFT_ROUNDB
elif lexeme in [")", r"\)"]:
return Terminals.RIGHT_ROUNDB
elif lexeme in ["{", r"\{"]:
return Terminals.LEFT_CURLYB
elif lexeme in ["}", r"\}"]:
return Terminals.RIGHT_CURLYB
elif lexeme in [",", r"\comma"]:
return Terminals.COMMA
elif lexeme == "$":
return Terminals.DOLLAR
elif lexeme == ";":
return Terminals.DOLLAR
else:
return Terminals.NONE
else:
return Terminals.NONE
# helper fn to push contents to stack
def pop(self):
temp = self.stack[-1]
del self.stack[-1]
return temp
# Helper function used to push contents to the stack
def push(self, t):
self.stack.append(t)
# Helper function used to push the contents to the stack according to the Production Rules
def push_rules(self, rule):
    """Push the right-hand side of production *rule* onto the stack.

    Symbols are listed in push order, so the leftmost RHS symbol ends up on
    top of the stack. Rule 26 is the epsilon production (pushes nothing);
    any unknown rule pushes NonTerminals.NONE.
    """
    rhs_in_push_order = {
        ProdRules(0): (NonTerminals.V, Terminals.EQUAL, Terminals.IDENTIFIER),
        ProdRules(1): (NonTerminals.E,),
        ProdRules(2): (NonTerminals.D,),
        ProdRules(3): (NonTerminals.Q, NonTerminals.T),
        ProdRules(4): (NonTerminals.Q, NonTerminals.T, Terminals.PLUS),
        ProdRules(5): (NonTerminals.Q, NonTerminals.T, Terminals.MINUS),
        ProdRules(6): (NonTerminals.R, NonTerminals.F),
        ProdRules(7): (NonTerminals.R, NonTerminals.F, Terminals.MULTIPLY),
        ProdRules(8): (NonTerminals.R, NonTerminals.F, Terminals.DIVISION),
        ProdRules(9): (NonTerminals.R, NonTerminals.F, Terminals.MOD),
        ProdRules(10): (Terminals.RIGHT_ROUNDB, NonTerminals.E, Terminals.LEFT_ROUNDB),
        ProdRules(11): (Terminals.IDENTIFIER,),
        ProdRules(12): (Terminals.NUMBER,),
        ProdRules(13): (Terminals.REAL,),
        ProdRules(14): (Terminals.GREEK,),
        ProdRules(15): (NonTerminals.O, NonTerminals.Z),
        ProdRules(16): (NonTerminals.O, NonTerminals.Z, Terminals.UNION),
        ProdRules(17): (NonTerminals.O, NonTerminals.Z, Terminals.INTERSECTION),
        ProdRules(18): (Terminals.RIGHT_CURLYB, NonTerminals.J, Terminals.LEFT_CURLYB),
        ProdRules(19): (NonTerminals.Z,),
        ProdRules(20): (NonTerminals.M,),
        ProdRules(21): (NonTerminals.K, Terminals.IDENTIFIER),
        ProdRules(22): (NonTerminals.K, Terminals.NUMBER),
        ProdRules(23): (NonTerminals.K, Terminals.REAL),
        ProdRules(24): (NonTerminals.K, Terminals.GREEK),
        ProdRules(25): (NonTerminals.J, Terminals.COMMA),
        ProdRules(26): (),  # epsilon production: nothing to push
    }
    for symbol in rhs_in_push_order.get(rule, (NonTerminals.NONE,)):
        self.push(symbol)
def generate_tree(self, tokens_lexemes):
    """Run the table-driven predictive (LL(1)-style) parse over *tokens_lexemes*.

    Parameters
    ----------
    tokens_lexemes : list of Token
        Token stream; an end-of-input Token ("$") is appended in place
        (the caller's list is mutated).

    Returns
    -------
    list
        One entry per consumed input token: the Token itself followed by the
        (non-terminal, production-rule) pairs expanded while that token was
        the lookahead.

    Raises
    ------
    SyntaxError
        On a parse-table miss (Terminals.NONE / NonTerminals.NONE) or a
        terminal mismatch.
    """
    # reset stack
    self.stack = []
    # append the end-of-input sentinel "$"
    tokens_lexemes.append(Token(value=4, lexeme="$"))
    per_ip = [tokens_lexemes[0]]
    result = []
    # i indexes the current lookahead token
    i = 0
    # push S to stack for the first line (start symbol)
    self.push(NonTerminals.S)
    # executes until the stack becomes empty and the tokens_lexemes dataframe reaches the end
    while i < len(tokens_lexemes) and len(self.stack) != 0:
        # changes input to the instance of Terminals class; will be used further as column no for parser table
        x = self.change_input(tokens_lexemes[i].name, tokens_lexemes[i].lexeme)
        # pops the first value of the stack; will be used further as row no for parser table
        y = self.pop()
        # this will raise a Syntax Error which will be caught by Error Handling
        if x == Terminals.NONE or y == NonTerminals.NONE:
            raise SyntaxError("Token: {}\nLexeme: {} \nSyntax Error: Invalid Syntax"
                              .format(tokens_lexemes[i].name,tokens_lexemes[i].lexeme))
        elif y in self.list_terminals:  # stack top is a terminal: it must match the lookahead
            if x == y:
                result.append(per_ip)
                i += 1
            else:
                raise SyntaxError("Token: {}\nLexeme: {} \nSyntax Error: Invalid Syntax"
                                  .format(tokens_lexemes[i].name,tokens_lexemes[i].lexeme))
            # used to keep record of the token and lexeme for the current iteration;
            # will be added to op_df when the input changes
            # NOTE(review): if the final "$" were ever matched here, tokens_lexemes[i]
            # would index past the end -- appears unreachable in practice; confirm.
            per_ip = [tokens_lexemes[i]]
        # executes when y and x are not equal (stack top is a non-terminal)
        else:
            # calculates the Production Rule according to y and x from the parser table
            new_value = self.predictive_parser_table[y.value][x.value]
            # adds the Prod Rule used to the op_ls list
            per_ip.append((y, new_value))
            # pushes the contents of the Prod Rule to the stack
            self.push_rules(new_value)
    result.append(per_ip)
    return result
def main():
    """Tokenize a demo expression, parse it, and print each trace entry."""
    # import inside main() fn to prevent any overwrite in the global variables
    from syntactical_analysis.lexer import Lexer
    sample_expression = ['z', '=', 'x', '+', 'y']
    # lexer: expression -> token stream
    token_stream = Lexer().generate_tokens(sample_expression)
    # parser: token stream -> per-token parse trace
    trace = Parser().generate_tree(token_stream)
    for entry in trace:
        print('Lexeme: ', entry[0].lexeme, '', entry)
# Script entry point: run the demo parse when executed directly.
if __name__ == '__main__':
    main()
| [
"numpy.array",
"syntactical_analysis.lexer.Lexer"
] | [((11678, 11685), 'syntactical_analysis.lexer.Lexer', 'Lexer', ([], {}), '()\n', (11683, 11685), False, 'from syntactical_analysis.lexer import Lexer\n'), ((1812, 1850), 'numpy.array', 'np.array', (['self.predictive_parser_table'], {}), '(self.predictive_parser_table)\n', (1820, 1850), True, 'import numpy as np\n')] |
import numpy as np
from scipy.integrate import odeint
from .name2idx import V
from .set_model import diffeq, param_values, initial_values
class Simulation(object):
    """Integrate the Smad ODE model for sustained vs. single-pulse stimulation.

    All work runs at class-definition time; results are stored as class
    attributes (``totalNumPSmad2_sustained`` and
    ``totalNumPSmad2_singlePulse``). Model equations, parameters and species
    indices come from ``set_model`` / ``name2idx``.
    """
    # 0-480 min sampled every 0.1 min
    tspan = np.linspace(0,480,4801)
    t = np.array(tspan)/60 # min -> hour
    Ton = np.linspace(0,0.5,6) # 30 s pulse
    Toff = np.linspace(0,479.5,4796)
    x = param_values()
    y0 = initial_values()
    # sustained stimulation: integrate over the full time span
    Y = odeint(diffeq,y0,tspan,args=tuple(x))
    # total phosphorylated-Smad2 molecule count per time point
    # (factors 2.3 and 602 presumably convert compartment concentrations to
    # molecule numbers, e.g. volume ratio and nM->molecules -- TODO confirm)
    totalNumPSmad2_sustained = (Y[:,V.PSmad2c] + 2*Y[:,V.PSmad2_PSmad2_c] + Y[:,V.PSmad2_PSmad4_c])*2.3*602 \
                                + (Y[:,V.PSmad2n] + 2*Y[:,V.PSmad2_PSmad2_n] + Y[:,V.PSmad2_Smad4_n])*602
    # single pulse: integrate the 30 s stimulation window only
    pulse = odeint(diffeq,y0,Ton,args=tuple(x))
    Y0 = pulse[-1,:]
    # washout
    # NOTE(review): pulse[-1,:] is a view, so this also zeroes pulse's last
    # row; that row is dropped below via np.delete, so it is harmless.
    Y0[V.TGF_beta_ex] = 0
    washout = odeint(diffeq,Y0,Toff,args=tuple(x))
    # stitch pulse + washout (drop the duplicated boundary time point)
    Y = np.vstack((np.delete(pulse,-1,axis=0),washout))
    totalNumPSmad2_singlePulse = (Y[:,V.PSmad2c] + 2*Y[:,V.PSmad2_PSmad2_c] + Y[:,V.PSmad2_PSmad4_c])*2.3*602 \
                                + (Y[:,V.PSmad2n] + 2*Y[:,V.PSmad2_PSmad2_n] + Y[:,V.PSmad2_Smad4_n])*602
"numpy.array",
"numpy.delete",
"numpy.linspace"
] | [((179, 204), 'numpy.linspace', 'np.linspace', (['(0)', '(480)', '(4801)'], {}), '(0, 480, 4801)\n', (190, 204), True, 'import numpy as np\n'), ((255, 277), 'numpy.linspace', 'np.linspace', (['(0)', '(0.5)', '(6)'], {}), '(0, 0.5, 6)\n', (266, 277), True, 'import numpy as np\n'), ((302, 329), 'numpy.linspace', 'np.linspace', (['(0)', '(479.5)', '(4796)'], {}), '(0, 479.5, 4796)\n', (313, 329), True, 'import numpy as np\n'), ((211, 226), 'numpy.array', 'np.array', (['tspan'], {}), '(tspan)\n', (219, 226), True, 'import numpy as np\n'), ((872, 900), 'numpy.delete', 'np.delete', (['pulse', '(-1)'], {'axis': '(0)'}), '(pulse, -1, axis=0)\n', (881, 900), True, 'import numpy as np\n')] |
"""
Containing utility functions for data processing
"""
import random
from scipy import sparse
from rdkit import Chem
import networkx as nx
import numpy as np
from mx_mg.data import data_struct
__all__ = ['get_graph_from_smiles_list', 'get_mol_from_graph', 'get_mol_from_graph_list', 'get_d']
def get_graph_from_smiles(smiles):
    """Parse a SMILES string into its annotated graph representation.

    Returns (graph, atom_types, atom_ranks, bonds, bond_types): a networkx
    graph over atom indices, per-atom type ids and RDKit canonical ranks,
    and the typed bond list.
    """
    mol = Chem.MolFromSmiles(smiles)
    spec = data_struct.get_mol_spec()
    # per-atom type ids and canonical ranks
    atom_types, atom_ranks = [], []
    for atom, rank in zip(mol.GetAtoms(), Chem.CanonicalRankAtoms(mol)):
        atom_types.append(spec.get_atom_type(atom))
        atom_ranks.append(rank)
    # typed edge list
    bonds, bond_types = [], []
    for bond in mol.GetBonds():
        bonds.append([bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()])
        bond_types.append(spec.get_bond_type(bond))
    # undirected molecular graph over atom indices
    graph = nx.Graph()
    graph.add_nodes_from(range(len(atom_types)))
    graph.add_edges_from(bonds)
    return graph, atom_types, atom_ranks, bonds, bond_types
def get_graph_from_smiles_list(smiles_list):
    """Convert SMILES strings to [X_0, A_0] pairs (atom types, typed bonds)."""
    spec = data_struct.get_mol_spec()
    graph_list = []
    for smiles in smiles_list:
        mol = Chem.MolFromSmiles(smiles)
        atom_types = [spec.get_atom_type(atom) for atom in mol.GetAtoms()]
        bonds = [[b.GetBeginAtomIdx(), b.GetEndAtomIdx()] for b in mol.GetBonds()]
        bond_types = [spec.get_bond_type(b) for b in mol.GetBonds()]
        X_0 = np.array(atom_types, dtype=np.int32)
        # bond rows are (begin_idx, end_idx, bond_type)
        A_0 = np.concatenate(
            [np.array(bonds, dtype=np.int32),
             np.array(bond_types, dtype=np.int32)[:, np.newaxis]],
            axis=1)
        graph_list.append([X_0, A_0])
    return graph_list
def traverse_graph(graph, atom_ranks, current_node=None, step_ids=None, p=0.9, log_p=0.0):
    """Assign a partially randomized depth-first visit order to the graph.

    Candidates are sorted by canonical rank; with probability *p* the
    best-ranked unvisited candidate is taken next, otherwise one of the
    remaining candidates is chosen uniformly at random.

    Returns (step_ids, log_p): step_ids[n] is the visit step of node n and
    log_p accumulates the log-probability of the sampled order.
    NOTE: uses the global `random` module, so results vary unless seeded.
    """
    if current_node is None:
        # root call: every node is a candidate, all unvisited (step -1)
        next_nodes = range(len(atom_ranks))
        step_ids = [-1, ] * len(next_nodes)
        next_node_ranks = atom_ranks
    else:
        next_nodes = graph.neighbors(current_node) # get neighbor nodes
        next_nodes = [n for n in next_nodes if step_ids[n] < 0] # filter visited nodes
        next_node_ranks = [atom_ranks[n] for n in next_nodes] # get ranks for neighbors
    next_nodes = [n for n, r in sorted(zip(next_nodes, next_node_ranks), key=lambda _x:_x[1])] # sort by rank
    # iterate through neighbors
    while len(next_nodes) > 0:
        if len(next_nodes)==1:
            # forced move: no log-probability contribution
            next_node = next_nodes[0]
        elif random.random() >= (1 - p):
            # with probability p, follow the best-ranked candidate
            next_node = next_nodes[0]
            log_p += np.log(p)
        else:
            # otherwise pick uniformly among the remaining candidates
            next_node = next_nodes[random.randint(1, len(next_nodes) - 1)]
            log_p += np.log((1.0 - p) / (len(next_nodes) - 1))
        step_ids[next_node] = max(step_ids) + 1
        # recurse depth-first from the chosen node (step_ids mutated in place)
        _, log_p = traverse_graph(graph, atom_ranks, next_node, step_ids, p, log_p)
        next_nodes = [n for n in next_nodes if step_ids[n] < 0] # filter visited nodes
    return step_ids, log_p
def single_reorder(X_0, A_0, step_ids):
    """Reorder one graph's atoms/bonds by the decoding order *step_ids*.

    Returns copies: atoms sorted by step, bond endpoints relabeled with the
    new atom ids, bonds sorted by (larger endpoint, smaller endpoint), and
    each bond row written as (low, high, type) for "append" steps or
    (high, low, type) for "connect" steps.
    """
    X_new = np.copy(X_0)
    A_new = np.copy(A_0)
    order = np.array(step_ids, dtype=np.int32)
    # atoms sorted by their decoding step
    X_new = X_new[np.argsort(order)]
    # relabel bond endpoints with the new atom ids
    A_new[:, 0] = order[A_new[:, 0]]
    A_new[:, 1] = order[A_new[:, 1]]
    hi = np.amax(A_new[:, :2], axis=1)
    lo = np.amin(A_new[:, :2], axis=1)
    # sort bonds by larger endpoint, tie-broken by larger smaller-endpoint
    A_new = A_new[np.lexsort([-lo, hi]), :]
    # a bond "appends" a new atom when its larger endpoint is first seen
    hi = np.amax(A_new[:, :2], axis=1)
    lo = np.amin(A_new[:, :2], axis=1)
    is_append = np.concatenate([np.array([True]), hi[1:] > hi[:-1]])
    endpoints = np.where(is_append[:, np.newaxis],
                         np.stack([lo, hi], axis=1),
                         np.stack([hi, lo], axis=1))
    return X_new, np.concatenate([endpoints, A_new[:, -1:]], axis=1)
def single_expand(X_0, A_0):
    """Unroll one decoding-ordered graph into its per-step partial graphs.

    Given atoms ``X_0`` and ordered bond rows ``A_0`` (the output convention
    of ``single_reorder``), builds the concatenated partial atom/bond arrays
    for every decoding step together with the per-step action labels.

    Returns
    -------
    (action_0, X, NX, A, NA, actions, last_atom_mask)
        action_0: type of the initial atom; X/NX: concatenated partial atom
        lists and their lengths; A/NA: concatenated partial bond lists and
        their lengths; actions: rows (action_type, atom_type, bond_type,
        append_pos, connect_pos) ending with a terminate row [2, 0, 0, 0, 0];
        last_atom_mask: flags the newest atom of each partial graph.
    """
    X_0, A_0 = np.copy(X_0), np.copy(A_0)
    # expand X
    # 1 where the bond row is (low, high), i.e. an "append new atom" step
    # (single_reorder's convention); 0 for a "connect" step
    is_append_iter = np.less(A_0[:, 0], A_0[:, 1]).astype(np.int32)
    # NX[s]: number of atoms present at step s (starts at 1, +1 per append)
    NX = np.cumsum(np.pad(is_append_iter, [[1, 0]], mode='constant', constant_values=1))
    shift = np.cumsum(np.pad(NX, [[1, 0]], mode='constant')[:-1])
    # index pattern 0..NX[s]-1 for every step, concatenated
    X_index = np.arange(NX.sum(), dtype=np.int32) - np.repeat(shift, NX)
    X = X_0[X_index]
    # expand A: step s carries the first s bond rows (prefixes via tril indices)
    _, A_index = np.tril_indices(A_0.shape[0])
    A = A_0[A_index, :]
    NA = np.arange(A_0.shape[0] + 1)
    # get action
    # action_type, atom_type, bond_type, append_pos, connect_pos
    action_type = 1 - is_append_iter
    atom_type = np.where(action_type == 0, X_0[A_0[:, 1]], 0)
    bond_type = A_0[:, 2]
    append_pos = np.where(action_type == 0, A_0[:, 0], 0)
    connect_pos = np.where(action_type == 1, A_0[:, 1], 0)
    actions = np.stack([action_type, atom_type, bond_type, append_pos, connect_pos],
                       axis=1)
    # terminate action appended last
    last_action = [[2, 0, 0, 0, 0]]
    actions = np.append(actions, last_action, axis=0)
    action_0 = np.array([X_0[0]], dtype=np.int32)
    # Get mask: flag the most recently added atom of each partial graph
    # (1 following an append step, 2 following a connect step)
    last_atom_index = shift + NX - 1
    last_atom_mask = np.zeros_like(X)
    last_atom_mask[last_atom_index] = np.where(
        np.pad(is_append_iter, [[1, 0]], mode='constant', constant_values=1) == 1,
        np.ones_like(last_atom_index),
        np.ones_like(last_atom_index) * 2)
    return action_0, X, NX, A, NA, actions, last_atom_mask
def get_d(A, X):
    """Return node index pairs at graph distance 2 (D_2) and 3 (D_3).

    *A* holds undirected edges in its first two columns; *X* is used only
    for the node count. Each returned array lists pairs (i, j) with i < j.
    """
    n_nodes = X.shape[0]

    def to_adjacency(edges):
        # symmetric sparse 0/1-ish adjacency built from (i, j) edge rows
        rows = np.concatenate([edges[:, 0], edges[:, 1]], axis=0)
        cols = np.concatenate([edges[:, 1], edges[:, 0]], axis=0)
        data = np.ones([edges.shape[0] * 2], dtype=np.int32)
        return sparse.coo_matrix((data, (rows, cols)), shape=[n_nodes, ] * 2)

    adj = to_adjacency(A)
    walks2 = adj * adj
    walks3 = walks2 * adj
    # D_2: endpoints of 2-walks, upper triangle only
    pairs = np.stack(walks2.nonzero(), axis=1)
    D_2 = pairs[pairs[:, 0] < pairs[:, 1], :]
    # D_3: endpoints of 3-walks, upper triangle only
    pairs = np.stack(walks3.nonzero(), axis=1)
    D_3 = pairs[pairs[:, 0] < pairs[:, 1], :]
    # remove directly bonded (distance-1) pairs from D_3
    d3_adj = to_adjacency(D_3)
    d3_adj = d3_adj - d3_adj.multiply(adj)
    pairs = np.stack(d3_adj.nonzero(), axis=1)
    D_3 = pairs[pairs[:, 0] < pairs[:, 1], :]
    return D_2, D_3
def merge_single_0(X_0, A_0, NX_0, NA_0):
    """Merge per-route graphs into one batched graph.

    Shifts atom ids in A_0 in place, splits bonds by bond type and appends
    the distance-2/distance-3 pair lists; also builds the per-atom graph-id
    vector NX_rep_0.
    """
    # per-graph atom-id offsets within the concatenated batch
    offsets = np.cumsum(np.pad(NX_0, [[1, 0]], mode='constant')[:-1])
    A_0[:, :2] += np.stack([np.repeat(offsets, NA_0)] * 2, axis=1)
    # get D (distance-2 and distance-3 node pairs of the batched graph)
    D_0_2, D_0_3 = get_d(A_0, X_0)
    # split A by bond type, then append the distance pair lists
    num_bond_types = data_struct.get_mol_spec().num_bond_types
    A_split = [A_0[A_0[:, 2] == bond_type, :2] for bond_type in range(num_bond_types)]
    A_split.extend([D_0_2, D_0_3])
    # NX_rep: graph id of every atom in the batch
    NX_rep_0 = np.repeat(np.arange(NX_0.shape[0]), NX_0)
    return X_0, A_split, NX_0, NX_rep_0
def merge_single(X, A,
                 NX, NA,
                 mol_ids, rep_ids, iw_ids,
                 action_0, actions,
                 last_append_mask,
                 log_p):
    """Batch one molecule's sampled routes; shift action positions to batched atom ids."""
    X, A, NX, NX_rep = merge_single_0(X, A, NX, NA)
    # per-graph atom-id offsets within the batched graph
    offsets = np.cumsum(np.pad(NX, [[1, 0]], mode='constant')[:-1])
    # append actions (type 0) offset append_pos, connect actions (type 1)
    # offset connect_pos -- in place on the caller's array, as before
    actions[:, -2] += offsets * (actions[:, 0] == 0)
    actions[:, -1] += offsets * (actions[:, 0] == 1)
    mol_ids_rep = np.repeat(mol_ids, NX)
    rep_ids_rep = np.repeat(rep_ids, NX)
    return (X, A,
            mol_ids_rep, rep_ids_rep, iw_ids,
            last_append_mask,
            NX, NX_rep,
            action_0, actions,
            log_p)
def process_single(smiles, k, p):
    """Sample *k* decoding routes for one SMILES and return its stacked arrays.

    Returns (X, A, NX, NA, mol_ids, rep_ids, iw_ids, action_0, actions,
    last_append_mask, log_p), each concatenated over the k sampled routes;
    log_p holds the per-route sampling log-probabilities (float32).
    """
    graph, atom_types, atom_ranks, bonds, bond_types = get_graph_from_smiles(smiles)
    # arrays for the original (canonical) atom ordering
    X_orig = np.array(atom_types, dtype=np.int32)
    A_orig = np.concatenate(
        [np.array(bonds, dtype=np.int32),
         np.array(bond_types, dtype=np.int32)[:, np.newaxis]],
        axis=1)
    X, A, NX, NA = [], [], [], []
    mol_ids, rep_ids, iw_ids = [], [], []
    action_0, actions = [], []
    last_append_mask, log_p = [], []
    # sample k random decoding routes and expand each one
    for route_id in range(k):
        route_steps, route_log_p = traverse_graph(graph, atom_ranks, p=p)
        X_i, A_i = single_reorder(X_orig, A_orig, route_steps)
        (action_0_i, X_i, NX_i, A_i, NA_i,
         actions_i, last_mask_i) = single_expand(X_i, A_i)
        X.append(X_i)
        A.append(A_i)
        NX.append(NX_i)
        NA.append(NA_i)
        action_0.append(action_0_i)
        actions.append(actions_i)
        last_append_mask.append(last_mask_i)
        mol_ids.append(np.zeros_like(NX_i, dtype=np.int32))
        rep_ids.append(np.ones_like(NX_i, dtype=np.int32) * route_id)
        iw_ids.append(np.ones_like(NX_i, dtype=np.int32) * route_id)
        log_p.append(route_log_p)
    return (np.concatenate(X, axis=0),
            np.concatenate(A, axis=0),
            np.concatenate(NX, axis=0),
            np.concatenate(NA, axis=0),
            np.concatenate(mol_ids, axis=0),
            np.concatenate(rep_ids, axis=0),
            np.concatenate(iw_ids, axis=0),
            np.concatenate(action_0, axis=0),
            np.concatenate(actions, axis=0),
            np.concatenate(last_append_mask, axis=0),
            np.array(log_p, dtype=np.float32))
# noinspection PyArgumentList
def get_mol_from_graph(X, A, sanitize=True):
    """Build an RDKit molecule from a graph.

    Parameters
    ----------
    X : array-like of atom-type indices.
    A : array-like of (atom_id1, atom_id2, bond_type) rows.
    sanitize : bool, default True
        If True, run RDKit sanitization on the assembled molecule.

    Returns
    -------
    rdkit Mol or None
        None whenever construction or sanitization fails (best-effort API).
    """
    try:
        mol = Chem.RWMol(Chem.Mol())
        X, A = X.tolist(), A.tolist()
        for atom_type in X:
            mol.AddAtom(data_struct.get_mol_spec().index_to_atom(atom_type))
        for atom_id1, atom_id2, bond_type in A:
            data_struct.get_mol_spec().index_to_bond(mol, atom_id1, atom_id2, bond_type)
    # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt;
    # invalid graphs still yield None
    except Exception:
        return None
    if not sanitize:
        return mol
    try:
        mol = mol.GetMol()
        Chem.SanitizeMol(mol)
    except Exception:
        # chemically invalid molecule -> None, matching the best-effort contract
        return None
    return mol
def get_mol_from_graph_list(graph_list, sanitize=True):
    """Convert (X, A) graph pairs to RDKit molecules (None where conversion fails)."""
    return [get_mol_from_graph(graph_X, graph_A, sanitize)
            for graph_X, graph_A in graph_list]
| [
"numpy.amin",
"rdkit.Chem.CanonicalRankAtoms",
"rdkit.Chem.Mol",
"numpy.ones",
"numpy.argsort",
"numpy.arange",
"mx_mg.data.data_struct.get_mol_spec",
"numpy.pad",
"numpy.zeros_like",
"numpy.copy",
"rdkit.Chem.SanitizeMol",
"numpy.append",
"numpy.less",
"numpy.repeat",
"numpy.tril_indice... | [((344, 370), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (362, 370), False, 'from rdkit import Chem\n'), ((861, 871), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (869, 871), True, 'import networkx as nx\n'), ((3211, 3245), 'numpy.array', 'np.array', (['step_ids'], {'dtype': 'np.int32'}), '(step_ids, dtype=np.int32)\n', (3219, 3245), True, 'import numpy as np\n'), ((3287, 3307), 'numpy.argsort', 'np.argsort', (['step_ids'], {}), '(step_ids)\n', (3297, 3307), True, 'import numpy as np\n'), ((4412, 4441), 'numpy.tril_indices', 'np.tril_indices', (['A_0.shape[0]'], {}), '(A_0.shape[0])\n', (4427, 4441), True, 'import numpy as np\n'), ((4475, 4502), 'numpy.arange', 'np.arange', (['(A_0.shape[0] + 1)'], {}), '(A_0.shape[0] + 1)\n', (4484, 4502), True, 'import numpy as np\n'), ((4639, 4684), 'numpy.where', 'np.where', (['(action_type == 0)', 'X_0[A_0[:, 1]]', '(0)'], {}), '(action_type == 0, X_0[A_0[:, 1]], 0)\n', (4647, 4684), True, 'import numpy as np\n'), ((4728, 4768), 'numpy.where', 'np.where', (['(action_type == 0)', 'A_0[:, 0]', '(0)'], {}), '(action_type == 0, A_0[:, 0], 0)\n', (4736, 4768), True, 'import numpy as np\n'), ((4787, 4827), 'numpy.where', 'np.where', (['(action_type == 1)', 'A_0[:, 1]', '(0)'], {}), '(action_type == 1, A_0[:, 1], 0)\n', (4795, 4827), True, 'import numpy as np\n'), ((4842, 4920), 'numpy.stack', 'np.stack', (['[action_type, atom_type, bond_type, append_pos, connect_pos]'], {'axis': '(1)'}), '([action_type, atom_type, bond_type, append_pos, connect_pos], axis=1)\n', (4850, 4920), True, 'import numpy as np\n'), ((4994, 5033), 'numpy.append', 'np.append', (['actions', 'last_action'], {'axis': '(0)'}), '(actions, last_action, axis=0)\n', (5003, 5033), True, 'import numpy as np\n'), ((5050, 5084), 'numpy.array', 'np.array', (['[X_0[0]]'], {'dtype': 'np.int32'}), '([X_0[0]], dtype=np.int32)\n', (5058, 5084), True, 'import numpy as np\n'), ((5174, 5190), 'numpy.zeros_like', 
'np.zeros_like', (['X'], {}), '(X)\n', (5187, 5190), True, 'import numpy as np\n'), ((7379, 7401), 'numpy.repeat', 'np.repeat', (['mol_ids', 'NX'], {}), '(mol_ids, NX)\n', (7388, 7401), True, 'import numpy as np\n'), ((7420, 7442), 'numpy.repeat', 'np.repeat', (['rep_ids', 'NX'], {}), '(rep_ids, NX)\n', (7429, 7442), True, 'import numpy as np\n'), ((7757, 7793), 'numpy.array', 'np.array', (['atom_types'], {'dtype': 'np.int32'}), '(atom_types, dtype=np.int32)\n', (7765, 7793), True, 'import numpy as np\n'), ((8876, 8901), 'numpy.concatenate', 'np.concatenate', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (8890, 8901), True, 'import numpy as np\n'), ((8910, 8935), 'numpy.concatenate', 'np.concatenate', (['A'], {'axis': '(0)'}), '(A, axis=0)\n', (8924, 8935), True, 'import numpy as np\n'), ((8947, 8973), 'numpy.concatenate', 'np.concatenate', (['NX'], {'axis': '(0)'}), '(NX, axis=0)\n', (8961, 8973), True, 'import numpy as np\n'), ((8985, 9011), 'numpy.concatenate', 'np.concatenate', (['NA'], {'axis': '(0)'}), '(NA, axis=0)\n', (8999, 9011), True, 'import numpy as np\n'), ((9029, 9061), 'numpy.concatenate', 'np.concatenate', (['action_0'], {'axis': '(0)'}), '(action_0, axis=0)\n', (9043, 9061), True, 'import numpy as np\n'), ((9078, 9109), 'numpy.concatenate', 'np.concatenate', (['actions'], {'axis': '(0)'}), '(actions, axis=0)\n', (9092, 9109), True, 'import numpy as np\n'), ((9135, 9175), 'numpy.concatenate', 'np.concatenate', (['last_append_mask'], {'axis': '(0)'}), '(last_append_mask, axis=0)\n', (9149, 9175), True, 'import numpy as np\n'), ((9192, 9223), 'numpy.concatenate', 'np.concatenate', (['mol_ids'], {'axis': '(0)'}), '(mol_ids, axis=0)\n', (9206, 9223), True, 'import numpy as np\n'), ((9240, 9271), 'numpy.concatenate', 'np.concatenate', (['rep_ids'], {'axis': '(0)'}), '(rep_ids, axis=0)\n', (9254, 9271), True, 'import numpy as np\n'), ((9287, 9317), 'numpy.concatenate', 'np.concatenate', (['iw_ids'], {'axis': '(0)'}), '(iw_ids, axis=0)\n', (9301, 9317), 
True, 'import numpy as np\n'), ((9332, 9365), 'numpy.array', 'np.array', (['log_p'], {'dtype': 'np.float32'}), '(log_p, dtype=np.float32)\n', (9340, 9365), True, 'import numpy as np\n'), ((489, 517), 'rdkit.Chem.CanonicalRankAtoms', 'Chem.CanonicalRankAtoms', (['mol'], {}), '(mol)\n', (512, 517), False, 'from rdkit import Chem\n'), ((1126, 1152), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (1144, 1152), False, 'from rdkit import Chem\n'), ((1573, 1609), 'numpy.array', 'np.array', (['atom_types'], {'dtype': 'np.int32'}), '(atom_types, dtype=np.int32)\n', (1581, 1609), True, 'import numpy as np\n'), ((3168, 3180), 'numpy.copy', 'np.copy', (['X_0'], {}), '(X_0)\n', (3175, 3180), True, 'import numpy as np\n'), ((3182, 3194), 'numpy.copy', 'np.copy', (['A_0'], {}), '(A_0)\n', (3189, 3194), True, 'import numpy as np\n'), ((3421, 3448), 'numpy.amax', 'np.amax', (['A_0[:, :2]'], {'axis': '(1)'}), '(A_0[:, :2], axis=1)\n', (3428, 3448), True, 'import numpy as np\n'), ((3450, 3477), 'numpy.amin', 'np.amin', (['A_0[:, :2]'], {'axis': '(1)'}), '(A_0[:, :2], axis=1)\n', (3457, 3477), True, 'import numpy as np\n'), ((3578, 3605), 'numpy.amax', 'np.amax', (['A_0[:, :2]'], {'axis': '(1)'}), '(A_0[:, :2], axis=1)\n', (3585, 3605), True, 'import numpy as np\n'), ((3607, 3634), 'numpy.amin', 'np.amin', (['A_0[:, :2]'], {'axis': '(1)'}), '(A_0[:, :2], axis=1)\n', (3614, 3634), True, 'import numpy as np\n'), ((4019, 4031), 'numpy.copy', 'np.copy', (['X_0'], {}), '(X_0)\n', (4026, 4031), True, 'import numpy as np\n'), ((4033, 4045), 'numpy.copy', 'np.copy', (['A_0'], {}), '(A_0)\n', (4040, 4045), True, 'import numpy as np\n'), ((4149, 4217), 'numpy.pad', 'np.pad', (['is_append_iter', '[[1, 0]]'], {'mode': '"""constant"""', 'constant_values': '(1)'}), "(is_append_iter, [[1, 0]], mode='constant', constant_values=1)\n", (4155, 4217), True, 'import numpy as np\n'), ((4337, 4357), 'numpy.repeat', 'np.repeat', (['shift', 'NX'], {}), '(shift, NX)\n', 
(4346, 4357), True, 'import numpy as np\n'), ((5330, 5359), 'numpy.ones_like', 'np.ones_like', (['last_atom_index'], {}), '(last_atom_index)\n', (5342, 5359), True, 'import numpy as np\n'), ((6880, 6904), 'numpy.arange', 'np.arange', (['NX_0.shape[0]'], {}), '(NX_0.shape[0])\n', (6889, 6904), True, 'import numpy as np\n'), ((3492, 3519), 'numpy.lexsort', 'np.lexsort', (['[-min_b, max_b]'], {}), '([-min_b, max_b])\n', (3502, 3519), True, 'import numpy as np\n'), ((3667, 3683), 'numpy.array', 'np.array', (['[True]'], {}), '([True])\n', (3675, 3683), True, 'import numpy as np\n'), ((4083, 4112), 'numpy.less', 'np.less', (['A_0[:, 0]', 'A_0[:, 1]'], {}), '(A_0[:, 0], A_0[:, 1])\n', (4090, 4112), True, 'import numpy as np\n'), ((4241, 4278), 'numpy.pad', 'np.pad', (['NX', '[[1, 0]]'], {'mode': '"""constant"""'}), "(NX, [[1, 0]], mode='constant')\n", (4247, 4278), True, 'import numpy as np\n'), ((5247, 5315), 'numpy.pad', 'np.pad', (['is_append_iter', '[[1, 0]]'], {'mode': '"""constant"""', 'constant_values': '(1)'}), "(is_append_iter, [[1, 0]], mode='constant', constant_values=1)\n", (5253, 5315), True, 'import numpy as np\n'), ((5369, 5398), 'numpy.ones_like', 'np.ones_like', (['last_atom_index'], {}), '(last_atom_index)\n', (5381, 5398), True, 'import numpy as np\n'), ((6465, 6504), 'numpy.pad', 'np.pad', (['NX_0', '[[1, 0]]'], {'mode': '"""constant"""'}), "(NX_0, [[1, 0]], mode='constant')\n", (6471, 6504), True, 'import numpy as np\n'), ((6678, 6704), 'mx_mg.data.data_struct.get_mol_spec', 'data_struct.get_mol_spec', ([], {}), '()\n', (6702, 6704), False, 'from mx_mg.data import data_struct\n'), ((7213, 7250), 'numpy.pad', 'np.pad', (['NX', '[[1, 0]]'], {'mode': '"""constant"""'}), "(NX, [[1, 0]], mode='constant')\n", (7219, 7250), True, 'import numpy as np\n'), ((7820, 7851), 'numpy.array', 'np.array', (['bonds'], {'dtype': 'np.int32'}), '(bonds, dtype=np.int32)\n', (7828, 7851), True, 'import numpy as np\n'), ((8656, 8691), 'numpy.zeros_like', 'np.zeros_like', 
(['NX_i'], {'dtype': 'np.int32'}), '(NX_i, dtype=np.int32)\n', (8669, 8691), True, 'import numpy as np\n'), ((9572, 9582), 'rdkit.Chem.Mol', 'Chem.Mol', ([], {}), '()\n', (9580, 9582), False, 'from rdkit import Chem\n'), ((9986, 10007), 'rdkit.Chem.SanitizeMol', 'Chem.SanitizeMol', (['mol'], {}), '(mol)\n', (10002, 10007), False, 'from rdkit import Chem\n'), ((1640, 1671), 'numpy.array', 'np.array', (['bonds'], {'dtype': 'np.int32'}), '(bonds, dtype=np.int32)\n', (1648, 1671), True, 'import numpy as np\n'), ((2615, 2630), 'random.random', 'random.random', ([], {}), '()\n', (2628, 2630), False, 'import random\n'), ((2702, 2711), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (2708, 2711), True, 'import numpy as np\n'), ((3804, 3836), 'numpy.stack', 'np.stack', (['[min_b, max_b]'], {'axis': '(1)'}), '([min_b, max_b], axis=1)\n', (3812, 3836), True, 'import numpy as np\n'), ((3871, 3903), 'numpy.stack', 'np.stack', (['[max_b, min_b]'], {'axis': '(1)'}), '([max_b, min_b], axis=1)\n', (3879, 3903), True, 'import numpy as np\n'), ((5544, 5586), 'numpy.ones', 'np.ones', (['[_A.shape[0] * 2]'], {'dtype': 'np.int32'}), '([_A.shape[0] * 2], dtype=np.int32)\n', (5551, 5586), True, 'import numpy as np\n'), ((6539, 6562), 'numpy.repeat', 'np.repeat', (['cumsum', 'NA_0'], {}), '(cumsum, NA_0)\n', (6548, 6562), True, 'import numpy as np\n'), ((7879, 7915), 'numpy.array', 'np.array', (['bond_types'], {'dtype': 'np.int32'}), '(bond_types, dtype=np.int32)\n', (7887, 7915), True, 'import numpy as np\n'), ((8716, 8750), 'numpy.ones_like', 'np.ones_like', (['NX_i'], {'dtype': 'np.int32'}), '(NX_i, dtype=np.int32)\n', (8728, 8750), True, 'import numpy as np\n'), ((8778, 8812), 'numpy.ones_like', 'np.ones_like', (['NX_i'], {'dtype': 'np.int32'}), '(NX_i, dtype=np.int32)\n', (8790, 8812), True, 'import numpy as np\n'), ((546, 572), 'mx_mg.data.data_struct.get_mol_spec', 'data_struct.get_mol_spec', ([], {}), '()\n', (570, 572), False, 'from mx_mg.data import data_struct\n'), ((716, 742), 
'mx_mg.data.data_struct.get_mol_spec', 'data_struct.get_mol_spec', ([], {}), '()\n', (740, 742), False, 'from mx_mg.data import data_struct\n'), ((1703, 1739), 'numpy.array', 'np.array', (['bond_types'], {'dtype': 'np.int32'}), '(bond_types, dtype=np.int32)\n', (1711, 1739), True, 'import numpy as np\n'), ((5640, 5684), 'numpy.concatenate', 'np.concatenate', (['[_A[:, 0], _A[:, 1]]'], {'axis': '(0)'}), '([_A[:, 0], _A[:, 1]], axis=0)\n', (5654, 5684), True, 'import numpy as np\n'), ((5738, 5782), 'numpy.concatenate', 'np.concatenate', (['[_A[:, 1], _A[:, 0]]'], {'axis': '(0)'}), '([_A[:, 1], _A[:, 0]], axis=0)\n', (5752, 5782), True, 'import numpy as np\n'), ((9803, 9829), 'mx_mg.data.data_struct.get_mol_spec', 'data_struct.get_mol_spec', ([], {}), '()\n', (9827, 9829), False, 'from mx_mg.data import data_struct\n'), ((1290, 1316), 'mx_mg.data.data_struct.get_mol_spec', 'data_struct.get_mol_spec', ([], {}), '()\n', (1314, 1316), False, 'from mx_mg.data import data_struct\n'), ((1439, 1465), 'mx_mg.data.data_struct.get_mol_spec', 'data_struct.get_mol_spec', ([], {}), '()\n', (1463, 1465), False, 'from mx_mg.data import data_struct\n'), ((9689, 9715), 'mx_mg.data.data_struct.get_mol_spec', 'data_struct.get_mol_spec', ([], {}), '()\n', (9713, 9715), False, 'from mx_mg.data import data_struct\n')] |
# License: Apache-2.0
import copy
import warnings
from typing import Union
import databricks.koalas as ks
import numpy as np
import pandas as pd
from ..data_cleaning.drop_columns import DropColumns
from ..transformers.transformer import Transformer
from ..util import util
from ._base_encoder import _BaseEncoder
class MultiClassEncoder(_BaseEncoder):
"""Encode the categorical columns with a binary encoder passed by the user.
*N* categorical columns are mapped into *N * (n - 1)* numerical columns
where *n* is the number of classes.
Parameters
----------
encoder : Transformer
Binary Encoder.
dtype : type, default to np.float64.
Numerical datatype of the output data.
Examples
--------
* fit & transform with `pandas`
>>> import pandas as pd
>>> from gators.encoders import MultiClassEncoder
>>> from gators.encoders import WOEEncoder
>>> X = pd.DataFrame({
... 'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
... 'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
... 'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
... 'D': [1, 2, 3, 4, 5, 6]})
>>> y = pd.Series([0, 0, 1, 2, 1, 2], name='TARGET')
>>> obj = MultiClassEncoder(WOEEncoder())
>>> obj.fit_transform(X, y)
D A__TARGET_1_WOEEncoder B__TARGET_1_WOEEncoder C__TARGET_1_WOEEncoder A__TARGET_2_WOEEncoder B__TARGET_2_WOEEncoder C__TARGET_2_WOEEncoder
0 1.0 0.0 0.000000 -0.405465 0.000000 0.000000 -0.405465
1 2.0 0.0 0.000000 -0.405465 0.000000 0.000000 -0.405465
2 3.0 0.0 0.693147 -0.405465 0.000000 0.693147 -0.405465
3 4.0 0.0 0.693147 -0.405465 1.386294 0.693147 -0.405465
4 5.0 0.0 0.693147 0.693147 1.386294 0.693147 0.693147
5 6.0 0.0 0.693147 0.693147 1.386294 0.693147 0.693147
* fit & transform with `koalas`
>>> import databricks.koalas as ks
>>> from gators.encoders import MultiClassEncoder
>>> from gators.encoders import WOEEncoder
>>> X = ks.DataFrame({
... 'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
... 'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
... 'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
... 'D': [1, 2, 3, 4, 5, 6]})
>>> y = ks.Series([0, 0, 1, 2, 1, 2], name='TARGET')
>>> obj = MultiClassEncoder(WOEEncoder())
>>> obj.fit_transform(X, y)
D A__TARGET_1_WOEEncoder B__TARGET_1_WOEEncoder C__TARGET_1_WOEEncoder A__TARGET_2_WOEEncoder B__TARGET_2_WOEEncoder C__TARGET_2_WOEEncoder
0 1.0 0.0 0.000000 -0.405465 0.000000 0.000000 -0.405465
1 2.0 0.0 0.000000 -0.405465 0.000000 0.000000 -0.405465
2 3.0 0.0 0.693147 -0.405465 0.000000 0.693147 -0.405465
3 4.0 0.0 0.693147 -0.405465 1.386294 0.693147 -0.405465
4 5.0 0.0 0.693147 0.693147 1.386294 0.693147 0.693147
5 6.0 0.0 0.693147 0.693147 1.386294 0.693147 0.693147
* fit with `pandas` & transform with `NumPy`
>>> import pandas as pd
>>> from gators.encoders import MultiClassEncoder
>>> from gators.encoders import WOEEncoder
>>> X = pd.DataFrame({
... 'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
... 'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
... 'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
... 'D': [1, 2, 3, 4, 5, 6]})
>>> y = pd.Series([0, 0, 1, 2, 1, 2], name='TARGET')
>>> obj = MultiClassEncoder(WOEEncoder())
>>> _ = obj.fit(X, y)
>>> obj.transform_numpy(X.to_numpy())
array([[ 1. , 0. , 0. , -0.40546511, 0. ,
0. , -0.40546511],
[ 2. , 0. , 0. , -0.40546511, 0. ,
0. , -0.40546511],
[ 3. , 0. , 0.69314718, -0.40546511, 0. ,
0.69314718, -0.40546511],
[ 4. , 0. , 0.69314718, -0.40546511, 1.38629436,
0.69314718, -0.40546511],
[ 5. , 0. , 0.69314718, 0.69314718, 1.38629436,
0.69314718, 0.69314718],
[ 6. , 0. , 0.69314718, 0.69314718, 1.38629436,
0.69314718, 0.69314718]])
* fit with `koalas` & transform with `NumPy`
>>> import databricks.koalas as ks
>>> from gators.encoders import MultiClassEncoder
>>> from gators.encoders import WOEEncoder
>>> X = ks.DataFrame({
... 'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
... 'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
... 'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
... 'D': [1, 2, 3, 4, 5, 6]})
>>> y = ks.Series([0, 0, 1, 2, 1, 2], name='TARGET')
>>> obj = MultiClassEncoder(WOEEncoder())
>>> _ = obj.fit(X, y)
>>> obj.transform_numpy(X.to_numpy())
array([[ 1. , 0. , 0. , -0.40546511, 0. ,
0. , -0.40546511],
[ 2. , 0. , 0. , -0.40546511, 0. ,
0. , -0.40546511],
[ 3. , 0. , 0.69314718, -0.40546511, 0. ,
0.69314718, -0.40546511],
[ 4. , 0. , 0.69314718, -0.40546511, 1.38629436,
0.69314718, -0.40546511],
[ 5. , 0. , 0.69314718, 0.69314718, 1.38629436,
0.69314718, 0.69314718],
[ 6. , 0. , 0.69314718, 0.69314718, 1.38629436,
0.69314718, 0.69314718]])
"""
def __init__(self, encoder: Transformer, dtype: type = np.float64):
    """Initialize the multiclass encoder.

    Parameters
    ----------
    encoder : Transformer
        Binary encoder, fitted once per non-reference target class.
    dtype : type, default to np.float64.
        Numerical datatype of the output data.

    Raises
    ------
    TypeError
        If `encoder` is not a Transformer.
    """
    if not isinstance(encoder, Transformer):
        raise TypeError("`encoder` should be a transformer.")
    _BaseEncoder.__init__(self, dtype=dtype)
    self.encoder = encoder
    self.drop_columns = None
    self.label_names = []
    self.encoder_dict = {}
    self.columns = []
    # empty placeholder, replaced in `fit`/`transform`.
    # Was `np.ndarray([])`, which misuses the raw constructor and allocates
    # an uninitialized 0-d array; `np.array([])` matches `transform`'s reset.
    self.idx_columns = np.array([])
    self.column_names = []
    self.column_mapping = {}
    self.name = type(encoder).__name__
    def fit(
        self, X: Union[pd.DataFrame, ks.DataFrame], y: Union[pd.Series, ks.Series]
    ) -> "MultiClassEncoder":
        """Fit the transformer on the dataframe `X`.

        One copy of the wrapped binary encoder is fitted per one-hot label
        column derived from the multi-class target `y` (first dummy dropped).

        Parameters
        ----------
        X : Union[pd.DataFrame, ks.DataFrame].
            Input dataframe.
        y : Union[pd.Series, ks.Series], default to None.
            Labels.
        Returns
        -------
        MultiClassEncoder
            Instance of itself.
        """
        self.check_dataframe(X)
        self.check_y(X, y)
        self.check_multiclass_target(y)
        # Only object (categorical) columns are encoded.
        self.columns = util.get_datatype_columns(X, object)
        self.check_nans(X, self.columns)
        # Original categorical columns are dropped after encoding in `transform`.
        self.drop_columns = DropColumns(self.columns).fit(X)
        if not self.columns:
            warnings.warn(
                f"""`X` does not contain object columns:
                `{self.__class__.__name__}` is not needed"""
            )
            return self
        self.idx_columns = util.get_idx_columns(
            columns=X.columns,
            selected_columns=self.columns,
        )
        y_name = y.name
        # One-hot encode the multi-class target with the matching backend.
        if isinstance(X, pd.DataFrame):
            y_one_hot = pd.get_dummies(y, prefix=y_name)
        else:
            y_one_hot = ks.get_dummies(y, prefix=y_name)
        # Drop the first dummy: it is redundant given the others.
        y_one_hot = y_one_hot.drop(y_one_hot.columns[0], axis=1)
        self.label_names = y_one_hot.columns
        # Fit an independent copy of the encoder per binary label column.
        for label_name in self.label_names:
            self.encoder_dict[label_name] = copy.copy(self.encoder)
            self.encoder_dict[label_name].fit(X[self.columns], y_one_hot[label_name])
        return self
    def transform(
        self, X: Union[pd.DataFrame, ks.DataFrame]
    ) -> Union[pd.DataFrame, ks.DataFrame]:
        """Transform the dataframe `X`.

        Appends one encoded block per label (columns named
        `<col>__<label>_<EncoderName>`), then drops the original
        categorical columns and casts the result to `self.dtype`.

        Parameters
        ----------
        X : Union[pd.DataFrame, ks.DataFrame].
            Input dataframe.
        Returns
        -------
        Union[pd.DataFrame, ks.DataFrame]
            Transformed dataframe.
        """
        self.check_dataframe(X)
        if not self.columns:
            self.idx_columns = np.array([])
            return X
        for i, label_name in enumerate(self.label_names):
            # Encode the categorical columns with this label's fitted encoder.
            dummy = self.encoder_dict[label_name].transform(X[self.columns].copy())[
                self.encoder_dict[label_name].columns
            ]
            column_names = [f"{col}__{label_name}_{self.name}" for col in dummy.columns]
            dummy.columns = column_names
            # Record the generated names and their mapping back to source columns.
            self.column_names.extend(column_names)
            for name, col in zip(column_names, self.columns):
                self.column_mapping[name] = col
            X = X.join(dummy, how="inner").sort_index()
        # Drop the original categorical columns and cast everything numeric.
        return self.drop_columns.transform(X).astype(self.dtype)
def transform_numpy(self, X: np.ndarray) -> np.ndarray:
"""Transform the NumPy array `X`.
Parameters
----------
X : np.ndarray
Input array.
Returns
-------
np.ndarray
Transformed array.
"""
self.check_array(X)
if not self.columns:
return X
X_encoded_list = []
for i, label_name in enumerate(self.label_names):
dummy = self.encoder_dict[label_name].transform_numpy(
X[:, self.idx_columns].copy()
)
X_encoded_list.append(dummy)
X_new = np.concatenate(
[self.drop_columns.transform_numpy(X)] + X_encoded_list, axis=1
)
return X_new.astype(self.dtype)
| [
"numpy.ndarray",
"pandas.get_dummies",
"copy.copy",
"numpy.array",
"warnings.warn",
"databricks.koalas.get_dummies"
] | [((6899, 6913), 'numpy.ndarray', 'np.ndarray', (['[]'], {}), '([])\n', (6909, 6913), True, 'import numpy as np\n'), ((7799, 7925), 'warnings.warn', 'warnings.warn', (['f"""`X` does not contain object columns:\n `{self.__class__.__name__}` is not needed"""'], {}), '(\n f"""`X` does not contain object columns:\n `{self.__class__.__name__}` is not needed"""\n )\n', (7812, 7925), False, 'import warnings\n'), ((8191, 8223), 'pandas.get_dummies', 'pd.get_dummies', (['y'], {'prefix': 'y_name'}), '(y, prefix=y_name)\n', (8205, 8223), True, 'import pandas as pd\n'), ((8262, 8294), 'databricks.koalas.get_dummies', 'ks.get_dummies', (['y'], {'prefix': 'y_name'}), '(y, prefix=y_name)\n', (8276, 8294), True, 'import databricks.koalas as ks\n'), ((8493, 8516), 'copy.copy', 'copy.copy', (['self.encoder'], {}), '(self.encoder)\n', (8502, 8516), False, 'import copy\n'), ((9107, 9119), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9115, 9119), True, 'import numpy as np\n')] |
import numpy as np
np.random.seed(0)  # fix NumPy RNG for reproducibility
import pandas as pd
import matplotlib.pyplot as plt
import gym
import tensorflow as tf
tf.random.set_seed(0)  # fix TensorFlow RNG (weight inits) for reproducibility
from tensorflow import keras
class Chart:
    """Matplotlib line chart that is redrawn after every training episode."""

    def __init__(self):
        self.fig, self.ax = plt.subplots(1, 1)

    def plot(self, episode_rewards):
        """Clear the axes and redraw the whole reward history."""
        axes = self.ax
        axes.clear()
        axes.plot(episode_rewards)
        axes.set_xlabel('iteration')
        axes.set_ylabel('episode reward')
        self.fig.canvas.draw()
# Pendulum-v0: continuous-action environment used by the DDPG/TD3 agents below.
env = gym.make('Pendulum-v0')
env.seed(0)  # deterministic environment rollouts
class DQNReplayer:
    """Fixed-capacity experience replay buffer backed by a pandas DataFrame.

    Transitions are stored as (observation, action, reward,
    next_observation, done); the oldest entry is overwritten once the
    buffer is full.
    """

    def __init__(self, capacity):
        field_names = ['observation', 'action', 'reward',
                       'next_observation', 'done']
        self.memory = pd.DataFrame(index=range(capacity), columns=field_names)
        self.i = 0       # next write position (circular)
        self.count = 0   # number of transitions currently stored
        self.capacity = capacity

    def store(self, *args):
        """Insert one transition, overwriting the oldest when full."""
        self.memory.loc[self.i] = args
        self.i = (self.i + 1) % self.capacity
        self.count = min(self.count + 1, self.capacity)

    def sample(self, size):
        """Return a generator of per-field stacked arrays for a random batch."""
        rows = np.random.choice(self.count, size=size)
        return (np.stack(self.memory.loc[rows, field])
                for field in self.memory.columns)
class OrnsteinUhlenbeckProcess:
    """Temporally correlated exploration noise (Ornstein-Uhlenbeck process).

    Call `reset()` before drawing samples; each call to the instance
    advances the process by one Euler step of size `dt`.
    """

    def __init__(self, size, mu=0., sigma=1., theta=.15, dt=.01):
        self.size = size
        self.mu = mu
        self.sigma = sigma
        self.theta = theta
        self.dt = dt

    def __call__(self):
        """Advance one Euler step and return the current state."""
        gaussian = np.random.normal(size=self.size)
        drift = self.theta * (self.mu - self.x) * self.dt
        diffusion = self.sigma * np.sqrt(self.dt) * gaussian
        self.x += drift + diffusion
        return self.x

    def reset(self, x=0.):
        """Restart the process at the constant state `x`."""
        self.x = x * np.ones(self.size)
class DDPGAgent:
    """Deep Deterministic Policy Gradient agent (off-policy actor-critic).

    Maintains evaluate/target network pairs for both actor and critic,
    an experience replay buffer, and Ornstein-Uhlenbeck exploration noise
    for continuous actions.
    """
    def __init__(self, env, actor_kwargs, critic_kwargs,
            replayer_capacity=20000, replayer_initial_transitions=2000,
            gamma=0.99, batches=1, batch_size=64,
            net_learning_rate=0.005, noise_scale=0.1, explore=True):
        observation_dim = env.observation_space.shape[0]
        action_dim = env.action_space.shape[0]
        # Critic input is the concatenated (observation, action) vector.
        observation_action_dim = observation_dim + action_dim
        self.action_low = env.action_space.low
        self.action_high = env.action_space.high
        self.gamma = gamma  # discount factor
        self.net_learning_rate = net_learning_rate  # soft-update rate for target nets
        self.explore = explore  # add exploration noise when True
        self.batches = batches  # gradient batches per learn() call
        self.batch_size = batch_size
        self.replayer = DQNReplayer(replayer_capacity)
        # Learning starts only after this many transitions are stored.
        self.replayer_initial_transitions = replayer_initial_transitions
        self.noise = OrnsteinUhlenbeckProcess(size=(action_dim,),
                sigma=noise_scale)
        self.noise.reset()
        # Actor maps observation -> action; critic maps (obs, action) -> Q.
        self.actor_evaluate_net = self.build_network(
                input_size=observation_dim, **actor_kwargs)
        self.actor_target_net = self.build_network(
                input_size=observation_dim, **actor_kwargs)
        self.critic_evaluate_net = self.build_network(
                input_size=observation_action_dim, **critic_kwargs)
        self.critic_target_net = self.build_network(
                input_size=observation_action_dim, **critic_kwargs)
        # Start the target networks as exact copies of the evaluate networks.
        self.update_target_net(self.actor_target_net,
                self.actor_evaluate_net)
        self.update_target_net(self.critic_target_net,
                self.critic_evaluate_net)
    def update_target_net(self, target_net, evaluate_net,
            learning_rate=1.):
        """Soft update: target <- (1 - lr) * target + lr * evaluate."""
        target_weights = target_net.get_weights()
        evaluate_weights = evaluate_net.get_weights()
        average_weights = [(1. - learning_rate) * t + learning_rate * e
                for t, e in zip(target_weights, evaluate_weights)]
        target_net.set_weights(average_weights)
    def build_network(self, input_size, hidden_sizes, output_size=1,
            activation=tf.nn.relu, output_activation=None,
            loss=tf.losses.mse, learning_rate=0.001):
        """Build and compile a fully connected Keras network."""
        model = keras.Sequential()
        for layer, hidden_size in enumerate(hidden_sizes):
            # Only the first layer declares the input shape.
            kwargs = {'input_shape': (input_size,)} if layer == 0 else {}
            model.add(keras.layers.Dense(units=hidden_size,
                    activation=activation, **kwargs))
        model.add(keras.layers.Dense(units=output_size,
                activation=output_activation))
        optimizer = tf.optimizers.Adam(learning_rate)
        model.compile(optimizer=optimizer, loss=loss)
        return model
    def decide(self, observation):
        """Choose an action: uniform random until the buffer warms up,
        then actor output plus (optional) exploration noise, clipped to
        the action bounds."""
        if self.explore and self.replayer.count < \
                self.replayer_initial_transitions:
            return np.random.uniform(self.action_low, self.action_high)
        action = self.actor_evaluate_net.predict(
                observation[np.newaxis])[0]
        if self.explore:
            noise = self.noise()
            action = np.clip(action + noise, self.action_low, self.action_high)
        return action
    def learn(self, observation, action, reward, next_observation, done):
        """Store one transition and, once warmed up, train actor and critic."""
        self.replayer.store(observation, action, reward, next_observation,
                done)
        if self.replayer.count >= self.replayer_initial_transitions:
            if done:
                self.noise.reset() # reset the noise process for the next episode
            for batch in range(self.batches):
                observations, actions, rewards, next_observations, \
                        dones = self.replayer.sample(self.batch_size)
                # Train the actor network: maximize Q by minimizing -mean(Q).
                observation_tensor = tf.convert_to_tensor(observations,
                        dtype=tf.float32)
                with tf.GradientTape() as tape:
                    action_tensor = self.actor_evaluate_net(
                            observation_tensor)
                    input_tensor = tf.concat([observation_tensor,
                            action_tensor], axis=1)
                    q_tensor = self.critic_evaluate_net(input_tensor)
                    loss_tensor = -tf.reduce_mean(q_tensor)
                grad_tensors = tape.gradient(loss_tensor,
                        self.actor_evaluate_net.variables)
                self.actor_evaluate_net.optimizer.apply_gradients(zip(
                        grad_tensors, self.actor_evaluate_net.variables))
                # Train the critic network on the TD target
                # r + gamma * Q'(s', pi'(s')) with bootstrap masked on done.
                next_actions = self.actor_target_net.predict(
                        next_observations)
                observation_actions = np.hstack([observations, actions])
                next_observation_actions = np.hstack(
                        [next_observations, next_actions])
                next_qs = self.critic_target_net.predict(
                        next_observation_actions)[:, 0]
                targets = rewards + self.gamma * next_qs * (1. - dones)
                self.critic_evaluate_net.fit(observation_actions, targets,
                        verbose=0)
                # Soft-update both target networks.
                self.update_target_net(self.actor_target_net,
                        self.actor_evaluate_net, self.net_learning_rate)
                self.update_target_net(self.critic_target_net,
                        self.critic_evaluate_net, self.net_learning_rate)
def play_qlearning(env, agent, train=False, render=False):
    """Run one episode with `agent` in `env`.

    When `train` is True the agent observes every transition via
    `agent.learn(...)`; when `render` is True the environment is drawn
    each step.  Returns the accumulated episode reward.
    """
    total_reward = 0
    obs = env.reset()
    done = False
    while not done:
        if render:
            env.render()
        action = agent.decide(obs)
        next_obs, reward, done, _ = env.step(action)
        total_reward += reward
        if train:
            agent.learn(obs, action, reward, next_obs, done)
        obs = next_obs
    return total_reward
actor_kwargs = {'hidden_sizes' : [32, 64], 'learning_rate' : 0.0001}
critic_kwargs = {'hidden_sizes' : [64, 128], 'learning_rate' : 0.001}
agent = DDPGAgent(env, actor_kwargs=actor_kwargs,
        critic_kwargs=critic_kwargs)
# train
episodes = 50
episode_rewards = []
chart = Chart()
for episode in range(episodes):
    episode_reward = play_qlearning(env, agent, train=True)
    episode_rewards.append(episode_reward)
    chart.plot(episode_rewards)
# test
agent.explore = False # disable exploration
episode_rewards = [play_qlearning(env, agent) for _ in range(100)]
# Prints: 'mean episode reward = sum / count = mean' (message is in Chinese).
print('平均回合奖励 = {} / {} = {}'.format(sum(episode_rewards),
        len(episode_rewards), np.mean(episode_rewards)))
import pickle
import datetime as dt
# Toggle: set judge = True to SAVE the current env + NumPy RNG state to a
# timestamped file; leave False to LOAD a previously saved snapshot.
# (The original inline save/load comments were swapped.)
judge = False
if judge:  # save branch: snapshot env and RNG state to a new pickle file
    timestamp = dt.datetime.now().strftime('%Y%m%dT%H%M%S%f')
    filepath = 'DDPG-' + timestamp + '.pkl'
    d = {'env' : env, 'np.random.state': np.random.get_state()}
    with open(filepath, 'wb') as f:
        pickle.dump(d, f)
else:  # load branch: restore env and RNG state from a fixed snapshot
    filepath = 'DDPG-20200512T224622297082.pkl'
    with open(filepath, 'rb') as f:
        d = pickle.load(f)
    env = d['env']
    np.random.set_state(d['np.random.state'])
    print(filepath)
class TD3Agent(DDPGAgent):
    """Twin-critic variant of DDPG: two critics, TD target uses min(Q0, Q1)
    to reduce overestimation bias."""
    def __init__(self, env, actor_kwargs, critic_kwargs, replayer_capacity=20000, replayer_initial_transitions=3000,
                 gamma=0.99, batches=1, batch_size=64, net_learning_rate=0.005, noise_scale=0.1, explore=True):
        super().__init__(env, actor_kwargs, critic_kwargs, replayer_capacity, replayer_initial_transitions, gamma,
                         batches, batch_size, net_learning_rate, noise_scale, explore)
        # NOTE(review): everything below duplicates work already done in
        # super().__init__ (which also builds a single-critic pair this class
        # never uses) — wasteful but harmless.
        observation_dim = env.observation_space.shape[0]
        action_dim = env.action_space.shape[0]
        observation_action_dim = observation_dim + action_dim
        self.action_low = env.action_space.low
        self.action_high = env.action_space.high
        self.gamma = gamma
        self.net_learning_rate = net_learning_rate
        self.explore = explore
        self.batches = batches
        self.batch_size = batch_size
        self.replayer = DQNReplayer(replayer_capacity)
        self.replayer_initial_transitions = replayer_initial_transitions
        self.noise = OrnsteinUhlenbeckProcess(size=(action_dim,),
                sigma=noise_scale)
        self.noise.reset()
        self.actor_evaluate_net = self.build_network(
                input_size=observation_dim, **actor_kwargs)
        self.actor_target_net = self.build_network(
                input_size=observation_dim, **actor_kwargs)
        # Twin critics: two independent evaluate/target pairs.
        self.critic0_evaluate_net = self.build_network(
                input_size=observation_action_dim, **critic_kwargs)
        self.critic0_target_net = self.build_network(
                input_size=observation_action_dim, **critic_kwargs)
        self.critic1_evaluate_net = self.build_network(
                input_size=observation_action_dim, **critic_kwargs)
        self.critic1_target_net = self.build_network(
                input_size=observation_action_dim, **critic_kwargs)
        # Start the target networks as exact copies of the evaluate networks.
        self.update_target_net(self.actor_target_net,
                self.actor_evaluate_net)
        self.update_target_net(self.critic0_target_net,
                self.critic0_evaluate_net)
        self.update_target_net(self.critic1_target_net,
                self.critic1_evaluate_net)
    def learn(self, observation, action, reward, next_observation, done):
        """Store one transition and, once warmed up, train the actor and
        both critics; the TD target uses the minimum of the two target
        critics."""
        self.replayer.store(observation, action, reward, next_observation,
                done)
        if self.replayer.count >= self.replayer_initial_transitions:
            if done:
                # Reset the noise process for the next episode.
                self.noise.reset()
            for batch in range(self.batches):
                observations, actions, rewards, next_observations, \
                        dones = self.replayer.sample(self.batch_size)
                # Train the actor (policy gradient through critic0 only).
                observation_tensor = tf.convert_to_tensor(observations,
                        dtype=tf.float32)
                with tf.GradientTape() as tape:
                    action_tensor = self.actor_evaluate_net(
                            observation_tensor)
                    input_tensor = tf.concat([observation_tensor,
                            action_tensor], axis=1)
                    q_tensor = self.critic0_evaluate_net(input_tensor)
                    loss_tensor = -tf.reduce_mean(q_tensor)
                grad_tensors = tape.gradient(loss_tensor,
                        self.actor_evaluate_net.variables)
                self.actor_evaluate_net.optimizer.apply_gradients(zip(
                        grad_tensors, self.actor_evaluate_net.variables))
                # Train both critics on the clipped double-Q TD target.
                next_actions = self.actor_target_net.predict(
                        next_observations)
                observation_actions = np.hstack([observations, actions])
                next_observation_actions = np.hstack(
                        [next_observations, next_actions])
                next_q0s = self.critic0_target_net.predict(
                        next_observation_actions)[:, 0]
                next_q1s = self.critic1_target_net.predict(
                        next_observation_actions)[:, 0]
                # Pessimistic estimate: elementwise minimum of the two critics.
                next_qs = np.minimum(next_q0s, next_q1s)
                targets = rewards + self.gamma * next_qs * (1. - dones)
                self.critic0_evaluate_net.fit(observation_actions,
                        targets[:, np.newaxis], verbose=0)
                self.critic1_evaluate_net.fit(observation_actions,
                        targets[:, np.newaxis], verbose=0)
                # Soft-update all three target networks.
                self.update_target_net(self.actor_target_net,
                        self.actor_evaluate_net, self.net_learning_rate)
                self.update_target_net(self.critic0_target_net,
                        self.critic0_evaluate_net, self.net_learning_rate)
                self.update_target_net(self.critic1_target_net,
                        self.critic1_evaluate_net, self.net_learning_rate)
actor_kwargs = {'hidden_sizes' : [32, 64], 'learning_rate' : 0.0001}
critic_kwargs = {'hidden_sizes' : [64, 128], 'learning_rate' : 0.001}
agent = TD3Agent(env, actor_kwargs=actor_kwargs,
        critic_kwargs=critic_kwargs)
# train
episodes = 50
episode_rewards = []
chart = Chart()
for episode in range(episodes):
    episode_reward = play_qlearning(env, agent, train=True)
    episode_rewards.append(episode_reward)
    chart.plot(episode_rewards)
# test
agent.explore = False # disable exploration
episode_rewards = [play_qlearning(env, agent) for _ in range(100)]
# Prints: 'mean episode reward = sum / count = mean' (message is in Chinese).
print('平均回合奖励 = {} / {} = {}'.format(sum(episode_rewards),
        len(episode_rewards), np.mean(episode_rewards)))
env.close() | [
"tensorflow.random.set_seed",
"pickle.dump",
"numpy.random.seed",
"tensorflow.keras.layers.Dense",
"numpy.ones",
"numpy.clip",
"numpy.random.set_state",
"numpy.mean",
"pickle.load",
"numpy.random.normal",
"tensorflow.keras.Sequential",
"tensorflow.concat",
"numpy.random.choice",
"datetime.... | [((19, 36), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (33, 36), True, 'import numpy as np\n'), ((125, 146), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(0)'], {}), '(0)\n', (143, 146), True, 'import tensorflow as tf\n'), ((484, 507), 'gym.make', 'gym.make', (['"""Pendulum-v0"""'], {}), "('Pendulum-v0')\n", (492, 507), False, 'import gym\n'), ((8972, 9013), 'numpy.random.set_state', 'np.random.set_state', (["d['np.random.state']"], {}), "(d['np.random.state'])\n", (8991, 9013), True, 'import numpy as np\n'), ((242, 260), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (254, 260), True, 'import matplotlib.pyplot as plt\n'), ((1077, 1116), 'numpy.random.choice', 'np.random.choice', (['self.count'], {'size': 'size'}), '(self.count, size=size)\n', (1093, 1116), True, 'import numpy as np\n'), ((1483, 1515), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'self.size'}), '(size=self.size)\n', (1499, 1515), True, 'import numpy as np\n'), ((4035, 4053), 'tensorflow.keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (4051, 4053), False, 'from tensorflow import keras\n'), ((4466, 4499), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', (['learning_rate'], {}), '(learning_rate)\n', (4484, 4499), True, 'import tensorflow as tf\n'), ((8486, 8510), 'numpy.mean', 'np.mean', (['episode_rewards'], {}), '(episode_rewards)\n', (8493, 8510), True, 'import numpy as np\n'), ((8747, 8768), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (8766, 8768), True, 'import numpy as np\n'), ((8814, 8831), 'pickle.dump', 'pickle.dump', (['d', 'f'], {}), '(d, f)\n', (8825, 8831), False, 'import pickle\n'), ((8934, 8948), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8945, 8948), False, 'import pickle\n'), ((14668, 14692), 'numpy.mean', 'np.mean', (['episode_rewards'], {}), '(episode_rewards)\n', (14675, 14692), True, 'import numpy as np\n'), ((1133, 1174), 'numpy.stack', 
'np.stack', (['self.memory.loc[indices, field]'], {}), '(self.memory.loc[indices, field])\n', (1141, 1174), True, 'import numpy as np\n'), ((1704, 1722), 'numpy.ones', 'np.ones', (['self.size'], {}), '(self.size)\n', (1711, 1722), True, 'import numpy as np\n'), ((4340, 4407), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': 'output_size', 'activation': 'output_activation'}), '(units=output_size, activation=output_activation)\n', (4358, 4407), False, 'from tensorflow import keras\n'), ((4733, 4785), 'numpy.random.uniform', 'np.random.uniform', (['self.action_low', 'self.action_high'], {}), '(self.action_low, self.action_high)\n', (4750, 4785), True, 'import numpy as np\n'), ((4956, 5014), 'numpy.clip', 'np.clip', (['(action + noise)', 'self.action_low', 'self.action_high'], {}), '(action + noise, self.action_low, self.action_high)\n', (4963, 5014), True, 'import numpy as np\n'), ((8616, 8633), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (8631, 8633), True, 'import datetime as dt\n'), ((4209, 4279), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': 'hidden_size', 'activation': 'activation'}), '(units=hidden_size, activation=activation, **kwargs)\n', (4227, 4279), False, 'from tensorflow import keras\n'), ((5604, 5656), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['observations'], {'dtype': 'tf.float32'}), '(observations, dtype=tf.float32)\n', (5624, 5656), True, 'import tensorflow as tf\n'), ((6579, 6613), 'numpy.hstack', 'np.hstack', (['[observations, actions]'], {}), '([observations, actions])\n', (6588, 6613), True, 'import numpy as np\n'), ((6657, 6701), 'numpy.hstack', 'np.hstack', (['[next_observations, next_actions]'], {}), '([next_observations, next_actions])\n', (6666, 6701), True, 'import numpy as np\n'), ((11801, 11853), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['observations'], {'dtype': 'tf.float32'}), '(observations, dtype=tf.float32)\n', (11821, 11853), True, 
'import tensorflow as tf\n'), ((12775, 12809), 'numpy.hstack', 'np.hstack', (['[observations, actions]'], {}), '([observations, actions])\n', (12784, 12809), True, 'import numpy as np\n'), ((12853, 12897), 'numpy.hstack', 'np.hstack', (['[next_observations, next_actions]'], {}), '([next_observations, next_actions])\n', (12862, 12897), True, 'import numpy as np\n'), ((13169, 13199), 'numpy.minimum', 'np.minimum', (['next_q0s', 'next_q1s'], {}), '(next_q0s, next_q1s)\n', (13179, 13199), True, 'import numpy as np\n'), ((1611, 1627), 'numpy.sqrt', 'np.sqrt', (['self.dt'], {}), '(self.dt)\n', (1618, 1627), True, 'import numpy as np\n'), ((5736, 5753), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5751, 5753), True, 'import tensorflow as tf\n'), ((5903, 5957), 'tensorflow.concat', 'tf.concat', (['[observation_tensor, action_tensor]'], {'axis': '(1)'}), '([observation_tensor, action_tensor], axis=1)\n', (5912, 5957), True, 'import tensorflow as tf\n'), ((11933, 11950), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (11948, 11950), True, 'import tensorflow as tf\n'), ((12100, 12154), 'tensorflow.concat', 'tf.concat', (['[observation_tensor, action_tensor]'], {'axis': '(1)'}), '([observation_tensor, action_tensor], axis=1)\n', (12109, 12154), True, 'import tensorflow as tf\n'), ((6109, 6133), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['q_tensor'], {}), '(q_tensor)\n', (6123, 6133), True, 'import tensorflow as tf\n'), ((12307, 12331), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['q_tensor'], {}), '(q_tensor)\n', (12321, 12331), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
"""Dialog Policy Learning Module."""
from typing import List
import numpy as np
from .keras_dpl import get_model
from ..common.system_action import SystemAction
from ..common.dialog_state import DialogState
from ..common.component import Component
class DialogPolicyLearning(Component):
    """Dialog policy: maps a history of dialog states to a system action."""

    def __init__(self):
        """Start with no trained classifier."""
        self.clf = None

    def forward(self,
                history: List[DialogState]) -> SystemAction:
        """Predict the next system action for the given dialog history."""
        # Stack the vector representation of every state in the history.
        features = np.array([state.vec for state in history])
        # Score a single batch of one history and take the first output.
        scores = self.clf.predict(np.array([features])).flatten()
        chosen = scores[0]
        system_action = SystemAction(history[-1].index_sys_intent[chosen])
        print('new system_action', str(system_action))
        return system_action

    def fit(self, x, y):
        """Train a fresh model sized to the label dimension and keep it."""
        clf = get_model(int(y.shape[-1]))
        clf.fit(x, y)
        print(f'DPL FIT {clf.score(x, y)}')
        self.clf = clf
| [
"numpy.array"
] | [((559, 593), 'numpy.array', 'np.array', (['[s.vec for s in history]'], {}), '([s.vec for s in history])\n', (567, 593), True, 'import numpy as np\n'), ((674, 687), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (682, 687), True, 'import numpy as np\n')] |
import argparse
import os
import os.path
import sys
sys.path.append('../')
from make_representations.cpe_apply import CPE
from utility.file_utility import FileUtility
from multiprocessing import Pool
import tqdm
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
from nltk import FreqDist
from proteinseq_util.biophysical import ProtSeqProp
from utility.math_utility import normalize_mat
import scipy.stats as st
from chi2analysis.chi2analysis import Chi2Analysis
from utility.math_utility import get_sym_kl_rows
from clustering.hierarchical import HierarchicalClutering
#from proteinseq_util.motif_tree_visualization import VisualizeTreeOfMotifs
class DiMotif(object):
    def __init__(self, pos_fasta, neg_fasta, output_path, segmentation_schemes=10, topN=100):
        '''
        Discriminative protein-sequence motif miner.

        pos_fasta / neg_fasta: either an in-memory list of sequences, a
        '.txt' list file, or a '.fasta' file.  Sequences are lowercased,
        segmented with several sampled BPE vocabulary sizes, and up to
        `topN` motifs discriminating positives from negatives are written
        under `output_path`.
        '''
        # Load the positive set from whichever form was given.
        if not isinstance(pos_fasta, str):
            self.pos=pos_fasta
        elif pos_fasta.split('.')[-1]=='txt':
            self.pos=FileUtility.load_list(pos_fasta)
        elif pos_fasta.split('.')[-1]=='fasta':
            self.pos=FileUtility.read_fasta_sequences(pos_fasta)
        # Load the negative set the same way.
        if not isinstance(neg_fasta, str):
            self.neg=neg_fasta
        elif neg_fasta.split('.')[-1]=='txt':
            self.neg=FileUtility.load_list(neg_fasta)
        elif neg_fasta.split('.')[-1]=='fasta':
            self.neg=FileUtility.read_fasta_sequences(neg_fasta)
        self.seqs=[seq.lower() for seq in self.pos+self.neg]
        # Label vector: 1 for positives, 0 for negatives (same order as seqs).
        self.labels=[1]*len(self.pos)+[0]*len(self.neg)
        self.segmentation_schemes=segmentation_schemes
        self.load_alpha_distribution()
        self.prepare_segmentations()
        print (output_path)
        FileUtility.ensure_dir(output_path)
        self.output_path=output_path
        self.motif_extraction(topN)
    def load_alpha_distribution(self):
        """Fit an alpha distribution over the precomputed SwissProt
        vocabulary-size statistics; used to sample BPE vocabulary sizes."""
        swiss_size_change=FileUtility.load_obj('data_config/swiss_1000_samples.pickle')
        all_samples=[]
        for i in tqdm.tqdm(range(0,1000)):
            sample=[]
            for vocab in np.arange(10000,1000000,10000):
                sample.append(swiss_size_change[vocab][i])
            # Negative first difference across vocabulary sizes.
            all_samples.append(-np.diff(sample))
        sample_mat=np.mean(normalize_mat(all_samples),axis=0)
        sample_mat_std=np.std(normalize_mat(all_samples),axis=0)
        self.alpha_param = st.alpha.fit(sample_mat)
    def get_alpha_samples(self):
        """Draw `segmentation_schemes` sorted BPE vocabulary sizes from the
        fitted alpha distribution (offset by 10000)."""
        r = st.alpha.rvs(self.alpha_param[0], size=self.segmentation_schemes)
        idx=np.array(np.round(10000+(r*10000)),dtype=np.int32).tolist()
        idx.sort()
        return idx
    def prepare_segmentations(self):
        """Segment every sequence once per sampled vocabulary size and build
        the extended (space-joined) and '@@@'-joined representations."""
        segmented_seqs=[]
        vocab_sizes=self.get_alpha_samples()
        for i, vocab in tqdm.tqdm(enumerate(vocab_sizes)):
            f=open('data_config/swissprot_ppe','r')
            CPE_Applier=CPE(f,separator='', merge_size=vocab)
            for idx, seq in enumerate(self.seqs):
                if i ==0:
                    segmented_seqs.append([CPE_Applier.segment(seq)])
                else:
                    segmented_seqs[idx]+=[CPE_Applier.segment(seq)]
        self.extended_sequences=[' '.join(l) for l in segmented_seqs]
        self.possible_segmentations=['@@@'.join(l) for l in segmented_seqs]
    def motif_extraction(self, topn=100):
        """Chi2-select up to `topn` discriminative segments, keep the
        positively associated ones, and save their pairwise symmetric-KL
        distance matrix."""
        cpe_vectorizer = TfidfVectorizer(use_idf=False, analyzer='word',
                                        norm=None, stop_words=[], lowercase=True, binary=False, tokenizer=str.split)
        tf_vec=cpe_vectorizer.fit_transform(self.extended_sequences)
        vocab=cpe_vectorizer.get_feature_names()
        CH=Chi2Analysis(tf_vec,self.labels,vocab)
        # Keep only features positively associated with the positive class.
        vocab_binary=[x[0] for x in CH.extract_features_fdr(self.output_path+'/motifs.txt', N=topn, alpha=5e-2, direction=True, allow_subseq=True, binarization=True, remove_redundant_markers=False) if x[1]>0]
        vocab_binary=vocab_binary[0:min(100,len(vocab_binary))]
        idxs=[vocab.index(v) for v in vocab_binary]
        # Motif occurrence profile restricted to the positive sequences.
        pos_matrix=tf_vec.toarray()[0:len(self.pos),idxs]
        DIST=get_sym_kl_rows(pos_matrix.T)
        FileUtility.save_obj(self.output_path+'/sym_KL', DIST)
        #HC=HierarchicalClutering(DIST,vocab_binary)
        self.motifs=vocab_binary
        #self.tree=HC.nwk
        #FileUtility.save_list(self.output_path+'/motif_tree.txt', [HC.nwk])
def checkArgs(args):
    '''
    Check the command-line arguments; return an error-message string when
    the input files are missing, otherwise run the motif-extraction
    pipeline (returning None).
    '''
    err = ""
    parser = argparse.ArgumentParser()

    def file_choices(choices, fname):
        # Restrict a file argument to the allowed extensions.
        ext = os.path.splitext(fname)[1][1:]
        if ext not in choices:
            parser.error("file doesn't end with one of {}".format(choices))
        return fname

    # positive sequence file
    parser.add_argument('--pos', action='store', dest='pos_file', type=lambda s:file_choices(("txt","fasta"),s),
                        help='positive fasta or txt sequence file')
    # negative sequence file
    parser.add_argument('--neg', action='store', dest='neg_file', type=lambda s:file_choices(("txt","fasta"),s),
                        help='negative fasta or txt sequence file')
    # output directory
    parser.add_argument('--outdir', action='store', dest='output_dir', default=False, type=str,
                        help="directory for storing the output files, if doesn't exist will be created.")
    # number of motifs to extract
    parser.add_argument('--topn', action='store', dest='topn', default=100, type=int,
                        help='How many motifs to extract if possible?')
    # number of segmentation samples per sequence
    parser.add_argument('--segs', action='store', dest='segs', default=10, type=int,
                        help='How many segmentation samples for each seq')
    parsedArgs = parser.parse_args()

    # BUG FIX: a missing --pos/--neg previously crashed os.access with None.
    if parsedArgs.pos_file is None or not os.access(parsedArgs.pos_file, os.F_OK):
        err = err + "\nError: Permission denied or could not find the positive file!"
        return err
    if parsedArgs.neg_file is None or not os.access(parsedArgs.neg_file, os.F_OK):
        err = err + "\nError: Permission denied or could not find the negative file!"
        return err
    try:
        print('Extract motifs..')
        DMF = DiMotif(parsedArgs.pos_file, parsedArgs.neg_file, parsedArgs.output_dir,
                      topN=parsedArgs.topn, segmentation_schemes=parsedArgs.segs)
        print('Visualize motifs..')
        # VisualizeTreeOfMotifs(DMF.tree, DMF.motifs)
    except Exception as e:
        # BUG FIX: a bare `except:` silently swallowed every failure
        # (including KeyboardInterrupt) with no detail at all.
        print('error occurred:', e)
if __name__ == '__main__':
    # Validate CLI arguments; checkArgs also runs the pipeline on success.
    err = checkArgs(sys.argv)
    if err:
        print(err)
        exit()
| [
"utility.file_utility.FileUtility.load_obj",
"argparse.ArgumentParser",
"sklearn.feature_extraction.text.TfidfVectorizer",
"utility.math_utility.get_sym_kl_rows",
"numpy.arange",
"numpy.round",
"utility.file_utility.FileUtility.ensure_dir",
"scipy.stats.alpha.rvs",
"sys.path.append",
"utility.file... | [((52, 74), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (67, 74), False, 'import sys\n'), ((4630, 4655), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4653, 4655), False, 'import argparse\n'), ((1676, 1711), 'utility.file_utility.FileUtility.ensure_dir', 'FileUtility.ensure_dir', (['output_path'], {}), '(output_path)\n', (1698, 1711), False, 'from utility.file_utility import FileUtility\n'), ((1851, 1912), 'utility.file_utility.FileUtility.load_obj', 'FileUtility.load_obj', (['"""data_config/swiss_1000_samples.pickle"""'], {}), "('data_config/swiss_1000_samples.pickle')\n", (1871, 1912), False, 'from utility.file_utility import FileUtility\n'), ((2321, 2345), 'scipy.stats.alpha.fit', 'st.alpha.fit', (['sample_mat'], {}), '(sample_mat)\n', (2333, 2345), True, 'import scipy.stats as st\n'), ((2392, 2457), 'scipy.stats.alpha.rvs', 'st.alpha.rvs', (['self.alpha_param[0]'], {'size': 'self.segmentation_schemes'}), '(self.alpha_param[0], size=self.segmentation_schemes)\n', (2404, 2457), True, 'import scipy.stats as st\n'), ((3301, 3429), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'use_idf': '(False)', 'analyzer': '"""word"""', 'norm': 'None', 'stop_words': '[]', 'lowercase': '(True)', 'binary': '(False)', 'tokenizer': 'str.split'}), "(use_idf=False, analyzer='word', norm=None, stop_words=[],\n lowercase=True, binary=False, tokenizer=str.split)\n", (3316, 3429), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((3601, 3641), 'chi2analysis.chi2analysis.Chi2Analysis', 'Chi2Analysis', (['tf_vec', 'self.labels', 'vocab'], {}), '(tf_vec, self.labels, vocab)\n', (3613, 3641), False, 'from chi2analysis.chi2analysis import Chi2Analysis\n'), ((4036, 4065), 'utility.math_utility.get_sym_kl_rows', 'get_sym_kl_rows', (['pos_matrix.T'], {}), '(pos_matrix.T)\n', (4051, 4065), False, 'from utility.math_utility import get_sym_kl_rows\n'), ((4074, 4130), 
'utility.file_utility.FileUtility.save_obj', 'FileUtility.save_obj', (["(self.output_path + '/sym_KL')", 'DIST'], {}), "(self.output_path + '/sym_KL', DIST)\n", (4094, 4130), False, 'from utility.file_utility import FileUtility\n'), ((6434, 6473), 'os.access', 'os.access', (['parsedArgs.pos_file', 'os.F_OK'], {}), '(parsedArgs.pos_file, os.F_OK)\n', (6443, 6473), False, 'import os\n'), ((6593, 6632), 'os.access', 'os.access', (['parsedArgs.neg_file', 'os.F_OK'], {}), '(parsedArgs.neg_file, os.F_OK)\n', (6602, 6632), False, 'import os\n'), ((2026, 2058), 'numpy.arange', 'np.arange', (['(10000)', '(1000000)', '(10000)'], {}), '(10000, 1000000, 10000)\n', (2035, 2058), True, 'import numpy as np\n'), ((2194, 2220), 'utility.math_utility.normalize_mat', 'normalize_mat', (['all_samples'], {}), '(all_samples)\n', (2207, 2220), False, 'from utility.math_utility import normalize_mat\n'), ((2259, 2285), 'utility.math_utility.normalize_mat', 'normalize_mat', (['all_samples'], {}), '(all_samples)\n', (2272, 2285), False, 'from utility.math_utility import normalize_mat\n'), ((2812, 2850), 'make_representations.cpe_apply.CPE', 'CPE', (['f'], {'separator': '""""""', 'merge_size': 'vocab'}), "(f, separator='', merge_size=vocab)\n", (2815, 2850), False, 'from make_representations.cpe_apply import CPE\n'), ((959, 991), 'utility.file_utility.FileUtility.load_list', 'FileUtility.load_list', (['pos_fasta'], {}), '(pos_fasta)\n', (980, 991), False, 'from utility.file_utility import FileUtility\n'), ((1246, 1278), 'utility.file_utility.FileUtility.load_list', 'FileUtility.load_list', (['neg_fasta'], {}), '(neg_fasta)\n', (1267, 1278), False, 'from utility.file_utility import FileUtility\n'), ((4708, 4731), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (4724, 4731), False, 'import os\n'), ((1061, 1104), 'utility.file_utility.FileUtility.read_fasta_sequences', 'FileUtility.read_fasta_sequences', (['pos_fasta'], {}), '(pos_fasta)\n', (1093, 1104), False, 'from 
utility.file_utility import FileUtility\n'), ((1348, 1391), 'utility.file_utility.FileUtility.read_fasta_sequences', 'FileUtility.read_fasta_sequences', (['neg_fasta'], {}), '(neg_fasta)\n', (1380, 1391), False, 'from utility.file_utility import FileUtility\n'), ((2149, 2164), 'numpy.diff', 'np.diff', (['sample'], {}), '(sample)\n', (2156, 2164), True, 'import numpy as np\n'), ((2479, 2506), 'numpy.round', 'np.round', (['(10000 + r * 10000)'], {}), '(10000 + r * 10000)\n', (2487, 2506), True, 'import numpy as np\n')] |
import tensorflow as tf
import keras
import argparse
import keras.backend as K
from keras.models import load_model
import cv2
import numpy as np
def print_pred(preds,classes):
    """Print class scores as one comma-separated line, e.g. "A: 0.1%, B: 0.9%".

    Args:
        preds: Model output array; flattened so a (1, n) batch shape works too.
        classes: Class-name labels, one per entry of ``preds``.
    """
    preds = preds.ravel()
    # Fix: the whole array was re-rounded on every loop pass and the line was
    # built with quadratic string concatenation; round once and join instead.
    preds_rounded = np.around(preds, decimals=4)
    # NOTE(review): the trailing "%" suggests percentages, but the raw values
    # are printed unscaled -- output format kept as-is for compatibility.
    print(", ".join(
        classes[i] + ": " + str(preds_rounded[i]) + "%"
        for i in range(len(classes))
    ))
def image_preprocessing(img):
    """Load an image file and return it as a normalized (1, 224, 224, 3) batch.

    Args:
        img: Path to the image file on disk.

    Returns:
        Float array scaled to [0, 1], shaped as a single-element batch.
    """
    loaded = cv2.imread(img)
    resized = cv2.resize(loaded, (224, 224))
    batch = np.reshape(resized, [1, 224, 224, 3])
    # Scale 8-bit pixel values into the [0, 1] range expected by the model.
    return 1.0 * batch / 255
def inference(img, weights, dataset):
    """Run a saved Keras classifier on one image and print per-class scores.

    Args:
        img: Path to the input image file.
        weights: Path to the saved Keras model file.
        dataset: 'Srinivasan2014' (3 classes) or anything else for the
            4-class Kermany2018 label set.
    """
    class_names = (['AMD', 'DME', 'NORMAL']
                   if dataset == 'Srinivasan2014'
                   else ['CNV', 'DME', 'DRUSEN', 'NORMAL'])
    batch = image_preprocessing(img)
    # Drop any stale graph/session state before loading the model.
    K.clear_session()
    model = load_model(weights)
    scores = model.predict(batch, batch_size=None, steps=1)
    print_pred(scores, class_names)
if __name__ == '__main__':
    # CLI entry point: classify one OCT scan image with a trained model.
    parser = argparse.ArgumentParser()
    parser.add_argument('--imgpath', type=str, required=True, help='path/to/image')
    parser.add_argument('--weights', type=str, required=True, help='Weights for prediction')
    parser.add_argument('--dataset', type=str, required=True, help='Choosing between 2 OCT datasets', choices=['Srinivasan2014','Kermany2018'])
    args = parser.parse_args()
    inference(args.imgpath, args.weights, args.dataset)
| [
"keras.models.load_model",
"cv2.resize",
"argparse.ArgumentParser",
"cv2.imread",
"numpy.around",
"numpy.reshape",
"keras.backend.clear_session"
] | [((504, 519), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (514, 519), False, 'import cv2\n'), ((530, 557), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (540, 557), False, 'import cv2\n'), ((566, 599), 'numpy.reshape', 'np.reshape', (['img', '[1, 224, 224, 3]'], {}), '(img, [1, 224, 224, 3])\n', (576, 599), True, 'import numpy as np\n'), ((858, 875), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (873, 875), True, 'import keras.backend as K\n'), ((888, 907), 'keras.models.load_model', 'load_model', (['weights'], {}), '(weights)\n', (898, 907), False, 'from keras.models import load_model\n'), ((1072, 1097), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1095, 1097), False, 'import argparse\n'), ((282, 310), 'numpy.around', 'np.around', (['preds'], {'decimals': '(4)'}), '(preds, decimals=4)\n', (291, 310), True, 'import numpy as np\n')] |
from sklearn.manifold import TSNE
from sklearn.manifold import MDS
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import OPTICS
from sklearn.cluster import DBSCAN
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import AffinityPropagation
from sklearn.cluster import SpectralClustering
from sklearn import metrics
import os, sys, subprocess
# Global variables (what you really need here is java_file, data_path, and source_path)
task = 1
# The name of the source file
java_file = ['BankUserConcurrentGet.java', 'BankUserConcurrentPut.java', 'BankUserMultiThreaded.java', 'BankUserStrongConsistency.java'][task-1]
# See get_source_code function of ClusterPlotter class for data_path, source_path, and cmu_cs
# The path to the folder containing different students' source files
data_path = './S20_3.3_OPE_Grading_Anon/3.3_OPE_Submissions-anonymized/'
# The path to the source file folder for each student
source_path = '/src/main/java/Project_OMP/BankUserSystem/'
# You may not need this. This is useful when the names of folders for different students share cmu_cs string.
cmu_cs = '@andrew.cmu.edu_data-consistency-ope_consistency-ope-task_'
# Choose tsne or mds for dimension reduction (lower-case)
embedding = 'mds'
'''
java_file = ['ProfileServlet.java', 'FollowerServlet.java', 'HomepageServlet.java', 'TimelineServlet.java'][task-1]
data_path = './F19_Project_3_2/task' + str(task) + '/'
cmu_cs = '@andrew.cmu.edu_social-network_p32-task' + str(task) + '_'
'''
class ClusterPlotter:
    """Interactive 2D scatter plot of clustered student submissions.

    Clicking a point opens the corresponding student's source file with the
    platform's default application. Relies on the module-level globals
    data_path, cmu_cs, source_path, java_file, and task for path construction.
    """
    def __init__(self, features, clusters, studentID, timestamp, algo_name):
        # features: 2D embedded coordinates, one row per submission
        # clusters: cluster label per submission (used to color the points)
        self.features = features
        self.clusters = clusters
        self.studentID = studentID
        self.timestamp = timestamp
        self.algo_name = algo_name
        # number of distinct cluster labels
        self.k=np.unique(clusters).shape[0]
    def plot_all(self):
        """Draw all submissions in a single scatter plot, colored by cluster."""
        x = self.features[:,0]
        y = self.features[:,1]
        fig = plt.figure()
        #fig.suptitle('All clusters together with ' + str(self.clusters.shape[0]) + ' points total')
        fig.suptitle('Task ' + str(task) + ' Solutions: ' + self.algo_name)
        ax = fig.add_subplot(1,1,1)
        #color_points = np.zeros((x.shape[0], 4))
        #for i in range (x.shape[0]):
        #    color_points[i,:] = self.colors[self.clusters[i]]
        # picker=True makes points clickable; clicks are routed to onpick below.
        ax.scatter(x, y, c=self.clusters, picker=True)
        #ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        fig.canvas.mpl_connect('pick_event', lambda e: self.onpick(e, 0))
    def get_source_code(self, i, offset):
        """Open submission i's source file with the OS default handler.

        The offset argument is currently unused.
        """
        file_path = data_path + str(self.studentID[i]) + cmu_cs + str(self.timestamp[i]) + source_path + java_file
        print('You opened a submission by', str(self.studentID[i]), 'at', str(self.timestamp[i]))
        if sys.platform == "win32":
            os.startfile(file_path)
        else:
            # macOS uses 'open'; other POSIX systems use 'xdg-open'.
            opener ="open" if sys.platform == "darwin" else "xdg-open"
            subprocess.call([opener, file_path])
        #with open(file_path.strip(), 'r') as submission_file:
        #    return submission_file.read()
    def onpick(self, event, offset):
        """Matplotlib pick handler: open every submission under the click."""
        ind = event.ind
        print('-------------------')
        for i in ind:
            self.get_source_code(i, offset)
    def show(self):
        """Save the current figure under the data folder, then display it."""
        plt.savefig(data_path + '/clusters/task' + str(task) + '/' + self.algo_name + '.png')
        plt.show()
# Embed to 2D
inputCSV = pd.read_csv(data_path + 'input_task{}.csv'.format(task))
data = pd.read_csv(data_path + 'cluster_info_task{}.csv'.format(task))
# Submissions without a cluster assignment are marked -1.
data['ClusterID'] = data['ClusterID'].fillna(-1)
clusterID = data['ClusterID']
distanceMatrix = data.drop(columns=['StudentID', 'Timestamp', 'ClusterID'])
if embedding == 'tsne':
    reduced = TSNE(n_components=2, metric='precomputed', learning_rate=700, perplexity=40).fit_transform(distanceMatrix)
elif embedding == 'mds':
    reduced = MDS(n_components=2, dissimilarity='precomputed', metric=True).fit_transform(distanceMatrix)
else:
    raise ValueError("Embedding must be either mds or tsne (lower-case)")
# Cluster solutions: run every algorithm on the same precomputed distances.
cluster_methods = ['optics_xi', 'optics_dbscan', 'dbscan', 'agglomerative_clustering', 'affinity_propagation', 'spectral_clustering']
clusterID_xi = OPTICS(metric='precomputed', max_eps=0.16, xi=0.05, algorithm='brute', min_samples=3).fit_predict(distanceMatrix)
clusterID_op = OPTICS(metric='precomputed', max_eps=0.16, cluster_method='dbscan', min_samples=7).fit_predict(distanceMatrix)
clusterID_db = DBSCAN(metric='precomputed', eps=0.1).fit_predict(distanceMatrix)
clusterID_ag = AgglomerativeClustering(affinity='precomputed', linkage='average', n_clusters=2).fit_predict(distanceMatrix)
# Affinity propagation and spectral clustering expect similarities, hence 1 - distance.
clusterID_af = AffinityPropagation(affinity='precomputed', damping=0.7).fit_predict(1 - distanceMatrix)
clusterID_sp = SpectralClustering(affinity='precomputed', n_clusters=2).fit_predict(1 - distanceMatrix)
clusterIDs = [clusterID_xi, clusterID_op, clusterID_db, clusterID_ag, clusterID_af, clusterID_sp]
# Evaluation: silhouette score per clustering (fails if everything landed in one cluster).
for clusterID in clusterIDs:
    try:
        print(metrics.silhouette_score(distanceMatrix, clusterID, metric='precomputed'))
    except ValueError as identifier:
        print("Number of labels is 1. Valid values are 2 to n_samples - 1 (inclusive)")
# Visualize (you may want to change the suffix of the save images)
for i in range(len(cluster_methods)):
    c = ClusterPlotter(reduced, clusterIDs[i], data['StudentID'], data['Timestamp'], '{}_mds'.format(cluster_methods[i]))
    c.plot_all()
    c.show()
# Update clusters in csv
'''
data['ClusterID'] = clusterID_sp
merged = data[['StudentID', 'Timestamp', 'ClusterID']].rename(columns={"StudentID": "Source_file_id", "Timestamp": "Project_id", "ClusterID": "Cluster_Id"})
inputCSV = inputCSV.drop(columns=['Cluster_id'])
inputCSV = inputCSV.merge(merged, how='left', on=["Source_file_id", "Project_id"])
inputCSV.to_csv(data_path + 'input.csv')
''' | [
"os.startfile",
"matplotlib.pyplot.show",
"sklearn.cluster.AffinityPropagation",
"sklearn.manifold.TSNE",
"sklearn.cluster.SpectralClustering",
"numpy.unique",
"sklearn.metrics.silhouette_score",
"matplotlib.pyplot.figure",
"subprocess.call",
"sklearn.cluster.OPTICS",
"sklearn.cluster.Agglomerat... | [((1957, 1969), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1967, 1969), True, 'import matplotlib.pyplot as plt\n'), ((3384, 3394), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3392, 3394), True, 'import matplotlib.pyplot as plt\n'), ((4230, 4319), 'sklearn.cluster.OPTICS', 'OPTICS', ([], {'metric': '"""precomputed"""', 'max_eps': '(0.16)', 'xi': '(0.05)', 'algorithm': '"""brute"""', 'min_samples': '(3)'}), "(metric='precomputed', max_eps=0.16, xi=0.05, algorithm='brute',\n min_samples=3)\n", (4236, 4319), False, 'from sklearn.cluster import OPTICS\n'), ((4359, 4445), 'sklearn.cluster.OPTICS', 'OPTICS', ([], {'metric': '"""precomputed"""', 'max_eps': '(0.16)', 'cluster_method': '"""dbscan"""', 'min_samples': '(7)'}), "(metric='precomputed', max_eps=0.16, cluster_method='dbscan',\n min_samples=7)\n", (4365, 4445), False, 'from sklearn.cluster import OPTICS\n'), ((4485, 4522), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'metric': '"""precomputed"""', 'eps': '(0.1)'}), "(metric='precomputed', eps=0.1)\n", (4491, 4522), False, 'from sklearn.cluster import DBSCAN\n'), ((4566, 4651), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'affinity': '"""precomputed"""', 'linkage': '"""average"""', 'n_clusters': '(2)'}), "(affinity='precomputed', linkage='average', n_clusters=2\n )\n", (4589, 4651), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((4690, 4746), 'sklearn.cluster.AffinityPropagation', 'AffinityPropagation', ([], {'affinity': '"""precomputed"""', 'damping': '(0.7)'}), "(affinity='precomputed', damping=0.7)\n", (4709, 4746), False, 'from sklearn.cluster import AffinityPropagation\n'), ((4794, 4850), 'sklearn.cluster.SpectralClustering', 'SpectralClustering', ([], {'affinity': '"""precomputed"""', 'n_clusters': '(2)'}), "(affinity='precomputed', n_clusters=2)\n", (4812, 4850), False, 'from sklearn.cluster import SpectralClustering\n'), ((2832, 2855), 
'os.startfile', 'os.startfile', (['file_path'], {}), '(file_path)\n', (2844, 2855), False, 'import os, sys, subprocess\n'), ((2953, 2989), 'subprocess.call', 'subprocess.call', (['[opener, file_path]'], {}), '([opener, file_path])\n', (2968, 2989), False, 'import os, sys, subprocess\n'), ((3742, 3818), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'metric': '"""precomputed"""', 'learning_rate': '(700)', 'perplexity': '(40)'}), "(n_components=2, metric='precomputed', learning_rate=700, perplexity=40)\n", (3746, 3818), False, 'from sklearn.manifold import TSNE\n'), ((5047, 5120), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['distanceMatrix', 'clusterID'], {'metric': '"""precomputed"""'}), "(distanceMatrix, clusterID, metric='precomputed')\n", (5071, 5120), False, 'from sklearn import metrics\n'), ((1826, 1845), 'numpy.unique', 'np.unique', (['clusters'], {}), '(clusters)\n', (1835, 1845), True, 'import numpy as np\n'), ((3888, 3949), 'sklearn.manifold.MDS', 'MDS', ([], {'n_components': '(2)', 'dissimilarity': '"""precomputed"""', 'metric': '(True)'}), "(n_components=2, dissimilarity='precomputed', metric=True)\n", (3891, 3949), False, 'from sklearn.manifold import MDS\n')] |
import numpy as np
import pandas as pd
from Task import MyOneHotEncoder, SimpleCounterEncoder, FoldCounters, weights
def test_imports():
    """Static check: Task.py must depend on numpy only (no sklearn shortcuts)."""
    with open('Task.py', 'r') as source:
        contents = ' '.join(source.readlines())
    assert 'import numpy' in contents
    assert contents.count('import') == 1
    assert 'sklearn' not in contents
    assert 'get_dummies' not in contents
def test_weights_small():
    """weights() on a small hand-built sample reproduces known category values."""
    np.random.seed(1)
    features = np.array([1, 1, 1, 1, 0, 4, 1, 0, 0, 3, 2, 1, 0, 3, 1, 1, 3, 4, 0, 1, 3, 4, 2, 4, 0, 3, 1, 2, 0, 4])
    labels = np.array([1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0])
    result = weights(features, labels)
    expected = [0.5714285714285714, 0.4, 0.6666666666666666, 1.0, 0.2]
    assert len(result) == 5
    assert np.allclose(result, expected, atol=1e-8)
    assert type(result) == np.ndarray
def test_weights_big():
    """weights() on a larger random sample reproduces known category values."""
    np.random.seed(1)
    features = np.random.choice([0, 1, 2, 3, 4, 5], size=(300,))
    labels = np.random.choice([0, 1], size=(300,))
    result = weights(features, labels)
    expected = [0.38596491228070173, 0.5384615384615384, 0.4523809523809524, 0.3409090909090909, 0.44642857142857145,
                0.42857142857142855]
    assert len(result) == 6
    assert np.allclose(result, expected, atol=1e-8)
    assert type(result) == np.ndarray
| [
"numpy.random.seed",
"Task.weights",
"numpy.allclose",
"numpy.array",
"numpy.random.choice"
] | [((415, 432), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (429, 432), True, 'import numpy as np\n'), ((441, 545), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 0, 4, 1, 0, 0, 3, 2, 1, 0, 3, 1, 1, 3, 4, 0, 1, 3, 4, 2, 4, 0,\n 3, 1, 2, 0, 4]'], {}), '([1, 1, 1, 1, 0, 4, 1, 0, 0, 3, 2, 1, 0, 3, 1, 1, 3, 4, 0, 1, 3, 4,\n 2, 4, 0, 3, 1, 2, 0, 4])\n', (449, 545), True, 'import numpy as np\n'), ((550, 654), 'numpy.array', 'np.array', (['[1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1,\n 1, 1, 1, 1, 0]'], {}), '([1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0,\n 0, 0, 1, 1, 1, 1, 1, 0])\n', (558, 654), True, 'import numpy as np\n'), ((659, 672), 'Task.weights', 'weights', (['x', 'y'], {}), '(x, y)\n', (666, 672), False, 'from Task import MyOneHotEncoder, SimpleCounterEncoder, FoldCounters, weights\n'), ((773, 804), 'numpy.allclose', 'np.allclose', (['w', 'ans'], {'atol': '(1e-08)'}), '(w, ans, atol=1e-08)\n', (784, 804), True, 'import numpy as np\n'), ((867, 884), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (881, 884), True, 'import numpy as np\n'), ((893, 942), 'numpy.random.choice', 'np.random.choice', (['[0, 1, 2, 3, 4, 5]'], {'size': '(300,)'}), '([0, 1, 2, 3, 4, 5], size=(300,))\n', (909, 942), True, 'import numpy as np\n'), ((951, 988), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': '(300,)'}), '([0, 1], size=(300,))\n', (967, 988), True, 'import numpy as np\n'), ((997, 1010), 'Task.weights', 'weights', (['x', 'y'], {}), '(x, y)\n', (1004, 1010), False, 'from Task import MyOneHotEncoder, SimpleCounterEncoder, FoldCounters, weights\n'), ((1190, 1221), 'numpy.allclose', 'np.allclose', (['w', 'ans'], {'atol': '(1e-08)'}), '(w, ans, atol=1e-08)\n', (1201, 1221), True, 'import numpy as np\n')] |
import datetime
import numpy as np
import os
import sys
# LIBRARY GLOBAL MODS
CELLTYPES = os.path.dirname(os.path.dirname(__file__))
INPUT_FOLDER = CELLTYPES + os.sep + "input"
RUNS_FOLDER = CELLTYPES + os.sep + "runs" # store timestamped runs here
sys.path.append(CELLTYPES)
print("Appended to sys path", CELLTYPES) # TODO can maybe move this too simetup fn call and call once somewhere else...
def run_subdir_setup(run_subfolder=None, timedir_override=None):
    """Create a timestamped output directory tree and return its paths.

    Args:
        run_subfolder: Optional subfolder under RUNS_FOLDER (or an absolute
            path) to nest the timestamped directory in.
        timedir_override: Optional fixed name for the timestamped directory,
            instead of the current wall-clock time.

    Returns:
        Dict of output paths ('basedir', 'datadir', ..., 'runinfo'); a minimal
        run_info.txt is started with the base directory as its first line.
    """
    current_time = datetime.datetime.now().strftime("%Y-%m-%d_%I.%M.%S%p")
    experiment_dir = RUNS_FOLDER
    if timedir_override is not None:
        time_folder = timedir_override
    else:
        time_folder = current_time
    if run_subfolder is None:
        current_run_dir = experiment_dir + os.sep + time_folder
    else:
        # Absolute subfolders are used as-is; relative ones nest under RUNS_FOLDER.
        if os.path.isabs(run_subfolder):
            current_run_dir = run_subfolder + os.sep + time_folder
        else:
            current_run_dir = experiment_dir + os.sep + run_subfolder + os.sep + time_folder
    # make subfolders in the timestamped run directory:
    data_dir = os.path.join(current_run_dir, "data")
    plot_data_dir = os.path.join(current_run_dir, "plot_data")
    lattice_dir = os.path.join(current_run_dir, "lattice")
    plot_lattice_dir = os.path.join(current_run_dir, "plot_lattice")
    simsetup_dir = os.path.join(current_run_dir, "simsetup")
    states_dir = os.path.join(current_run_dir, "states")
    dir_list = [RUNS_FOLDER, current_run_dir, plot_data_dir, data_dir, lattice_dir,
                plot_lattice_dir, simsetup_dir, states_dir]
    for dirs in dir_list:
        if not os.path.exists(dirs):
            os.makedirs(dirs)
    # io path storage to pass around
    io_dict = {'basedir': current_run_dir,
               'datadir': data_dir,
               'plotdatadir': plot_data_dir,
               'latticedir': lattice_dir,
               'plotlatticedir': plot_lattice_dir,
               'simsetupdir': simsetup_dir,
               'statesdir': states_dir,
               'runinfo': current_run_dir + os.sep + 'run_info.txt'}
    # make minimal run_info settings file with first line as the base output dir
    runinfo_append(io_dict, ('basedir', current_run_dir))
    return io_dict
def state_write(state, row_vals, col_vals, dataname, rowname, colname, output_dir):
    """Dump a state matrix and its row/column annotation vectors as CSV text.

    Rows correspond to the time array and columns to per-gene labels. The
    matrix is written with integer formatting; column labels as strings.

    Returns:
        (datapath, rowpath, colpath) of the three files written.
    """
    base = output_dir + os.sep + dataname
    datapath = base + ".txt"
    rowpath = base + '_' + rowname + ".txt"
    colpath = base + '_' + colname + ".txt"
    np.savetxt(datapath, np.array(state), delimiter=",", fmt="%d")
    np.savetxt(rowpath, np.array(row_vals), delimiter=",")
    np.savetxt(colpath, np.array(col_vals), delimiter=",", fmt="%s")
    return datapath, rowpath, colpath
def state_read(datapath, rowpath, colpath):
    """Load a state matrix plus its row (float) and column (string) label files.

    Inverse of state_write: rows are the time array, columns the gene labels.

    Returns:
        (state, row, col) arrays.
    """
    return (np.loadtxt(datapath, delimiter=","),
            np.loadtxt(rowpath, delimiter=",", dtype=float),
            np.loadtxt(colpath, delimiter=",", dtype=str))
def runinfo_append(io_dict, info_list, multi=False):
    """Append run metadata rows to the run_info file referenced by io_dict.

    Args:
        io_dict: Dict of run paths; only the 'runinfo' entry is used.
        info_list: One row (sequence of values), or a list of such rows when
            multi=True.
        multi: Treat info_list as a list of rows rather than a single row.
    """
    # Normalize to a list of rows so both modes share one write loop.
    rows = info_list if multi else [info_list]
    with open(io_dict['runinfo'], 'a') as runinfo:
        for row in rows:
            runinfo.write(','.join(str(entry) for entry in row) + '\n')
def write_general_arr(X, data_folder, fname, txt=True, compress=False):
    """
    Writes a general data array to disk.

    Formats (mirroring read_general_arr):
      - txt=True:                  plain CSV text ('.txt')
      - txt=False, compress=False: raw binary ('.npy')
      - txt=False, compress=True:  compressed archive ('.npz', key 'a')

    Bugfix: the compress branches were swapped -- compress=True wrote a plain
    '.npy' file while compress=False wrote an '.npz' archive, so round-trips
    through read_general_arr (which indexes ['a'] only when compress=True)
    failed.

    Args:
        X: array-like data to write.
        data_folder: destination directory.
        fname: file name without extension.
        txt: write CSV text instead of binary.
        compress: (binary only) write a compressed '.npz' archive.

    Returns:
        Full path of the file written.
    """
    if txt:
        assert not compress
        fpath = data_folder + os.sep + fname + '.txt'
        np.savetxt(fpath, X, delimiter=',')
    elif compress:
        # Archive stores the array under key 'a', as read_general_arr expects.
        fpath = data_folder + os.sep + fname + '.npz'
        np.savez_compressed(fpath, a=X)
    else:
        fpath = data_folder + os.sep + fname + '.npy'
        np.save(fpath, X)
    return fpath
def read_general_arr(fpath, txt=True, compress=False):
    """
    Reads a general data array (txt, npy, or compressed npz).

    For '.npz' archives (compress=True) the array is expected under key 'a'.

    Returns:
        The loaded array.
    """
    if txt:
        assert not compress
        return np.loadtxt(fpath, delimiter=',')
    loaded = np.load(fpath)
    return loaded['a'] if compress else loaded
| [
"sys.path.append",
"os.path.isabs",
"numpy.load",
"numpy.save",
"os.makedirs",
"os.path.dirname",
"numpy.savetxt",
"os.path.exists",
"datetime.datetime.now",
"numpy.array",
"numpy.loadtxt",
"numpy.savez",
"os.path.join"
] | [((271, 297), 'sys.path.append', 'sys.path.append', (['CELLTYPES'], {}), '(CELLTYPES)\n', (286, 297), False, 'import sys\n'), ((107, 132), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (122, 132), False, 'import os\n'), ((1106, 1143), 'os.path.join', 'os.path.join', (['current_run_dir', '"""data"""'], {}), "(current_run_dir, 'data')\n", (1118, 1143), False, 'import os\n'), ((1164, 1206), 'os.path.join', 'os.path.join', (['current_run_dir', '"""plot_data"""'], {}), "(current_run_dir, 'plot_data')\n", (1176, 1206), False, 'import os\n'), ((1225, 1265), 'os.path.join', 'os.path.join', (['current_run_dir', '"""lattice"""'], {}), "(current_run_dir, 'lattice')\n", (1237, 1265), False, 'import os\n'), ((1289, 1334), 'os.path.join', 'os.path.join', (['current_run_dir', '"""plot_lattice"""'], {}), "(current_run_dir, 'plot_lattice')\n", (1301, 1334), False, 'import os\n'), ((1354, 1395), 'os.path.join', 'os.path.join', (['current_run_dir', '"""simsetup"""'], {}), "(current_run_dir, 'simsetup')\n", (1366, 1395), False, 'import os\n'), ((1413, 1452), 'os.path.join', 'os.path.join', (['current_run_dir', '"""states"""'], {}), "(current_run_dir, 'states')\n", (1425, 1452), False, 'import os\n'), ((3048, 3083), 'numpy.loadtxt', 'np.loadtxt', (['datapath'], {'delimiter': '""","""'}), "(datapath, delimiter=',')\n", (3058, 3083), True, 'import numpy as np\n'), ((3094, 3141), 'numpy.loadtxt', 'np.loadtxt', (['rowpath'], {'delimiter': '""","""', 'dtype': 'float'}), "(rowpath, delimiter=',', dtype=float)\n", (3104, 3141), True, 'import numpy as np\n'), ((3152, 3197), 'numpy.loadtxt', 'np.loadtxt', (['colpath'], {'delimiter': '""","""', 'dtype': 'str'}), "(colpath, delimiter=',', dtype=str)\n", (3162, 3197), True, 'import numpy as np\n'), ((830, 858), 'os.path.isabs', 'os.path.isabs', (['run_subfolder'], {}), '(run_subfolder)\n', (843, 858), False, 'import os\n'), ((2673, 2688), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (2681, 2688), True, 
'import numpy as np\n'), ((2739, 2757), 'numpy.array', 'np.array', (['row_vals'], {}), '(row_vals)\n', (2747, 2757), True, 'import numpy as np\n'), ((2798, 2816), 'numpy.array', 'np.array', (['col_vals'], {}), '(col_vals)\n', (2806, 2816), True, 'import numpy as np\n'), ((3882, 3917), 'numpy.savetxt', 'np.savetxt', (['fpath', 'X'], {'delimiter': '""","""'}), "(fpath, X, delimiter=',')\n", (3892, 3917), True, 'import numpy as np\n'), ((4346, 4378), 'numpy.loadtxt', 'np.loadtxt', (['fpath'], {'delimiter': '""","""'}), "(fpath, delimiter=',')\n", (4356, 4378), True, 'import numpy as np\n'), ((4401, 4415), 'numpy.load', 'np.load', (['fpath'], {}), '(fpath)\n', (4408, 4415), True, 'import numpy as np\n'), ((505, 528), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (526, 528), False, 'import datetime\n'), ((1638, 1658), 'os.path.exists', 'os.path.exists', (['dirs'], {}), '(dirs)\n', (1652, 1658), False, 'import os\n'), ((1672, 1689), 'os.makedirs', 'os.makedirs', (['dirs'], {}), '(dirs)\n', (1683, 1689), False, 'import os\n'), ((4019, 4036), 'numpy.save', 'np.save', (['fpath', 'X'], {}), '(fpath, X)\n', (4026, 4036), True, 'import numpy as np\n'), ((4121, 4141), 'numpy.savez', 'np.savez', (['fpath'], {'a': 'X'}), '(fpath, a=X)\n', (4129, 4141), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 20 10:01:52 2021
@author: leyuan
"""
import numpy as np
from functions import softmax, cross_entropy_error
class MulLayer(object):
    """Multiplication node: caches both operands so backward can swap them."""

    def __init__(self):
        self.x = None
        self.y = None

    def forward(self, x, y):
        self.x, self.y = x, y
        return x * y

    def backward(self, dout):
        # d(xy)/dx = y and d(xy)/dy = x, so the cached inputs are swapped.
        return dout * self.y, dout * self.x
class AddLayer:
    """Addition node: nothing to cache, gradients pass through unchanged."""

    def __init__(self):
        pass

    def forward(self, x, y):
        return x + y

    def backward(self, dout):
        # Addition routes the upstream gradient equally to both inputs.
        return dout * 1, dout * 1
class Relu(object):
    """Rectified linear activation with a cached negativity mask for backprop."""

    def __init__(self):
        self.mask = None

    def forward(self, x):
        # Remember which inputs were strictly negative; those get clamped to 0.
        self.mask = x < 0
        clamped = x.copy()
        clamped[self.mask] = 0
        return clamped

    def backward(self, dout):
        # The gradient is blocked wherever the forward input was negative.
        dout[self.mask] = 0
        return dout
class Sigmoid(object):
    """Logistic sigmoid; caches its output for the backward pass."""

    def __init__(self):
        self.out = None

    def forward(self, x):
        result = 1 / (1 + np.exp(-x))
        self.out = result
        return result

    def backward(self, dout):
        # d(sigma)/dx = sigma * (1 - sigma), using the cached forward output.
        return dout * self.out * (1.0 - self.out)
class Affine:
    """Fully-connected layer computing x @ W + b, caching x for gradients."""

    def __init__(self, W, b):
        self.W = W
        self.b = b
        self.x = None
        self.dW = None
        self.db = None

    def forward(self, x):
        self.x = x
        return np.dot(x, self.W) + self.b

    def backward(self, dout):
        # Gradients of x @ W + b: weights get x^T @ dout, bias sums over batch.
        self.dW = np.dot(self.x.T, dout)
        self.db = np.sum(dout, axis=0)
        return np.dot(dout, self.W.T)
class SoftmaxWithLoss:
    """Softmax activation fused with cross-entropy loss.

    Fusing the two yields the simple backward pass (y - t) / batch_size.
    """
    def __init__(self):
        self.loss = None
        self.y = None # softmax output
        self.t = None # teacher labels (one-hot rows or integer class indices)

    def forward(self, x, t):
        """Return the cross-entropy loss of softmax(x) against labels t."""
        self.t = t
        self.y = softmax(x)
        self.loss = cross_entropy_error(self.y, self.t)

        return self.loss

    def backward(self, dout=1):
        """Gradient w.r.t. the pre-softmax scores, averaged over the batch."""
        batch_size = self.t.shape[0]
        if self.t.size == self.y.size: # one-hot coding
            dx = (self.y - self.t) / batch_size
        else:                          # non one-hot coding: subtract 1 at each true class
            dx = self.y.copy()
            dx[np.arange(batch_size), self.t] -= 1
            dx = dx / batch_size

        return dx
class BatchNormalization:
    """Batch normalization layer (http://arxiv.org/abs/1502.03167).

    Normalizes each feature over the batch during training while tracking
    exponential running statistics for use at inference time. 4D conv inputs
    are flattened to 2D, normalized, and reshaped back.
    """
    def __init__(self, gamma, beta, momentum=0.9, running_mean=None, running_var=None):
        self.gamma = gamma
        self.beta = beta
        self.momentum = momentum
        self.input_shape = None # 4D for conv layers, 2D for fully-connected layers

        # Running mean and variance used at inference (test) time.
        self.running_mean = running_mean
        self.running_var = running_var

        # Intermediate values cached for the backward pass.
        self.batch_size = None
        self.xc = None
        self.std = None
        self.dgamma = None
        self.dbeta = None

    def forward(self, x, train_flg=True):
        """Normalize x; train_flg selects batch stats vs. running stats."""
        self.input_shape = x.shape
        if x.ndim != 2:
            # Flatten conv activations (N, C, H, W) to (N, C*H*W).
            N, C, H, W = x.shape
            x = x.reshape(N, -1)

        out = self.__forward(x, train_flg)

        return out.reshape(*self.input_shape)

    def __forward(self, x, train_flg):
        # Lazily initialize running stats on the first call.
        if self.running_mean is None:
            N, D = x.shape
            self.running_mean = np.zeros(D)
            self.running_var = np.zeros(D)

        if train_flg:
            mu = x.mean(axis=0)
            xc = x - mu
            var = np.mean(xc**2, axis=0)
            # NOTE(review): 10e-7 equals 1e-6; possibly 1e-7 was intended --
            # kept as written.
            std = np.sqrt(var + 10e-7)
            xn = xc / std

            self.batch_size = x.shape[0]
            self.xc = xc
            self.xn = xn
            self.std = std
            # Exponential moving average of batch statistics.
            self.running_mean = self.momentum * self.running_mean + (1-self.momentum) * mu
            self.running_var = self.momentum * self.running_var + (1-self.momentum) * var
        else:
            # Inference: normalize with the tracked running statistics.
            xc = x - self.running_mean
            xn = xc / ((np.sqrt(self.running_var + 10e-7)))

        out = self.gamma * xn + self.beta
        return out

    def backward(self, dout):
        """Backprop through the normalization; restores the input shape."""
        if dout.ndim != 2:
            N, C, H, W = dout.shape
            dout = dout.reshape(N, -1)

        dx = self.__backward(dout)

        dx = dx.reshape(*self.input_shape)
        return dx

    def __backward(self, dout):
        # Standard batch-norm gradient derivation over the flattened 2D input.
        dbeta = dout.sum(axis=0)
        dgamma = np.sum(self.xn * dout, axis=0)
        dxn = self.gamma * dout
        dxc = dxn / self.std
        dstd = -np.sum((dxn * self.xc) / (self.std * self.std), axis=0)
        dvar = 0.5 * dstd / self.std
        dxc += (2.0 / self.batch_size) * self.xc * dvar
        dmu = np.sum(dxc, axis=0)
        dx = dxc - dmu / self.batch_size

        self.dgamma = dgamma
        self.dbeta = dbeta

        return dx
class Dropout:
    """Plain (non-inverted) dropout -- http://arxiv.org/abs/1207.0580.

    During training each unit is kept with probability 1 - dropout_ratio; at
    test time activations are scaled by that keep probability instead.
    """

    def __init__(self, dropout_ratio=0.5):
        self.dropout_ratio = dropout_ratio
        self.mask = None

    def forward(self, x, train_flg=True):
        if not train_flg:
            # Inference: scale by the expected keep fraction.
            return x * (1.0 - self.dropout_ratio)
        keep = np.random.rand(*x.shape) > self.dropout_ratio
        self.mask = keep
        return x * keep

    def backward(self, dout):
        # Gradients flow only through the units kept in the forward pass.
        return dout * self.mask
"numpy.sum",
"numpy.zeros",
"functions.cross_entropy_error",
"numpy.mean",
"numpy.arange",
"numpy.exp",
"numpy.random.rand",
"numpy.dot",
"numpy.sqrt",
"functions.softmax"
] | [((1644, 1666), 'numpy.dot', 'np.dot', (['dout', 'self.W.T'], {}), '(dout, self.W.T)\n', (1650, 1666), True, 'import numpy as np\n'), ((1685, 1707), 'numpy.dot', 'np.dot', (['self.x.T', 'dout'], {}), '(self.x.T, dout)\n', (1691, 1707), True, 'import numpy as np\n'), ((1726, 1746), 'numpy.sum', 'np.sum', (['dout'], {'axis': '(0)'}), '(dout, axis=0)\n', (1732, 1746), True, 'import numpy as np\n'), ((1975, 1985), 'functions.softmax', 'softmax', (['x'], {}), '(x)\n', (1982, 1985), False, 'from functions import softmax, cross_entropy_error\n'), ((2006, 2041), 'functions.cross_entropy_error', 'cross_entropy_error', (['self.y', 'self.t'], {}), '(self.y, self.t)\n', (2025, 2041), False, 'from functions import softmax, cross_entropy_error\n'), ((4536, 4566), 'numpy.sum', 'np.sum', (['(self.xn * dout)'], {'axis': '(0)'}), '(self.xn * dout, axis=0)\n', (4542, 4566), True, 'import numpy as np\n'), ((4807, 4826), 'numpy.sum', 'np.sum', (['dxc'], {'axis': '(0)'}), '(dxc, axis=0)\n', (4813, 4826), True, 'import numpy as np\n'), ((1553, 1570), 'numpy.dot', 'np.dot', (['x', 'self.W'], {}), '(x, self.W)\n', (1559, 1570), True, 'import numpy as np\n'), ((3446, 3457), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (3454, 3457), True, 'import numpy as np\n'), ((3489, 3500), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (3497, 3500), True, 'import numpy as np\n'), ((3622, 3646), 'numpy.mean', 'np.mean', (['(xc ** 2)'], {'axis': '(0)'}), '(xc ** 2, axis=0)\n', (3629, 3646), True, 'import numpy as np\n'), ((3663, 3683), 'numpy.sqrt', 'np.sqrt', (['(var + 1e-06)'], {}), '(var + 1e-06)\n', (3670, 3683), True, 'import numpy as np\n'), ((4644, 4697), 'numpy.sum', 'np.sum', (['(dxn * self.xc / (self.std * self.std))'], {'axis': '(0)'}), '(dxn * self.xc / (self.std * self.std), axis=0)\n', (4650, 4697), True, 'import numpy as np\n'), ((1169, 1179), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1175, 1179), True, 'import numpy as np\n'), ((4111, 4144), 'numpy.sqrt', 'np.sqrt', 
(['(self.running_var + 1e-06)'], {}), '(self.running_var + 1e-06)\n', (4118, 4144), True, 'import numpy as np\n'), ((5227, 5251), 'numpy.random.rand', 'np.random.rand', (['*x.shape'], {}), '(*x.shape)\n', (5241, 5251), True, 'import numpy as np\n'), ((2334, 2355), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (2343, 2355), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import transformers
from typing import Mapping, Sequence, Text, Union
# converts texts into input matrices required by transformers
def from_tokenizer(tokenizer: transformers.PreTrainedTokenizer,
                   texts: Sequence[Text],
                   pad_token: int = 0) -> Mapping[Text, np.ndarray]:
    """Tokenize texts into padded id/mask/segment matrices for a transformer.

    Args:
        tokenizer: Tokenizer providing encode() and model_max_length.
        texts: Raw input strings.
        pad_token: Fill value for positions past each sequence's end.

    Returns:
        Dict with 'word_inputs' (token ids), 'mask_inputs' (1 where a real
        token is present), and 'segment_inputs' (all zeros).
    """
    encoded = [
        tokenizer.encode(text,
                         add_special_tokens=True,
                         max_length=tokenizer.model_max_length,
                         truncation=True)
        for text in texts
    ]
    n_rows = len(encoded)
    n_cols = max(len(ids) for ids in encoded)
    token_ids = np.full(shape=(n_rows, n_cols), fill_value=pad_token)
    is_token = np.zeros(shape=(n_rows, n_cols))
    for row_idx, ids in enumerate(encoded):
        token_ids[row_idx, :len(ids)] = ids
        is_token[row_idx, :len(ids)] = 1
    return dict(
        word_inputs=token_ids,
        mask_inputs=is_token,
        segment_inputs=np.zeros(shape=(n_rows, n_cols)))
def read_csv(
        data_path: Text,
        label_col: Text,
        n_rows: Union[int, None] = None) -> pd.DataFrame:
    """Load a labeled text CSV, locating text/label columns by fuzzy name match.

    Column names are lower-cased and stripped of spaces; the label column is
    the one whose normalized name contains label_col, and the text column the
    one containing "text" or "tweet". Exactly one match each is required.

    Returns:
        DataFrame with columns renamed to 'text' and label_col, NaNs dropped.
    """
    df = pd.read_csv(data_path, nrows=n_rows)
    normalized = {c.lower().replace(" ", ""): c for c in df.columns}
    [y_col] = [normalized[key] for key in normalized if label_col in key]
    [x_col] = [normalized[key] for key in normalized if "text" in key or "tweet" in key]
    df = df[[x_col, y_col]].dropna()
    if pd.api.types.is_string_dtype(df[y_col]):
        # Some files encode zero labels as the letter "o".
        df[y_col] = pd.to_numeric(df[y_col].replace({"o": "0"}))
    return df.rename(columns={x_col: "text", y_col: label_col})
def df_to_xy(
        df: pd.DataFrame,
        tokenizer: transformers.PreTrainedTokenizer,
        label_col: Text) -> (np.ndarray, np.ndarray):
    """Convert a (text, label) frame into model inputs x and label vector y.

    Note: x is actually the dict of input matrices produced by from_tokenizer,
    not a bare ndarray as the annotated return type suggests.
    """
    x = from_tokenizer(tokenizer, df["text"])
    y = df[label_col].values
    return x, y
def read_csvs_to_xy(
        data_paths: Sequence[Text],
        tokenizer: transformers.PreTrainedTokenizer,
        label_col: Text,
        n_rows: Union[int, None] = None) -> (np.ndarray, np.ndarray):
    """Read and concatenate several labeled CSVs, then tokenize into (x, y)."""
    dfs = [read_csv(p, label_col=label_col, n_rows=n_rows) for p in data_paths]
    df = pd.concat(dfs)
    return df_to_xy(df, tokenizer, label_col=label_col)
| [
"numpy.full",
"pandas.read_csv",
"numpy.zeros",
"pandas.api.types.is_string_dtype",
"pandas.concat"
] | [((653, 695), 'numpy.full', 'np.full', ([], {'shape': 'shape', 'fill_value': 'pad_token'}), '(shape=shape, fill_value=pad_token)\n', (660, 695), True, 'import numpy as np\n'), ((711, 732), 'numpy.zeros', 'np.zeros', ([], {'shape': 'shape'}), '(shape=shape)\n', (719, 732), True, 'import numpy as np\n'), ((1098, 1134), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'nrows': 'n_rows'}), '(data_path, nrows=n_rows)\n', (1109, 1134), True, 'import pandas as pd\n'), ((1367, 1406), 'pandas.api.types.is_string_dtype', 'pd.api.types.is_string_dtype', (['df[y_col]'], {}), '(df[y_col])\n', (1395, 1406), True, 'import pandas as pd\n'), ((2073, 2087), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (2082, 2087), True, 'import pandas as pd\n'), ((942, 963), 'numpy.zeros', 'np.zeros', ([], {'shape': 'shape'}), '(shape=shape)\n', (950, 963), True, 'import numpy as np\n')] |
"""Centroidal Voronoi Diagrams
This script contains functions for numerically computing centroidal Voronoi diagrams.
The method used is that of [Secord 02]. The sites are assumed to be given in integer
coordinates that are on a grid of a given width and height with (0, 0) in the
bottom left hand corner of the grid. The grid size is fixed, and thus no extra scaling
occurs if sites shrink to zero size, which is a possibility based on the input. Should
this occur, you will get an error, most likely in the computation of dir_j in the
voronoi_polygons function. Should this occur, your best bet is to increase the resolution
of your grid.
The main function for computing the CVT is weightedCVT, which takes three parameters,
the initial point sites, the density grid, and the number of iterations of Lloyd's
algorithm to run (default is 50).
Here is an example of running the result using a grayscale image (cauchy.jpg) as the input.
from koebe.algorithms.cvt import weightedCVT
import numpy as np
from PIL import Image
import random
# Open the cauchy.jpg grayscale image. If it weren't grayscale, we'd need to convert it.
im = Image.open("data/cauchy.jpg")
w, h = im.size # Get the current image size:
# Scale the image up 5 times (at 200 x 277, my cauchy.jpg image leads to CVT cells
# hitting size 0 and thus having an error)
im = im.resize((5*w, 5*h), Image.ANTIALIAS)
w, h = im.size
# Convert the image pixels to a numpy array.
I = np.array(im)
# Convert to a float array and take the transpose so that
# the first index is x-coordinate and the second index is y-coordinate
rho = np.array(im).astype(float).T
# Convert all the pixel values to densities where white pixels are density 1
# and black pixels are density 1 / 256
for i in range(w):
for j in range(h):
rho[i][j] = (256 - rho[i][j]) / 256
# Generate 1000 random sites
sites = np.array([
[int(random.random() * w), int(random.random() * h)]
for _ in range(1000)
])
# Compute the weighted CVT of the sites
cvt_sites = weightedCVT(sites, rho)
# Plot the sites:
import matplotlib.pyplot as plt
plt.xlim(-10, w+10)
plt.ylim(-10, h+10)
# Note that the plot will be upside down since graphics positive y-direction
# points down, not up.
plt.plot(*cvt_sites.T, 'r.')
plt.show()
References:
* [Secord 02] Secord, A. "Weighted Voronoi Stippling." In Proceedings of the 2nd international
symposium on Non-photorealistic animation and rendering, pp. 37-43. ACM, 2002.
* The Voronoi cell computation is a quick-and-dirty one written by a StackOverflow user. See
the docstring of voronoi_polygons for the link.
"""
from collections import defaultdict
from scipy.spatial import Voronoi
from shapely.geometry import Polygon
import numpy as np
def weightedCVT(pts, rho, num_iterations = 50):
    """Compute a weighted centroidal Voronoi diagram via Lloyd's algorithm.

    Args:
        pts: initial Voronoi point sites (N x 2, integer grid coordinates).
        rho: numpy array indexed by [x][y] giving the density at (x, y).
            Values may be 0 but must not be negative; all-zero cells fall
            back to the average of their polygon vertices.
        num_iterations: OPTIONAL number of Lloyd iterations (default 50).

    Returns:
        The final site locations as an N x 2 matrix.
    """
    grid_w, grid_h = rho.shape
    # Slightly more than sqrt(2) * max dimension: bounds the open cells.
    diameter = max(grid_w, grid_h) * 1.414214
    # Partial-integral tables from [Secord 02] for fast centroid evaluation.
    P = np.cumsum(rho, axis = 0)
    Q = np.cumsum(P, axis = 0)
    clip_region = Polygon(np.array([[0, 0], [grid_w, 0], [grid_w, grid_h], [0, grid_h]]))
    sites = pts
    # Lloyd's algorithm: repeatedly move every site to its cell's centroid.
    for _ in range(num_iterations):
        cells = [Polygon(poly).intersection(clip_region)
                 for poly in voronoi_polygons(Voronoi(sites), diameter)]
        sites = np.array([
            wcvt_centroid(intCoords(list(cell.exterior.coords)[:-1]), P, Q)
            for cell in cells
        ])
    return sites
def worldToImgPixelCoords(world_x,
                          world_y,
                          img_x,
                          img_y,
                          img_w,
                          img_h,
                          img_pixels_w,
                          img_pixels_h,
                          truncate = True):
    """Converts a point in world coordinates to image pixel coordinates for an image with top left corner
    placed at (img_x, img_y) and dimensions img_w x img_h.

    Args:
        world_x: the x-coordinate of the point
        world_y: the y-coordinate of the point
        img_x: the x-coordinate of the top left hand corner of the image
        img_y: the y-coordinate of the top left hand corner of the image
        img_w: the width in world coordinates of the image
        img_h: the height in world coordinates of the image
        img_pixels_w: the number of pixels along the width of the image
        img_pixels_h: the number of pixels along the height of the image
        truncate: (Optional) if True does not return pixel values outside the image for world coordinates
            outside the image, but instead projects onto the image boundary

    Returns:
        (x, y) in pixel coordinates where y grows downward from the image's top edge.
    """
    x = (world_x - img_x) * img_pixels_w / img_w
    # BUGFIX: the y scale must use the image height, not the width, so that it
    # is the exact inverse of imgPixelToWorldCoords for non-square images.
    y = (img_y - world_y) * img_pixels_h / img_h
    if truncate:
        x = min(max(0, x), img_pixels_w)
        y = min(max(0, y), img_pixels_h)
    return (int(x), int(y))
def imgPixelToWorldCoords(pixel_x, pixel_y, img_x, img_y, img_w, img_h, img_pixels_w, img_pixels_h):
    """Converts an image pixel coordinate back into world coordinates.

    Args:
        pixel_x: the x-coordinate in pixels in the image
        pixel_y: the y-coordinate in pixels in the image (larger is lower on screen)
        img_x: the x-coordinate of the top left hand corner of the image
        img_y: the y-coordinate of the top left hand corner of the image
        img_w: the width in world coordinates of the image
        img_h: the height in world coordinates of the image
        img_pixels_w: the number of pixels along the width of the image
        img_pixels_h: the number of pixels along the height of the image

    Returns:
        (x, y) in world coordinates for the bottom left hand corner of the pixel
    """
    world_x = pixel_x * img_w / img_pixels_w + img_x
    world_y = img_y - pixel_y * img_h / img_pixels_h
    return (world_x, world_y)
def rasterizeSegment(start_x, start_y, end_x, end_y):
    """Bresenham line rasterization.

    Slightly modified from the Python version on Rosetta Code:
    https://rosettacode.org/wiki/Bitmap/Bresenham%27s_line_algorithm#Python

    Args:
        start_x: the x-coordinate of the start point of the segment
        start_y: the y-coordinate of the start point of the segment
        end_x: the x-coordinate of the end point of the segment
        end_y: the y-coordinate of the end point of the segment

    Returns:
        A list [(x, y)] of the image pixel coordinates along the line.
    """
    pixels = []
    delta_x = abs(end_x - start_x)
    delta_y = abs(end_y - start_y)
    step_x = 1 if start_x <= end_x else -1
    step_y = 1 if start_y <= end_y else -1
    cur_x, cur_y = start_x, start_y
    if delta_x > delta_y:
        # Shallow line: step along x, accumulate error in y.
        error = delta_x / 2.0
        while cur_x != end_x:
            pixels.append((cur_x, cur_y))
            error -= delta_y
            if error < 0:
                cur_y += step_y
                error += delta_x
            cur_x += step_x
    else:
        # Steep line: step along y, accumulate error in x.
        error = delta_y / 2.0
        while cur_y != end_y:
            pixels.append((cur_x, cur_y))
            error -= delta_x
            if error < 0:
                cur_x += step_x
                error += delta_y
            cur_y += step_y
    pixels.append((cur_x, cur_y))
    return pixels
def rasterizePolygon(pts):
    """Rasterize a closed polygon onto the integer grid.

    Args:
        pts: List[(int, int)]. 2D integer coordinates of the polygon's
            vertices. The polygon is assumed closed, so the segment from the
            last point back to the first point is rasterized as well.

    Returns:
        The pixel coordinates of the boundary of the polygon.
    """
    edge_pixels = []
    count = len(pts)
    for i, vertex in enumerate(pts):
        successor = pts[(i + 1) % count]
        edge_pixels.extend(rasterizeSegment(*vertex, *successor))
    return edge_pixels
def scanPoints(pts):
    """Return the x-coordinate extremes across each y coordinate of a convex polygon.

    Used to evaluate the integral bounds for the weighted CVT computation.

    Args:
        pts: List[(int, int)]. 2D integer coordinates of the polygon vertices
            (assumed convex, closed implicitly between last and first vertex).

    Returns:
        For each integer horizontal crossed by the polygon, the two extreme
        intersection points (x1, y), (x2, y) with x1 < x2, sorted
        lexicographically by y then x.
    """
    boundary = sorted(set(rasterizePolygon(pts)), key=lambda p: (p[1], p[0]))
    extremes = []
    m = len(boundary)
    for i in range(m):
        prev_y = boundary[i - 1][1]
        cur_y = boundary[i][1]
        next_y = boundary[(i + 1) % m][1]
        # Keep a point iff exactly one neighbour lies on a different row,
        # i.e. it starts or ends a horizontal run of boundary pixels.
        if (prev_y != cur_y) != (next_y != cur_y):
            extremes.append(boundary[i])
    return extremes
def trunc(x, y, w, h):
    """Clamp the point (x, y) into the box [0, w-1] x [0, h-1].

    Args:
        x: the x-coordinate of a point
        y: the y-coordinate of a point
        w: the width of the truncation box
        h: the height of the truncation box
    """
    def clamp(value, size):
        # Lower bound first, then upper bound (same order as the original).
        return min(max(value, 0), size - 1)
    return clamp(x, w), clamp(y, h)
def wcvt_denominator(spts, P):
    """Denominator of the weighted-centroid integral (total mass of the cell).

    Args:
        spts: scanline endpoints from scanPoints(); elements 2*i and 2*i+1
            are the left/right ends of one horizontal row.
        P: the pre-computed partial integral from [Secord 02] (cumulative sum
            of rho along the x axis).

    Returns:
        The value of the denominator integral.
    """
    total = 0
    w, h = P.shape
    for i in range(0, len(spts), 2):
        # Clamp both endpoints onto the grid (inlined trunc()).
        x1 = min(max(spts[i][0], 0), w - 1)
        y = min(max(spts[i][1], 0), h - 1)
        x2 = min(max(spts[i + 1][0], 0), w - 1)
        yp = min(max(spts[i + 1][1], 0), h - 1)
        if y != yp:
            # Paired scan points should always share a row.
            print("ERROR in wcvt_denominator")
        total += P[x2][y] - P[x1][y]
    return total
def wcvt_ynumerator(spts, P):
    """Numerator for the y-coordinate of the weighted-centroid integral.

    Args:
        spts: scanline endpoints from scanPoints(); elements 2*i and 2*i+1
            are the left/right ends of one horizontal row.
        P: the pre-computed partial integral from [Secord 02] (cumulative sum
            of rho along the x axis).

    Returns:
        The y-coordinate numerator of the integral.
    """
    total = 0
    w, h = P.shape
    for i in range(0, len(spts), 2):
        # Clamp both endpoints onto the grid (inlined trunc()).
        x1 = min(max(spts[i][0], 0), w - 1)
        y = min(max(spts[i][1], 0), h - 1)
        x2 = min(max(spts[i + 1][0], 0), w - 1)
        yp = min(max(spts[i + 1][1], 0), h - 1)
        if y != yp:
            # Paired scan points should always share a row.
            print("ERROR in wcvt_ynumerator")
        total += y * (P[x2][y] - P[x1][y])
    return total
def wcvt_xnumerator(spts, P, Q):
    """Numerator for the x-coordinate of the weighted-centroid integral.

    Args:
        spts: scanline endpoints from scanPoints(); elements 2*i and 2*i+1
            are the left/right ends of one horizontal row.
        P: the pre-computed partial integral from [Secord 02] (cumulative sum
            of rho along the x axis).
        Q: the second pre-computed partial integral from [Secord 02]
            (cumulative sum of P along the x axis).

    Returns:
        The x-coordinate numerator of the integral.
    """
    total = 0
    w, h = P.shape
    for i in range(0, len(spts), 2):
        # Clamp both endpoints onto the grid (inlined trunc()).
        x1 = min(max(spts[i][0], 0), w - 1)
        y = min(max(spts[i][1], 0), h - 1)
        x2 = min(max(spts[i + 1][0], 0), w - 1)
        yp = min(max(spts[i + 1][1], 0), h - 1)
        if y != yp:
            # Paired scan points should always share a row.
            print("ERROR in wcvt_xnumerator")
        total += (x2 * P[x2][y] - Q[x2][y]) - (x1 * P[x1][y] - Q[x1][y])
    return total
def avg_point(pts):
    """Compute the average of a list of points.

    Args:
        pts: List[(number, number)] A non-empty list of points.

    Returns:
        The average point as a (float, float) tuple.
    """
    scale = 1 / len(pts)
    total_x = sum(p[0] for p in pts)
    total_y = sum(p[1] for p in pts)
    return (total_x * scale, total_y * scale)
def wcvt_centroid(pts, P, Q):
    """Weighted centroid of the Voronoi cell whose corners are `pts`.

    Args:
        pts: List[(int, int)] polygon corners in integer coordinates with no
            repeated end vertex.
        P: the pre-computed partial integral from [Secord 02] (cumulative sum
            of rho along the x axis).
        Q: the second pre-computed partial integral from [Secord 02]
            (cumulative sum of P along the x axis).

    Returns:
        The weighted centroid of the Voronoi region.
    """
    spts = scanPoints(pts)
    mass = wcvt_denominator(spts, P)
    if mass == 0:
        # Zero-density region: fall back to the plain vertex average.
        return avg_point(pts)
    inv_mass = 1 / mass
    return (wcvt_xnumerator(spts, P, Q) * inv_mass,
            wcvt_ynumerator(spts, P) * inv_mass)
def voronoi_polygons(voronoi, diameter):
    """Generate polygons (as vertex arrays) for the regions of a
    scipy.spatial.Voronoi object, in the order of the input points.

    The polygons for the infinite regions are made large enough that all
    points within a distance `diameter` of a Voronoi vertex are contained
    in one of the infinite polygons.

    Args:
        voronoi: a scipy.spatial.Voronoi instance.
        diameter: bound used to extend the open (infinite) regions.

    Yields:
        One (k, 2) numpy array of polygon vertices per input point.

    Author: adapted from a StackOverflow solution at
    https://stackoverflow.com/questions/23901943/voronoi-compute-exact-boundaries-of-every-region
    """
    centroid = voronoi.points.mean(axis=0)
    # Mapping from (input point index, Voronoi point index) to list of
    # unit vectors in the directions of the infinite ridges starting
    # at the Voronoi point and neighbouring the input point.
    ridge_direction = defaultdict(list)
    for (p, q), rv in zip(voronoi.ridge_points, voronoi.ridge_vertices):
        u, v = sorted(rv)
        if u == -1:
            # Infinite ridge starting at ridge point with index v,
            # equidistant from input points with indexes p and q.
            t = voronoi.points[q] - voronoi.points[p]  # tangent
            n = np.array([-t[1], t[0]]) / np.linalg.norm(t)  # normal
            midpoint = voronoi.points[[p, q]].mean(axis=0)
            # Orient the normal away from the centroid of all sites.
            direction = np.sign(np.dot(midpoint - centroid, n)) * n
            ridge_direction[p, v].append(direction)
            ridge_direction[q, v].append(direction)
    for i, r in enumerate(voronoi.point_region):
        region = voronoi.regions[r]
        if -1 not in region:
            # Finite region.
            yield voronoi.vertices[region]
            continue
        # Infinite region.
        inf = region.index(-1)  # Index of vertex at infinity.
        j = region[(inf - 1) % len(region)]  # Index of previous vertex.
        k = region[(inf + 1) % len(region)]  # Index of next vertex.
        if j == k:
            # Region has one Voronoi vertex with two ridges.
            dir_j, dir_k = ridge_direction[i, j]
        else:
            # Region has two Voronoi vertices, each with one ridge.
            # NOTE: this unpack raises if a ridge direction is missing, which
            # can happen when sites collapse (see module docstring).
            dir_j, = ridge_direction[i, j]
            dir_k, = ridge_direction[i, k]
        # Length of ridges needed for the extra edge to lie at least
        # 'diameter' away from all Voronoi vertices.
        length = 2 * diameter / np.linalg.norm(dir_j + dir_k)
        # Polygon consists of finite part plus an extra edge.
        finite_part = voronoi.vertices[region[inf + 1:] + region[:inf]]
        extra_edge = [voronoi.vertices[j] + dir_j * length,
                      voronoi.vertices[k] + dir_k * length]
        yield np.concatenate((finite_part, extra_edge))
def intCoords(coords):
    """Round a list of float coordinate pairs to integer coordinates.

    Args:
        coords: List[(float, float)]

    Returns:
        List[(int, int)]
    """
    rounded = []
    for x, y in coords:
        rounded.append((int(round(x)), int(round(y))))
    return rounded
"shapely.geometry.Polygon",
"scipy.spatial.Voronoi",
"collections.defaultdict",
"numpy.cumsum",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot",
"numpy.concatenate"
] | [((3818, 3840), 'numpy.cumsum', 'np.cumsum', (['rho'], {'axis': '(0)'}), '(rho, axis=0)\n', (3827, 3840), True, 'import numpy as np\n'), ((3851, 3871), 'numpy.cumsum', 'np.cumsum', (['P'], {'axis': '(0)'}), '(P, axis=0)\n', (3860, 3871), True, 'import numpy as np\n'), ((14526, 14543), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (14537, 14543), False, 'from collections import defaultdict\n'), ((3910, 3952), 'numpy.array', 'np.array', (['[[0, 0], [w, 0], [w, h], [0, h]]'], {}), '([[0, 0], [w, 0], [w, h], [0, h]])\n', (3918, 3952), True, 'import numpy as np\n'), ((4373, 4392), 'numpy.array', 'np.array', (['centroids'], {}), '(centroids)\n', (4381, 4392), True, 'import numpy as np\n'), ((16062, 16091), 'numpy.linalg.norm', 'np.linalg.norm', (['(dir_j + dir_k)'], {}), '(dir_j + dir_k)\n', (16076, 16091), True, 'import numpy as np\n'), ((16361, 16402), 'numpy.concatenate', 'np.concatenate', (['(finite_part, extra_edge)'], {}), '((finite_part, extra_edge))\n', (16375, 16402), True, 'import numpy as np\n'), ((4077, 4099), 'scipy.spatial.Voronoi', 'Voronoi', (['current_sites'], {}), '(current_sites)\n', (4084, 4099), False, 'from scipy.spatial import Voronoi\n'), ((14876, 14899), 'numpy.array', 'np.array', (['[-t[1], t[0]]'], {}), '([-t[1], t[0]])\n', (14884, 14899), True, 'import numpy as np\n'), ((14902, 14919), 'numpy.linalg.norm', 'np.linalg.norm', (['t'], {}), '(t)\n', (14916, 14919), True, 'import numpy as np\n'), ((4132, 4142), 'shapely.geometry.Polygon', 'Polygon', (['p'], {}), '(p)\n', (4139, 4142), False, 'from shapely.geometry import Polygon\n'), ((15020, 15050), 'numpy.dot', 'np.dot', (['(midpoint - centroid)', 'n'], {}), '(midpoint - centroid, n)\n', (15026, 15050), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Adapted from https://www.tensorflow.org/tutorials/structured_data/time_series
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# NOTE(review): `df` must already exist in the session before running this
# script — presumably a glucose/CHO/insulin dataframe loaded earlier; confirm.
data = df
#data["y"] = data["glucose"]
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
import matplotlib as mpl
import matplotlib.pyplot as plt
import tensorflow as tf
def resample(data, freq):
    """Resample a glucose/CHO/insulin dataframe to a fixed frequency.

    Args:
        data: dataframe with a ``datetime`` column plus ``glucose``, ``CHO``
            and ``insulin`` columns.
        freq: sampling period in minutes.

    Returns:
        Data resampled onto the grid from the first day at 00:00:00 to the
        last day at (24:00 - freq), at the requested frequency; glucose is
        averaged per bin while CHO and insulin are summed.
    """
    rule = str(freq) + 'min'
    first_day = data.datetime.iloc[0].strftime('%Y-%m-%d')
    start = first_day + " 00:00:00"
    last_day = datetime.strptime(data.datetime.iloc[-1].strftime('%Y-%m-%d'), "%Y-%m-%d")
    end = last_day + timedelta(days=1) - timedelta(minutes=freq)
    full_index = pd.period_range(start=start, end=end, freq=rule).to_timestamp()
    binned = data.resample(rule, on="datetime").agg(
        {'glucose': np.mean, 'CHO': np.sum, "insulin": np.sum})
    binned = binned.reindex(index=full_index).reset_index()
    return binned.rename(columns={"index": "datetime"})
# Resample everything onto a uniform 5-minute grid.
data = resample(data, 5)
# Fill NA's in the glucose trace by interpolation.
data["glucose"].interpolate(method = "polynomial", order = 3, inplace = True)  # set a `limit` here, otherwise negative glucose values can appear
for col in data.columns:
    if "insulin" in col or "CHO" in col:
        data[col] = data[col].fillna(0)
# Prepare the frame for TensorFlow: drop the timestamp column and any
# remaining NA rows, then split chronologically 70/20/10.
data = data.drop(["datetime"], axis=1)
data = data.dropna()
column_indices = {name: i for i, name in enumerate(data.columns)}
n = len(data)
train_df = data[0:int(n*0.7)]
val_df = data[int(n*0.7):int(n*0.9)]
test_df = data[int(n*0.9):]
num_features = data.shape[1]
# The class below only generates the window geometry (sizes/indices), not the windows themselves.
class WindowGenerator():
    """Holds the geometry (sizes, slices, index arrays) of a sliding window
    over the time series, plus references to the train/val/test splits."""

    def __init__(self, input_width, label_width, shift,
                 train_df=train_df, val_df=val_df, test_df=test_df,
                 label_columns=None):
        # Keep references to the raw splits.
        self.train_df = train_df
        self.val_df = val_df
        self.test_df = test_df

        # Label column bookkeeping (only built when labels are restricted).
        self.label_columns = label_columns
        if label_columns is not None:
            self.label_columns_indices = {
                name: i for i, name in enumerate(label_columns)}
        self.column_indices = {
            name: i for i, name in enumerate(train_df.columns)}

        # Window geometry: inputs occupy the first `input_width` steps,
        # labels the last `label_width` steps of the total window.
        self.input_width = input_width
        self.label_width = label_width
        self.shift = shift
        self.total_window_size = input_width + shift
        self.input_slice = slice(0, input_width)
        self.input_indices = np.arange(self.total_window_size)[self.input_slice]
        self.label_start = self.total_window_size - self.label_width
        self.labels_slice = slice(self.label_start, None)
        self.label_indices = np.arange(self.total_window_size)[self.labels_slice]

    def __repr__(self):
        summary = [
            f'Total window size: {self.total_window_size}',
            f'Input indices: {self.input_indices}',
            f'Label indices: {self.label_indices}',
            f'Label column name(s): {self.label_columns}',
        ]
        return '\n'.join(summary)
# Splits a full-size window of features into (inputs, labels); the tensor must
# already have the right window dimensions — the raw dataset cannot be passed
# here directly.
def split_window(self, features):
    inputs = features[:, self.input_slice, :]
    labels = features[:, self.labels_slice, :]
    if self.label_columns is not None:
        selected = [labels[:, :, self.column_indices[name]]
                    for name in self.label_columns]
        labels = tf.stack(selected, axis=-1)
    # Slicing drops static shape information, so restore it manually;
    # this keeps the resulting `tf.data.Dataset`s easier to inspect.
    inputs.set_shape([None, self.input_width, None])
    labels.set_shape([None, self.label_width, None])
    return inputs, labels
WindowGenerator.split_window = split_window
def plot(self, model=None, plot_col='glucose', max_subplots=3):
    """Plot up to `max_subplots` example windows of `plot_col`: the input
    steps as a line, the label steps as green dots and, if `model` is given,
    its predictions as orange crosses."""
    inputs, labels = self.example
    plt.figure(figsize=(12, 8))
    plot_col_index = self.column_indices[plot_col]
    max_n = min(max_subplots, len(inputs))
    for n in range(max_n):
        plt.subplot(3, 1, n+1)
        plt.ylabel(f'{plot_col} [normed]')
        plt.plot(self.input_indices, inputs[n, :, plot_col_index],
                 label='Inputs', marker='.', zorder=-10)
        # When labels are restricted, map the plotted column into the
        # (possibly smaller) label tensor; skip if it is not a label column.
        if self.label_columns:
            label_col_index = self.label_columns_indices.get(plot_col, None)
        else:
            label_col_index = plot_col_index
        if label_col_index is None:
            continue
        plt.scatter(self.label_indices, labels[n, :, label_col_index],
                    edgecolors='k', label='Labels', c='#2ca02c', s=64)
        if model is not None:
            predictions = model(inputs)
            plt.scatter(self.label_indices, predictions[n, :, label_col_index],
                        marker='X', edgecolors='k', label='Predictions',
                        c='#ff7f0e', s=64)
        if n == 0:
            plt.legend()
    plt.xlabel('Time [h]')
WindowGenerator.plot = plot
# Builds a shuffled, batched tf.data.Dataset of (inputs, labels) windows
# from a dataframe.
def make_dataset(self, data):
    as_array = np.array(data, dtype=np.float32)
    windows = tf.keras.preprocessing.timeseries_dataset_from_array(
        data=as_array,
        targets=None,
        sequence_length=self.total_window_size,
        sequence_stride=1,
        shuffle=True,
        batch_size=32,
    )
    return windows.map(self.split_window)
WindowGenerator.make_dataset = make_dataset
# Note: make_dataset does NOT by itself split the dataset into the actual
# windows; to obtain them you have to iterate over the dataset, e.g.:
#d = []
#for example_inputs, example_labels in w1.train.take(1):
#    d.extend([example_inputs, example_labels])
#d[0]
#d[1].numpy()
#
@property
def train(self):
    """Windowed tf.data.Dataset built from the training split."""
    return self.make_dataset(self.train_df)
@property
def val(self):
    """Windowed tf.data.Dataset built from the validation split."""
    return self.make_dataset(self.val_df)
@property
def test(self):
    """Windowed tf.data.Dataset built from the test split."""
    return self.make_dataset(self.test_df)
@property
def example(self):
    """Get and cache an example batch of `inputs, labels` for plotting."""
    result = getattr(self, '_example', None)
    if result is None:
        # No example batch was found, so get one from the `.train` dataset
        result = next(iter(self.train))
        # And cache it for next time
        self._example = result
    return result
# Monkey-patch the properties onto WindowGenerator.
WindowGenerator.train = train
WindowGenerator.val = val
WindowGenerator.test = test
WindowGenerator.example = example
# Each element is an (inputs, label) pair
# Single step models: predict one step ahead from one step of history.
single_step_window = WindowGenerator(
    input_width=1, label_width=1, shift=1,
    label_columns=['glucose'])
single_step_window
single_step_window.train_df
single_step_window.make_dataset(train_df)
# Stack three example slices of the training data into one batch of windows.
example_window = tf.stack([np.array(train_df[:single_step_window.total_window_size]),
                           np.array(train_df[100:100+single_step_window.total_window_size]),
                           np.array(train_df[200:200+single_step_window.total_window_size])])
inputs, labels = single_step_window.split_window(example_window)
for i in inputs:
    print(i)
for i in labels:
    print(i)
for inputs,labels in single_step_window.train.take(1):
    print(f'Inputs shape (batch, time, features): {inputs.shape}')
    print(f'Labels shape (batch, time, features): {labels.shape}')
# NOTE(review): WindowGenerator does not define __iter__, so this loop looks
# like it will raise TypeError — confirm or remove.
for i in single_step_window:
    print(i)
# Single step model: BASELINE (predict "no change").
class Baseline(tf.keras.Model):
    """Naive single-step baseline: the prediction equals the current value
    of the selected label column (identity model)."""

    def __init__(self, label_index=None):
        super().__init__()
        self.label_index = label_index

    def call(self, inputs):
        if self.label_index is None:
            return inputs
        # Select the label column and restore a trailing feature axis.
        return inputs[:, :, self.label_index][:, :, tf.newaxis]
# Evaluate the baseline as a reference point for every later model.
baseline = Baseline(label_index=column_indices['glucose'])
baseline.compile(loss=tf.losses.MeanSquaredError(),
                 metrics=[tf.metrics.MeanAbsoluteError()])
val_performance = {}
performance = {}
val_performance['Baseline'] = baseline.evaluate(single_step_window.val)
performance['Baseline'] = baseline.evaluate(single_step_window.test, verbose=0)
# A wider window (24 steps in, 24 out) used mainly for nicer plots.
wide_window = WindowGenerator(
    input_width=24, label_width=24, shift=1,
    label_columns=['glucose'])
wide_window
wide_window.plot(baseline)
# Single step model - LINEAR: a single dense unit.
linear = tf.keras.Sequential([
    tf.keras.layers.Dense(units=1)
])
MAX_EPOCHS = 20
def compile_and_fit(model, window, patience=2):
    """Compile `model` (MSE loss, Adam, MAE metric) and fit it on `window`'s
    training split, early-stopping on validation loss.

    Args:
        model: a Keras model.
        window: a WindowGenerator exposing `.train` and `.val` datasets.
        patience: epochs without val_loss improvement before stopping.

    Returns:
        The Keras training History object.
    """
    stopper = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                     patience=patience,
                                     mode='min')
    model.compile(loss=tf.losses.MeanSquaredError(),
                  optimizer=tf.optimizers.Adam(),
                  metrics=[tf.metrics.MeanAbsoluteError()])
    return model.fit(window.train, epochs=MAX_EPOCHS,
                     validation_data=window.val,
                     callbacks=[stopper])
history = compile_and_fit(linear, single_step_window)
val_performance['Linear'] = linear.evaluate(single_step_window.val)
performance['Linear'] = linear.evaluate(single_step_window.test, verbose=0)
wide_window.plot(linear)
# Single step model - DENSE: two hidden relu layers.
dense = tf.keras.Sequential([
    tf.keras.layers.Dense(units=64, activation='relu'),
    tf.keras.layers.Dense(units=64, activation='relu'),
    tf.keras.layers.Dense(units=1)
])
history = compile_and_fit(dense, single_step_window)
val_performance['Dense'] = dense.evaluate(single_step_window.val)
performance['Dense'] = dense.evaluate(single_step_window.test, verbose=0)
wide_window.plot(dense)
# Multi step models - DENSE:
# instead of using one step of history to predict one step ahead,
# use 24 past steps to predict the next one.
CONV_WIDTH = 24
conv_window = WindowGenerator(
    input_width=CONV_WIDTH,
    label_width=1,
    shift=1,
    label_columns=['glucose'])
conv_window
multi_step_dense = tf.keras.Sequential([
    # Shape: (time, features) => (time*features)
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(units=32, activation='relu'),
    tf.keras.layers.Dense(units=32, activation='relu'),
    tf.keras.layers.Dense(units=1),
    # Add back the time dimension.
    # Shape: (outputs) => (1, outputs)
    tf.keras.layers.Reshape([1, -1]),
])
history = compile_and_fit(multi_step_dense, conv_window)
val_performance['Multi step dense'] = multi_step_dense.evaluate(conv_window.val)
performance['Multi step dense'] = multi_step_dense.evaluate(conv_window.test, verbose=0)
conv_window.plot(multi_step_dense)
# Multi step model - convolutional NN over the 24-step history.
conv_model = tf.keras.Sequential([
    tf.keras.layers.Conv1D(filters=32,
                           kernel_size=(CONV_WIDTH,),
                           activation='relu'),
    tf.keras.layers.Dense(units=32, activation='relu'),
    tf.keras.layers.Dense(units=1),
])
history = compile_and_fit(conv_model, conv_window)
val_performance['Conv'] = conv_model.evaluate(conv_window.val)
performance['Conv'] = conv_model.evaluate(conv_window.test, verbose=0)
conv_window.plot(conv_model)
# Wider conv window so that 24 labels are produced per window.
LABEL_WIDTH = 24
INPUT_WIDTH = LABEL_WIDTH + (CONV_WIDTH - 1)
wide_conv_window = WindowGenerator(
    input_width=INPUT_WIDTH,
    label_width=LABEL_WIDTH,
    shift=1,
    label_columns=['glucose'])
wide_conv_window
history = compile_and_fit(conv_model, wide_conv_window)
val_performance['Conv'] = conv_model.evaluate(wide_conv_window.val)
performance['Conv'] = conv_model.evaluate(wide_conv_window.test, verbose=0)
wide_conv_window.plot(conv_model)
# LSTM model.
lstm_model = tf.keras.models.Sequential([
    # Shape [batch, time, features] => [batch, time, lstm_units]
    tf.keras.layers.LSTM(32, return_sequences=True),
    # Shape => [batch, time, features]
    tf.keras.layers.Dense(units=1)
])
history = compile_and_fit(lstm_model, wide_window)
val_performance['LSTM'] = lstm_model.evaluate(wide_window.val)
performance['LSTM'] = lstm_model.evaluate(wide_window.test, verbose=0)
wide_window.plot(lstm_model)
# Residual connections.
class ResidualWrapper(tf.keras.Model):
    """Residual connection around a model: the wrapped model predicts a
    per-timestep delta that is added to the input, so
    output = input + model(input)."""

    def __init__(self, model):
        super().__init__()
        self.model = model

    def call(self, inputs, *args, **kwargs):
        # Each timestep's prediction is the previous timestep's input plus
        # the change estimated by the wrapped model.
        return inputs + self.model(inputs, *args, **kwargs)
# Residual LSTM: the LSTM predicts per-step deltas added back onto the input.
residual_lstm = ResidualWrapper(
    tf.keras.Sequential([
        tf.keras.layers.LSTM(32, return_sequences=True),
        tf.keras.layers.Dense(
            num_features,
            # The predicted deltas should start small
            # So initialize the output layer with zeros
            kernel_initializer=tf.initializers.zeros)
    ]))
history = compile_and_fit(residual_lstm, wide_window)
val_performance['Residual LSTM'] = residual_lstm.evaluate(wide_window.val)
performance['Residual LSTM'] = residual_lstm.evaluate(wide_window.test, verbose=0)
print()
| [
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.figure",
"numpy.arange",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.preprocessing.timeseries_dataset_from_array",
"tensorflow.keras.layers.Flatten",
"tensorflow.stack",
"datetime.timedelta",
"... | [((4020, 4047), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (4030, 4047), True, 'import matplotlib.pyplot as plt\n'), ((4952, 4974), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [h]"""'], {}), "('Time [h]')\n", (4962, 4974), True, 'import matplotlib.pyplot as plt\n'), ((5045, 5077), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (5053, 5077), True, 'import numpy as np\n'), ((5085, 5259), 'tensorflow.keras.preprocessing.timeseries_dataset_from_array', 'tf.keras.preprocessing.timeseries_dataset_from_array', ([], {'data': 'data', 'targets': 'None', 'sequence_length': 'self.total_window_size', 'sequence_stride': '(1)', 'shuffle': '(True)', 'batch_size': '(32)'}), '(data=data, targets=\n None, sequence_length=self.total_window_size, sequence_stride=1,\n shuffle=True, batch_size=32)\n', (5137, 5259), True, 'import tensorflow as tf\n'), ((8226, 8313), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': 'patience', 'mode': '"""min"""'}), "(monitor='val_loss', patience=patience,\n mode='min')\n", (8258, 8313), True, 'import tensorflow as tf\n'), ((772, 795), 'datetime.timedelta', 'timedelta', ([], {'minutes': 'freq'}), '(minutes=freq)\n', (781, 795), False, 'from datetime import timedelta, datetime\n'), ((3496, 3592), 'tensorflow.stack', 'tf.stack', (['[labels[:, :, self.column_indices[name]] for name in self.label_columns]'], {'axis': '(-1)'}), '([labels[:, :, self.column_indices[name]] for name in self.\n label_columns], axis=-1)\n', (3504, 3592), True, 'import tensorflow as tf\n'), ((4167, 4191), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(n + 1)'], {}), '(3, 1, n + 1)\n', (4178, 4191), True, 'import matplotlib.pyplot as plt\n'), ((4194, 4228), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""{plot_col} [normed]"""'], {}), "(f'{plot_col} [normed]')\n", (4204, 4228), 
True, 'import matplotlib.pyplot as plt\n'), ((4233, 4335), 'matplotlib.pyplot.plot', 'plt.plot', (['self.input_indices', 'inputs[n, :, plot_col_index]'], {'label': '"""Inputs"""', 'marker': '"""."""', 'zorder': '(-10)'}), "(self.input_indices, inputs[n, :, plot_col_index], label='Inputs',\n marker='.', zorder=-10)\n", (4241, 4335), True, 'import matplotlib.pyplot as plt\n'), ((4546, 4664), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.label_indices', 'labels[n, :, label_col_index]'], {'edgecolors': '"""k"""', 'label': '"""Labels"""', 'c': '"""#2ca02c"""', 's': '(64)'}), "(self.label_indices, labels[n, :, label_col_index], edgecolors=\n 'k', label='Labels', c='#2ca02c', s=64)\n", (4557, 4664), True, 'import matplotlib.pyplot as plt\n'), ((6603, 6660), 'numpy.array', 'np.array', (['train_df[:single_step_window.total_window_size]'], {}), '(train_df[:single_step_window.total_window_size])\n', (6611, 6660), True, 'import numpy as np\n'), ((6689, 6755), 'numpy.array', 'np.array', (['train_df[100:100 + single_step_window.total_window_size]'], {}), '(train_df[100:100 + single_step_window.total_window_size])\n', (6697, 6755), True, 'import numpy as np\n'), ((6782, 6848), 'numpy.array', 'np.array', (['train_df[200:200 + single_step_window.total_window_size]'], {}), '(train_df[200:200 + single_step_window.total_window_size])\n', (6790, 6848), True, 'import numpy as np\n'), ((7615, 7643), 'tensorflow.losses.MeanSquaredError', 'tf.losses.MeanSquaredError', ([], {}), '()\n', (7641, 7643), True, 'import tensorflow as tf\n'), ((8108, 8138), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1)'}), '(units=1)\n', (8129, 8138), True, 'import tensorflow as tf\n'), ((9033, 9083), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(64)', 'activation': '"""relu"""'}), "(units=64, activation='relu')\n", (9054, 9083), True, 'import tensorflow as tf\n'), ((9089, 9139), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], 
{'units': '(64)', 'activation': '"""relu"""'}), "(units=64, activation='relu')\n", (9110, 9139), True, 'import tensorflow as tf\n'), ((9145, 9175), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1)'}), '(units=1)\n', (9166, 9175), True, 'import tensorflow as tf\n'), ((9802, 9827), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (9825, 9827), True, 'import tensorflow as tf\n'), ((9833, 9883), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(32)', 'activation': '"""relu"""'}), "(units=32, activation='relu')\n", (9854, 9883), True, 'import tensorflow as tf\n'), ((9889, 9939), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(32)', 'activation': '"""relu"""'}), "(units=32, activation='relu')\n", (9910, 9939), True, 'import tensorflow as tf\n'), ((9945, 9975), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1)'}), '(units=1)\n', (9966, 9975), True, 'import tensorflow as tf\n'), ((10055, 10087), 'tensorflow.keras.layers.Reshape', 'tf.keras.layers.Reshape', (['[1, -1]'], {}), '([1, -1])\n', (10078, 10087), True, 'import tensorflow as tf\n'), ((10434, 10519), 'tensorflow.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', ([], {'filters': '(32)', 'kernel_size': '(CONV_WIDTH,)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=(CONV_WIDTH,), activation='relu'\n )\n", (10456, 10519), True, 'import tensorflow as tf\n'), ((10574, 10624), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(32)', 'activation': '"""relu"""'}), "(units=32, activation='relu')\n", (10595, 10624), True, 'import tensorflow as tf\n'), ((10630, 10660), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1)'}), '(units=1)\n', (10651, 10660), True, 'import tensorflow as tf\n'), ((11460, 11507), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['(32)'], {'return_sequences': '(True)'}), '(32, 
return_sequences=True)\n', (11480, 11507), True, 'import tensorflow as tf\n'), ((11552, 11582), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1)'}), '(units=1)\n', (11573, 11582), True, 'import tensorflow as tf\n'), ((752, 769), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (761, 769), False, 'from datetime import timedelta, datetime\n'), ((2705, 2738), 'numpy.arange', 'np.arange', (['self.total_window_size'], {}), '(self.total_window_size)\n', (2714, 2738), True, 'import numpy as np\n'), ((2902, 2935), 'numpy.arange', 'np.arange', (['self.total_window_size'], {}), '(self.total_window_size)\n', (2911, 2935), True, 'import numpy as np\n'), ((4742, 4882), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.label_indices', 'predictions[n, :, label_col_index]'], {'marker': '"""X"""', 'edgecolors': '"""k"""', 'label': '"""Predictions"""', 'c': '"""#ff7f0e"""', 's': '(64)'}), "(self.label_indices, predictions[n, :, label_col_index], marker=\n 'X', edgecolors='k', label='Predictions', c='#ff7f0e', s=64)\n", (4753, 4882), True, 'import matplotlib.pyplot as plt\n'), ((4936, 4948), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4946, 4948), True, 'import matplotlib.pyplot as plt\n'), ((7671, 7701), 'tensorflow.metrics.MeanAbsoluteError', 'tf.metrics.MeanAbsoluteError', ([], {}), '()\n', (7699, 7701), True, 'import tensorflow as tf\n'), ((8436, 8464), 'tensorflow.losses.MeanSquaredError', 'tf.losses.MeanSquaredError', ([], {}), '()\n', (8462, 8464), True, 'import tensorflow as tf\n'), ((8492, 8512), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {}), '()\n', (8510, 8512), True, 'import tensorflow as tf\n'), ((12255, 12302), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['(32)'], {'return_sequences': '(True)'}), '(32, return_sequences=True)\n', (12275, 12302), True, 'import tensorflow as tf\n'), ((12308, 12385), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_features'], 
{'kernel_initializer': 'tf.initializers.zeros'}), '(num_features, kernel_initializer=tf.initializers.zeros)\n', (12329, 12385), True, 'import tensorflow as tf\n'), ((8539, 8569), 'tensorflow.metrics.MeanAbsoluteError', 'tf.metrics.MeanAbsoluteError', ([], {}), '()\n', (8567, 8569), True, 'import tensorflow as tf\n')] |
import ctypes
from ctypes import c_ulonglong, cast
from _ctypes import POINTER, addressof, sizeof
try:
from mcculw import ul
from mcculw.enums import ScanOptions, FunctionType, Status, ChannelType, ULRange, \
InterfaceType, TriggerSource, TriggerSensitivity, TriggerEvent, InfoType, BoardInfo
from mcculw.ul import ULError
except:
print('mcculw APT did not load properly - if needed, ensure that DLL has been installed!')
import numpy as np
import time
import os
from _ctypes import POINTER, addressof, sizeof
from ctypes import c_double, cast
import time
# from builtins import * # @UnusedWildImport
import threading
from scipy import signal
# from PyDAQmx import *
board_num = 0
channel_intSphere = 0
channel_ref = 2
# ai_range = ULRange.BIP5VOLTS
max_rate = 50e3
class DAQ(object):
def __init__(self, channel_intSphere = 0, channel_ref = 2, rate = 50000, dwelltime = None, counts = 2500, extclock = False, countsPerTrigger = 35, countsPulseDuration = 15):
self.board_num = 0
# self.ai_range = ULRange.BIP5VOLTS
self.__rate = rate
self.__dwelltime = dwelltime
self.acquiringBG = False
self.useExtClock = extclock
self.countsPerTrigger = countsPerTrigger
self.countsPulseDuration = countsPulseDuration
self.useFilter = False
self.criticalFrequency = 100
# prioritize dwelltime argument when setting counts/rate. if none provided, use explicitly provided counts
if dwelltime is not None:
self.__countsPerChannel = round(self.__dwelltime * self.__rate) #counts per channel = rate (Hz) * dwelltime (s)
else:
self.__countsPerChannel = counts
self.__dwelltime = self.__countsPerChannel / self.__rate
self.channels = {
'Label': ['IntSphere', 'Reference'],
'Number': [channel_intSphere, channel_ref],
'Type': [ChannelType.ANALOG_DIFF, ChannelType.ANALOG_DIFF],
'Gain': [ULRange.BIP5VOLTS, ULRange.BIP5VOLTS]
}
# Try connecting to the DAQ
try:
self.connect()
# If error "mcculw.ul.ULError: Error 1026: Board number already in use", pass
except:
print("DAQ is already connected.")
@property
def dwelltime(self):
return self.__dwelltime
@dwelltime.setter
def dwelltime(self, x):
# sets daq counts to match desired measurement time (x, in seconds)
self.__dwelltime = x
self.__countsPerChannel = round(self.__dwelltime * self.__rate)
print('Dwelltime: {0} s\nCounts: {1}\nRate: {2} Hz'.format(self.__dwelltime, self.__countsPerChannel, self.__rate))
@property
def rate(self):
return self.__rate
@rate.setter
def rate(self, x):
# sets daq counting rate, adjusts countsPerChannel to preserve dwelltime
x = round(x) #only integer values allowed
if x > max_rate:
print('Desired rate ({0} Hz) is greater than max allowed rate ({1} Hz): setting rate to {1} Hz.'.format(x, max_rate))
x = max_rate
self.__rate = x
self.__countsPerChannel = round(self.__rate * self.__dwelltime)
print('Dwelltime: {0} s\nCounts: {1}\nRate: {2} Hz'.format(self.__dwelltime, self.__countsPerChannel, self.__rate))
@property
def counts(self):
return self.__countsPerChannel
@rate.setter
def counts(self, x):
# sets daq counting rate, adjusts countsPerChannel to preserve dwelltime
self.__countsPerChannel = round(x) #only integer values allowed
newrate = round(self.__countsPerChannel * self.__dwelltime)
if newrate > max_rate:
print('Desired rate ({0} Hz) is greater than max allowed rate ({1} Hz): setting rate to {1} Hz.'.format(x, max_rate))
newrate = max_rate
self.__dwelltime = self.__countsPerChannel * newrate
self.__rate = newrate
print('Dwelltime: {0} s\nCounts: {1}\nRate: {2} Hz'.format(self.__dwelltime, self.__countsPerChannel, self.__rate))
# connects the daq device
def connect(self):
#connects to first MCC DAQ device detected. Assuming we only have the USB-1808
devices = ul.get_daq_device_inventory(InterfaceType.ANY)
ul.create_daq_device(board_num, devices[0])
return True
# disconnects the daq device
def disconnect(self):
ul.release_daq_device(self.board_num)
return True
def read(self, processPulseTrain = False):
if self.useExtClock:
# scan_options = ScanOptions.FOREGROUND | ScanOptions.SCALEDATA | ScanOptions.EXTCLOCK
scan_options = ScanOptions.FOREGROUND | ScanOptions.SCALEDATA | ScanOptions.EXTTRIGGER # | ScanOptions.RETRIGMODE
else:
scan_options = ScanOptions.FOREGROUND | ScanOptions.SCALEDATA
ul.set_config(
info_type = InfoType.BOARDINFO,
board_num = self.board_num,
dev_num = 0, #value here is ignored
config_item = BoardInfo.ADTRIGCOUNT,
config_val = 0 #number of samples to take per trigger. 0 = continuous triggering
)
channelList = []
channelNumbers = []
low_chan = min(self.channels['Number'])
high_chan = max(self.channels['Number'])
for cnum in range(low_chan, high_chan+1):
if cnum in self.channels['Number']:
cidx = self.channels['Number'].index(cnum)
cname = self.channels['Label'][cidx]
else:
cname = 'Dummy'
channelList.append(cname)
num_chans = len(channelList)
totalCount = num_chans * self.__countsPerChannel
memhandle = ul.scaled_win_buf_alloc(totalCount)
ctypesArray = ctypes.cast(memhandle, ctypes.POINTER(ctypes.c_double))
ul.a_in_scan(
board_num = self.board_num,
low_chan = low_chan,
high_chan = high_chan,
num_points = totalCount,
rate = self.__rate,
ul_range = ULRange.BIP5VOLTS,
memhandle = memhandle,
options = scan_options
)
data = {}
for ch in channelList:
data[ch] = {
'Raw':[],
'Mean': None,
'Std': None
}
dataIndex = 0
for each in range(self.__countsPerChannel):
for ch in channelList:
data[ch]['Raw'].append(ctypesArray[dataIndex])
dataIndex += 1
data.pop('Dummy') #toss dummy data from middle channels
for ch in data.keys():
data[ch]['Mean'] = np.mean(data[ch]['Raw'])
data[ch]['Std'] = np.std(data[ch]['Raw'])
# data['Reference']['Mean'] = np.ones(data['Reference']['Mean'].shape) #set reference detector readings to 1
ul.win_buf_free(memhandle)
if self.useFilter:
data = self.filterSignal(data)
if processPulseTrain:
data = self.processPulseTrain(data)
return data
def processPulseTrain(self, readData):
data = {}
for ch in self.channels['Label']:
numPulses = int(len(readData[ch]['Raw']) / self.countsPerTrigger)
ill = np.zeros((numPulses,))
dark = np.zeros((numPulses,))
for i in range(numPulses):
startIdx = (i-1) *self.countsPerTrigger
endIdx = (i*self.countsPerTrigger) - 1
ill[i] = np.max(readData[ch]['Raw'][startIdx:endIdx]) #get the second measurement from this pulse (peak value)
dark[i] = np.mean(readData[ch]['Raw'][startIdx+self.countsPulseDuration:endIdx]) #15 is currently hardcoded for 50 cts per pulse, basically want to catch the portion of signal after pulse has completed
data[ch] = {
'Raw': readData[ch]['Raw'],
'AllIlluminated': ill,
'MeanIlluminated': ill.mean(),
'StdIlluminated': ill.std(),
'AllDark': dark,
'MeanDark': dark.mean(),
'StdDark': dark.std()
}
return data
def filterSignal(self, readData):
data = {}
for ch in self.channels['Label']:
if ch is 'IntSphere':
data[ch] = readData[ch]
else:
raw = readData[ch]['Raw']
b, a = signal.butter(3, self.criticalFrequency, fs = self.__rate)
filtered = signal.filtfilt(b, a, raw)[500:]
data[ch] = {
'Raw': raw,
'Filtered': filtered,
'Mean': filtered.mean(),
'Std': filtered.std()
}
return data
def startBG(self, filepath = "tempfile.dat"):
#starts background DAQ acquisition. Data is written to file designated by filepath
self.acquiringBG = True
self.filepathBG = filepath
self.threadBG = threading.Thread(target = self._readBG, args = (filepath,))
self.threadBG.start()
def stopBG(self, removefile = True):
#stops background DAQ acquisition, returns timestamps + data stored in file
self.acquiringBG = False
self.threadBG.join()
data = np.genfromtxt(self.filepathBG, delimiter = ',')
numpts = data.shape[0]
time = np.linspace(0,numpts,numpts+1)[1:] / self.__rate
if removefile:
os.remove(self.filepathBG)
return time, data
def _readBG(self, file_name):
# file_name = 'C:\\Users\\PVGroup\\Desktop\\frgmapper\\Data\\20190913\\test.data'
# totalCount = len(self.channels['Number']) * self.__countsPerChannel
# memhandle = ul.win_buf_alloc_64(totalCount)
# ctypesArray = ctypes.cast(memhandle, ctypes.POINTER(ctypes.c_ulonglong))
# The size of the UL buffer to create, in seconds
buffer_size_seconds = 2
# The number of buffers to write. After this number of UL buffers are
# written to file, the example will be stopped.
num_buffers_to_write = 2
low_chan = 0
high_chan = 1
num_chans = high_chan - low_chan + 1
# Create a circular buffer that can hold buffer_size_seconds worth of
# data, or at least 10 points (this may need to be adjusted to prevent
# a buffer overrun)
points_per_channel = max(self.__rate * buffer_size_seconds, 10)
# Some hardware requires that the total_count is an integer multiple
# of the packet size. For this case, calculate a points_per_channel
# that is equal to or just above the points_per_channel selected
# which matches that requirement.
# if ai_props.continuous_requires_packet_size_multiple:
# packet_size = ai_props.packet_size
# remainder = points_per_channel % packet_size
# if remainder != 0:
# points_per_channel += packet_size - remainder
ul_buffer_count = points_per_channel * num_chans
# Write the UL buffer to the file num_buffers_to_write times.
points_to_write = ul_buffer_count * num_buffers_to_write
# When handling the buffer, we will read 1/10 of the buffer at a time
write_chunk_size = int(ul_buffer_count / 100)
if self.useExtClock:
scan_options = ScanOptions.BACKGROUND | ScanOptions.CONTINUOUS | ScanOptions.SCALEDATA | ScanOptions.EXTCLOCK
else:
scan_options = ScanOptions.BACKGROUND | ScanOptions.CONTINUOUS | ScanOptions.SCALEDATA
memhandle = ul.scaled_win_buf_alloc(ul_buffer_count)
# Allocate an array of doubles temporary storage of the data
write_chunk_array = (c_double * write_chunk_size)()
# Check if the buffer was successfully allocated
if not memhandle:
print("Failed to allocate memory.")
return
try:
# Start the scan
ul.daq_in_scan(
board_num = self.board_num,
chan_list = self.channels['Number'],
chan_type_list = self.channels['Type'],
gain_list = self.channels['Gain'],
chan_count = len(self.channels['Number']),
rate = self.__rate,
pretrig_count = 0,
total_count = ul_buffer_count,
memhandle = memhandle,
options = scan_options
)
status = Status.IDLE
# Wait for the scan to start fully
while(status == Status.IDLE):
status, _, _ = ul.get_status(
board_num, FunctionType.DAQIFUNCTION)
# Create a file for storing the data
with open(file_name, 'w') as f:
# print('Writing data to ' + file_name, end='')
# Write a header to the file
# for chan_num in range(low_chan, high_chan + 1):
# f.write('Channel ' + str(chan_num) + ',')
# f.write(u'\n')
# Start the write loop
prev_count = 0
prev_index = 0
write_ch_num = low_chan
keepReading = True
while status != Status.IDLE and keepReading:
# Get the latest counts
status, curr_count, _ = ul.get_status(
board_num, FunctionType.DAQIFUNCTION)
new_data_count = curr_count - prev_count
# Check for a buffer overrun before copying the data, so
# that no attempts are made to copy more than a full buffer
# of data
if new_data_count > ul_buffer_count:
# Print an error and stop writing
ul.stop_background(board_num, FunctionType.DAQIFUNCTION)
print("A buffer overrun occurred")
break
# Check if a chunk is available
if new_data_count > write_chunk_size:
wrote_chunk = True
# Copy the current data to a new array
# Check if the data wraps around the end of the UL
# buffer. Multiple copy operations will be required.
if prev_index + write_chunk_size > ul_buffer_count - 1:
first_chunk_size = ul_buffer_count - prev_index
second_chunk_size = (
write_chunk_size - first_chunk_size)
# Copy the first chunk of data to the
# write_chunk_array
ul.scaled_win_buf_to_array(
memhandle, write_chunk_array, prev_index,
first_chunk_size)
# Create a pointer to the location in
# write_chunk_array where we want to copy the
# remaining data
second_chunk_pointer = cast(
addressof(write_chunk_array) + first_chunk_size
* sizeof(c_double), POINTER(c_double))
# Copy the second chunk of data to the
# write_chunk_array
ul.scaled_win_buf_to_array(
memhandle, second_chunk_pointer,
0, second_chunk_size)
else:
# Copy the data to the write_chunk_array
ul.scaled_win_buf_to_array(
memhandle, write_chunk_array, prev_index,
write_chunk_size)
# Check for a buffer overrun just after copying the data
# from the UL buffer. This will ensure that the data was
# not overwritten in the UL buffer before the copy was
# completed. This should be done before writing to the
# file, so that corrupt data does not end up in it.
status, curr_count, _ = ul.get_status(
board_num, FunctionType.DAQIFUNCTION)
if curr_count - prev_count > ul_buffer_count:
# Print an error and stop writing
ul.stop_background(board_num, FunctionType.DAQIFUNCTION)
print("A buffer overrun occurred")
break
for i in range(write_chunk_size):
f.write(str(write_chunk_array[i]))
write_ch_num += 1
if write_ch_num == high_chan + 1:
write_ch_num = low_chan
f.write(u'\n')
else:
f.write(',')
else:
wrote_chunk = False
if wrote_chunk:
# Increment prev_count by the chunk size
prev_count += write_chunk_size
# Increment prev_index by the chunk size
prev_index += write_chunk_size
# Wrap prev_index to the size of the UL buffer
prev_index %= ul_buffer_count
if not self.acquiringBG: #make sure to wait until after writing to check if we should stop to avoid truncation
keepReading = False
# if prev_count >= points_to_write:
# break
# f.write('-----\n')
# print('.', end='')
else:
# Wait a short amount of time for more data to be
# acquired.
time.sleep(0.01)
ul.stop_background(board_num, FunctionType.DAQIFUNCTION)
except ULError as e:
pass
finally:
# print('Done')
# Free the buffer in a finally block to prevent errors from causing
# a memory leak.
ul.win_buf_free(memhandle)
| [
"mcculw.ul.scaled_win_buf_alloc",
"os.remove",
"numpy.mean",
"mcculw.ul.a_in_scan",
"mcculw.ul.release_daq_device",
"numpy.std",
"numpy.genfromtxt",
"numpy.max",
"mcculw.ul.create_daq_device",
"mcculw.ul.scaled_win_buf_to_array",
"numpy.linspace",
"scipy.signal.butter",
"ctypes.POINTER",
"... | [((3822, 3868), 'mcculw.ul.get_daq_device_inventory', 'ul.get_daq_device_inventory', (['InterfaceType.ANY'], {}), '(InterfaceType.ANY)\n', (3849, 3868), False, 'from mcculw import ul\n'), ((3871, 3914), 'mcculw.ul.create_daq_device', 'ul.create_daq_device', (['board_num', 'devices[0]'], {}), '(board_num, devices[0])\n', (3891, 3914), False, 'from mcculw import ul\n'), ((3985, 4022), 'mcculw.ul.release_daq_device', 'ul.release_daq_device', (['self.board_num'], {}), '(self.board_num)\n', (4006, 4022), False, 'from mcculw import ul\n'), ((5100, 5135), 'mcculw.ul.scaled_win_buf_alloc', 'ul.scaled_win_buf_alloc', (['totalCount'], {}), '(totalCount)\n', (5123, 5135), False, 'from mcculw import ul\n'), ((5211, 5411), 'mcculw.ul.a_in_scan', 'ul.a_in_scan', ([], {'board_num': 'self.board_num', 'low_chan': 'low_chan', 'high_chan': 'high_chan', 'num_points': 'totalCount', 'rate': 'self.__rate', 'ul_range': 'ULRange.BIP5VOLTS', 'memhandle': 'memhandle', 'options': 'scan_options'}), '(board_num=self.board_num, low_chan=low_chan, high_chan=\n high_chan, num_points=totalCount, rate=self.__rate, ul_range=ULRange.\n BIP5VOLTS, memhandle=memhandle, options=scan_options)\n', (5223, 5411), False, 'from mcculw import ul\n'), ((6046, 6072), 'mcculw.ul.win_buf_free', 'ul.win_buf_free', (['memhandle'], {}), '(memhandle)\n', (6061, 6072), False, 'from mcculw import ul\n'), ((7757, 7812), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._readBG', 'args': '(filepath,)'}), '(target=self._readBG, args=(filepath,))\n', (7773, 7812), False, 'import threading\n'), ((8017, 8062), 'numpy.genfromtxt', 'np.genfromtxt', (['self.filepathBG'], {'delimiter': '""","""'}), "(self.filepathBG, delimiter=',')\n", (8030, 8062), True, 'import numpy as np\n'), ((10078, 10118), 'mcculw.ul.scaled_win_buf_alloc', 'ul.scaled_win_buf_alloc', (['ul_buffer_count'], {}), '(ul_buffer_count)\n', (10101, 10118), False, 'from mcculw import ul\n'), ((4391, 4524), 'mcculw.ul.set_config', 'ul.set_config', 
([], {'info_type': 'InfoType.BOARDINFO', 'board_num': 'self.board_num', 'dev_num': '(0)', 'config_item': 'BoardInfo.ADTRIGCOUNT', 'config_val': '(0)'}), '(info_type=InfoType.BOARDINFO, board_num=self.board_num,\n dev_num=0, config_item=BoardInfo.ADTRIGCOUNT, config_val=0)\n', (4404, 4524), False, 'from mcculw import ul\n'), ((5175, 5206), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_double'], {}), '(ctypes.c_double)\n', (5189, 5206), False, 'import ctypes\n'), ((5862, 5886), 'numpy.mean', 'np.mean', (["data[ch]['Raw']"], {}), "(data[ch]['Raw'])\n", (5869, 5886), True, 'import numpy as np\n'), ((5908, 5931), 'numpy.std', 'np.std', (["data[ch]['Raw']"], {}), "(data[ch]['Raw'])\n", (5914, 5931), True, 'import numpy as np\n'), ((6376, 6398), 'numpy.zeros', 'np.zeros', (['(numPulses,)'], {}), '((numPulses,))\n', (6384, 6398), True, 'import numpy as np\n'), ((6409, 6431), 'numpy.zeros', 'np.zeros', (['(numPulses,)'], {}), '((numPulses,))\n', (6417, 6431), True, 'import numpy as np\n'), ((8168, 8194), 'os.remove', 'os.remove', (['self.filepathBG'], {}), '(self.filepathBG)\n', (8177, 8194), False, 'import os\n'), ((14666, 14722), 'mcculw.ul.stop_background', 'ul.stop_background', (['board_num', 'FunctionType.DAQIFUNCTION'], {}), '(board_num, FunctionType.DAQIFUNCTION)\n', (14684, 14722), False, 'from mcculw import ul\n'), ((14879, 14905), 'mcculw.ul.win_buf_free', 'ul.win_buf_free', (['memhandle'], {}), '(memhandle)\n', (14894, 14905), False, 'from mcculw import ul\n'), ((6564, 6608), 'numpy.max', 'np.max', (["readData[ch]['Raw'][startIdx:endIdx]"], {}), "(readData[ch]['Raw'][startIdx:endIdx])\n", (6570, 6608), True, 'import numpy as np\n'), ((6680, 6752), 'numpy.mean', 'np.mean', (["readData[ch]['Raw'][startIdx + self.countsPulseDuration:endIdx]"], {}), "(readData[ch]['Raw'][startIdx + self.countsPulseDuration:endIdx])\n", (6687, 6752), True, 'import numpy as np\n'), ((7305, 7361), 'scipy.signal.butter', 'signal.butter', (['(3)', 'self.criticalFrequency'], {'fs': 
'self.__rate'}), '(3, self.criticalFrequency, fs=self.__rate)\n', (7318, 7361), False, 'from scipy import signal\n'), ((8099, 8133), 'numpy.linspace', 'np.linspace', (['(0)', 'numpts', '(numpts + 1)'], {}), '(0, numpts, numpts + 1)\n', (8110, 8133), True, 'import numpy as np\n'), ((10865, 10916), 'mcculw.ul.get_status', 'ul.get_status', (['board_num', 'FunctionType.DAQIFUNCTION'], {}), '(board_num, FunctionType.DAQIFUNCTION)\n', (10878, 10916), False, 'from mcculw import ul\n'), ((7379, 7405), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'raw'], {}), '(b, a, raw)\n', (7394, 7405), False, 'from scipy import signal\n'), ((11433, 11484), 'mcculw.ul.get_status', 'ul.get_status', (['board_num', 'FunctionType.DAQIFUNCTION'], {}), '(board_num, FunctionType.DAQIFUNCTION)\n', (11446, 11484), False, 'from mcculw import ul\n'), ((11770, 11826), 'mcculw.ul.stop_background', 'ul.stop_background', (['board_num', 'FunctionType.DAQIFUNCTION'], {}), '(board_num, FunctionType.DAQIFUNCTION)\n', (11788, 11826), False, 'from mcculw import ul\n'), ((13473, 13524), 'mcculw.ul.get_status', 'ul.get_status', (['board_num', 'FunctionType.DAQIFUNCTION'], {}), '(board_num, FunctionType.DAQIFUNCTION)\n', (13486, 13524), False, 'from mcculw import ul\n'), ((14645, 14661), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (14655, 14661), False, 'import time\n'), ((12419, 12509), 'mcculw.ul.scaled_win_buf_to_array', 'ul.scaled_win_buf_to_array', (['memhandle', 'write_chunk_array', 'prev_index', 'first_chunk_size'], {}), '(memhandle, write_chunk_array, prev_index,\n first_chunk_size)\n', (12445, 12509), False, 'from mcculw import ul\n'), ((12866, 12951), 'mcculw.ul.scaled_win_buf_to_array', 'ul.scaled_win_buf_to_array', (['memhandle', 'second_chunk_pointer', '(0)', 'second_chunk_size'], {}), '(memhandle, second_chunk_pointer, 0,\n second_chunk_size)\n', (12892, 12951), False, 'from mcculw import ul\n'), ((13032, 13122), 'mcculw.ul.scaled_win_buf_to_array', 
'ul.scaled_win_buf_to_array', (['memhandle', 'write_chunk_array', 'prev_index', 'write_chunk_size'], {}), '(memhandle, write_chunk_array, prev_index,\n write_chunk_size)\n', (13058, 13122), False, 'from mcculw import ul\n'), ((13633, 13689), 'mcculw.ul.stop_background', 'ul.stop_background', (['board_num', 'FunctionType.DAQIFUNCTION'], {}), '(board_num, FunctionType.DAQIFUNCTION)\n', (13651, 13689), False, 'from mcculw import ul\n'), ((12766, 12783), '_ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (12773, 12783), False, 'from _ctypes import POINTER, addressof, sizeof\n'), ((12690, 12718), '_ctypes.addressof', 'addressof', (['write_chunk_array'], {}), '(write_chunk_array)\n', (12699, 12718), False, 'from _ctypes import POINTER, addressof, sizeof\n'), ((12748, 12764), '_ctypes.sizeof', 'sizeof', (['c_double'], {}), '(c_double)\n', (12754, 12764), False, 'from _ctypes import POINTER, addressof, sizeof\n')] |
import numpy as np
def linear_interpolation(x_data, y_data, point):
if len(x_data) != len(y_data):
raise Exception("X and Y vectors must have equal number of elements.")
if x_data.size < 2:
raise Exception("X and Y vectors have to contain at least 2 elements.")
def _interpolate(x1, x2, y1, y2, point):
return ((y2 - y1) * point + x2 * y1 - x1 * y2) / (x2 - x1)
for index, x in np.ndenumerate(x_data[:-1]):
i = index[0]
if point >= x and point <= x_data[i + 1]:
x1 = x
x2 = x_data[i + 1]
y1 = y_data[i]
y2 = y_data[i + 1]
return _interpolate(x1, x2, y1, y2, point)
| [
"numpy.ndenumerate"
] | [((423, 450), 'numpy.ndenumerate', 'np.ndenumerate', (['x_data[:-1]'], {}), '(x_data[:-1])\n', (437, 450), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
: Project - Dialated CRF
: Validation for GANet
: Author - <NAME>
: Institute - University of Kansas
: Date - 4/26/2021
: Last Update - 7/10/2021
: License: Apache 2.0
"""
import numpy as np
import math
import torch
from pathlib import Path
from utils.configuration import CONFIG
'''
Options for save metrics to disk
"all": Write all metrics
"iou": Jaccard index per cls
"acc": correct pixels per cls
"dice": F1-score per cls
"precision": Precisoin per cls
"recall": Recall per cls
"roc": TPR and FPR for calculating ROC per cls
"mcc": Phi coefficient per cls
'''
# Metric for Offline/Online CPU mode
class Metrics:
def __init__(self, label, gt, one_hot=False):
assert gt.ndim == 2, "groundtruth must be grayscale image"
# one_hot label to unary
if one_hot:
assert label.ndim == 3, "label must be 3-dimensional for one-hot encoding"
label = np.argmax(label, axis = 2)
else:
assert label.ndim == 2, "label must be 2-dimensional"
self.H, self.W = CONFIG["SIZE"]
label_area, gt_area = np.where(label == CONFIG["NUM_CLS"] - 1), \
np.where(gt == CONFIG["NUM_CLS"] - 1)
self.label_area = set(zip(label_area[0], label_area[1]))
self.gt_area = set(zip(gt_area[0], gt_area[1]))
self.TP_FN = len(self.gt_area)
self.TP_FP = len(self.label_area)
self.TP = len(self.label_area.intersection(self.gt_area))
self.FP = self.TP_FP - self.TP
self.TN = self.H * self.W - self.TP_FN - self.TP_FP + self.TP
self.FN = self.TP_FN - self.TP
# Jaccard: TP/(FP+TP+FN)
def IOU(self) -> np.float32:
UN = self.TP_FN + self.TP_FP
if UN == 0: return 1.0
return np.float32(self.TP / (UN - self.TP + 1e-31))
# acc: TP+TN/(TP+FP+TN+FN)
def ACC(self) -> np.float32:
accuracy = (self.TP + self.TN) / (self.H * self.W)
return np.float32(accuracy)
# dice: Sørensen–Dice coefficient 1/(1/precision + 1/recall)
# precision, recall(aka TPR), FPR
def DICE(self) -> [np.float32]:
if self.TP == self.FN == self.FP == 0: return 1.0
precision = np.float32(self.TP / (self.TP + self.FP + 1e-31))
recall = np.float32(self.TP / (self.TP + self.FN + 1e-31))
dice = np.float32(2 * self.TP / (2 * self.TP + self.FP + self.FN + 1e-31))
return dice
# precision
def PRECISION(self) -> np.float32:
if self.TP == self.FN == self.FP == 0: return 1.0
return np.float32(self.TP / (self.TP + self.FP + 1e-31))
# recall
def RECALL(self) -> np.float32:
if self.TP == self.FN == self.FP == 0: return 1.0
return np.float32(self.TP / (self.TP + self.FN + 1e-31))
# TPR and FPR for ROC curve
def ROC(self) -> [np.float32]:
if self.TP == self.FN == 0 and self.FP == self.TN == 0:
return [1.0, 1.0]
if not (self.TP == self.FN == 0) and self.FP == self.TN == 0:
tpr = np.float32(self.TP / (self.TP + self.FN))
return [tpr, 1.0]
if not (self.FP == self.TN == 0) and self.TP == self.FN == 0:
fpr = np.float32(self.FP / (self.FP + self.TN))
return [1.0, fpr]
tpr = np.float32(self.TP / (self.TP + self.FN))
fpr = np.float32(self.FP / (self.FP + self.TN))
return [tpr, fpr]
# mcc: Matthews correlation coefficient (Phi coefficient)
def MCC(self) -> np.float32:
if self.TP == self.FN == self.FP == 0: return 1.0
N = self.TN + self.TP + self.FN + self.FP
S = (self.TP + self.FN) / N
P = (self.TP + self.FP) / N
if S == 0 or P == 0: return -1.0
if S == 1 or P == 1: return 0.0
return np.float32((self.TP / N - S * P) / math.sqrt(P * S * (1-S) * (1-P)))
# evalute and save results to disk
'''
options:
'all': save all evalutaion metrics to disk
otherwise: specify the metric to be saved, refer to line 19
'''
def save_to_disk(self, name: str, path: Path, option="all"):
path = path.joinpath("evaluation.txt")
if option == "all":
with open(path, "a+") as f:
iou, acc, dice = 100 * self.IOU(), 100 * self.ACC(), 100 * self.DICE()
precsion, recall = 100 * self.PRECISION(), 100 * self.RECALL()
tpr, fpr = self.ROC()
tpr *= 100
fpr *= 100
mcc = 100 * self.MCC()
f.write(f"{name} iou:{iou:.2f} acc:{acc:.2f} precision:{precsion:.2f} "
f"recall:{recall:.2f} dice:{dice:.2f} "
f"tpr:{tpr:.2f} fpr:{fpr:.2f} mcc:{mcc:.2f}\n")
return
# write iou only
if option == "iou":
with open(path, "a+") as f:
iou = 100 * self.IOU()
f.write(f"{name:s} iou:{iou:.2f}\n")
return
# write acc only
if option == "acc":
with open(path, "a+") as f:
acc = 100 * self.self.ACC()
f.write(f"{name:s} acc:{acc:.2f}\n")
return
# write dice only
if option == "dice":
with open(path, "a+") as f:
dice = 100 * self.DICE()
f.write(f"{name:s} dice:{dice:.2f}\n")
return
# write precision only
if option == "precision":
with open(path, "a+") as f:
precision = 100 * self.PRECISION()
f.write(f"{name:s} precision:{precision:.2f}\n")
return
# write recall only
if option == "recall":
with open(path, "a+") as f:
recall = 100 * self.RECALL()
f.write(f"{name:s} precision:{recall:.2f}\n")
return
# write roc only
if option == "roc":
with open(path, "a+") as f:
tpr, fpr = 100 * self.ROC()
f.write(f"{name:s} tpr:{tpr:.2f} fpr:{fpr:.2f}\n")
return
# write mcc only
if option == "mcc":
with open(path, "a+") as f:
mcc = 100 * self.MCC()
f.write(f"{name:s} mcc:{mcc:.2f}\n")
return
# generate evaluations on-the-fly
'''
Return a dict of metrics for further processing, options:
"all" or []: all metrics
[metrics]: selected metrics by names, refer to line 19('roc' -> 'tpr' and 'fpr')
'''
def values(self, options="all"):
varDict = {"iou":None, "acc": None, "dice":None, "precision": None,"recall":None,
"tpr": None, "fpr":None, "mcc": None}
if options == "all" or options == []:
options = ["iou", "acc", "dice", "precision", "recall", "tpr", "fpr", "mcc"]
for metric in options:
if metric == "iou": varDict[metric] = self.IOU()
if metric == "acc": varDict[metric] = self.ACC()
if metric == "dice": varDict[metric] = self.DICE()
if metric == "precision": varDict[metric] = self.PRECISION()
if metric == "recall": varDict[metric] = self.RECALL()
if metric == "tpr": varDict[metric] = self.ROC()[0]
if metric == "fpr": varDict[metric] = self.ROC()[1]
if metric == "mcc": varDict[metric] = self.MCC()
return varDict | [
"numpy.float32",
"numpy.where",
"math.sqrt",
"numpy.argmax"
] | [((1968, 2012), 'numpy.float32', 'np.float32', (['(self.TP / (UN - self.TP + 1e-31))'], {}), '(self.TP / (UN - self.TP + 1e-31))\n', (1978, 2012), True, 'import numpy as np\n'), ((2157, 2177), 'numpy.float32', 'np.float32', (['accuracy'], {}), '(accuracy)\n', (2167, 2177), True, 'import numpy as np\n'), ((2402, 2451), 'numpy.float32', 'np.float32', (['(self.TP / (self.TP + self.FP + 1e-31))'], {}), '(self.TP / (self.TP + self.FP + 1e-31))\n', (2412, 2451), True, 'import numpy as np\n'), ((2470, 2519), 'numpy.float32', 'np.float32', (['(self.TP / (self.TP + self.FN + 1e-31))'], {}), '(self.TP / (self.TP + self.FN + 1e-31))\n', (2480, 2519), True, 'import numpy as np\n'), ((2536, 2603), 'numpy.float32', 'np.float32', (['(2 * self.TP / (2 * self.TP + self.FP + self.FN + 1e-31))'], {}), '(2 * self.TP / (2 * self.TP + self.FP + self.FN + 1e-31))\n', (2546, 2603), True, 'import numpy as np\n'), ((2759, 2808), 'numpy.float32', 'np.float32', (['(self.TP / (self.TP + self.FP + 1e-31))'], {}), '(self.TP / (self.TP + self.FP + 1e-31))\n', (2769, 2808), True, 'import numpy as np\n'), ((2937, 2986), 'numpy.float32', 'np.float32', (['(self.TP / (self.TP + self.FN + 1e-31))'], {}), '(self.TP / (self.TP + self.FN + 1e-31))\n', (2947, 2986), True, 'import numpy as np\n'), ((3501, 3542), 'numpy.float32', 'np.float32', (['(self.TP / (self.TP + self.FN))'], {}), '(self.TP / (self.TP + self.FN))\n', (3511, 3542), True, 'import numpy as np\n'), ((3558, 3599), 'numpy.float32', 'np.float32', (['(self.FP / (self.FP + self.TN))'], {}), '(self.FP / (self.FP + self.TN))\n', (3568, 3599), True, 'import numpy as np\n'), ((1096, 1120), 'numpy.argmax', 'np.argmax', (['label'], {'axis': '(2)'}), '(label, axis=2)\n', (1105, 1120), True, 'import numpy as np\n'), ((1279, 1319), 'numpy.where', 'np.where', (["(label == CONFIG['NUM_CLS'] - 1)"], {}), "(label == CONFIG['NUM_CLS'] - 1)\n", (1287, 1319), True, 'import numpy as np\n'), ((1354, 1391), 'numpy.where', 'np.where', (["(gt == 
CONFIG['NUM_CLS'] - 1)"], {}), "(gt == CONFIG['NUM_CLS'] - 1)\n", (1362, 1391), True, 'import numpy as np\n'), ((3246, 3287), 'numpy.float32', 'np.float32', (['(self.TP / (self.TP + self.FN))'], {}), '(self.TP / (self.TP + self.FN))\n', (3256, 3287), True, 'import numpy as np\n'), ((3411, 3452), 'numpy.float32', 'np.float32', (['(self.FP / (self.FP + self.TN))'], {}), '(self.FP / (self.FP + self.TN))\n', (3421, 3452), True, 'import numpy as np\n'), ((4044, 4080), 'math.sqrt', 'math.sqrt', (['(P * S * (1 - S) * (1 - P))'], {}), '(P * S * (1 - S) * (1 - P))\n', (4053, 4080), False, 'import math\n')] |
import os
import numpy as np
from pyspark.sql import SparkSession
from spark_function import fc, split_array, divide_func, multiply_func, l2norm, concat
def convolve(item_features_df, item_neighbors_df, conv_fc1_w, conv_fc1_b, conv_fc2_w, conv_fc2_b, convolve_hidden_size):
    """Run one PinSage Convolve layer expressed as Spark SQL dataframe ops.

    :param item_features_df: dataframe with columns (item_id, feature)
    :param item_neighbors_df: dataframe with columns (item_id, neighbor, weight)
    :param conv_fc1_w: weight matrix of the neighbour fully-connected layer
    :param conv_fc1_b: bias of the neighbour fully-connected layer
    :param conv_fc2_w: weight matrix of the output fully-connected layer
    :param conv_fc2_b: bias of the output fully-connected layer
    :param convolve_hidden_size: width of the hidden (neighbour) representation
    :return: dataframe with columns (item_id, feature)
    """
    # Pass every neighbour's feature through the first FC layer, then explode
    # the resulting vector into one column per hidden dimension.
    nbr_df = item_features_df.withColumnRenamed('item_id', 'neighbor')
    nbr_df = fc(nbr_df, 'feature', conv_fc1_w, conv_fc1_b, 'feature')
    nbr_df = split_array(nbr_df, col='feature', size=convolve_hidden_size)
    nbr_df = nbr_df.drop('feature')
    # Weighted aggregation of neighbour messages per item (weighted mean).
    joined = item_neighbors_df.join(nbr_df, on='neighbor')
    for idx in range(convolve_hidden_size):
        col_name = 'feature{}'.format(str(idx))
        joined = joined.withColumn(col_name, multiply_func(col_name, 'weight'))
    joined = joined.groupBy('item_id').sum()
    for idx in range(convolve_hidden_size):
        joined = joined.withColumn(
            'feature{}'.format(str(idx)),
            divide_func('sum(feature{})'.format(idx), 'sum(weight)'))
    keep_cols = ['item_id'] + ['feature{}'.format(i) for i in range(convolve_hidden_size)]
    joined = joined.select(keep_cols)
    # Concatenate the aggregated neighbour vector onto the item's own feature
    # and push the result through the second FC layer, then L2-normalize.
    merged = joined.join(item_features_df, on='item_id')
    for idx in range(convolve_hidden_size):
        merged = merged.withColumn('feature', concat('feature', 'feature{}'.format(str(idx))))
    merged = fc(merged, 'feature', conv_fc2_w, conv_fc2_b, 'feature')
    merged = merged.select('item_id', 'feature')
    merged = merged.withColumn('feature', l2norm('feature'))
    return merged
if __name__ == '__main__':
    # Point Spark executors at the conda interpreter used for this project.
    os.environ["PYSPARK_PYTHON"] = '/home/kuer/anaconda3/envs/tf2.2/bin/python' # set spark executor python path
    spark = SparkSession.builder.config("spark.executor.memory", "4g") \
        .config('spark.driver.memory', '4g') \
        .config('spark.executor.instances', '4') \
        .config('spark.sql.shuffle.partitions', '10') \
        .getOrCreate() # config spark, create SparkSql context
    item_neighbors_df = spark.read.json('../data/item-neighbors.json')
    item_features_df = spark.read.json('../data/item-features.json')
    item_neighbors_df.printSchema()
    item_features_df.printSchema()
    # Feature dimensionality is taken from the first row of the feature table.
    embedding_size = len(item_features_df.head()['feature'])
    convolve_hidden_size, convolve_output_size = 32, 16
    # Randomly initialized weights/biases for two stacked Convolve layers.
    # Layer 2's input width is convolve_output_size (layer 1's output).
    conv1_fc1_w = np.random.normal(size=(embedding_size, convolve_hidden_size))
    conv1_fc1_b = np.random.normal(size=(convolve_hidden_size,))
    conv1_fc2_w = np.random.normal(size=(embedding_size + convolve_hidden_size, convolve_output_size))
    conv1_fc2_b = np.random.normal(size=(convolve_output_size,))
    conv2_fc1_w = np.random.normal(size=(convolve_output_size, convolve_hidden_size))
    conv2_fc1_b = np.random.normal(size=(convolve_hidden_size,))
    conv2_fc2_w = np.random.normal(size=(convolve_output_size + convolve_hidden_size, convolve_output_size))
    conv2_fc2_b = np.random.normal(size=(convolve_output_size,))
    # Two-layer PinSage: the second Convolve consumes the first layer's output.
    item_conv1_df = convolve(item_features_df, item_neighbors_df, conv1_fc1_w, conv1_fc1_b, conv1_fc2_w, conv1_fc2_b,
                             convolve_hidden_size)
    item_conv2_df = convolve(item_conv1_df, item_neighbors_df, conv2_fc1_w, conv2_fc1_b, conv2_fc2_w, conv2_fc2_b,
                             convolve_hidden_size)
| [
"spark_function.split_array",
"spark_function.fc",
"pyspark.sql.SparkSession.builder.config",
"numpy.random.normal",
"spark_function.l2norm"
] | [((704, 774), 'spark_function.fc', 'fc', (['neighbor_features_df', '"""feature"""', 'conv_fc1_w', 'conv_fc1_b', '"""feature"""'], {}), "(neighbor_features_df, 'feature', conv_fc1_w, conv_fc1_b, 'feature')\n", (706, 774), False, 'from spark_function import fc, split_array, divide_func, multiply_func, l2norm, concat\n'), ((802, 877), 'spark_function.split_array', 'split_array', (['neighbor_features_df'], {'col': '"""feature"""', 'size': 'convolve_hidden_size'}), "(neighbor_features_df, col='feature', size=convolve_hidden_size)\n", (813, 877), False, 'from spark_function import fc, split_array, divide_func, multiply_func, l2norm, concat\n'), ((1859, 1923), 'spark_function.fc', 'fc', (['item_concat_df', '"""feature"""', 'conv_fc2_w', 'conv_fc2_b', '"""feature"""'], {}), "(item_concat_df, 'feature', conv_fc2_w, conv_fc2_b, 'feature')\n", (1861, 1923), False, 'from spark_function import fc, split_array, divide_func, multiply_func, l2norm, concat\n'), ((2872, 2933), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(embedding_size, convolve_hidden_size)'}), '(size=(embedding_size, convolve_hidden_size))\n', (2888, 2933), True, 'import numpy as np\n'), ((2952, 2998), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(convolve_hidden_size,)'}), '(size=(convolve_hidden_size,))\n', (2968, 2998), True, 'import numpy as np\n'), ((3017, 3105), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(embedding_size + convolve_hidden_size, convolve_output_size)'}), '(size=(embedding_size + convolve_hidden_size,\n convolve_output_size))\n', (3033, 3105), True, 'import numpy as np\n'), ((3120, 3166), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(convolve_output_size,)'}), '(size=(convolve_output_size,))\n', (3136, 3166), True, 'import numpy as np\n'), ((3185, 3252), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(convolve_output_size, convolve_hidden_size)'}), '(size=(convolve_output_size, convolve_hidden_size))\n', (3201, 3252), True, 
'import numpy as np\n'), ((3271, 3317), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(convolve_hidden_size,)'}), '(size=(convolve_hidden_size,))\n', (3287, 3317), True, 'import numpy as np\n'), ((3336, 3430), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(convolve_output_size + convolve_hidden_size, convolve_output_size)'}), '(size=(convolve_output_size + convolve_hidden_size,\n convolve_output_size))\n', (3352, 3430), True, 'import numpy as np\n'), ((3445, 3491), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(convolve_output_size,)'}), '(size=(convolve_output_size,))\n', (3461, 3491), True, 'import numpy as np\n'), ((2047, 2064), 'spark_function.l2norm', 'l2norm', (['"""feature"""'], {}), "('feature')\n", (2053, 2064), False, 'from spark_function import fc, split_array, divide_func, multiply_func, l2norm, concat\n'), ((2247, 2305), 'pyspark.sql.SparkSession.builder.config', 'SparkSession.builder.config', (['"""spark.executor.memory"""', '"""4g"""'], {}), "('spark.executor.memory', '4g')\n", (2274, 2305), False, 'from pyspark.sql import SparkSession\n')] |
import numpy as np
import pandas as pd
class Generator(object):
    """Generate survey-style response data and write it to CSV.

    NOTE(review): several names used below (``scale_min``, ``scale_max``,
    ``num_items``, ``_is_valid``, ``_generate_data``) are not defined in this
    class -- presumably provided by a subclass or elsewhere in the file;
    confirm before use.
    """
    def __init__(self, params):
        # params: dict with at least 'num_respondents' and 'output_file'.
        self.num_respondents = params['num_respondents']
        # NOTE(review): zero columns here, while generate_random_data builds
        # columns from self.num_items -- looks inconsistent; confirm intent.
        self.data_size = (self.num_respondents, 0)
        self._data = None
        self.output_file = params['output_file']
    def generate_random_data(self):
        """Build a DataFrame of uniform random integer answers and return it."""
        self._data = pd.DataFrame(
            # Upper bound is scale_max+1 because randint's high is exclusive.
            np.random.randint(self.scale_min, self.scale_max+1, size=self.data_size),
            index=["r{}".format(r) for r in range(self.num_respondents)],
            columns=["i{}".format(i) for i in range(self.num_items)]
        )
        return self._data
    def generate_data(self):
        """Regenerate data until it passes validation; report iteration count."""
        n_iters = 0
        while not isinstance(self._data, pd.DataFrame) or not self._is_valid():
            self._generate_data()
            n_iters += 1
        print("Num iterations:", n_iters)
    def write(self):
        """Write the generated data to ``self.output_file`` as CSV (no-op if no data)."""
        if not isinstance(self._data, pd.DataFrame):
            print("No data to print")
            return
        self._data.to_csv(self.output_file)
| [
"numpy.random.randint"
] | [((364, 438), 'numpy.random.randint', 'np.random.randint', (['self.scale_min', '(self.scale_max + 1)'], {'size': 'self.data_size'}), '(self.scale_min, self.scale_max + 1, size=self.data_size)\n', (381, 438), True, 'import numpy as np\n')] |
from datetime import datetime
from datetime import timedelta
import re
import calendar
import numpy as np
import roxar_api_utils.ioutil
from .profiles import Profiles
from .keyword_check import is_rate
from .profiles_interpolation import profiles_interpolation
def read_ofm(
        file_name,
        date_format,
        alias_file=None,
        undef_value=None,
        set_nonnegative=True,
        time_shift=True,
        read_type='A'):
    """Read an OFM text file and return its content as a Profiles object.

    Args:
        file_name (str): OFM text file name
        date_format (str): Date format specification, as in 'DD.MM.YYYY'
        alias_file (str): Optional well name alias file, or None
        undef_value (float): Replacement for OFM undefined values, None for OFM default
        set_nonnegative (bool): Replace negative values with zero
        time_shift (bool): True to time-shift to Constant Backwards
        read_type (str): A=All, P=Production, I=Injection

    Returns:
        roxar_api_utils Profiles object with the data read
    """
    wells, well_data, column_order, opts = _read_vol_file(
        file_name, date_format, undef_value, set_nonnegative, time_shift)
    aliasinfo = None
    if alias_file is not None:
        aliasinfo = roxar_api_utils.ioutil.NameAlias(alias_file)
    # Propagate the call-time switches so _set_profiles can honour them.
    opts['time_shift'] = time_shift
    opts['read_type'] = read_type
    return _set_profiles(wells, well_data, column_order, aliasinfo, opts)
def _read_key(line, options, read_order, dat_order, volmul, volprev, line_no):
    """Parse an OFM keyword line and update the reader state in place.

    Args:
        line (str): Upper-cased line containing ``*``-prefixed keywords.
        options (dict): Reader options (units, frequency, flags); mutated.
        read_order (list): Column names in file order; extended.
        dat_order (dict): Data column name -> index in the value list; extended.
        volmul (list): Per-column unit multipliers; extended.
        volprev (list): Per-column previous/default values; extended.
        line_no (int): Line number for error messages.

    Returns:
        Tuple (options, read_order, dat_order, volmul, volprev).

    Raises:
        ValueError: If the line contains an unsupported keyword.
    """
    # Not supported keys:
    not_sup = ['*FILE', '*TABLENAME', '*YY/MM']
    # Columns carrying on-stream time; their presence implies a derived WEFF column.
    weff_keys = ['DAYS', 'OIDAY', 'GIDAY', 'WIDAY', 'UPTIME']
    # Columns that identify the row (date/well) rather than carry data values.
    skip_keys = ['DAY', 'MONTH', 'YEAR', 'DATE', 'WELL']
    for k in not_sup:
        if line.find(k) >= 0:
            errmes = 'Keyword not supported ' + k + ' in line ' + str(line_no)
            raise ValueError(errmes)
    # A *DAY/*DATE line defines the table layout (column order).
    if max(line.find('*DAY'), line.find('*DATE')) >= 0:
        terms = line.split()
        ival = -1
        if options['undef'] is None:
            undef = 0.
        else:
            undef = options['undef']
        for t in terms:
            t = t.replace('*', '')
            # Normalize alternative spellings to canonical column names.
            if t == 'GIDAYS':
                t = 'GIDAY'
            elif t == 'OIDAYS':
                t = 'OIDAY'
            elif t == 'WIDAYS':
                t = 'WIDAY'
            elif t == 'WATR':
                t = 'WATER'
            elif t == 'WATE':
                t = 'WATER'
            read_order.append(t)
            if t not in skip_keys:
                ival += 1
                dat_order[t] = ival
                volprev.append(undef)
                volmul.append(1.)
                if t in weff_keys:
                    options['hasweff'] = True
        # Reserve one extra slot for the derived well-efficiency (WEFF) value.
        if options['hasweff']:
            ival += 1
            dat_order['WEFF'] = ival
            volprev.append(undef)
        # Unit multipliers: scale M/MM-prefixed quantities to base units.
        if options['units'] == 'f':
            if options['mmscf']:
                if 'GAS' in dat_order.keys():
                    volmul[dat_order['GAS']] = 1000.
                if 'GINJ' in dat_order.keys():
                    volmul[dat_order['GINJ']] = 1000.
            if options['mstb']:
                if 'OIL' in dat_order.keys():
                    volmul[dat_order['OIL']] = 1000.
                if 'OINJ' in dat_order.keys():
                    volmul[dat_order['OINJ']] = 1000.
                if 'WATER' in dat_order.keys():
                    volmul[dat_order['WATER']] = 1000.
                if 'WINJ' in dat_order.keys():
                    volmul[dat_order['WINJ']] = 1000.
        else:
            if options['gkilosm3']:
                if 'GAS' in dat_order.keys():
                    volmul[dat_order['GAS']] = 1000.
                if 'GINJ' in dat_order.keys():
                    volmul[dat_order['GINJ']] = 1000.
            if options['lkilosm3']:
                if 'OIL' in dat_order.keys():
                    volmul[dat_order['OIL']] = 1000.
                if 'OINJ' in dat_order.keys():
                    volmul[dat_order['OINJ']] = 1000.
                if 'WATER' in dat_order.keys():
                    volmul[dat_order['WATER']] = 1000.
                if 'WINJ' in dat_order.keys():
                    volmul[dat_order['WINJ']] = 1000.
    # Option keywords: units, frequency, uptime representation, value policy.
    if line.find('*METRIC') >= 0:
        options['units'] = 'm'
    elif line.find('*FIELD') >= 0:
        options['units'] = 'f'
    if line.find('*DAILY') >= 0:
        options['freq'] = 'd'
    elif line.find('*MONTHLY') >= 0:
        options['freq'] = 'm'
    elif line.find('*YEARLY') >= 0:
        options['freq'] = 'y'
    if line.find('*MSM3') >= 0:
        if line.find(' GAS') >= 0:
            options['gkilosm3'] = True
        if line.find(' LIQUID') >= 0:
            options['lkilosm3'] = True
    if line.find('*MSTB') >= 0:
        options['mstb'] = True
    if line.find('*MMSCF') >= 0:
        options['mmscf'] = True
    if line.find('*UCRATES') >= 0:
        options['ucrates'] = True
    if line.find('*UUCRATES') >= 0:
        options['ucrates'] = False
    if line.find('*UPTIME_FRACTIONS') >= 0:
        options['wefrac'] = True
    if line.find('*HRS_IN_DAYS') >= 0:
        options['wehrs'] = True
    if line.find('*MNS_IN_YEARS') >= 0:
        options['wemonths'] = True
    if line.find('*CUMULATIVE') >= 0:
        options['cumu'] = True
    if line.find('*ZERO_MISSING') >= 0:
        options['undef'] = 0.0
    if line.find('*IGNORE_MISSING') >= 0:
        options['undef'] = None
    return (options, read_order, dat_order, volmul, volprev)
def _check_separator(line, read_order):
"""Identify data separators, tabs or blanks
"""
temp = line.strip()
lread = len(read_order)
terms1 = re.split('\t', temp)
# terms2 = re.split(' ', temp)
if len(terms1) == lread:
use_tabs = True
print('Information: Reading tab-separated table.')
else:
use_tabs = False
print('Information: Reading white-space-separated table.')
return use_tabs
def _read_data(line, date_format, read_order, options, volmul, volprev, well_name, line_no):
    """Parse one OFM data line into (well name, date, value list).

    Columns are interpreted according to ``read_order``. Empty fields keep the
    corresponding value from ``volprev`` (the previous/default values).

    Raises:
        ValueError: If the line has fewer fields than declared columns.
    """
    # Collapse runs of blanks so whitespace splitting is well defined.
    line = re.sub(' +', ' ', line)
    if options['tabsep']:
        if line.find(' ') >= 0:
            print('Warning: Blank space in tab separated table, in line', line_no)
            line = line.replace(' ', '\t')
        terms = re.split('\t', line)
    else:
        terms = line.split()
    lread = len(read_order)
    lterms = len(terms)
    if lterms > lread:
        # Extra fields are tolerated with a warning; missing fields are fatal.
        errmes = (
            'Warning: Superfluous data items ignored in line '
            + str(line_no)
            + '. Found '
            + str(lterms)
            + ', expected '
            + str(lread)
            + '.')
        print(errmes)
    elif lterms < lread:
        errmes = (
            'Incorrect number of data items in line '
            + str(line_no)
            + '. Found '
            + str(lterms)
            + ', expected '
            + str(lread)
            + '.')
        raise ValueError(errmes)
    day = 1
    month = 1
    year = 1900
    vdate = datetime(year, month, day)
    # Start from the previous values so empty fields carry them forward.
    voldat = list(volprev)
    ival = -1
    has_date = False
    for i in range(lread):
        t = terms[i]
        if t == '':
            # Empty field: keep the previous value, just advance the index.
            ival = ival + 1
        else:
            if read_order[i] == 'DAY':
                day = int(t)
            elif read_order[i] == 'MONTH':
                month = int(t)
            elif read_order[i] == 'YEAR':
                year = int(t)
            elif read_order[i] == 'DATE':
                vdate = roxar_api_utils.ioutil.string_to_datetime(t, date_format)
                has_date = True
            elif read_order[i] == 'WELL':
                well_name = t.strip()
            else:
                # Data column: build the date from DAY/MONTH/YEAR if no DATE
                # column was seen, then apply the unit multiplier.
                if not has_date:
                    vdate = datetime(year, month, day)
                ival += 1
                voldat[ival] = float(t)*volmul[ival]
    # WEFF placeholder; the real value is derived later in _process_weff.
    if options['hasweff']:
        ival += 1
        voldat[ival] = 1.0
    if options['nonneg']:
        voldat = [max(val, 0.) for val in voldat]
    return well_name, vdate, voldat
def _add_final_step(datelist, vollist, freq):
"""Add extra date at end of each well data sequence
"""
l = len(datelist) - 1
lastdate = datelist[l]
lastlist = vollist[l]
if freq == 'd':
d = lastdate + timedelta(days=1)
elif freq == 'm':
newday = lastdate.day
delta = 1
newmonth = (((lastdate.month - 1) + delta) % 12) + 1
newyear = lastdate.year + (((lastdate.month - 1) + delta) // 12)
if newday > calendar.mdays[newmonth]:
newday = calendar.mdays[newmonth]
if newyear % 4 == 0 and newmonth == 2:
newday += 1
d = datetime(newyear, newmonth, newday, 0, 0, 0)
elif freq == 'y':
newyear = lastdate.year + 1
newmonth = lastdate.month
newday = lastdate.day # Not treating Feb 29
d = datetime(newyear, newmonth, newday, 0, 0, 0)
else:
errstr = 'Incorrect date frequency'
raise ValueError(errstr)
datelist.append(d)
vollist.append(lastlist)
return (datelist, vollist)
def _read_vol_file(file_name, date_format, undef_value, set_nonnegative, time_shift):
    """Read an OFM *.vol file into per-well date/value sequences.

    Args:
        file_name (str): OFM text file path.
        date_format (str): Date format specification, e.g. 'DD.MM.YYYY'.
        undef_value (float): Replacement for undefined values; None means
            repeat the previous value.
        set_nonnegative (bool): Replace negative values with zero.
        time_shift (bool): If True, append an extra final step per well
            (Constant Backwards convention).

    Returns:
        Tuple (allwells, alldata, dat_order, options); ``alldata`` maps well
        name to (date list, value-list list).

    Raises:
        OSError: If the file cannot be opened.
        IOError: On read errors.
        ValueError: On out-of-order dates.
    """
    try:
        pfil = open(file_name, 'r')
    except OSError as e:
        raise OSError(e)
    well_name = 'xxx'
    # Reader state; mutated by keyword lines as the file is scanned.
    options = dict()
    options['units'] = 'm' # Metric
    options['freq'] = 'm' # d = Daily, m=Monthly (OFM default), y=yearly
    options['gkilosm3'] = False
    options['lkilosm3'] = False
    options['mstb'] = False
    options['mmscf'] = False
    options['wehrs'] = False
    options['wemonths'] = False
    options['wefrac'] = False
    options['cumu'] = False
    options['ucrates'] = False
    options['nonneg'] = set_nonnegative
    options['undef'] = undef_value
    options['hasweff'] = False
    options['tabsep'] = False
    options['timeshift'] = time_shift
    vollist = []
    datelist = []
    volprev = []
    dateprev = datetime(1900, 1, 1)
    volmul = []
    read_order = []
    dat_order = dict()
    alldata = dict()
    dataread = False
    do_read = True
    line_no = 0
    allwells = []
    check_sep = True
    while True:
        try:
            line = pfil.readline()
            line_no += 1
        except IOError as e:
            errstr = 'Error reading OFM file, line ' + str(line_no) + '\n' + str(e)
            raise IOError(errstr)
        if not line:
            break
        line = line.replace('\n', '')
        line = line.replace('\r', '')
        temp = line.lstrip()
        ic = temp.find('--') # Schedule comments
        if ic > 0:
            # Strip a trailing comment, keeping the text before it.
            ic -= 1
            temp = temp[0:ic]
            temp = temp.rstrip(' ')
        utemp = temp.upper()
        if utemp == '':
            pass # Skip blank lines
        elif ic == 0:
            pass # Skip Schedule comments
        elif utemp.startswith('\*'):
            # NOTE(review): '\*' is a literal backslash+asterisk (the escape
            # is not recognized by Python); if plain '*'-prefixed comment
            # lines were intended, this branch never matches -- confirm.
            pass # Skip comments
        elif utemp.find('*READOFF') >= 0:
            do_read = False
        elif utemp.find('*READON') >= 0:
            do_read = True
        elif not do_read:
            pass
        elif utemp.find('*NAME') >= 0:
            # New well section: flush the data collected for the previous well.
            if dataread:
                if time_shift:
                    alldata[well_name] = _add_final_step(datelist, vollist, options['freq'])
                else:
                    alldata[well_name] = (datelist, vollist)
                datelist = []
                vollist = []
                dateprev = datetime(1900, 1, 1)
                dataread = False
            well_name = temp[5:].strip()
            allwells.append(well_name)
        elif utemp.find('*') >= 0:
            # Keyword line: updates options and the column layout.
            options, read_order, dat_order, volmul, volprev = _read_key(
                utemp, options, read_order, dat_order, volmul, volprev, line_no)
        else:
            # Data line; the first one decides tab vs. whitespace separation.
            if check_sep:
                options['tabsep'] = _check_separator(temp, read_order)
                check_sep = False
            wname, vdat, voldat = _read_data(
                temp, date_format, read_order, options, volmul, volprev, well_name, line_no)
            if wname != well_name:
                # Inline WELL column changed: flush the previous well's data.
                if dataread:
                    if time_shift:
                        alldata[well_name] = _add_final_step(datelist, vollist, options['freq'])
                    else:
                        alldata[well_name] = (datelist, vollist)
                    datelist = []
                    vollist = []
                    dateprev = datetime(1900, 1, 1)
                well_name = wname
                allwells.append(well_name)
            dataread = True
            if options['hasweff']:
                voldat = _process_weff(vdat, voldat, dat_order, options)
            # Convert period volumes to average daily rates.
            if options['freq'] == 'm':
                voldat = _process_monthly(vdat, voldat, dat_order)
            elif options['freq'] == 'y':
                voldat = _process_yearly(vdat, voldat, dat_order)
            if vdat > dateprev:
                vollist.append(voldat)
                datelist.append(vdat)
                dateprev = vdat
            else:
                errmes = 'Incorrect date order found in line ' + str(line_no) + '.'
                raise ValueError(errmes)
            if options['undef'] is None:
                # IGNORE_MISSING mode: carry the last values forward.
                volprev = voldat
    if dataread:
        # Flush the final well.
        if time_shift:
            alldata[well_name] = _add_final_step(datelist, vollist, options['freq'])
        else:
            alldata[well_name] = (datelist, vollist)
    return (allwells, alldata, dat_order, options)
def _process_weff(vdat, voldat, dat_order, options):
iweff = dat_order['WEFF']
if not options['wefrac']:
if options['freq'] == 'd':
if options['wehrs']:
for k in ('DAYS', 'GIDAY', 'OIDAY', 'WIDAY'):
if k in dat_order.keys():
voldat[dat_order[k]] = voldat[dat_order[k]]/24.
elif options['freq'] == 'm':
days_in_month = float(calendar.monthrange(vdat.year, vdat.month)[1])
print( 'days in month: ',days_in_month,vdat.month )
for k in ('DAYS', 'GIDAY', 'OIDAY', 'WIDAY'):
if k in dat_order.keys():
print( 'dat_order = ',k, ':', voldat[dat_order[k]] )
voldat[dat_order[k]] = voldat[dat_order[k]]/days_in_month
print( 'dat_order = ',k, ':', voldat[dat_order[k]] )
print()
elif options['freq'] == 'y':
if options['wemonths']:
for k in ('DAYS', 'GIDAY', 'OIDAY', 'WIDAY'):
if k in dat_order.keys():
voldat[dat_order[k]] = voldat[dat_order[k]]/12.
if 'UPTIME' in dat_order.keys():
voldat[iweff] = voldat[dat_order['UPTIME']]
elif ('GIDAY' in dat_order.keys()
and 'GINJ' in dat_order.keys()
and voldat[dat_order['GINJ']]) > 0.:
voldat[iweff] = voldat[dat_order['GIDAY']]
elif ('OIDAY' in dat_order.keys()
and 'OINJ' in dat_order.keys()
and voldat[dat_order['OINJ']]) > 0.:
voldat[iweff] = voldat[dat_order['OIDAY']]
elif ('WIDAY' in dat_order.keys()
and 'WINJ' in dat_order.keys()
and voldat[dat_order['WINJ']]) > 0.:
voldat[iweff] = voldat[dat_order['WIDAY']]
elif 'DAYS' in dat_order.keys():
voldat[iweff] = voldat[dat_order['DAYS']]
# Adjust rates for well efficiency
if options['ucrates'] and voldat[iweff] > 0:
for k in ('GAS', 'OIL', 'WATER', 'GINJ', 'OINJ', 'WINJ'):
if k in dat_order.keys():
voldat[dat_order[k]] = voldat[dat_order[k]]/voldat[iweff]
return voldat
def _process_monthly(vdat, voldat, dat_order):
days_in_month = float(calendar.monthrange(vdat.year, vdat.month)[1])
rate_keys = ('GAS', 'OIL', 'WATER', 'GINJ', 'OINJ', 'WINJ')
for k in rate_keys:
if k in dat_order.keys():
voldat[dat_order[k]] = voldat[dat_order[k]]/days_in_month
return voldat
def _process_yearly(vdat, voldat, dat_order):
y1 = datetime(vdat.year, 1, 1)
y2 = datetime(vdat.year+1, 1, 1)
dy = y2 - y1
days_in_year = dy.days
rate_keys = ('GAS', 'OIL', 'WATER', 'GINJ', 'OINJ', 'WINJ')
for k in rate_keys:
if k in dat_order.keys():
voldat[dat_order[k]] = voldat[dat_order[k]]/days_in_year
return voldat
def _set_val(profiles, vtime, indx, vollist, vt, keyword, wname, iw, zunit, options):
    """Interpolate one quantity onto the common time axis and store it.

    Rates use backward ('B') or forward ('F') interpolation depending on the
    time-shift option; other quantities use linear ('L') interpolation.
    Returns the updated profiles object.
    """
    if is_rate(keyword):
        itype = 'B' if options['timeshift'] else 'F'
    else:
        itype = 'L'
    no_steps = vt.size
    vq = np.zeros(no_steps)
    # With time shift, values are placed one step later (backwards convention).
    shift = 1 if options['timeshift'] else 0
    for i in range(no_steps - shift):
        vq[i + shift] = vollist[i][indx]
    vqall = np.zeros(vtime.size)
    for i, t in enumerate(vtime):
        vqall[i] = profiles_interpolation(t, vt, vq, itype)
    profiles.set_vector(keyword, vqall, name=wname, num=iw, unit=zunit)
    return profiles
def _set_profiles(allwells, alldata, dat_order, aliasinfo, options):
    """Store per-well date/value sequences as a Profiles object.

    Builds a common time axis from the union of all well dates, maps OFM
    column names to standard well vector keywords (WOPR/WOPT etc.), and
    interpolates every well's values onto the common axis.

    Args:
        allwells (list of str): Well names in read order.
        alldata (dict): Well name -> (date list, value-list list).
        dat_order (dict): Column name -> index into the value lists.
        aliasinfo: Optional NameAlias object for well-name translation.
        options (dict): Reader options (cumu, time_shift, read_type, ...).

    Returns:
        The populated Profiles object.
    """
    # Common time axis: union of all dates over all wells, sorted.
    dateset = set()
    for w in allwells:
        datelist, vollist = alldata[w]
        dateset.update(datelist)
    datelist = list(dateset)
    datelist.sort()
    no_steps = len(datelist)
    vtime = np.zeros(no_steps)
    vday = np.zeros(no_steps)
    vmonth = np.zeros(no_steps)
    vyear = np.zeros(no_steps)
    for i, d in enumerate(datelist):
        if i == 0:
            t = 0.
        else:
            dt = datelist[i]- datelist[i-1]
            t = t + dt.days
        vtime[i] = t
        vday[i] = d.day
        vmonth[i] = d.month
        vyear[i] = d.year
    if options['time_shift']:
        backw = True
    else:
        backw = False
    # read_type: P = production only, I = injection only, anything else = both.
    read_prod = False
    read_inje = False
    if options['read_type'] == 'P':
        read_prod = True
    elif options['read_type'] == 'I':
        read_inje = True
    else:
        read_prod = True
        read_inje = True
    startd = datelist[0]
    profiles = Profiles(profid='OFM', startdate=startd, backwards=backw)
    profiles.set_vector('TIME', vtime, unit='DAYS')
    profiles.set_vector('DAY', vday, unit=' ')
    profiles.set_vector('MONTH', vmonth, unit=' ')
    profiles.set_vector('YEAR', vyear, unit=' ')
    # Map OFM column names to (output keyword, unit): cumulative totals
    # (W?PT/W?IT) when *CUMULATIVE was read, otherwise rates (W?PR/W?IR).
    dkeys = dict()
    if options['cumu']:
        zunit = 'SM3'
        if read_prod:
            if 'GAS' in dat_order.keys():
                dkeys['GAS'] = ('WGPT', zunit)
            if 'OIL' in dat_order.keys():
                dkeys['OIL'] = ('WOPT', zunit)
            if 'WATER' in dat_order.keys():
                dkeys['WATER'] = ('WWPT', zunit)
        if read_inje:
            if 'GINJ' in dat_order.keys():
                dkeys['GINJ'] = ('WGIT', zunit)
            if 'OINJ' in dat_order.keys():
                dkeys['OINJ'] = ('WOIT', zunit)
            if 'WINJ' in dat_order.keys():
                dkeys['WINJ'] = ('WWIT', zunit)
    else:
        zunit = 'SM3/D'
        if read_prod:
            if 'GAS' in dat_order.keys():
                dkeys['GAS'] = ('WGPR', zunit)
            if 'OIL' in dat_order.keys():
                dkeys['OIL'] = ('WOPR', zunit)
            if 'WATER' in dat_order.keys():
                dkeys['WATER'] = ('WWPR', zunit)
        if read_inje:
            if 'GINJ' in dat_order.keys():
                dkeys['GINJ'] = ('WGIR', zunit)
            if 'OINJ' in dat_order.keys():
                dkeys['OINJ'] = ('WOIR', zunit)
            if 'WINJ' in dat_order.keys():
                dkeys['WINJ'] = ('WWIR', zunit)
    if 'BHP' in dat_order.keys():
        dkeys['BHP'] = ('WBHP', 'BARS')
    if 'THP' in dat_order.keys():
        dkeys['THP'] = ('WTHP', 'BARS')
    if 'WEFF' in dat_order.keys():
        dkeys['WEFF'] = ('WEFF', ' ')
    iw = 0
    for w in allwells:
        iw = iw + 1
        datelist, vollist = alldata[w]
        no_steps = len(datelist)
        if aliasinfo is not None:
            wname = aliasinfo.get_alias(w)
        else:
            wname = w
        # Per-well time axis, measured in days from the global start date.
        vt = np.zeros(no_steps)
        for i, d in enumerate(datelist):
            if i == 0:
                dt = datelist[0] - startd
                t = dt.days
            else:
                dt = datelist[i] - datelist[i-1]
                t = t + dt.days
            vt[i] = t
        for k, val in dkeys.items():
            indx = dat_order[k]
            zkey, zunit = val
            profiles = _set_val(profiles, vtime, indx, vollist, vt, zkey, wname, iw, zunit, options)
        del vt
    return profiles
| [
"re.split",
"numpy.zeros",
"datetime.datetime",
"datetime.timedelta",
"calendar.monthrange",
"re.sub"
] | [((5960, 5980), 're.split', 're.split', (['"""\t"""', 'temp'], {}), "('\\t', temp)\n", (5968, 5980), False, 'import re\n'), ((6399, 6422), 're.sub', 're.sub', (['""" +"""', '""" """', 'line'], {}), "(' +', ' ', line)\n", (6405, 6422), False, 'import re\n'), ((7382, 7408), 'datetime.datetime', 'datetime', (['year', 'month', 'day'], {}), '(year, month, day)\n', (7390, 7408), False, 'from datetime import datetime\n'), ((10481, 10501), 'datetime.datetime', 'datetime', (['(1900)', '(1)', '(1)'], {}), '(1900, 1, 1)\n', (10489, 10501), False, 'from datetime import datetime\n'), ((16738, 16763), 'datetime.datetime', 'datetime', (['vdat.year', '(1)', '(1)'], {}), '(vdat.year, 1, 1)\n', (16746, 16763), False, 'from datetime import datetime\n'), ((16774, 16803), 'datetime.datetime', 'datetime', (['(vdat.year + 1)', '(1)', '(1)'], {}), '(vdat.year + 1, 1, 1)\n', (16782, 16803), False, 'from datetime import datetime\n'), ((17350, 17368), 'numpy.zeros', 'np.zeros', (['no_steps'], {}), '(no_steps)\n', (17358, 17368), True, 'import numpy as np\n'), ((17619, 17639), 'numpy.zeros', 'np.zeros', (['vtime.size'], {}), '(vtime.size)\n', (17627, 17639), True, 'import numpy as np\n'), ((18170, 18188), 'numpy.zeros', 'np.zeros', (['no_steps'], {}), '(no_steps)\n', (18178, 18188), True, 'import numpy as np\n'), ((18201, 18219), 'numpy.zeros', 'np.zeros', (['no_steps'], {}), '(no_steps)\n', (18209, 18219), True, 'import numpy as np\n'), ((18234, 18252), 'numpy.zeros', 'np.zeros', (['no_steps'], {}), '(no_steps)\n', (18242, 18252), True, 'import numpy as np\n'), ((18266, 18284), 'numpy.zeros', 'np.zeros', (['no_steps'], {}), '(no_steps)\n', (18274, 18284), True, 'import numpy as np\n'), ((6626, 6646), 're.split', 're.split', (['"""\t"""', 'line'], {}), "('\\t', line)\n", (6634, 6646), False, 'import re\n'), ((20995, 21013), 'numpy.zeros', 'np.zeros', (['no_steps'], {}), '(no_steps)\n', (21003, 21013), True, 'import numpy as np\n'), ((8706, 8723), 'datetime.timedelta', 'timedelta', ([], 
{'days': '(1)'}), '(days=1)\n', (8715, 8723), False, 'from datetime import timedelta\n'), ((9121, 9165), 'datetime.datetime', 'datetime', (['newyear', 'newmonth', 'newday', '(0)', '(0)', '(0)'], {}), '(newyear, newmonth, newday, 0, 0, 0)\n', (9129, 9165), False, 'from datetime import datetime\n'), ((16411, 16453), 'calendar.monthrange', 'calendar.monthrange', (['vdat.year', 'vdat.month'], {}), '(vdat.year, vdat.month)\n', (16430, 16453), False, 'import calendar\n'), ((9328, 9372), 'datetime.datetime', 'datetime', (['newyear', 'newmonth', 'newday', '(0)', '(0)', '(0)'], {}), '(newyear, newmonth, newday, 0, 0, 0)\n', (9336, 9372), False, 'from datetime import datetime\n'), ((14587, 14629), 'calendar.monthrange', 'calendar.monthrange', (['vdat.year', 'vdat.month'], {}), '(vdat.year, vdat.month)\n', (14606, 14629), False, 'import calendar\n'), ((8136, 8162), 'datetime.datetime', 'datetime', (['year', 'month', 'day'], {}), '(year, month, day)\n', (8144, 8162), False, 'from datetime import datetime\n'), ((12034, 12054), 'datetime.datetime', 'datetime', (['(1900)', '(1)', '(1)'], {}), '(1900, 1, 1)\n', (12042, 12054), False, 'from datetime import datetime\n'), ((13053, 13073), 'datetime.datetime', 'datetime', (['(1900)', '(1)', '(1)'], {}), '(1900, 1, 1)\n', (13061, 13073), False, 'from datetime import datetime\n')] |
from __future__ import print_function
import logging
import os
import pickle
from datetime import datetime
import networkx as nx
import numpy as np
import scipy.sparse as sp
import pandas as pd
import tensorflow as tf
flags = tf.compat.v1.flags
FLAGS = flags.FLAGS
class ExpLogger:
    """Experiment logger: console/file logging, a CSV result spreadsheet,
    data snapshots, and best-metric tracking.

    Fixes over the original:
      * ``debug()`` logged the builtin ``str`` instead of the message.
      * ``DataFrame.append`` (removed in pandas 2.0) replaced by ``pd.concat``.
      * Bare ``except:`` narrowed to ``except Exception``.
    """

    def __init__(self, name, cmd_print=True, log_file=None, spreadsheet=None, data_dir=None):
        """Set up logging and, if requested, the spreadsheet and data dir.

        Args:
            name (str): Experiment name; a timestamp suffix is appended.
            cmd_print (bool): Also echo log messages to stdout.
            log_file (str): Optional log file path for the logging module.
            spreadsheet (str): Optional CSV path for result rows.
            data_dir (str): Optional directory for saved data snapshots.
        """
        self.datetime_str = datetime.now().strftime("%Y%m%d_%H%M%S")
        self.name = name + "_" + self.datetime_str
        self.cmd_print = cmd_print
        log_level = logging.INFO
        logging.basicConfig(filename=log_file, level=log_level,
                            format='%(asctime)s - %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %H:%M:%S')
        self.file_logger = logging.getLogger()
        self.spreadsheet = spreadsheet
        self.data_dir = data_dir
        if self.spreadsheet is not None:
            dirname = os.path.dirname(self.spreadsheet)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            if os.path.isfile(self.spreadsheet):
                try:
                    self.df = pd.read_csv(spreadsheet)
                except Exception:
                    # Corrupt or empty sheet: start fresh instead of crashing.
                    self.df = pd.DataFrame()
            else:
                self.df = pd.DataFrame()
        if self.data_dir is not None:
            if not os.path.exists(self.data_dir):
                os.makedirs(self.data_dir)
        self.best_metric = float("-inf")
        self.best_data = None

    def __enter__(self):
        self.log("Logger Started, name: " + self.name)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Persist the spreadsheet on context exit.
        if self.spreadsheet is not None:
            self.df.to_csv(self.spreadsheet, index=False)

    def log(self, content):
        """Log *content* at INFO level (and print it if cmd_print)."""
        if not isinstance(content, str):
            content = str(content)
        if self.cmd_print:
            print(content)
        if self.file_logger is not None:
            self.file_logger.info(content)

    def debug(self, content):
        """Log *content* at DEBUG level (and print it if cmd_print)."""
        if not isinstance(content, str):
            content = str(content)
        if self.cmd_print:
            print("[DEBUG]::: " + content + ":::[DEBUG]")
        if self.file_logger is not None:
            # Bug fix: previously logged the builtin ``str``, not the message.
            self.file_logger.debug(content)

    def spreadsheet_write(self, val_dict):
        """Append one result row (a dict) to the spreadsheet dataframe."""
        if self.spreadsheet is not None:
            if "name" not in val_dict:
                val_dict["name"] = self.name
            # DataFrame.append was removed in pandas 2.0; concat is equivalent.
            self.df = pd.concat([self.df, pd.DataFrame([val_dict])], ignore_index=True)

    def save_data(self, data, name):
        """Save *data* under *name* (npz for ndarrays, pickle otherwise)."""
        name = name + "_" + self.datetime_str
        if isinstance(data, np.ndarray):
            np.savez_compressed(os.path.join(self.data_dir, name + ".npz"), data=data)
        else:
            with open(os.path.join(self.data_dir, name + ".pkl"), "wb") as f:
                pickle.dump(data, f)

    def update_record(self, metric, data):
        """Remember *data* when *metric* beats the best metric seen so far."""
        if metric > self.best_metric:
            self.best_metric = metric
            self.best_data = data
self.best_data = data
def sparse_to_tuple(sparse_mx):
    """Convert sparse matrices to (coords, values, shape) tuple form.

    Accepts a single matrix, a list of matrices, or a list of lists of
    matrices (the latter are stacked with a leading layer index).
    """
    def _single(mx):
        """Tuple form of one sparse matrix."""
        coo = mx.tocoo() if not sp.isspmatrix_coo(mx) else mx
        pair_coords = np.vstack((coo.row, coo.col)).transpose()
        return pair_coords, coo.data, coo.shape

    def _stacked(matrices):
        """Tuple form of a list of matrices, prefixing each entry's coords
        with the matrix index so shape becomes (n_mats, rows, cols)."""
        coords = []
        values = []
        shape = [len(matrices)]
        for layer, mx in enumerate(matrices):
            coo = mx.tocoo() if not sp.isspmatrix_coo(mx) else mx
            pair_coords = np.vstack((coo.row, coo.col)).transpose()
            layer_col = np.array([np.ones(pair_coords.shape[0]) * layer]).T
            stacked = np.concatenate((layer_col, pair_coords), axis=1)
            stacked = stacked.astype(int)
            coords.extend(stacked)
            values.extend(coo.data)
        shape.extend(matrices[0].shape)
        shape = np.array(shape).astype("int64")
        values = np.array(values).astype("float32")
        coords = np.array(coords)
        return coords, values, shape

    if isinstance(sparse_mx, list) and isinstance(sparse_mx[0], list):
        # List of lists: convert each inner list with the stacked form.
        for i in range(len(sparse_mx)):
            sparse_mx[i] = _stacked(sparse_mx[i])
    elif isinstance(sparse_mx, list):
        for i in range(len(sparse_mx)):
            sparse_mx[i] = _single(sparse_mx[i])
    else:
        sparse_mx = _single(sparse_mx)
    return sparse_mx
return sparse_mx
def normalize_graph_gcn(adj):
    """Symmetrically normalize (A + I) as in GCN and return tuple form.

    Computes D^{-1/2} (A + I) D^{-1/2} where D is the degree matrix of the
    self-loop-augmented adjacency, then converts to (coords, values, shape).
    """
    adj = sp.coo_matrix(adj)
    adj_with_self = adj + sp.eye(adj.shape[0])
    degrees = np.array(adj_with_self.sum(1))
    d_inv_sqrt = sp.diags(np.power(degrees, -0.5).flatten())
    normalized = adj_with_self.dot(d_inv_sqrt).transpose().dot(d_inv_sqrt).tocoo()
    return sparse_to_tuple(normalized)
def sample_mask(idx, l):
    """Create a boolean mask of length *l* that is True at positions *idx*.

    Args:
        idx: Index or array of indices to set.
        l (int): Mask length.

    Returns:
        np.ndarray of dtype bool.
    """
    mask = np.zeros(l)
    mask[idx] = 1
    # np.bool (alias of the builtin) was removed in NumPy 1.24; use bool.
    return np.array(mask, dtype=bool)
# Extra vocabulary tokens appended after the node ids: '_GO' (sequence start)
# and 'EOS' (sequence end); their ids are offset by the graph size in load_graph.
extra_tokens = ['_GO', 'EOS']
def get_data_set(cascades, timestamps, max_len=None, test_min_percent=0.1, test_max_percent=0.5, mode='test'):
    """Create train/val/test examples from input cascade sequences.

    Cascades longer than ``max_len`` are truncated. Each cascade of length n
    yields up to n-1 (seed set, remainder) splits. Test examples keep only
    splits whose seed fraction lies strictly between ``test_min_percent`` and
    ``test_max_percent``; train/val keep every split.
    """
    # Truncate cascades and their timestamp lists to max_len if requested.
    dataset = [c if max_len is None or len(c) < max_len else c[0:max_len]
               for c in cascades]
    dataset_times = [t if max_len is None or len(t) < max_len else t[0:max_len]
                     for t in timestamps]
    eval_set, eval_set_times = [], []
    for cascade, ts_list in zip(dataset, dataset_times):
        assert len(cascade) == len(ts_list)
        for split in range(1, len(cascade)):
            seeds = cascade[0:split]
            remain = cascade[split:]
            seed_ts = ts_list[0:split]
            remain_ts = ts_list[split:]
            frac = len(seeds) / (len(seeds) + len(remain))
            in_test_band = test_min_percent < frac < test_max_percent
            keep = mode in ('train', 'val') or (mode == 'test' and in_test_band)
            if keep:
                eval_set.append((seeds, remain))
                eval_set_times.append((seed_ts, remain_ts))
    print("# {} examples {}".format(mode, len(eval_set)))
    return eval_set, eval_set_times
def load_graph(dataset_str):
    """Load the social network as a sparse adjacency matrix.

    Expects data/<dataset>/graph.txt whose first line is "n_nodes n_edges"
    and whose remaining lines are "src dst" edges. Also sets the module
    globals start_token / end_token just past the node-id range.
    """
    print("Loading graph", dataset_str)
    graph = nx.Graph()
    n_nodes = n_edges = 0
    with open("data/{}/{}".format(dataset_str, "graph.txt"), 'rb') as f:
        for line_no, line in enumerate(f, start=1):
            fields = [int(tok) for tok in line.strip().split()]
            if line_no == 1:
                # Header line: node and edge counts.
                n_nodes, n_edges = fields
                graph.add_nodes_from(range(n_nodes))
                continue
            src, dst = fields
            graph.add_edge(src, dst)
    adj = nx.adjacency_matrix(graph)
    print("# nodes", n_nodes, "# edges", n_edges, adj.shape)
    global start_token, end_token
    start_token = adj.shape[0] + extra_tokens.index('_GO')  # start_token = 0
    end_token = adj.shape[0] + extra_tokens.index('EOS')  # end_token = 1
    return adj
def load_feats(dataset_str):
    """Load the user attribute matrix stored in data/<dataset>/feats.npz."""
    archive = np.load("data/{}/{}".format(dataset_str, "feats.npz"))
    return archive['arr_0']
def load_cascades(dataset_str, mode='train'):
    """Load cascade data, returning (user sequences, timestamp sequences).

    Each line of data/<dataset>/<mode>.txt is "start u1 t1 u2 t2 ...";
    the start user is assigned timestamp 0.
    """
    print("Loading cascade", dataset_str, "mode", mode)
    global avg_diff
    avg_diff = 0.0
    cascades, time_stamps = [], []
    path = mode + ".txt"
    with open("data/{}/{}".format(dataset_str, path), 'rb') as f:
        for raw in f:
            if len(raw) < 1:
                continue
            values = list(map(float, raw.split()))
            start_user, rest = int(values[0]), values[1:]
            # Even positions of `rest` are user ids, odd positions timestamps.
            users = [start_user]
            users.extend(int(u) for u in rest[::2])
            stamps = [0]  # start time = 0
            stamps.extend(rest[1::2])
            cascades.append(users)
            time_stamps.append(stamps)
    return cascades, time_stamps
def prepare_batch_sequences(input_sequences, target_sequences, batch_size):
    """Chunk parallel input/target example lists into batches of batch_size.

    The final batch may be smaller when the total is not a multiple of
    batch_size.
    """
    assert (len(input_sequences) == len(target_sequences))
    total = len(input_sequences)
    num_batch = total // batch_size + (1 if total % batch_size != 0 else 0)
    batches_x = [input_sequences[b * batch_size:min((b + 1) * batch_size, total)]
                 for b in range(num_batch)]
    batches_y = [target_sequences[b * batch_size:min((b + 1) * batch_size, total)]
                 for b in range(num_batch)]
    return batches_x, batches_y
def prepare_batch_graph(adj, batch_size):
    """Partition the users (rows of `adj`) into randomly ordered batches.

    Returns (dense row blocks, corresponding row-index arrays). Note the
    batch count is n // batch_size + 1, so the last batch may be empty when
    n divides evenly.
    """
    n_users = adj.shape[0]
    n_batches = n_users // batch_size + 1
    order = np.random.permutation(n_users)
    dense_batches, index_batches = [], []
    for b in range(n_batches):
        lo = b * batch_size
        hi = min(lo + batch_size, n_users)
        picked = order[lo:hi]
        dense_batches.append(adj[picked, :].toarray())
        index_batches.append(picked)
    return dense_batches, index_batches
def prepare_sequences(examples, examples_times, max_len=None, cascade_batch_size=1, mode='train'):
    """ Prepare sequences by padding and adding dummy evaluation sequences.

    NOTE(review): despite the default, max_len must be an int here -- it is
    used both to slice the seed sets and as the padded width. Also relies on
    the module global `start_token` being set by load_graph first.

    Returns (x, x_lengths, y, mask, x_times, y_times), each row padded to
    max_len columns.
    """
    # Keep at most the last max_len seed items of every (seed, target) pair.
    seqs_x = list(map(lambda seq_t: (seq_t[0][(-1) * max_len:], seq_t[1]), examples))
    times_x = list(map(lambda seq_t: (seq_t[0][(-1) * max_len:], seq_t[1]), examples_times))
    # add padding.
    lengths_x = [len(s[0]) for s in seqs_x]
    lengths_y = [len(s[1]) for s in seqs_x]
    if len(seqs_x) % cascade_batch_size != 0 and (mode == 'test' or mode == 'val'):
        # Dummy sequences for evaluation: this is required to ensure that each batch is full-sized -- else the
        # data may not be split perfectly while evaluation.
        x_batch_size = (1 + len(seqs_x) // cascade_batch_size) * cascade_batch_size
        # The dummy rows only exist as lengths; the arrays below are sized
        # from lengths_x/lengths_y, so dummies become all-padding rows.
        lengths_x.extend([1] * (x_batch_size - len(seqs_x)))
        lengths_y.extend([1] * (x_batch_size - len(seqs_x)))
    x_lengths = np.array(lengths_x).astype('int32')
    max_len_x = max_len
    # mask input with start token (n_nodes + 1) to work with embedding_lookup
    x = np.ones((len(lengths_x), max_len_x)).astype('int32') * start_token
    # mask target with -1 so that tf.one_hot will return a zero vector for padded nodes
    y = np.ones((len(lengths_y), max_len_x)).astype('int32') * -1
    # activation times are set to vector of ones.
    # NOTE(review): times are cast to int32, so fractional timestamps are
    # truncated -- confirm that is intended.
    x_times = np.ones((len(lengths_x), max_len_x)).astype('int32') * -1
    y_times = np.ones((len(lengths_y), max_len_x)).astype('int32') * -1
    mask = np.ones_like(x)
    # Assign final set of sequences.
    for idx, (s_x, t) in enumerate(seqs_x):
        end_x = lengths_x[idx]
        end_y = lengths_y[idx]
        x[idx, :end_x] = s_x
        y[idx, :end_y] = t
        mask[idx, end_x:] = 0
    for idx, (s_x, t) in enumerate(times_x):
        end_x = lengths_x[idx]
        end_y = lengths_y[idx]
        x_times[idx, :end_x] = s_x
        y_times[idx, :end_y] = t
    return x, x_lengths, y, mask, x_times, y_times
def ensure_dir(d):
    """Create directory `d` (including parents) if it does not already exist.

    Uses exist_ok=True instead of check-then-create, which removes the
    race between os.path.isdir and os.makedirs; still raises if `d`
    exists but is not a directory.
    """
    os.makedirs(d, exist_ok=True)
| [
"pickle.dump",
"pandas.read_csv",
"numpy.ones",
"os.path.isfile",
"os.path.join",
"scipy.sparse.eye",
"networkx.adjacency_matrix",
"pandas.DataFrame",
"numpy.power",
"os.path.dirname",
"os.path.exists",
"scipy.sparse.coo_matrix",
"datetime.datetime.now",
"numpy.ones_like",
"scipy.sparse.... | [((4818, 4836), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['adj'], {}), '(adj)\n', (4831, 4836), True, 'import scipy.sparse as sp\n'), ((5192, 5203), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (5200, 5203), True, 'import numpy as np\n'), ((5233, 5262), 'numpy.array', 'np.array', (['mask'], {'dtype': 'np.bool'}), '(mask, dtype=np.bool)\n', (5241, 5262), True, 'import numpy as np\n'), ((7134, 7144), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (7142, 7144), True, 'import networkx as nx\n'), ((7670, 7692), 'networkx.adjacency_matrix', 'nx.adjacency_matrix', (['g'], {}), '(g)\n', (7689, 7692), True, 'import networkx as nx\n'), ((9802, 9826), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (9823, 9826), True, 'import numpy as np\n'), ((11706, 11721), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (11718, 11721), True, 'import numpy as np\n'), ((609, 749), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'log_file', 'level': 'log_level', 'format': '"""%(asctime)s - %(levelname)s: %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""'}), "(filename=log_file, level=log_level, format=\n '%(asctime)s - %(levelname)s: %(message)s', datefmt='%m/%d/%Y %H:%M:%S')\n", (628, 749), False, 'import logging\n'), ((828, 847), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (845, 847), False, 'import logging\n'), ((4230, 4246), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (4238, 4246), True, 'import numpy as np\n'), ((4854, 4874), 'scipy.sparse.eye', 'sp.eye', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (4860, 4874), True, 'import scipy.sparse as sp\n'), ((12282, 12298), 'os.path.isdir', 'os.path.isdir', (['d'], {}), '(d)\n', (12295, 12298), False, 'import os\n'), ((12308, 12322), 'os.makedirs', 'os.makedirs', (['d'], {}), '(d)\n', (12319, 12322), False, 'import os\n'), ((983, 1016), 'os.path.dirname', 'os.path.dirname', (['self.spreadsheet'], {}), '(self.spreadsheet)\n', (998, 
1016), False, 'import os\n'), ((1113, 1145), 'os.path.isfile', 'os.path.isfile', (['self.spreadsheet'], {}), '(self.spreadsheet)\n', (1127, 1145), False, 'import os\n'), ((3187, 3208), 'scipy.sparse.isspmatrix_coo', 'sp.isspmatrix_coo', (['mx'], {}), '(mx)\n', (3204, 3208), True, 'import scipy.sparse as sp\n'), ((3939, 3977), 'numpy.concatenate', 'np.concatenate', (['(z, coords_mx)'], {'axis': '(1)'}), '((z, coords_mx), axis=1)\n', (3953, 3977), True, 'import numpy as np\n'), ((11134, 11153), 'numpy.array', 'np.array', (['lengths_x'], {}), '(lengths_x)\n', (11142, 11153), True, 'import numpy as np\n'), ((441, 455), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (453, 455), False, 'from datetime import datetime\n'), ((1036, 1059), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (1050, 1059), False, 'import os\n'), ((1077, 1097), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (1088, 1097), False, 'import os\n'), ((1336, 1350), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1348, 1350), True, 'import pandas as pd\n'), ((1408, 1437), 'os.path.exists', 'os.path.exists', (['self.data_dir'], {}), '(self.data_dir)\n', (1422, 1437), False, 'import os\n'), ((1455, 1481), 'os.makedirs', 'os.makedirs', (['self.data_dir'], {}), '(self.data_dir)\n', (1466, 1481), False, 'import os\n'), ((2718, 2760), 'os.path.join', 'os.path.join', (['self.data_dir', "(name + '.npz')"], {}), "(self.data_dir, name + '.npz')\n", (2730, 2760), False, 'import os\n'), ((2881, 2901), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (2892, 2901), False, 'import pickle\n'), ((3255, 3282), 'numpy.vstack', 'np.vstack', (['(mx.row, mx.col)'], {}), '((mx.row, mx.col))\n', (3264, 3282), True, 'import numpy as np\n'), ((3657, 3678), 'scipy.sparse.isspmatrix_coo', 'sp.isspmatrix_coo', (['mx'], {}), '(mx)\n', (3674, 3678), True, 'import scipy.sparse as sp\n'), ((4129, 4144), 'numpy.array', 'np.array', (['shape'], {}), '(shape)\n', 
(4137, 4144), True, 'import numpy as np\n'), ((4178, 4194), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (4186, 4194), True, 'import numpy as np\n'), ((4946, 4969), 'numpy.power', 'np.power', (['row_sum', '(-0.5)'], {}), '(row_sum, -0.5)\n', (4954, 4969), True, 'import numpy as np\n'), ((1198, 1222), 'pandas.read_csv', 'pd.read_csv', (['spreadsheet'], {}), '(spreadsheet)\n', (1209, 1222), True, 'import pandas as pd\n'), ((2809, 2851), 'os.path.join', 'os.path.join', (['self.data_dir', "(name + '.pkl')"], {}), "(self.data_dir, name + '.pkl')\n", (2821, 2851), False, 'import os\n'), ((3819, 3846), 'numpy.vstack', 'np.vstack', (['(mx.row, mx.col)'], {}), '((mx.row, mx.col))\n', (3828, 3846), True, 'import numpy as np\n'), ((1277, 1291), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1289, 1291), True, 'import pandas as pd\n'), ((3885, 3912), 'numpy.ones', 'np.ones', (['coords_mx.shape[0]'], {}), '(coords_mx.shape[0])\n', (3892, 3912), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import ast
from flask import Flask, session, redirect, url_for, request, jsonify
from flask import render_template, session, g
from werkzeug.utils import secure_filename
from PIL import Image
import serial
from time import sleep
# Flask app serving the drawing control UI.
app = Flask(__name__)
import numpy as np
from scipy.cluster.vq import kmeans
from skimage.color import rgb2hsv
# Serial link to the board emulating the game controller.
# NOTE(review): /dev/serial0 at 9600 baud with a 2 s write timeout --
# presumably the Raspberry Pi UART; confirm against the wiring.
ser = serial.Serial('/dev/serial0', 9600, write_timeout=2)
def send(msg, duration=0):
    """Write one controller command over the serial link, hold it for
    `duration` seconds, then send RELEASE."""
    print(msg)
    framed = '{}\r\n'.format(msg)
    ser.write(framed.encode('UTF-8'))
    sleep(duration)
    ser.write(b'RELEASE\r\n')
def send_break(msg, duration, break_time=0.1):
    """Send a command for `duration` seconds, then pause `break_time`
    seconds before the next command."""
    send(msg, duration=duration)
    sleep(break_time)
def draw_init():
    """Navigate the in-game menu to reset the drawing state.

    NOTE(review): the exact menu semantics (X, 4x down, A, L, A, R) depend
    on the game's UI layout -- confirm against the target game version.
    """
    send_break('Button X', 0.1)
    for _ in range(4):
        send_break('HAT BOTTOM', 0.1)
    for button in ('Button A', 'Button L', 'Button A', 'Button R'):
        send_break(button, 0.1)
def set_color(hue_meter, vivid_meter, bright_meter):
    """Program the in-game colour palette slots over the serial controller.

    Args:
        hue_meter: per-slot hue slider steps (0..30).
        vivid_meter: per-slot saturation slider steps (0..15).
        bright_meter: per-slot brightness slider steps (0..15).

    For each palette slot the three sliders are first dragged fully left
    (30/15/15 presses -- the slider widths, matching the scaling applied by
    the caller) and then advanced to the requested step. Removed a large
    block of commented-out dead code and a trailing `pass` from the
    original; behaviour is unchanged.
    """
    # Open the colour editor via the menu.
    send_break('Button X', 0.1)
    for _ in range(5):
        send_break('HAT TOP', 0.1)
    send_break('HAT RIGHT', 0.1)
    send_break('Button A', 0.1)
    for hue_step, vivid_step, bright_step in zip(hue_meter, vivid_meter, bright_meter):
        # Hue slider: reset to 0, then advance.
        for _ in range(30):
            send_break('HAT LEFT', 0.1)
        for _ in range(hue_step):
            send_break('HAT RIGHT', 0.1)
        send_break('HAT BOTTOM', 0.1)
        # Saturation slider.
        for _ in range(15):
            send_break('HAT LEFT', 0.1)
        for _ in range(vivid_step):
            send_break('HAT RIGHT', 0.1)
        send_break('HAT BOTTOM', 0.1)
        # Brightness slider.
        for _ in range(15):
            send_break('HAT LEFT', 0.1)
        for _ in range(bright_step):
            send_break('HAT RIGHT', 0.1)
        send_break('HAT BOTTOM', 0.1)
        # Move on to the next palette slot.
        send_break('Button R', 0.1)
    send_break('Button A', 0.1, 2.)
    # Move cursor to the top left
    send_break('HAT BOTTOM', 0.1)
    send_break('HAT LEFT', 0.1)
    send_break('Button A', 0.1)
    for _ in range(16):
        send_break('HAT TOP', 0.1)
        send_break('HAT LEFT', 0.1)
def draw_pixel(chosen_color):
    """Draw a 32x32 image given a grid of palette indices.

    `chosen_color` is a 32x32 array of indices into a 16-entry circular
    palette; L/R cycle the current colour, A stamps a pixel, and the HAT
    moves the cursor.
    """
    current = 0
    for row in range(32):
        for col in range(32):
            target = chosen_color[row, col]
            step = target - current
            if step == 0:
                # Colour already selected: just stamp and move right.
                send_break('Button A', 0.1)
                send_break('HAT RIGHT', 0.1)
                continue
            # Take the shorter way around the 16-entry palette ring.
            if np.abs(step) > 8:
                step = step + 16 if step < 0 else step - 16
            button = 'Button L' if step < 0 else 'Button R'
            for _ in range(np.abs(step)):
                send_break(button, 0.1)
            send_break('Button A', 0.1)
            current = target
            send_break('HAT RIGHT', 0.1)
        # Carriage return: down one row, back to column 0.
        send_break('HAT BOTTOM', 0.1)
        for _ in range(31):
            send_break('HAT LEFT', 0.1)
@app.route('/')
def hello_world():
    """Serve the main control page."""
    return render_template('index.html')
@app.route('/action', methods=['POST'])
def action():
    """Forward a controller action posted by the UI to the serial link."""
    if request.method == 'POST':
        send(request.form['request'], 0.1)
    return render_template('index.html')
@app.route('/pic', methods=['POST'])
def post_picture():
    """Receive an uploaded image, keep its RGB channels, cache as tmp.npy.

    Fix: removed the unused local `data` (the file handle was fetched
    twice; only the Image.open use mattered).
    """
    print('hello')
    if request.method == 'POST':
        if request.files:
            img = Image.open(request.files['image'])
            # Keep only the first three channels (drops alpha if present).
            img = np.array(img)[:, :, :3]
            np.save('tmp.npy', img)
    return render_template('index.html')
@app.route('/gen-color', methods=['POST'])
def gen_color():
    """Cluster the cached image's pixels into 15 representative colours
    via k-means and return them as a JSON list of uint8 RGB triples."""
    pixels = np.load('tmp.npy').reshape((-1, 3))
    code, _ = kmeans(pixels.astype(np.float32), 15)
    #code = np.array([1,2,3])
    return jsonify(code.astype(np.uint8).tolist())
@app.route('/start-draw', methods=['POST'])
def draw_color():
    """Quantize the cached image to the posted palette and draw it in-game."""
    print(request.form)
    # The palette arrives as a Python-literal string, e.g. "[[r,g,b], ...]".
    color_palette = np.array(ast.literal_eval(request.form['palette'])).astype(np.uint8)
    print('hey', color_palette)
    hsv_palette = rgb2hsv(np.expand_dims(color_palette, axis=0))[0]
    print(hsv_palette)
    # Map H/S/V in [0, 1] onto the game's slider steps (30/15/15 notches,
    # matching the reset counts used by set_color).
    hue_meter = np.around(hsv_palette[:,0] * 30).astype(np.int32)
    vivid_meter = np.around(hsv_palette[:,1] * 15).astype(np.int32)
    bright_meter = np.around(hsv_palette[:,2] * 15).astype(np.int32)
    print(hue_meter, vivid_meter, bright_meter)
    # Calculate color nearest
    img = np.load('tmp.npy')
    tech_chan_flatten = img.reshape((-1, 3))
    tech_chan_expand = np.expand_dims(tech_chan_flatten,axis=1).astype(np.float32)
    color_palette = np.concatenate([color_palette, [[255,255,255]]], axis=0) # Add white
    color_palette_expand = np.expand_dims(color_palette, axis=0).astype(np.float32)
    tmp = tech_chan_expand - color_palette_expand
    # Channel-weighted colour distance between every pixel and every palette
    # entry -- presumably a perceptual ("redmean"-style) weighting; confirm.
    r = (tech_chan_expand[:,:,0] + color_palette_expand[:,:,0]) / 2
    dC = np.sqrt((2 + r / 256) * tmp[:,:,0] ** 2 + 4 * tmp[:,:,1] ** 2
                + (2+(255-r)/256)*tmp[:,:,2]**2)
    # Pick the nearest palette entry per pixel; image is assumed 32x32.
    chosen_color = dC.argmin(axis=1).reshape(32, 32)
    drawn_img = color_palette[chosen_color].reshape([32,32,3])
    np.save('drawn.npy', drawn_img)
    # Initial setup
    draw_init()
    set_color(hue_meter, vivid_meter, bright_meter)
    draw_pixel(chosen_color)
    return render_template('index.html')
#
#return 'Hello, World!'
| [
"serial.Serial",
"numpy.load",
"numpy.save",
"numpy.abs",
"flask.Flask",
"numpy.expand_dims",
"time.sleep",
"PIL.Image.open",
"numpy.around",
"numpy.array",
"flask.render_template",
"ast.literal_eval",
"numpy.concatenate",
"numpy.sqrt"
] | [((260, 275), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (265, 275), False, 'from flask import Flask, session, redirect, url_for, request, jsonify\n'), ((372, 424), 'serial.Serial', 'serial.Serial', (['"""/dev/serial0"""', '(9600)'], {'write_timeout': '(2)'}), "('/dev/serial0', 9600, write_timeout=2)\n", (385, 424), False, 'import serial\n'), ((573, 588), 'time.sleep', 'sleep', (['duration'], {}), '(duration)\n', (578, 588), False, 'from time import sleep\n'), ((706, 723), 'time.sleep', 'sleep', (['break_time'], {}), '(break_time)\n', (711, 723), False, 'from time import sleep\n'), ((3669, 3698), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (3684, 3698), False, 'from flask import render_template, session, g\n'), ((3886, 3915), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (3901, 3915), False, 'from flask import render_template, session, g\n'), ((4234, 4263), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (4249, 4263), False, 'from flask import render_template, session, g\n'), ((4335, 4353), 'numpy.load', 'np.load', (['"""tmp.npy"""'], {}), "('tmp.npy')\n", (4342, 4353), True, 'import numpy as np\n'), ((5138, 5156), 'numpy.load', 'np.load', (['"""tmp.npy"""'], {}), "('tmp.npy')\n", (5145, 5156), True, 'import numpy as np\n'), ((5305, 5363), 'numpy.concatenate', 'np.concatenate', (['[color_palette, [[255, 255, 255]]]'], {'axis': '(0)'}), '([color_palette, [[255, 255, 255]]], axis=0)\n', (5319, 5363), True, 'import numpy as np\n'), ((5586, 5701), 'numpy.sqrt', 'np.sqrt', (['((2 + r / 256) * tmp[:, :, 0] ** 2 + 4 * tmp[:, :, 1] ** 2 + (2 + (255 - r) /\n 256) * tmp[:, :, 2] ** 2)'], {}), '((2 + r / 256) * tmp[:, :, 0] ** 2 + 4 * tmp[:, :, 1] ** 2 + (2 + (\n 255 - r) / 256) * tmp[:, :, 2] ** 2)\n', (5593, 5701), True, 'import numpy as np\n'), ((5825, 5856), 'numpy.save', 'np.save', (['"""drawn.npy"""', 
'drawn_img'], {}), "('drawn.npy', drawn_img)\n", (5832, 5856), True, 'import numpy as np\n'), ((5991, 6020), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (6006, 6020), False, 'from flask import render_template, session, g\n'), ((4112, 4146), 'PIL.Image.open', 'Image.open', (["request.files['image']"], {}), "(request.files['image'])\n", (4122, 4146), False, 'from PIL import Image\n'), ((4199, 4222), 'numpy.save', 'np.save', (['"""tmp.npy"""', 'img'], {}), "('tmp.npy', img)\n", (4206, 4222), True, 'import numpy as np\n'), ((4777, 4814), 'numpy.expand_dims', 'np.expand_dims', (['color_palette'], {'axis': '(0)'}), '(color_palette, axis=0)\n', (4791, 4814), True, 'import numpy as np\n'), ((4858, 4891), 'numpy.around', 'np.around', (['(hsv_palette[:, 0] * 30)'], {}), '(hsv_palette[:, 0] * 30)\n', (4867, 4891), True, 'import numpy as np\n'), ((4926, 4959), 'numpy.around', 'np.around', (['(hsv_palette[:, 1] * 15)'], {}), '(hsv_palette[:, 1] * 15)\n', (4935, 4959), True, 'import numpy as np\n'), ((4995, 5028), 'numpy.around', 'np.around', (['(hsv_palette[:, 2] * 15)'], {}), '(hsv_palette[:, 2] * 15)\n', (5004, 5028), True, 'import numpy as np\n'), ((5225, 5266), 'numpy.expand_dims', 'np.expand_dims', (['tech_chan_flatten'], {'axis': '(1)'}), '(tech_chan_flatten, axis=1)\n', (5239, 5266), True, 'import numpy as np\n'), ((5401, 5438), 'numpy.expand_dims', 'np.expand_dims', (['color_palette'], {'axis': '(0)'}), '(color_palette, axis=0)\n', (5415, 5438), True, 'import numpy as np\n'), ((3044, 3056), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (3050, 3056), True, 'import numpy as np\n'), ((4165, 4178), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (4173, 4178), True, 'import numpy as np\n'), ((4659, 4700), 'ast.literal_eval', 'ast.literal_eval', (["request.form['palette']"], {}), "(request.form['palette'])\n", (4675, 4700), False, 'import ast\n'), ((3232, 3244), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (3238, 
3244), True, 'import numpy as np\n')] |
# simulate_perspective.py - search for a simple formula for the insolation on a flat earth
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
import seaborn as sns
sns.set_style('whitegrid')
# Grid sizes: n raditude samples, m sub-solar-point samples, l axial tilts,
# p polynomial coefficients fitted per tilt.
n = 36
m = 39
l = 12
p = 4
# Disc radius and sun height (flat-earth model parameters).
r0 = 1/2
z0 = 1/4
r = np.linspace(0, 2*r0, n)
λ = np.linspace(0, np.pi, n)
ɸ = np.arctan(z0/r)
Λ, R = np.meshgrid(λ, r)
Λ, Φ = np.meshgrid(λ, ɸ)
# Cartesian sample points on the disc (polar -> x, y).
X, Y = R*np.cos(Λ), R*np.sin(Λ)
# Axial tilt values to sweep.
ψ = np.linspace(0, 1.5, l)
S = np.zeros((n, l))
for i in range(l):
    # Sun x-positions over a (half) day for tilt ψ[i]; sun height held fixed.
    xS = r0*(1 + ψ[i]/(np.pi/2)*np.cos(np.linspace(0, np.pi, m)))[None,None,:]
    # zS = z0*np.sqrt(1 - (xS/(2*r0))**2)
    zS = z0
    # Inverse-square-with-cosine insolation summed over day and disc points.
    S[:,i] = np.sum(zS/((X[:,:,None] - xS)**2 + Y[:,:,None]**2 + zS**2)**(3/2), axis=(1,2))
# Normalize so the sub-solar value is comparable across grids.
S /= n*m/z0**2
sns.set_palette('rainbow', n_colors=l)
plt.figure()
plt.plot(r, S)
plt.plot(r, z0**3/(r**2 + z0**2)**(3/2), 'k--', label="tidally lockd")
plt.xlabel("raditude")
plt.ylabel("insolacion")
# Fit 1/S as a degree-(p-1) polynomial in r^2 for each tilt.
C = np.empty((l, p))
for i in range(l):
    C[i,:] = np.polyfit(r**2, 1/S[:,i], p-1)[::-1]
    # C[i,:] = optimize.curve_fit((lambda x,d,c,b,a: a*np.exp(-(x/b)**2) + c*np.exp(-(x/d)**2)), r, S[:,i], p0=[-.5, .5*r0, 1, 1.5*r0], maxfev=10000)[0]
plt.figure()
plt.plot(ψ, C)
plt.xlabel("axial tilt")
plt.ylabel("polynomial coefficient")
# Each coefficient varies with tilt roughly as a*cos(2ψ) + c; fit a and c.
slopes = np.empty(p)
# blopes = np.empty(p)
offsets = np.empty(p)
for i in range(p):
    a, c = optimize.curve_fit((lambda x,a,c: a*np.cos(2*x) + c), ψ, C[:,i])[0]
    slopes[i] = a
    offsets[i] = c
slopes = np.around(slopes, 3)
offsets = np.around(offsets, 3)
print(slopes)
print(offsets)
plt.figure()
# plt.plot(r, S)
# Reconstruct S from the fitted cos(2ψ) coefficient model and overplot.
for i in range(l):
    d, c, b, a = np.matmul(np.stack([slopes, offsets], axis=1), [np.cos(2*ψ[i]), 1])
    # d, c, b, a = C[i,:]
    v = 1/(a*r**6 + b*r**4 + c*r**2 + d)
    print(r)
    print(np.cos(2*ψ[i]))
    # v = a*np.exp(-(r/b)**2) + c*np.exp(-(r/d)**2)
    plt.plot(r, v, '--')
# plt.yscale('log')
plt.show()
| [
"numpy.stack",
"seaborn.set_style",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.plot",
"numpy.polyfit",
"numpy.empty",
"numpy.zeros",
"matplotlib.pyplot.ylabel",
"numpy.around",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"num... | [((192, 218), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (205, 218), True, 'import seaborn as sns\n'), ((271, 296), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * r0)', 'n'], {}), '(0, 2 * r0, n)\n', (282, 296), True, 'import numpy as np\n'), ((300, 324), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'n'], {}), '(0, np.pi, n)\n', (311, 324), True, 'import numpy as np\n'), ((329, 346), 'numpy.arctan', 'np.arctan', (['(z0 / r)'], {}), '(z0 / r)\n', (338, 346), True, 'import numpy as np\n'), ((353, 370), 'numpy.meshgrid', 'np.meshgrid', (['λ', 'r'], {}), '(λ, r)\n', (364, 370), True, 'import numpy as np\n'), ((379, 396), 'numpy.meshgrid', 'np.meshgrid', (['λ', 'ɸ'], {}), '(λ, ɸ)\n', (390, 396), True, 'import numpy as np\n'), ((434, 456), 'numpy.linspace', 'np.linspace', (['(0)', '(1.5)', 'l'], {}), '(0, 1.5, l)\n', (445, 456), True, 'import numpy as np\n'), ((461, 477), 'numpy.zeros', 'np.zeros', (['(n, l)'], {}), '((n, l))\n', (469, 477), True, 'import numpy as np\n'), ((726, 764), 'seaborn.set_palette', 'sns.set_palette', (['"""rainbow"""'], {'n_colors': 'l'}), "('rainbow', n_colors=l)\n", (741, 764), True, 'import seaborn as sns\n'), ((765, 777), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (775, 777), True, 'import matplotlib.pyplot as plt\n'), ((778, 792), 'matplotlib.pyplot.plot', 'plt.plot', (['r', 'S'], {}), '(r, S)\n', (786, 792), True, 'import matplotlib.pyplot as plt\n'), ((793, 880), 'matplotlib.pyplot.plot', 'plt.plot', (['r', '(z0 ** 3 / (r ** 2 + z0 ** 2) ** (3 / 2))', '"""k--"""'], {'label': '"""tidally lockd"""'}), "(r, z0 ** 3 / (r ** 2 + z0 ** 2) ** (3 / 2), 'k--', label=\n 'tidally lockd')\n", (801, 880), True, 'import matplotlib.pyplot as plt\n'), ((864, 886), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""raditude"""'], {}), "('raditude')\n", (874, 886), True, 'import matplotlib.pyplot as plt\n'), ((887, 911), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""insolacion"""'], 
{}), "('insolacion')\n", (897, 911), True, 'import matplotlib.pyplot as plt\n'), ((917, 933), 'numpy.empty', 'np.empty', (['(l, p)'], {}), '((l, p))\n', (925, 933), True, 'import numpy as np\n'), ((1151, 1163), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1161, 1163), True, 'import matplotlib.pyplot as plt\n'), ((1164, 1178), 'matplotlib.pyplot.plot', 'plt.plot', (['ψ', 'C'], {}), '(ψ, C)\n', (1172, 1178), True, 'import matplotlib.pyplot as plt\n'), ((1179, 1203), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""axial tilt"""'], {}), "('axial tilt')\n", (1189, 1203), True, 'import matplotlib.pyplot as plt\n'), ((1204, 1240), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""polynomial coefficient"""'], {}), "('polynomial coefficient')\n", (1214, 1240), True, 'import matplotlib.pyplot as plt\n'), ((1251, 1262), 'numpy.empty', 'np.empty', (['p'], {}), '(p)\n', (1259, 1262), True, 'import numpy as np\n'), ((1296, 1307), 'numpy.empty', 'np.empty', (['p'], {}), '(p)\n', (1304, 1307), True, 'import numpy as np\n'), ((1443, 1463), 'numpy.around', 'np.around', (['slopes', '(3)'], {}), '(slopes, 3)\n', (1452, 1463), True, 'import numpy as np\n'), ((1474, 1495), 'numpy.around', 'np.around', (['offsets', '(3)'], {}), '(offsets, 3)\n', (1483, 1495), True, 'import numpy as np\n'), ((1526, 1538), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1536, 1538), True, 'import matplotlib.pyplot as plt\n'), ((1842, 1852), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1850, 1852), True, 'import matplotlib.pyplot as plt\n'), ((631, 731), 'numpy.sum', 'np.sum', (['(zS / ((X[:, :, None] - xS) ** 2 + Y[:, :, None] ** 2 + zS ** 2) ** (3 / 2))'], {'axis': '(1, 2)'}), '(zS / ((X[:, :, None] - xS) ** 2 + Y[:, :, None] ** 2 + zS ** 2) ** (\n 3 / 2), axis=(1, 2))\n', (637, 731), True, 'import numpy as np\n'), ((1801, 1821), 'matplotlib.pyplot.plot', 'plt.plot', (['r', 'v', '"""--"""'], {}), "(r, v, '--')\n", (1809, 1821), True, 'import matplotlib.pyplot as 
plt\n'), ((405, 414), 'numpy.cos', 'np.cos', (['Λ'], {}), '(Λ)\n', (411, 414), True, 'import numpy as np\n'), ((419, 428), 'numpy.sin', 'np.sin', (['Λ'], {}), '(Λ)\n', (425, 428), True, 'import numpy as np\n'), ((963, 1001), 'numpy.polyfit', 'np.polyfit', (['(r ** 2)', '(1 / S[:, i])', '(p - 1)'], {}), '(r ** 2, 1 / S[:, i], p - 1)\n', (973, 1001), True, 'import numpy as np\n'), ((1599, 1634), 'numpy.stack', 'np.stack', (['[slopes, offsets]'], {'axis': '(1)'}), '([slopes, offsets], axis=1)\n', (1607, 1634), True, 'import numpy as np\n'), ((1735, 1751), 'numpy.cos', 'np.cos', (['(2 * ψ[i])'], {}), '(2 * ψ[i])\n', (1741, 1751), True, 'import numpy as np\n'), ((1637, 1653), 'numpy.cos', 'np.cos', (['(2 * ψ[i])'], {}), '(2 * ψ[i])\n', (1643, 1653), True, 'import numpy as np\n'), ((534, 558), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'm'], {}), '(0, np.pi, m)\n', (545, 558), True, 'import numpy as np\n'), ((1371, 1384), 'numpy.cos', 'np.cos', (['(2 * x)'], {}), '(2 * x)\n', (1377, 1384), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""anchorbox_generator.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1UyJUFUeV9iiJwSG9xuV9iCVNLdU0VcJk
"""
# -*- coding: utf-8 -*-
"""Untitled4.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1aXtcmr3xTYcCMODQbHOEoOrcImcbPq4Y
"""
import numpy as np
import cv2
import math
def _enumerate_anchors(fe_size, sub_sample, ratios, anchor_scales):
    """Enumerate every anchor over the feature map, in the original order:
    centres x-major (x outer, y inner), then len(ratios)*len(anchor_scales)
    boxes per centre, each as [y1, x1, y2, x2]."""
    n_per_ctr = len(ratios) * len(anchor_scales)
    anchors = np.zeros((fe_size * fe_size * n_per_ctr, 4))
    half_cell = sub_sample / 2.
    index = 0
    for x in range(1, fe_size + 1):
        for y in range(1, fe_size + 1):
            ctr_y = y * sub_sample - half_cell
            ctr_x = x * sub_sample - half_cell
            for ratio in ratios:
                for scale in anchor_scales:
                    h = sub_sample * scale * np.sqrt(ratio)
                    w = sub_sample * scale * np.sqrt(1. / ratio)
                    anchors[index] = (ctr_y - h / 2., ctr_x - w / 2.,
                                      ctr_y + h / 2., ctr_x + w / 2.)
                    index += 1
    return anchors


def _box_ious(boxes_a, boxes_b):
    """Pairwise IoU between (N, 4) and (K, 4) boxes in [y1, x1, y2, x2]
    format; vectorized replacement for the original O(N*K) Python loop."""
    inter_y1 = np.maximum(boxes_a[:, None, 0], boxes_b[None, :, 0])
    inter_x1 = np.maximum(boxes_a[:, None, 1], boxes_b[None, :, 1])
    inter_y2 = np.minimum(boxes_a[:, None, 2], boxes_b[None, :, 2])
    inter_x2 = np.minimum(boxes_a[:, None, 3], boxes_b[None, :, 3])
    inter = (np.clip(inter_y2 - inter_y1, 0, None)
             * np.clip(inter_x2 - inter_x1, 0, None))
    area_a = (boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1])
    area_b = (boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1])
    union = area_a[:, None] + area_b[None, :] - inter
    return (inter / union).astype(np.float32)


def _encode_targets(anchor_boxes, matched_boxes):
    """Convert matched [y1, x1, y2, x2] pairs into [dy, dx, dh, dw]
    regression targets."""
    h = anchor_boxes[:, 2] - anchor_boxes[:, 0]
    w = anchor_boxes[:, 3] - anchor_boxes[:, 1]
    ctr_y = anchor_boxes[:, 0] + 0.5 * h
    ctr_x = anchor_boxes[:, 1] + 0.5 * w
    base_h = matched_boxes[:, 2] - matched_boxes[:, 0]
    base_w = matched_boxes[:, 3] - matched_boxes[:, 1]
    base_ctr_y = matched_boxes[:, 0] + 0.5 * base_h
    base_ctr_x = matched_boxes[:, 1] + 0.5 * base_w
    # Guard against zero-sized anchors before dividing / taking logs.
    eps = np.finfo(h.dtype).eps
    h = np.maximum(h, eps)
    w = np.maximum(w, eps)
    dy = (base_ctr_y - ctr_y) / h
    dx = (base_ctr_x - ctr_x) / w
    dh = np.log(base_h / h)
    dw = np.log(base_w / w)
    return np.vstack((dy, dx, dh, dw)).transpose()


def Anchorbox_generator(bbox0=[], label=[], anc_pos_iou_thresh=0.7, anc_neg_iou_thresh=0.3, anc_pos_ratio=0.5, anc_n_sample=256):
    """Generate RPN anchors for an 800x800 image (stride-16 feature map)
    and, when ground truth is supplied, the RPN training targets.

    Args:
        bbox0: (K, 4) ground-truth boxes, [y1, x1, y2, x2] pixel coords.
        label: ground-truth labels. When left as the default list only the
            raw anchors are returned; when an array is passed the full
            target computation runs. (The label values themselves are
            never read: RPN labels are pure objectness, as in the original.)
        anc_pos_iou_thresh: IoU at/above which an anchor is positive.
        anc_neg_iou_thresh: IoU below which an anchor is negative.
        anc_pos_ratio: desired fraction of positives in the sampled batch.
        anc_n_sample: total anchors sampled (positives + negatives).

    Returns:
        anchors of shape (50*50*9, 4) when `label` is a list; otherwise
        (anchor_labels, anchor_locations, anchors) with labels in
        {-1: ignore, 0: negative, 1: positive} and locations as
        [dy, dx, dh, dw] targets (zeros for out-of-image anchors).

    Changes vs. the original: removed the dead `anchor_base` computation
    (built but never used), vectorized the IoU loop, and fixed the
    negative-sampling budget (see inline BUG FIX note).
    """
    ratios = [0.5, 1, 2]
    anchor_scales = [8, 16, 32]
    sub_sample = 16              # backbone stride: 800 px -> 50 feature cells
    fe_size = 800 // sub_sample
    anchors = _enumerate_anchors(fe_size, sub_sample, ratios, anchor_scales)

    if isinstance(label, list):
        return anchors

    bbox = bbox0  # [y1, x1, y2, x2] format
    # Only anchors fully inside the image take part in training.
    index_inside = np.where(
        (anchors[:, 0] >= 0) &
        (anchors[:, 1] >= 0) &
        (anchors[:, 2] <= 800) &
        (anchors[:, 3] <= 800)
    )[0]
    valid_anchor_boxes = anchors[index_inside]

    ious = _box_ious(valid_anchor_boxes, bbox)
    # Best ground-truth box per anchor ...
    argmax_ious = ious.argmax(axis=1)
    max_ious = ious[np.arange(len(index_inside)), argmax_ious]
    # ... and every anchor that ties the best IoU of some ground-truth box.
    gt_max_ious = ious.max(axis=0)
    gt_argmax_ious = np.where(ious == gt_max_ious)[0]

    anchor_label = np.full((len(index_inside),), -1, dtype=np.int32)
    anchor_label[max_ious < anc_neg_iou_thresh] = 0
    anchor_label[gt_argmax_ious] = 1
    anchor_label[max_ious >= anc_pos_iou_thresh] = 1

    # Subsample positives down to anc_pos_ratio * anc_n_sample.
    n_pos = math.floor(anc_pos_ratio * anc_n_sample)
    pos_index = np.where(anchor_label == 1)[0]
    if len(pos_index) > n_pos:
        disable_index = np.random.choice(
            pos_index, size=(len(pos_index) - n_pos), replace=False)
        anchor_label[disable_index] = -1

    # BUG FIX: the negative budget is the total sample size minus the
    # positives actually kept. The original computed
    # `anc_n_sample * np.sum(label == 1)`, which made n_neg huge, so
    # negatives were effectively never subsampled.
    n_neg = anc_n_sample - np.sum(anchor_label == 1)
    neg_index = np.where(anchor_label == 0)[0]
    if len(neg_index) > n_neg:
        disable_index = np.random.choice(
            neg_index, size=(len(neg_index) - n_neg), replace=False)
        anchor_label[disable_index] = -1

    # Regression targets against each anchor's best-matching ground truth.
    anchor_locs = _encode_targets(valid_anchor_boxes, bbox[argmax_ious])

    # Scatter labels and targets back over the full anchor set.
    anchor_labels = np.empty((len(anchors),), dtype=anchor_label.dtype)
    anchor_labels.fill(-1)
    anchor_labels[index_inside] = anchor_label
    anchor_locations = np.empty((len(anchors),) + anchors.shape[1:],
                                dtype=anchor_locs.dtype)
    anchor_locations.fill(0)
    anchor_locations[index_inside, :] = anchor_locs
    return anchor_labels, anchor_locations, anchors
"numpy.maximum",
"numpy.log",
"numpy.sum",
"numpy.zeros",
"math.floor",
"numpy.finfo",
"numpy.where",
"numpy.arange",
"numpy.vstack",
"numpy.sqrt"
] | [((1597, 1634), 'numpy.arange', 'np.arange', (['(16)', '((fe_size + 1) * 16)', '(16)'], {}), '(16, (fe_size + 1) * 16, 16)\n', (1606, 1634), True, 'import numpy as np\n'), ((1645, 1682), 'numpy.arange', 'np.arange', (['(16)', '((fe_size + 1) * 16)', '(16)'], {}), '(16, (fe_size + 1) * 16, 16)\n', (1654, 1682), True, 'import numpy as np\n'), ((1956, 1992), 'numpy.zeros', 'np.zeros', (['(fe_size * fe_size * 9, 4)'], {}), '((fe_size * fe_size * 9, 4))\n', (1964, 1992), True, 'import numpy as np\n'), ((5417, 5434), 'math.floor', 'math.floor', (['n_pos'], {}), '(n_pos)\n', (5427, 5434), False, 'import math\n'), ((5497, 5514), 'math.floor', 'math.floor', (['n_neg'], {}), '(n_neg)\n', (5507, 5514), False, 'import math\n'), ((6756, 6779), 'numpy.maximum', 'np.maximum', (['height', 'eps'], {}), '(height, eps)\n', (6766, 6779), True, 'import numpy as np\n'), ((6794, 6816), 'numpy.maximum', 'np.maximum', (['width', 'eps'], {}), '(width, eps)\n', (6804, 6816), True, 'import numpy as np\n'), ((6909, 6937), 'numpy.log', 'np.log', (['(base_height / height)'], {}), '(base_height / height)\n', (6915, 6937), True, 'import numpy as np\n'), ((6949, 6975), 'numpy.log', 'np.log', (['(base_width / width)'], {}), '(base_width / width)\n', (6955, 6975), True, 'import numpy as np\n'), ((2701, 2809), 'numpy.where', 'np.where', (['((anchors[:, 0] >= 0) & (anchors[:, 1] >= 0) & (anchors[:, 2] <= 800) & (\n anchors[:, 3] <= 800))'], {}), '((anchors[:, 0] >= 0) & (anchors[:, 1] >= 0) & (anchors[:, 2] <= \n 800) & (anchors[:, 3] <= 800))\n', (2709, 2809), True, 'import numpy as np\n'), ((4571, 4600), 'numpy.where', 'np.where', (['(ious == gt_max_ious)'], {}), '(ious == gt_max_ious)\n', (4579, 4600), True, 'import numpy as np\n'), ((5464, 5482), 'numpy.sum', 'np.sum', (['(label == 1)'], {}), '(label == 1)\n', (5470, 5482), True, 'import numpy as np\n'), ((5566, 5586), 'numpy.where', 'np.where', (['(label == 1)'], {}), '(label == 1)\n', (5574, 5586), True, 'import numpy as np\n'), ((5806, 
5826), 'numpy.where', 'np.where', (['(label == 0)'], {}), '(label == 0)\n', (5814, 5826), True, 'import numpy as np\n'), ((6714, 6736), 'numpy.finfo', 'np.finfo', (['height.dtype'], {}), '(height.dtype)\n', (6722, 6736), True, 'import numpy as np\n'), ((1176, 1194), 'numpy.sqrt', 'np.sqrt', (['ratios[i]'], {}), '(ratios[i])\n', (1183, 1194), True, 'import numpy as np\n'), ((1239, 1263), 'numpy.sqrt', 'np.sqrt', (['(1.0 / ratios[i])'], {}), '(1.0 / ratios[i])\n', (1246, 1263), True, 'import numpy as np\n'), ((4221, 4245), 'numpy.arange', 'np.arange', (['ious.shape[1]'], {}), '(ious.shape[1])\n', (4230, 4245), True, 'import numpy as np\n'), ((6996, 7023), 'numpy.vstack', 'np.vstack', (['(dy, dx, dh, dw)'], {}), '((dy, dx, dh, dw))\n', (7005, 7023), True, 'import numpy as np\n'), ((2175, 2193), 'numpy.sqrt', 'np.sqrt', (['ratios[i]'], {}), '(ratios[i])\n', (2182, 2193), True, 'import numpy as np\n'), ((2240, 2264), 'numpy.sqrt', 'np.sqrt', (['(1.0 / ratios[i])'], {}), '(1.0 / ratios[i])\n', (2247, 2264), True, 'import numpy as np\n')] |
# MIT License
# Copyright (c) 2021 xadrianzetx
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Optional
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input
from coral_deeplab import pretrained
from coral_deeplab._downloads import download_and_checksum_mlmodel
from coral_deeplab._encoders import mobilenetv2
from coral_deeplab._blocks import (
deeplab_aspp_module,
deeplabv3_decoder,
deeplabv3plus_decoder
)
# Public names exported on ``from <module> import *``.
__all__ = ['CoralDeepLabV3', 'CoralDeepLabV3Plus']
def CoralDeepLabV3(input_shape: tuple = (513, 513, 3),
                   alpha: float = 1.0,
                   weights: Optional[str] = None,
                   n_classes: int = 30, **kwargs) -> tf.keras.Model:
    """Build a DeepLab v3 model that compiles to the coral.ai Edge TPU.

    The encoder is a MobileNetV2 whose last three blocks use atrous
    convolutions to keep spatial resolution; the decoder's final bilinear
    upsampling is dropped for speed, so the model is OS16 and outputs
    logits (no final activation).

    Arguments
    ---------
    input_shape : tuple, default=(513, 513, 3)
        Channels-last, square input tensor shape.
    alpha : float, default=1.0
        MobileNetV2 depth multiplier in (0, 1].
    weights : str, default=None
        None for random initialization or 'pascal_voc' to load a model
        pre-trained on the Pascal VOC trainaug set.
    n_classes : int, default=30
        Number of segmentation classes (default matches cityscapes).

    Returns
    -------
    model : tf.keras.Model
        DeepLabV3 keras model instance.

    Raises
    ------
    ValueError
        If the input shape is channels-first or not square.

    References
    ----------
    - [1] https://arxiv.org/pdf/1706.05587.pdf
    - [2] https://coral.ai/products/
    """
    if weights == 'pascal_voc':
        # Pretrained checkpoints exist for depth multipliers 0.5 and 1.0;
        # any other alpha falls back to the 1.0 checkpoint.
        pretrained_kind = (pretrained.KerasModel.DEEPLAB_V3_DM05
                           if alpha == 0.5
                           else pretrained.KerasModel.DEEPLAB_V3_DM1)
        checkpoint = download_and_checksum_mlmodel(pretrained_kind)
        return tf.keras.models.load_model(
            checkpoint, custom_objects={'tf': tf}, compile=False)

    # Heuristic layout check: the channel count is assumed to be the
    # smallest entry of the shape tuple, so a minimum at position 0
    # indicates a channels-first tensor.
    if np.argmin(input_shape) == 0:
        raise ValueError('Channels-first not supported.')

    if input_shape[0] != input_shape[1]:
        raise ValueError('Non square inputs not supported.')

    image = Input(shape=input_shape)
    encoded = mobilenetv2(image, alpha)
    aspp = deeplab_aspp_module(encoded)
    logits = deeplabv3_decoder(aspp, n_classes)
    return tf.keras.Model(inputs=image, outputs=logits, name='CoralDeeplabV3')
def CoralDeepLabV3Plus(input_shape: tuple = (513, 513, 3),
                       alpha: float = 1.0,
                       weights: Optional[str] = None,
                       n_classes: int = 30, **kwargs) -> tf.keras.Model:
    """Build a DeepLab v3+ model that compiles to the coral.ai Edge TPU.

    Reuses the :func:`CoralDeepLabV3` graph as the encoder and attaches
    the DeepLabV3+ decoder, which fuses the ASPP output with an early
    high-resolution skip feature. The final bilinear upsampling is
    dropped for speed, but the decoder keeps one, making the model OS4.
    Outputs logits (no final activation).

    Arguments
    ---------
    input_shape : tuple, default=(513, 513, 3)
        Channels-last, square input tensor shape.
    alpha : float, default=1.0
        MobileNetV2 depth multiplier in (0, 1].
    weights : str, default=None
        None for random initialization or 'pascal_voc' to load a model
        pre-trained on the Pascal VOC trainaug set.
    n_classes : int, default=30
        Number of segmentation classes (default matches cityscapes).

    Returns
    -------
    model : tf.keras.Model
        DeepLabV3Plus keras model instance.

    References
    ----------
    - [1] https://arxiv.org/pdf/1802.02611.pdf
    - [2] https://coral.ai/products/
    """
    if weights == 'pascal_voc':
        # Pretrained checkpoints exist for depth multipliers 0.5 and 1.0;
        # any other alpha falls back to the 1.0 checkpoint.
        pretrained_kind = (pretrained.KerasModel.DEEPLAB_V3_PLUS_DM05
                           if alpha == 0.5
                           else pretrained.KerasModel.DEEPLAB_V3_PLUS_DM1)
        checkpoint = download_and_checksum_mlmodel(pretrained_kind)
        return tf.keras.models.load_model(
            checkpoint, custom_objects={'tf': tf}, compile=False)

    # Tap two named layers of the plain DeepLabV3 graph: the ASPP output
    # and an early high-resolution feature used as the decoder skip.
    backbone = CoralDeepLabV3(input_shape, alpha)
    aspp_out = backbone.get_layer('concat_projection/relu')
    skip = backbone.get_layer('expanded_conv_3/expand/relu')
    logits = deeplabv3plus_decoder(aspp_out.output, skip.output, n_classes)
    return tf.keras.Model(inputs=backbone.inputs, outputs=logits,
                          name='CoralDeeplabV3Plus')
| [
"tensorflow.keras.models.load_model",
"numpy.argmin",
"tensorflow.keras.Model",
"coral_deeplab._encoders.mobilenetv2",
"coral_deeplab._downloads.download_and_checksum_mlmodel",
"coral_deeplab._blocks.deeplabv3_decoder",
"coral_deeplab._blocks.deeplab_aspp_module",
"tensorflow.keras.layers.Input",
"c... | [((3956, 3980), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (3961, 3980), False, 'from tensorflow.keras.layers import Input\n'), ((3995, 4021), 'coral_deeplab._encoders.mobilenetv2', 'mobilenetv2', (['inputs', 'alpha'], {}), '(inputs, alpha)\n', (4006, 4021), False, 'from coral_deeplab._encoders import mobilenetv2\n'), ((4037, 4065), 'coral_deeplab._blocks.deeplab_aspp_module', 'deeplab_aspp_module', (['aspp_in'], {}), '(aspp_in)\n', (4056, 4065), False, 'from coral_deeplab._blocks import deeplab_aspp_module, deeplabv3_decoder, deeplabv3plus_decoder\n'), ((4080, 4118), 'coral_deeplab._blocks.deeplabv3_decoder', 'deeplabv3_decoder', (['aspp_out', 'n_classes'], {}), '(aspp_out, n_classes)\n', (4097, 4118), False, 'from coral_deeplab._blocks import deeplab_aspp_module, deeplabv3_decoder, deeplabv3plus_decoder\n'), ((4159, 4216), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': 'name'}), '(inputs=inputs, outputs=outputs, name=name)\n', (4173, 4216), True, 'import tensorflow as tf\n'), ((6637, 6711), 'coral_deeplab._blocks.deeplabv3plus_decoder', 'deeplabv3plus_decoder', (['encoder_last.output', 'encoder_skip.output', 'n_classes'], {}), '(encoder_last.output, encoder_skip.output, n_classes)\n', (6658, 6711), False, 'from coral_deeplab._blocks import deeplab_aspp_module, deeplabv3_decoder, deeplabv3plus_decoder\n'), ((6792, 6857), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'encoder.inputs', 'outputs': 'outputs', 'name': 'name'}), '(inputs=encoder.inputs, outputs=outputs, name=name)\n', (6806, 6857), True, 'import tensorflow as tf\n'), ((3501, 3542), 'coral_deeplab._downloads.download_and_checksum_mlmodel', 'download_and_checksum_mlmodel', (['model_type'], {}), '(model_type)\n', (3530, 3542), False, 'from coral_deeplab._downloads import download_and_checksum_mlmodel\n'), ((3559, 3644), 'tensorflow.keras.models.load_model', 
'tf.keras.models.load_model', (['model_path'], {'custom_objects': "{'tf': tf}", 'compile': '(False)'}), "(model_path, custom_objects={'tf': tf}, compile=False\n )\n", (3585, 3644), True, 'import tensorflow as tf\n'), ((3682, 3704), 'numpy.argmin', 'np.argmin', (['input_shape'], {}), '(input_shape)\n', (3691, 3704), True, 'import numpy as np\n'), ((6269, 6310), 'coral_deeplab._downloads.download_and_checksum_mlmodel', 'download_and_checksum_mlmodel', (['model_type'], {}), '(model_type)\n', (6298, 6310), False, 'from coral_deeplab._downloads import download_and_checksum_mlmodel\n'), ((6327, 6412), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_path'], {'custom_objects': "{'tf': tf}", 'compile': '(False)'}), "(model_path, custom_objects={'tf': tf}, compile=False\n )\n", (6353, 6412), True, 'import tensorflow as tf\n')] |
"""Methods to plot rates results"""
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from ipywidgets import interact
import ipywidgets as widgets
# Curve color per phase name for the primary model's plots.
COLORS = {'Oil': 'black', 'Water': 'blue', 'Gas': 'orange', 'Free Gas': 'red'}
# Color used for the second (comparison) model's curves.
COMPARE_COLOR = 'green'
# Maps human-readable phase names to the rate keywords used as dataframe columns.
NAMES_TO_KW = {'Oil': 'WOPR', 'Water': 'WWPR', 'Gas': 'WGPR', 'Free Gas': 'WFGPR'}
def _safe_data_getter(df, time_start, time_end, kw):
"""Get columns data."""
if df.empty:
return np.full(time_end - time_start, np.nan)
if kw in df:
return df.loc[time_start:time_end, kw].values
return np.full(len(df.loc[time_start:time_end]), np.nan)
def show_blocks_dynamics(wells, timesteps, wellnames, figsize=(16, 6)):
    """Interactively plot a rate and a PVT property for one block of a well.

    Builds ipywidgets controls (well, block, rate, PVT property and time
    window) and redraws two side-by-side axes on every change: the
    production rate on the left, the chosen PVT property on the right.

    Parameters
    ----------
    wells : Wells class instance
        Wells component with calculated rates and properties in
        blocks_dynamics attribute.
    timesteps : list of Timestamps
        Dates at which rates were calculated.
    wellnames : array-like
        List of active producing wells.
    figsize : tuple
        Figsize for two axes plots.
    """
    def update(wellname, block, rate, pvt_prop, time_start, time_end):
        # Fall back to a dates-only frame when the well has no computed
        # dynamics, so the plotting below still runs (values become NaN).
        dynamics = getattr(wells[wellname], 'blocks_dynamics', pd.DataFrame(dict(DATE=timesteps)))
        data = _safe_data_getter(dynamics, time_start, time_end, NAMES_TO_KW.get(rate, rate))
        # Each cell holds per-block values; stack into a 2d array and take
        # the column of the requested block.
        data = np.array([np.stack(x) for x in data])
        prod_rate = data[..., block]
        data = _safe_data_getter(dynamics, time_start, time_end, NAMES_TO_KW.get(pvt_prop, pvt_prop))
        data = np.array([np.stack(x) for x in data])
        pvt = data[..., block]
        _, axes = plt.subplots(1, 2, figsize=figsize)
        axes[0].set_title(f'Well {wellname.upper()}. {rate}')
        axes[0].set_ylabel('Rate')
        axes[0].set_xlabel('Timestep')
        axes[0].plot(dynamics.loc[time_start:time_end, 'DATE'], prod_rate,
                     'o-', c=COLORS[rate], ms=3.5, lw=1.3)
        # Dotted vertical guides mark the selected time window.
        axes[0].axvline(dynamics.loc[time_start, 'DATE'], ls=':', c='grey')
        axes[0].axvline(dynamics.loc[time_end, 'DATE'], ls=':', c='grey')
        axes[0].grid(True)
        axes[1].set_title(f'Well {wellname.upper()}. {pvt_prop}')
        axes[1].set_ylabel('PVT Property')
        axes[1].set_xlabel('Timestep')
        axes[1].plot(dynamics.loc[time_start:time_end, 'DATE'], pvt,
                     'o-', c='black', ms=3.5, lw=1.3)
        axes[1].axvline(dynamics.loc[time_start, 'DATE'], ls=':', c='grey')
        axes[1].axvline(dynamics.loc[time_end, 'DATE'], ls=':', c='grey')
        axes[1].grid(True)
    well_ind_widget = widgets.Dropdown(options=wellnames)
    block_widget = widgets.Dropdown(options=[(tuple(item), i) for i, item in
                                              enumerate(wells[well_ind_widget.value].blocks)])
    timesteps_len = len(timesteps) - 1
    def update_block_list(*args):
        # Observer callback: refresh block choices when another well is
        # selected (each well owns a different set of blocks).
        name = args[0]['new']
        block_widget.options = [(tuple(item), i) for i, item in enumerate(wells[name].blocks)]
    well_ind_widget.observe(update_block_list, 'value')
    rate_widget = widgets.Dropdown(options=['Oil', 'Water', 'Gas', 'Free Gas'], value='Oil')
    pvt_prop_widget = widgets.Dropdown(options=['WBHP', 'FVF_O', 'FVF_W', 'FVF_G', 'VISC_O', 'VISC_W',
                                               'VISC_G', 'KR_O', 'KR_W', 'KR_G'], value='WBHP')
    interact(update,
             wellname=well_ind_widget,
             block=block_widget,
             rate=rate_widget,
             pvt_prop=pvt_prop_widget,
             time_start=widgets.IntSlider(min=0, max=timesteps_len, step=1, value=0),
             time_end=widgets.IntSlider(min=0, max=timesteps_len, step=1, value=timesteps_len))
    plt.show()
def show_rates(wells, timesteps, wellnames, wells2=None, labels=None, figsize=(16, 6)):
    """Interactively plot one (optionally cumulative) rate for a chosen well.

    Builds ipywidgets controls (well, rate, cumulative flag, time window)
    and redraws a single axis on every change, optionally overlaying a
    second model's curve for comparison.

    Parameters
    ----------
    wells : Wells class instance
        Wells component with calculated rates in results attribute.
    timesteps : list of Timestamps
        Dates at which rates were calculated.
    wellnames : array-like
        List of active producing wells.
    wells2 : Wells class instance, optional
        Wells component with results for comparison.
    labels : array-like, optional
        List of labels corresponding to plots.
    figsize : tuple
        Figsize for the plot.
    """
    def update(wellname, rate, cumulative, time_start, time_end):
        rates = wells[wellname].total_rates
        _, ax = plt.subplots(1, 1, figsize=figsize)
        title = 'Cumulative ' + rate if cumulative else rate
        ax.set_title('Well {}. {}'.format(wellname.upper(), title))
        ax.set_ylabel('Cumulative Rate' if cumulative else 'Rate')
        ax.set_xlabel('Date')
        t = rates.loc[time_start:time_end, 'DATE']
        data = _safe_data_getter(rates, time_start, time_end, NAMES_TO_KW.get(rate, rate))
        if cumulative:
            data = np.cumsum(data)
        ax.plot(t, data, 'o-', c=COLORS[rate], label=labels[0], ms=3.5, lw=1.3)
        # Dotted vertical guides mark the selected time window.
        ax.axvline(rates.loc[time_start, 'DATE'], ls=':', c='grey')
        ax.axvline(rates.loc[time_end, 'DATE'], ls=':', c='grey')
        ax.grid(True)
        if wells2 is not None:
            # Overlay the comparison model's curve; use the module-level
            # COMPARE_COLOR constant (was a hard-coded 'green', inconsistent
            # with show_rates2).
            rates2 = wells2[wellname].total_rates
            t = rates2.loc[time_start:time_end, 'DATE']
            data = _safe_data_getter(rates2, time_start, time_end, NAMES_TO_KW.get(rate, rate))
            if cumulative:
                data = np.cumsum(data)
            ax.plot(t, data, 'o-', c=COMPARE_COLOR, label=labels[1], ms=3.5, lw=1.3)
        ax.legend(loc='best')
    timesteps_len = len(timesteps) - 1
    if labels is None:
        labels = ['Model_1', 'Model_2'] if wells2 is not None else ['']
    interact(update,
             wellname=widgets.Dropdown(options=wellnames),
             rate=widgets.Dropdown(options=['Oil', 'Water', 'Gas', 'Free Gas'], value='Oil'),
             cumulative=widgets.Checkbox(value=False, description='Cumulative'),
             time_start=widgets.IntSlider(min=0, max=timesteps_len, step=1, value=0),
             time_end=widgets.IntSlider(min=0, max=timesteps_len, step=1, value=timesteps_len))
    plt.show()
def show_rates2(wells, timesteps, wellnames, wells2=None, labels=None, figsize=(16, 6)):
    """Interactively plot a liquid and a gas rate side by side for a well.

    Builds ipywidgets controls (well, liquid rate, gas rate, cumulative
    flag, time window) and redraws two axes on every change, optionally
    overlaying a second model's curves for comparison. The duplicated
    per-panel drawing of the original is factored into local helpers.

    Parameters
    ----------
    wells : Wells class instance
        Wells component with calculated rates in results attribute.
    timesteps : list of Timestamps
        Dates at which rates were calculated.
    wellnames : array-like
        List of active producing wells.
    wells2 : Wells class instance, optional
        Wells component with results for comparison.
    labels : array-like, optional
        List of labels corresponding to plots.
    figsize : tuple
        Figsize for two axes plots.
    """
    def update(wellname, liquid_rate, gas_rate, cumulative, time_start, time_end):
        rates = wells[wellname].total_rates
        _, axes = plt.subplots(1, 2, figsize=figsize)
        t = rates.loc[time_start:time_end, 'DATE']

        def get_series(frame, rate):
            # Column values for the selected window, cumulated if requested.
            values = _safe_data_getter(frame, time_start, time_end, NAMES_TO_KW.get(rate, rate))
            return np.cumsum(values) if cumulative else values

        def draw(ax, rate):
            # One panel: main model's curve plus window guides and grid.
            title = 'Cumulative ' + rate if cumulative else rate
            ax.set_title('Well {}. {}'.format(wellname.upper(), title))
            ax.set_ylabel('Cumulative Rate' if cumulative else 'Rate')
            ax.plot(t, get_series(rates, rate), 'o-', c=COLORS[rate],
                    label=labels[0], ms=3.5, lw=1.3)
            ax.axvline(rates.loc[time_start, 'DATE'], ls=':', c='grey')
            ax.axvline(rates.loc[time_end, 'DATE'], ls=':', c='grey')
            ax.grid(True)

        draw(axes[0], liquid_rate)
        # Only the left panel carries the shared x-axis label (as before).
        axes[0].set_xlabel('Date')
        draw(axes[1], gas_rate)
        if wells2 is not None:
            # Overlay the comparison model's curves and show legends.
            rates2 = wells2[wellname].total_rates
            t2 = rates2.loc[time_start:time_end, 'DATE']
            for ax, rate in zip(axes, (liquid_rate, gas_rate)):
                ax.plot(t2, get_series(rates2, rate), 'o-', c=COMPARE_COLOR,
                        label=labels[1], ms=3.5, lw=1.3)
                ax.legend(loc='best')
    timesteps_len = len(timesteps) - 1
    if labels is None:
        labels = ['Model_1', 'Model_2'] if wells2 is not None else ['']
    interact(update,
             wellname=widgets.Dropdown(options=wellnames),
             liquid_rate=widgets.Dropdown(options=['Oil', 'Water'], value='Oil'),
             gas_rate=widgets.Dropdown(options=['Gas', 'Free Gas'], value='Gas'),
             cumulative=widgets.Checkbox(value=False, description='Cumulative'),
             time_start=widgets.IntSlider(min=0, max=timesteps_len, step=1, value=0),
             time_end=widgets.IntSlider(min=0, max=timesteps_len, step=1, value=timesteps_len))
    plt.show()
| [
"numpy.full",
"numpy.stack",
"ipywidgets.IntSlider",
"ipywidgets.Dropdown",
"numpy.cumsum",
"ipywidgets.Checkbox",
"matplotlib.pylab.subplots",
"matplotlib.pylab.show"
] | [((2708, 2743), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': 'wellnames'}), '(options=wellnames)\n', (2724, 2743), True, 'import ipywidgets as widgets\n'), ((3190, 3264), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': "['Oil', 'Water', 'Gas', 'Free Gas']", 'value': '"""Oil"""'}), "(options=['Oil', 'Water', 'Gas', 'Free Gas'], value='Oil')\n", (3206, 3264), True, 'import ipywidgets as widgets\n'), ((3287, 3420), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': "['WBHP', 'FVF_O', 'FVF_W', 'FVF_G', 'VISC_O', 'VISC_W', 'VISC_G', 'KR_O',\n 'KR_W', 'KR_G']", 'value': '"""WBHP"""'}), "(options=['WBHP', 'FVF_O', 'FVF_W', 'FVF_G', 'VISC_O',\n 'VISC_W', 'VISC_G', 'KR_O', 'KR_W', 'KR_G'], value='WBHP')\n", (3303, 3420), True, 'import ipywidgets as widgets\n'), ((3815, 3825), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (3823, 3825), True, 'import matplotlib.pylab as plt\n'), ((6247, 6257), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (6255, 6257), True, 'import matplotlib.pylab as plt\n'), ((9872, 9882), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (9880, 9882), True, 'import matplotlib.pylab as plt\n'), ((468, 506), 'numpy.full', 'np.full', (['(time_end - time_start)', 'np.nan'], {}), '(time_end - time_start, np.nan)\n', (475, 506), True, 'import numpy as np\n'), ((1752, 1787), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': 'figsize'}), '(1, 2, figsize=figsize)\n', (1764, 1787), True, 'import matplotlib.pylab as plt\n'), ((4558, 4593), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'figsize'}), '(1, 1, figsize=figsize)\n', (4570, 4593), True, 'import matplotlib.pylab as plt\n'), ((7083, 7118), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': 'figsize'}), '(1, 2, figsize=figsize)\n', (7095, 7118), True, 'import matplotlib.pylab as plt\n'), ((3653, 3713), 'ipywidgets.IntSlider', 'widgets.IntSlider', ([], {'min': '(0)', 'max': 
'timesteps_len', 'step': '(1)', 'value': '(0)'}), '(min=0, max=timesteps_len, step=1, value=0)\n', (3670, 3713), True, 'import ipywidgets as widgets\n'), ((3737, 3809), 'ipywidgets.IntSlider', 'widgets.IntSlider', ([], {'min': '(0)', 'max': 'timesteps_len', 'step': '(1)', 'value': 'timesteps_len'}), '(min=0, max=timesteps_len, step=1, value=timesteps_len)\n', (3754, 3809), True, 'import ipywidgets as widgets\n'), ((5004, 5019), 'numpy.cumsum', 'np.cumsum', (['data'], {}), '(data)\n', (5013, 5019), True, 'import numpy as np\n'), ((5849, 5884), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': 'wellnames'}), '(options=wellnames)\n', (5865, 5884), True, 'import ipywidgets as widgets\n'), ((5904, 5978), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': "['Oil', 'Water', 'Gas', 'Free Gas']", 'value': '"""Oil"""'}), "(options=['Oil', 'Water', 'Gas', 'Free Gas'], value='Oil')\n", (5920, 5978), True, 'import ipywidgets as widgets\n'), ((6004, 6059), 'ipywidgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': '"""Cumulative"""'}), "(value=False, description='Cumulative')\n", (6020, 6059), True, 'import ipywidgets as widgets\n'), ((6085, 6145), 'ipywidgets.IntSlider', 'widgets.IntSlider', ([], {'min': '(0)', 'max': 'timesteps_len', 'step': '(1)', 'value': '(0)'}), '(min=0, max=timesteps_len, step=1, value=0)\n', (6102, 6145), True, 'import ipywidgets as widgets\n'), ((6169, 6241), 'ipywidgets.IntSlider', 'widgets.IntSlider', ([], {'min': '(0)', 'max': 'timesteps_len', 'step': '(1)', 'value': 'timesteps_len'}), '(min=0, max=timesteps_len, step=1, value=timesteps_len)\n', (6186, 6241), True, 'import ipywidgets as widgets\n'), ((7572, 7587), 'numpy.cumsum', 'np.cumsum', (['data'], {}), '(data)\n', (7581, 7587), True, 'import numpy as np\n'), ((8207, 8222), 'numpy.cumsum', 'np.cumsum', (['data'], {}), '(data)\n', (8216, 8222), True, 'import numpy as np\n'), ((9404, 9439), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': 
'wellnames'}), '(options=wellnames)\n', (9420, 9439), True, 'import ipywidgets as widgets\n'), ((9466, 9521), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': "['Oil', 'Water']", 'value': '"""Oil"""'}), "(options=['Oil', 'Water'], value='Oil')\n", (9482, 9521), True, 'import ipywidgets as widgets\n'), ((9545, 9603), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': "['Gas', 'Free Gas']", 'value': '"""Gas"""'}), "(options=['Gas', 'Free Gas'], value='Gas')\n", (9561, 9603), True, 'import ipywidgets as widgets\n'), ((9629, 9684), 'ipywidgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': '"""Cumulative"""'}), "(value=False, description='Cumulative')\n", (9645, 9684), True, 'import ipywidgets as widgets\n'), ((9710, 9770), 'ipywidgets.IntSlider', 'widgets.IntSlider', ([], {'min': '(0)', 'max': 'timesteps_len', 'step': '(1)', 'value': '(0)'}), '(min=0, max=timesteps_len, step=1, value=0)\n', (9727, 9770), True, 'import ipywidgets as widgets\n'), ((9794, 9866), 'ipywidgets.IntSlider', 'widgets.IntSlider', ([], {'min': '(0)', 'max': 'timesteps_len', 'step': '(1)', 'value': 'timesteps_len'}), '(min=0, max=timesteps_len, step=1, value=timesteps_len)\n', (9811, 9866), True, 'import ipywidgets as widgets\n'), ((1481, 1492), 'numpy.stack', 'np.stack', (['x'], {}), '(x)\n', (1489, 1492), True, 'import numpy as np\n'), ((1674, 1685), 'numpy.stack', 'np.stack', (['x'], {}), '(x)\n', (1682, 1685), True, 'import numpy as np\n'), ((5540, 5555), 'numpy.cumsum', 'np.cumsum', (['data'], {}), '(data)\n', (5549, 5555), True, 'import numpy as np\n'), ((8781, 8796), 'numpy.cumsum', 'np.cumsum', (['data'], {}), '(data)\n', (8790, 8796), True, 'import numpy as np\n'), ((9041, 9056), 'numpy.cumsum', 'np.cumsum', (['data'], {}), '(data)\n', (9050, 9056), True, 'import numpy as np\n')] |
# coding: utf-8
from __future__ import division, print_function
__author__ = "adrn <<EMAIL>>"
# Standard library
import os
# Third-party
import numpy as np
import matplotlib.pyplot as plt
# Project
import gary.potential as gp
from gary.units import galactic
from ..initialconditions import tube_grid_xz, box_grid
# Directory where the diagnostic figures produced by these tests are saved.
plot_path = "output/tests/initialconditions"
if not os.path.exists(plot_path):
    os.makedirs(plot_path)

# Triaxial NFW potential shared by all tests in this module.
potential = gp.LeeSutoTriaxialNFWPotential(v_c=0.22, r_s=30.,
                                       a=1., b=1., c=0.8, units=galactic)
def test_tube():
    """Check that tube_grid_xz initial conditions lie at the requested energy."""
    plt.figure(figsize=(10,10))
    for energy in np.linspace(-0.12, -0.2, 5):
        ics = tube_grid_xz(E=energy, potential=potential, dx=1., dz=1.)
        # Every sampled phase-space point must reproduce the target energy.
        measured = potential.total_energy(ics[:,:3], ics[:,3:])
        np.testing.assert_allclose(measured, energy)

        # Scatter the x-z footprint of the grid and save a diagnostic image,
        # reusing the same figure (cleared after each save).
        plt.scatter(ics[:,0], ics[:,2], c='k', alpha=0.5)
        fname = "tube_E{:.2f}.png".format(energy)
        plt.savefig(os.path.join(plot_path, fname))
        plt.clf()
def test_box():
    """Check that box_grid initial conditions lie at the requested energy."""
    # Imported for its side effect: registers the '3d' projection with
    # matplotlib (the name itself is unused).
    from mpl_toolkits.mplot3d import Axes3D

    for E in np.linspace(-0.12, -0.2, 5):
        w0 = box_grid(E=E, potential=potential, approx_num=1024)
        print(w0.shape)
        # Every sampled phase-space point must reproduce the target energy.
        Es = potential.total_energy(w0[:,:3], w0[:,3:])
        np.testing.assert_allclose(Es, E)

        fig = plt.figure(figsize=(10,10))
        ax = fig.add_subplot(111,projection='3d')
        ax.plot(w0[:,0], w0[:,1], w0[:,2], c='k',
                alpha=0.5, marker='o', linestyle='none')
        ax.elev = 45
        ax.azim = 45
        fig.savefig(os.path.join(plot_path, "box_E{:.2f}.png".format(E)))
        # Close each figure explicitly: the original leaked one open figure
        # per iteration, which matplotlib warns about and which holds memory.
        plt.close(fig)
| [
"os.makedirs",
"matplotlib.pyplot.clf",
"gary.potential.LeeSutoTriaxialNFWPotential",
"matplotlib.pyplot.scatter",
"os.path.exists",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.testing.assert_allclose"
] | [((438, 529), 'gary.potential.LeeSutoTriaxialNFWPotential', 'gp.LeeSutoTriaxialNFWPotential', ([], {'v_c': '(0.22)', 'r_s': '(30.0)', 'a': '(1.0)', 'b': '(1.0)', 'c': '(0.8)', 'units': 'galactic'}), '(v_c=0.22, r_s=30.0, a=1.0, b=1.0, c=0.8,\n units=galactic)\n', (468, 529), True, 'import gary.potential as gp\n'), ((371, 396), 'os.path.exists', 'os.path.exists', (['plot_path'], {}), '(plot_path)\n', (385, 396), False, 'import os\n'), ((402, 424), 'os.makedirs', 'os.makedirs', (['plot_path'], {}), '(plot_path)\n', (413, 424), False, 'import os\n'), ((588, 616), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (598, 616), True, 'import matplotlib.pyplot as plt\n'), ((630, 657), 'numpy.linspace', 'np.linspace', (['(-0.12)', '(-0.2)', '(5)'], {}), '(-0.12, -0.2, 5)\n', (641, 657), True, 'import numpy as np\n'), ((1048, 1075), 'numpy.linspace', 'np.linspace', (['(-0.12)', '(-0.2)', '(5)'], {}), '(-0.12, -0.2, 5)\n', (1059, 1075), True, 'import numpy as np\n'), ((789, 822), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Es', 'E'], {}), '(Es, E)\n', (815, 822), True, 'import numpy as np\n'), ((832, 881), 'matplotlib.pyplot.scatter', 'plt.scatter', (['w0[:, 0]', 'w0[:, 2]'], {'c': '"""k"""', 'alpha': '(0.5)'}), "(w0[:, 0], w0[:, 2], c='k', alpha=0.5)\n", (843, 881), True, 'import matplotlib.pyplot as plt\n'), ((963, 972), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (970, 972), True, 'import matplotlib.pyplot as plt\n'), ((1230, 1263), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Es', 'E'], {}), '(Es, E)\n', (1256, 1263), True, 'import numpy as np\n'), ((1279, 1307), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1289, 1307), True, 'import matplotlib.pyplot as plt\n')] |
from typing import Dict
import numpy as np
import torch
from echovpr.datasets.utils import get_1_hot_encode, load_np_file
from torch.utils.data import DataLoader
from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset
def prepare_final_datasets(esn_descriptors: Dict[str, torch.Tensor], config, eval_only = False):
    """Build datasets and their DataLoaders for the configured dataset.

    Returns the tuple ``(train_dataset, train_dataLoader, val_dataset,
    val_dataLoader, test_dataLoader, train_gt, eval_gt)``; the training
    loader (and dataset) is ``None`` when ``eval_only`` is set.
    """
    train_dataset, val_dataset, test_dataset, train_gt, eval_gt = get_datasets(esn_descriptors, config, eval_only)

    # Shared loader settings pulled from the config once.
    workers = int(config['dataloader_threads'])
    batch = int(config['train_batchsize'])

    train_dataLoader = (None if eval_only
                        else DataLoader(train_dataset, num_workers=workers, batch_size=batch, shuffle=True))
    val_dataLoader = DataLoader(val_dataset, num_workers=workers, batch_size=batch, shuffle=False)
    test_dataLoader = DataLoader(test_dataset, num_workers=workers, batch_size=batch, shuffle=False)

    return train_dataset, train_dataLoader, val_dataset, val_dataLoader, test_dataLoader, train_gt, eval_gt
def get_datasets(esn_descriptors: Dict[str, torch.Tensor], config, eval_only: bool):
    """Dispatch dataset construction based on ``config['dataset']``.

    Raises
    ------
    ValueError
        If the configured dataset name is not supported.
    """
    dataset_name = config['dataset']
    # Guard-clause chain instead of the original's mixed if/if-elif; the
    # builder names are only resolved on the branch that is taken.
    if dataset_name == 'nordland':
        return get_datasets_for_nordland(esn_descriptors, config, eval_only)
    if dataset_name == 'nordland_spr_fall':
        return get_datasets_for_nordland_spring_vs_fall(esn_descriptors, config, eval_only)
    if dataset_name == 'oxford':
        return get_datasets_for_oxford(esn_descriptors, config, eval_only)
    raise ValueError(f"Unknown dataset: {config['dataset']}")
def get_dataset_infos(config):
    """Load the two raw dataset-info archives for the configured dataset.

    Returns ``(summer_info, winter_info)`` for 'nordland' or
    ``(day_info, night_info)`` for 'oxford'.

    Raises:
        ValueError: If ``config['dataset']`` is not a supported dataset.
    """
    dataset_name = config['dataset']
    if dataset_name == 'nordland':
        return (load_np_file(config['dataset_nordland_summer_dataset_file_path']),
                load_np_file(config['dataset_nordland_winter_dataset_file_path']))
    elif dataset_name == 'oxford':
        return (load_np_file(config['dataset_oxford_day_dataset_file_path']),
                load_np_file(config['dataset_oxford_night_dataset_file_path']))
    else:
        raise ValueError(f"Unknown dataset: {dataset_name}")
def get_datasets_for_nordland(esn_descriptors: Dict[str, torch.Tensor], config, eval_only = False):
    """Assemble the Nordland summer-train / winter-eval datasets.

    Summer frames (with one-hot place labels) form the training set; winter
    frames are split into validation and test subsets using precomputed
    split indices loaded from disk.
    """
    summer_dataset_info = load_np_file(config['dataset_nordland_summer_dataset_file_path'])
    winter_dataset_info = load_np_file(config['dataset_nordland_winter_dataset_file_path'])
    val_test_splits = load_np_file(config['dataset_nordland_winter_val_test_splits_indices_file_path'])

    train_dataset, train_gt = None, None
    if not eval_only:
        train_gt = summer_dataset_info['ground_truth_indices']
        summer_indices = summer_dataset_info['image_indices']
        summer_image_idx = torch.from_numpy(summer_indices)
        # One-hot encode each image index so training can use a classification target.
        one_hot = get_1_hot_encode(summer_indices, len(summer_indices))
        summer_image_1_hot = torch.from_numpy(one_hot).type(torch.float)
        train_dataset = TensorDataset(esn_descriptors['summer'], summer_image_1_hot, summer_image_idx)
        print(f"Train dataset size: {len(train_dataset)}")

    eval_gt = winter_dataset_info['ground_truth_indices']
    winter_image_idx = torch.from_numpy(winter_dataset_info['image_indices'])
    winter_dataset = TensorDataset(esn_descriptors['winter'], winter_image_idx)

    val_dataset = Subset(winter_dataset, val_test_splits['val_indices'])
    print(f"Val dataset size: {len(val_dataset)}")
    test_dataset = Subset(winter_dataset, val_test_splits['test_indices'])
    print(f"Test dataset size: {len(test_dataset)}")

    return train_dataset, val_dataset, test_dataset, train_gt, eval_gt
def get_datasets_for_nordland_spring_vs_fall(esn_descriptors: Dict[str, torch.Tensor], config, eval_only = False):
    """Build Nordland datasets that train on summer+winter and test on spring+fall.

    The frame index range is randomly partitioned into contiguous buckets of
    ``continuity`` frames; the same bucket split is applied to every season so
    train/val/test never share places.  Training and validation use the
    summer and winter traverses; the test set uses spring and fall.

    Returns ``(train_dataset, val_dataset, test_dataset, train_gt, eval_gt)``;
    ``train_dataset`` and ``train_gt`` are ``None`` when *eval_only* is True.
    """
    # Split sizes come from the config; they are rounded to whole buckets
    # inside generate_indices_splits.
    desired_train_size = int(config['desired_train_size'])
    desired_val_size = int(config['desired_val_size'])
    desired_test_size = int(config['desired_test_size'])
    continuity = int(config['continuity'])
    train_indices, val_indices, test_indices = generate_indices_splits(continuity, desired_train_size, desired_val_size, desired_test_size)
    summer_dataset_info = load_np_file(config['dataset_nordland_summer_dataset_file_path'])
    esn_descriptor_summer = esn_descriptors['summer']
    summer_image_idx = torch.from_numpy(summer_dataset_info['image_indices'])
    winter_dataset_info = load_np_file(config['dataset_nordland_winter_dataset_file_path'])
    esn_descriptor_winter = esn_descriptors['winter']
    winter_image_idx = torch.from_numpy(winter_dataset_info['image_indices'])
    train_gt = None
    train_dataset = None
    if not eval_only:
        train_gt = summer_dataset_info['ground_truth_indices']
        # One-hot place labels for classification-style training, one dataset
        # per season, both restricted to the same train bucket indices.
        summer_image_1_hot = torch.from_numpy(get_1_hot_encode(summer_dataset_info['image_indices'], len(summer_dataset_info['image_indices']))).type(torch.float)
        summer_train_dataset = TensorDataset(esn_descriptor_summer, summer_image_1_hot, summer_image_idx)
        winter_image_1_hot = torch.from_numpy(get_1_hot_encode(winter_dataset_info['image_indices'], len(winter_dataset_info['image_indices']))).type(torch.float)
        winter_train_dataset = TensorDataset(esn_descriptor_winter, winter_image_1_hot, winter_image_idx)
        train_dataset = ConcatDataset([Subset(summer_train_dataset, train_indices), Subset(winter_train_dataset, train_indices)])
        print(f"Train dataset size: {len(train_dataset)}")
    # Validation reuses summer+winter descriptors (no one-hot labels needed).
    summer_val_dataset = TensorDataset(esn_descriptor_summer, summer_image_idx)
    winter_val_dataset = TensorDataset(esn_descriptor_winter, winter_image_idx)
    val_dataset = ConcatDataset([Subset(summer_val_dataset, val_indices), Subset(winter_val_dataset, val_indices)])
    print(f"Val dataset size: {len(val_dataset)}")
    # Test set is drawn from the two held-out seasons: spring and fall.
    spring_dataset_info = load_np_file(config['dataset_nordland_spring_dataset_file_path'])
    esn_descriptor_spring = esn_descriptors['spring']
    spring_image_idx = torch.from_numpy(spring_dataset_info['image_indices'])
    spring_dataset = TensorDataset(esn_descriptor_spring, spring_image_idx)
    fall_dataset_info = load_np_file(config['dataset_nordland_fall_dataset_file_path'])
    esn_descriptor_fall = esn_descriptors['fall']
    fall_image_idx = torch.from_numpy(fall_dataset_info['image_indices'])
    fall_dataset = TensorDataset(esn_descriptor_fall, fall_image_idx)
    test_dataset = ConcatDataset([Subset(spring_dataset, test_indices), Subset(fall_dataset, test_indices)])
    print(f"Test dataset size: {len(test_dataset)}")
    # NOTE(review): eval_gt is taken from the winter archive even though the
    # test set is spring+fall — confirm the ground-truth indices are shared
    # across traverses.
    eval_gt = winter_dataset_info['ground_truth_indices']
    return train_dataset, val_dataset, test_dataset, train_gt, eval_gt
def get_datasets_for_oxford(esn_descriptors: Dict[str, torch.Tensor], config, eval_only = False):
    """Assemble the Oxford day-train / night-eval datasets.

    Day frames (with one-hot place labels) form the training set; night
    frames are split into validation and test subsets using precomputed
    split indices loaded from disk.
    """
    day_dataset_info = load_np_file(config['dataset_oxford_day_dataset_file_path'])
    night_dataset_info = load_np_file(config['dataset_oxford_night_dataset_file_path'])
    val_test_splits = load_np_file(config['dataset_oxford_night_val_test_splits_indices_file_path'])

    train_dataset, train_gt = None, None
    if not eval_only:
        train_gt = day_dataset_info['ground_truth_indices']
        day_indices = day_dataset_info['image_indices']
        day_image_idx = torch.from_numpy(day_indices)
        # One-hot encode each image index so training can use a classification target.
        one_hot = get_1_hot_encode(day_indices, len(day_indices))
        day_image_1_hot = torch.from_numpy(one_hot).type(torch.float)
        train_dataset = TensorDataset(esn_descriptors['day'], day_image_1_hot, day_image_idx)
        print(f"Train dataset size: {len(train_dataset)}")

    eval_gt = night_dataset_info['ground_truth_indices']
    night_image_idx = torch.from_numpy(night_dataset_info['image_indices'])
    night_dataset = TensorDataset(esn_descriptors['night'], night_image_idx)

    val_dataset = Subset(night_dataset, val_test_splits['val_indices'])
    print(f"Val dataset size: {len(val_dataset)}")
    test_dataset = Subset(night_dataset, val_test_splits['test_indices'])
    print(f"Test dataset size: {len(test_dataset)}")

    return train_dataset, val_dataset, test_dataset, train_gt, eval_gt
def generate_indices_splits(continuity, desired_train_size, desired_val_size, desired_test_size):
    """Randomly partition contiguous index buckets into train/val/test splits.

    Indices 0..N-1 are grouped into buckets of ``continuity`` consecutive
    indices; whole buckets are then assigned at random to the three splits so
    that temporally adjacent frames never straddle a split boundary.  Each
    desired size is rounded to a whole number of buckets.

    Returns:
        ``(train_indices, val_indices, test_indices)`` — three sorted,
        disjoint integer arrays covering 0..N-1 exactly once.
    """
    bucket_counts = [int(np.round(size / continuity))
                     for size in (desired_train_size, desired_val_size, desired_test_size)]
    n_train_splits, n_val_splits, n_test_splits = bucket_counts
    n_buckets = n_train_splits + n_val_splits + n_test_splits

    # Buckets are rows of `continuity` consecutive indices.
    buckets = np.arange(n_buckets * continuity, dtype=int).reshape(n_buckets, continuity)
    order = np.random.permutation(n_buckets)

    # Assign whole buckets: first val, then test, the remainder to train.
    val_buckets = buckets[order[:n_val_splits]]
    test_buckets = buckets[order[n_val_splits:n_val_splits + n_test_splits]]
    train_buckets = buckets[order[n_val_splits + n_test_splits:]]

    train_indices = np.sort(train_buckets.reshape(-1))
    val_indices = np.sort(val_buckets.reshape(-1))
    test_indices = np.sort(test_buckets.reshape(-1))
    return train_indices, val_indices, test_indices
| [
"numpy.shape",
"numpy.arange",
"numpy.reshape",
"echovpr.datasets.utils.load_np_file",
"torch.utils.data.dataset.TensorDataset",
"numpy.random.permutation",
"torch.utils.data.dataset.Subset",
"numpy.round",
"torch.from_numpy"
] | [((2556, 2621), 'echovpr.datasets.utils.load_np_file', 'load_np_file', (["config['dataset_nordland_summer_dataset_file_path']"], {}), "(config['dataset_nordland_summer_dataset_file_path'])\n", (2568, 2621), False, 'from echovpr.datasets.utils import get_1_hot_encode, load_np_file\n'), ((2648, 2713), 'echovpr.datasets.utils.load_np_file', 'load_np_file', (["config['dataset_nordland_winter_dataset_file_path']"], {}), "(config['dataset_nordland_winter_dataset_file_path'])\n", (2660, 2713), False, 'from echovpr.datasets.utils import get_1_hot_encode, load_np_file\n'), ((2736, 2822), 'echovpr.datasets.utils.load_np_file', 'load_np_file', (["config['dataset_nordland_winter_val_test_splits_indices_file_path']"], {}), "(config[\n 'dataset_nordland_winter_val_test_splits_indices_file_path'])\n", (2748, 2822), False, 'from echovpr.datasets.utils import get_1_hot_encode, load_np_file\n'), ((3558, 3612), 'torch.from_numpy', 'torch.from_numpy', (["winter_dataset_info['image_indices']"], {}), "(winter_dataset_info['image_indices'])\n", (3574, 3612), False, 'import torch\n'), ((3634, 3688), 'torch.utils.data.dataset.TensorDataset', 'TensorDataset', (['esn_descriptor_winter', 'winter_image_idx'], {}), '(esn_descriptor_winter, winter_image_idx)\n', (3647, 3688), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((3708, 3762), 'torch.utils.data.dataset.Subset', 'Subset', (['winter_dataset', "val_test_splits['val_indices']"], {}), "(winter_dataset, val_test_splits['val_indices'])\n", (3714, 3762), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((3834, 3889), 'torch.utils.data.dataset.Subset', 'Subset', (['winter_dataset', "val_test_splits['test_indices']"], {}), "(winter_dataset, val_test_splits['test_indices'])\n", (3840, 3889), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((4513, 4578), 'echovpr.datasets.utils.load_np_file', 'load_np_file', 
(["config['dataset_nordland_summer_dataset_file_path']"], {}), "(config['dataset_nordland_summer_dataset_file_path'])\n", (4525, 4578), False, 'from echovpr.datasets.utils import get_1_hot_encode, load_np_file\n'), ((4656, 4710), 'torch.from_numpy', 'torch.from_numpy', (["summer_dataset_info['image_indices']"], {}), "(summer_dataset_info['image_indices'])\n", (4672, 4710), False, 'import torch\n'), ((4738, 4803), 'echovpr.datasets.utils.load_np_file', 'load_np_file', (["config['dataset_nordland_winter_dataset_file_path']"], {}), "(config['dataset_nordland_winter_dataset_file_path'])\n", (4750, 4803), False, 'from echovpr.datasets.utils import get_1_hot_encode, load_np_file\n'), ((4881, 4935), 'torch.from_numpy', 'torch.from_numpy', (["winter_dataset_info['image_indices']"], {}), "(winter_dataset_info['image_indices'])\n", (4897, 4935), False, 'import torch\n'), ((5832, 5886), 'torch.utils.data.dataset.TensorDataset', 'TensorDataset', (['esn_descriptor_summer', 'summer_image_idx'], {}), '(esn_descriptor_summer, summer_image_idx)\n', (5845, 5886), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((5912, 5966), 'torch.utils.data.dataset.TensorDataset', 'TensorDataset', (['esn_descriptor_winter', 'winter_image_idx'], {}), '(esn_descriptor_winter, winter_image_idx)\n', (5925, 5966), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((6161, 6226), 'echovpr.datasets.utils.load_np_file', 'load_np_file', (["config['dataset_nordland_spring_dataset_file_path']"], {}), "(config['dataset_nordland_spring_dataset_file_path'])\n", (6173, 6226), False, 'from echovpr.datasets.utils import get_1_hot_encode, load_np_file\n'), ((6309, 6363), 'torch.from_numpy', 'torch.from_numpy', (["spring_dataset_info['image_indices']"], {}), "(spring_dataset_info['image_indices'])\n", (6325, 6363), False, 'import torch\n'), ((6385, 6439), 'torch.utils.data.dataset.TensorDataset', 'TensorDataset', (['esn_descriptor_spring', 
'spring_image_idx'], {}), '(esn_descriptor_spring, spring_image_idx)\n', (6398, 6439), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((6465, 6528), 'echovpr.datasets.utils.load_np_file', 'load_np_file', (["config['dataset_nordland_fall_dataset_file_path']"], {}), "(config['dataset_nordland_fall_dataset_file_path'])\n", (6477, 6528), False, 'from echovpr.datasets.utils import get_1_hot_encode, load_np_file\n'), ((6601, 6653), 'torch.from_numpy', 'torch.from_numpy', (["fall_dataset_info['image_indices']"], {}), "(fall_dataset_info['image_indices'])\n", (6617, 6653), False, 'import torch\n'), ((6673, 6723), 'torch.utils.data.dataset.TensorDataset', 'TensorDataset', (['esn_descriptor_fall', 'fall_image_idx'], {}), '(esn_descriptor_fall, fall_image_idx)\n', (6686, 6723), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((7140, 7200), 'echovpr.datasets.utils.load_np_file', 'load_np_file', (["config['dataset_oxford_day_dataset_file_path']"], {}), "(config['dataset_oxford_day_dataset_file_path'])\n", (7152, 7200), False, 'from echovpr.datasets.utils import get_1_hot_encode, load_np_file\n'), ((7226, 7288), 'echovpr.datasets.utils.load_np_file', 'load_np_file', (["config['dataset_oxford_night_dataset_file_path']"], {}), "(config['dataset_oxford_night_dataset_file_path'])\n", (7238, 7288), False, 'from echovpr.datasets.utils import get_1_hot_encode, load_np_file\n'), ((7311, 7389), 'echovpr.datasets.utils.load_np_file', 'load_np_file', (["config['dataset_oxford_night_val_test_splits_indices_file_path']"], {}), "(config['dataset_oxford_night_val_test_splits_indices_file_path'])\n", (7323, 7389), False, 'from echovpr.datasets.utils import get_1_hot_encode, load_np_file\n'), ((8093, 8146), 'torch.from_numpy', 'torch.from_numpy', (["night_dataset_info['image_indices']"], {}), "(night_dataset_info['image_indices'])\n", (8109, 8146), False, 'import torch\n'), ((8167, 8219), 
'torch.utils.data.dataset.TensorDataset', 'TensorDataset', (['esn_descriptor_night', 'night_image_idx'], {}), '(esn_descriptor_night, night_image_idx)\n', (8180, 8219), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((8239, 8292), 'torch.utils.data.dataset.Subset', 'Subset', (['night_dataset', "val_test_splits['val_indices']"], {}), "(night_dataset, val_test_splits['val_indices'])\n", (8245, 8292), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((8364, 8418), 'torch.utils.data.dataset.Subset', 'Subset', (['night_dataset', "val_test_splits['test_indices']"], {}), "(night_dataset, val_test_splits['test_indices'])\n", (8370, 8418), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((9250, 9293), 'numpy.random.permutation', 'np.random.permutation', (['split_bucket_indices'], {}), '(split_bucket_indices)\n', (9271, 9293), True, 'import numpy as np\n'), ((1713, 1778), 'echovpr.datasets.utils.load_np_file', 'load_np_file', (["config['dataset_nordland_summer_dataset_file_path']"], {}), "(config['dataset_nordland_summer_dataset_file_path'])\n", (1725, 1778), False, 'from echovpr.datasets.utils import get_1_hot_encode, load_np_file\n'), ((1809, 1874), 'echovpr.datasets.utils.load_np_file', 'load_np_file', (["config['dataset_nordland_winter_dataset_file_path']"], {}), "(config['dataset_nordland_winter_dataset_file_path'])\n", (1821, 1874), False, 'from echovpr.datasets.utils import get_1_hot_encode, load_np_file\n'), ((3036, 3090), 'torch.from_numpy', 'torch.from_numpy', (["summer_dataset_info['image_indices']"], {}), "(summer_dataset_info['image_indices'])\n", (3052, 3090), False, 'import torch\n'), ((3287, 3361), 'torch.utils.data.dataset.TensorDataset', 'TensorDataset', (['esn_descriptor_summer', 'summer_image_1_hot', 'summer_image_idx'], {}), '(esn_descriptor_summer, summer_image_1_hot, summer_image_idx)\n', (3300, 3361), False, 'from 
torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((5267, 5341), 'torch.utils.data.dataset.TensorDataset', 'TensorDataset', (['esn_descriptor_summer', 'summer_image_1_hot', 'summer_image_idx'], {}), '(esn_descriptor_summer, summer_image_1_hot, summer_image_idx)\n', (5280, 5341), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((5537, 5611), 'torch.utils.data.dataset.TensorDataset', 'TensorDataset', (['esn_descriptor_winter', 'winter_image_1_hot', 'winter_image_idx'], {}), '(esn_descriptor_winter, winter_image_1_hot, winter_image_idx)\n', (5550, 5611), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((7596, 7647), 'torch.from_numpy', 'torch.from_numpy', (["day_dataset_info['image_indices']"], {}), "(day_dataset_info['image_indices'])\n", (7612, 7647), False, 'import torch\n'), ((7835, 7900), 'torch.utils.data.dataset.TensorDataset', 'TensorDataset', (['esn_descriptor_day', 'day_image_1_hot', 'day_image_idx'], {}), '(esn_descriptor_day, day_image_1_hot, day_image_idx)\n', (7848, 7900), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((8669, 8710), 'numpy.round', 'np.round', (['(desired_train_size / continuity)'], {}), '(desired_train_size / continuity)\n', (8677, 8710), True, 'import numpy as np\n'), ((8733, 8772), 'numpy.round', 'np.round', (['(desired_val_size / continuity)'], {}), '(desired_val_size / continuity)\n', (8741, 8772), True, 'import numpy as np\n'), ((8796, 8836), 'numpy.round', 'np.round', (['(desired_test_size / continuity)'], {}), '(desired_test_size / continuity)\n', (8804, 8836), True, 'import numpy as np\n'), ((9208, 9231), 'numpy.shape', 'np.shape', (['split_buckets'], {}), '(split_buckets)\n', (9216, 9231), True, 'import numpy as np\n'), ((9554, 9583), 'numpy.reshape', 'np.reshape', (['val_indices', '[-1]'], {}), '(val_indices, [-1])\n', (9564, 9583), True, 'import numpy as np\n'), ((9612, 9642), 
'numpy.reshape', 'np.reshape', (['test_indices', '[-1]'], {}), '(test_indices, [-1])\n', (9622, 9642), True, 'import numpy as np\n'), ((9672, 9703), 'numpy.reshape', 'np.reshape', (['train_indices', '[-1]'], {}), '(train_indices, [-1])\n', (9682, 9703), True, 'import numpy as np\n'), ((2143, 2203), 'echovpr.datasets.utils.load_np_file', 'load_np_file', (["config['dataset_oxford_day_dataset_file_path']"], {}), "(config['dataset_oxford_day_dataset_file_path'])\n", (2155, 2203), False, 'from echovpr.datasets.utils import get_1_hot_encode, load_np_file\n'), ((2233, 2295), 'echovpr.datasets.utils.load_np_file', 'load_np_file', (["config['dataset_oxford_night_dataset_file_path']"], {}), "(config['dataset_oxford_night_dataset_file_path'])\n", (2245, 2295), False, 'from echovpr.datasets.utils import get_1_hot_encode, load_np_file\n'), ((6000, 6039), 'torch.utils.data.dataset.Subset', 'Subset', (['summer_val_dataset', 'val_indices'], {}), '(summer_val_dataset, val_indices)\n', (6006, 6039), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((6041, 6080), 'torch.utils.data.dataset.Subset', 'Subset', (['winter_val_dataset', 'val_indices'], {}), '(winter_val_dataset, val_indices)\n', (6047, 6080), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((6759, 6795), 'torch.utils.data.dataset.Subset', 'Subset', (['spring_dataset', 'test_indices'], {}), '(spring_dataset, test_indices)\n', (6765, 6795), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((6797, 6831), 'torch.utils.data.dataset.Subset', 'Subset', (['fall_dataset', 'test_indices'], {}), '(fall_dataset, test_indices)\n', (6803, 6831), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((9052, 9076), 'numpy.arange', 'np.arange', (['(0)', 'total_size'], {}), '(0, total_size)\n', (9061, 9076), True, 'import numpy as np\n'), ((5652, 5695), 'torch.utils.data.dataset.Subset', 
'Subset', (['summer_train_dataset', 'train_indices'], {}), '(summer_train_dataset, train_indices)\n', (5658, 5695), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n'), ((5697, 5740), 'torch.utils.data.dataset.Subset', 'Subset', (['winter_train_dataset', 'train_indices'], {}), '(winter_train_dataset, train_indices)\n', (5703, 5740), False, 'from torch.utils.data.dataset import ConcatDataset, Subset, TensorDataset\n')] |
##multi feature
##Example code: the transformed features are the mean, second moment, third moment, and median
##Currently the original input is 29-dimensional data and the output is 7-dimensional data
##Analysis and code for some variants of the transformation
##The current idea is to reduce the time span to achieve low-dimensional sampling
##Information currently available in the state:
##lane_vehicle_count    number of vehicles on each lane (56)
##start_lane_vehicle_count    number of vehicles on each starting lane (28)
##lane_waiting_vehicle_count    number of waiting vehicles on each lane (speed below 0.1 m/s)
##lane_vehicles    ids of the vehicles on each lane
##vehicle_speed    speed of each vehicle
##vehicle_distance    distance each vehicle has already traveled
##current_time    the current time
##current_phase    the current signal phase
##current_phase_time    how long the current phase has lasted
import numpy as np
from numpy import random
import cmath
import os
#1. The idea of this code: assume the light color has been fixed for a certain time span; assume n samples, run a sampling analysis of the data around n/2, then look for the change-point change measure
# If the measure exceeds the threshold, the traffic state varies over a wide range
# If it is below the threshold, the traffic state varies over a relatively small range
def changepointrealm(q):
    '''
    Change-point "distribution complexity" statistic for a sample matrix.

    For every candidate split position i the rows of ``q`` are divided into a
    leading block ``aa`` and a trailing block ``bb``; a diagonalized pooled
    covariance is inverted and used to score the shift of the leading block's
    column sums from their expectation under a homogeneous sample.  The raw
    per-split scores are smoothed with a centered 3-point moving average and
    the maximum smoothed score is returned.

    input: 29*30 matrix (rows = observations, columns = features)
    return: scalar, distribution complexity (max of smoothed split scores)

    NOTE(review): for i == 0 the slice ``q[0,:]`` is 1-D, so ``np.cov`` and
    ``aa.sum(axis=0)`` behave differently than in the i > 0 branch — confirm
    this is intended.
    '''
    n = len(q)
    p = len(q[0])
    # XXX[i] holds the raw score for splitting after row i; XX holds the
    # 3-point moving averages of those scores.
    XXX = [0]*n
    XX = []
    for i in range(n):
        # D accumulates the diagonal of the pooled covariance matrix.
        D = np.ones((p,p))*0
        if i == 0:
            aa = q[0,:]
            bb = q[1:n,:]
        else:
            aa = q[0:i,:]
            bb = q[i+1:n,:]
        # Weighted pooled covariance of the two blocks.
        aa1 = np.cov(aa.T)
        bb1 = np.cov(bb.T)
        Sk = (aa1 * (i+1)+bb1*(n-i-1))/(n-2)
        summ = np.diag(Sk)
        for w in range(p):
            D[w,w]=summ[w]
        # Inverse of the diagonal approximation (off-diagonal terms dropped).
        DD = np.linalg.inv(D)
        # Deviation of the leading block's column sums from the homogeneous
        # expectation (i+1)/n of the total column sums.
        sum1= aa.sum(axis =0)
        sum2= q.sum(axis = 0)*(i+1)/n
        ss = sum1-sum2
        www= ss.reshape(ss.shape[0],1)
        www1=www.T
        # Quadratic-form score minus a centering term that grows with i*(n-i).
        DD1 =(www1/n/cmath.sqrt(p))@DD@www
        DD2 = i*(n-i)*cmath.sqrt(p)*(1+2/n)/pow(n,2)
        x = DD1-DD2
        XXX[i]=x[0]
    # Smooth with a centered 3-point average; the range skips the edges.
    for h in range(3,n-3):
        XX.append((XXX[h]+XXX[h-1]+XXX[h+1])/3)
    XX = np.array(XX).reshape((-1))
    return max(XX)
# After extracting the data here, the individual vectors can be fed into a
# simple linear regression for feature selection (AIC/BIC).
def complex(a, b, c):
    """Fixed-weight combination (0.5/0.3/0.2) of three complexity measures.

    NOTE: the name shadows the builtin ``complex``; kept for compatibility
    with existing callers.
    """
    weights = (0.5, 0.3, 0.2)
    return sum(w * v for w, v in zip(weights, (a, b, c)))
# This function produces the duration of the corresponding phase
# int a1  # breakpoint 1
# int a2  # breakpoint 2
def complextime(ss):
    """Map a complexity score *ss* to a phase duration plus a simulated offset.

    NOTE(review): ``a1``, ``a2``, ``f`` and ``x`` are not defined in this
    snippet — they appear to be placeholders (the two breakpoints and a
    simulation function) to be supplied by the surrounding program.
    """
    currenttime = 0
    simulatetime = 0
    # Piecewise-constant base duration selected by the two breakpoints.
    if ss <= a1:
        currenttime = 5
    elif ss > a1 and ss <= a2:
        currenttime = 10
    else:
        currenttime = 15
    # Additional simulated time component.
    simulatetime = f(x)
    return currenttime+simulatetime
# NOTE(review): the statements below are a sketch — `phase`, `config`,
# `intersection_id`, `env`, `state_size`, `state_size2`, `next_state`,
# `q_num`, `partition_num` and `my_matrix` are not defined in this snippet
# and must come from the surrounding program.
phase.settime(complextime)
#q_num: number of samples produced by the chosen sampling interval
#feature_num: number of new features to generate
#these are the initial states
config["state_size"] = len(config['lane_phase_info'][intersection_id]['start_lane']) + 1
config["state_size2"] = config["state_size"] + config["sizeplus"]
state.size = 29
state = env.get_state()
state = np.array(list(state['start_lane_vehicle_count'].values()) + [state['current_phase']] )
state = np.reshape(state, [1, state_size])
next_state = np.reshape(next_state, [1, state_size2])
feature_num = 4
config["sizeplus"] = feature_num
# q[i] collects the aggregated features of sample window i; time[i] is the
# cumulative elapsed time up to and including window i.
q = np.zeros([q_num,feature_num])
time = np.zeros(q_num)
for i in range(q_num):
    median_matrix = np.zeros(0)
    subsum1 = 0
    subsum2 = 0
    subsum3 = 0
    subsum4 = 0
    #here define how to get the feature
    for j in range(partition_num):
        # Column 1 holds the measurement; column 0 holds its duration.
        subnumber = my_matrix[i*partition_num+j][1]
        subsum1 = subnumber + subsum1
        subsum2 = subnumber*subnumber + subsum2
        subsum3 = subnumber*subnumber*subnumber + subsum3
        median_matrix = np.append(median_matrix,subnumber)
        subsum4 = my_matrix[i*partition_num+j][0] + subsum4
    # Features: mean, second moment, third moment, median.
    q[i][0] = subsum1/partition_num
    q[i][1] = subsum2/partition_num
    q[i][2] = subsum3/partition_num
    q[i][3] = np.median(median_matrix)
    if i ==0:
        time[i] = subsum4
    else:
        time[i] = subsum4+time[i-1]
## Some discussion about the reward design
#reward
def returnSum(myDict):
    """Return the sum of all values stored in *myDict* (0 for an empty dict)."""
    return sum(myDict.values())
def get_reward(self):
    """Weighted reward from queue length, phase changes and mean speed.

    NOTE(review): this reads like pseudo-code — the weights para1/para2/para3
    are all zero (so the function always returns 0), ``phasechangetimes`` and
    ``Math`` are undefined here (``Math.exp`` is presumably meant to be
    ``math.exp``), and dividing ``dict_values`` by a number raises TypeError
    in Python 3.  Confirm/fix before using.
    """
    # Weights of the three reward components (currently all zero).
    para1 = 0
    para2 = 0
    para3 = 0
    # a sample reward function which calculates the total of waiting vehicles
    lane_waiting_vehicle_count = self.eng.get_lane_waiting_vehicle_count()
    lane_vehicle_speed = self.eng.get_vehicle_speed()
    # Penalty proportional to the number of currently waiting vehicles.
    reward1 = -1 * sum(list(lane_waiting_vehicle_count.values()))#100
    # Penalty for the number of phase switches performed so far.
    reward2 = -1* phasechangetimes#5
    # Mean-speed style term: total speed normalized by the vehicle count.
    reward3 = sum(list(lane_vehicle_speed.values()/returnSum(self.eng.get_lane_vehicle_count())))#10
    # Squash the components before combining them.
    reward11 = reward1/(1+Math.exp(reward1))
    reward22 = reward2
    reward33 = 1/(1+Math.exp(reward3))
    return para1*reward11 + para2*reward22 + para3*reward33
| [
"cmath.sqrt",
"numpy.median",
"numpy.zeros",
"numpy.ones",
"numpy.append",
"numpy.linalg.inv",
"numpy.reshape",
"numpy.array",
"numpy.diag",
"numpy.cov"
] | [((2474, 2508), 'numpy.reshape', 'np.reshape', (['state', '[1, state_size]'], {}), '(state, [1, state_size])\n', (2484, 2508), True, 'import numpy as np\n'), ((2523, 2563), 'numpy.reshape', 'np.reshape', (['next_state', '[1, state_size2]'], {}), '(next_state, [1, state_size2])\n', (2533, 2563), True, 'import numpy as np\n'), ((2626, 2656), 'numpy.zeros', 'np.zeros', (['[q_num, feature_num]'], {}), '([q_num, feature_num])\n', (2634, 2656), True, 'import numpy as np\n'), ((2664, 2679), 'numpy.zeros', 'np.zeros', (['q_num'], {}), '(q_num)\n', (2672, 2679), True, 'import numpy as np\n'), ((2725, 2736), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (2733, 2736), True, 'import numpy as np\n'), ((3329, 3353), 'numpy.median', 'np.median', (['median_matrix'], {}), '(median_matrix)\n', (3338, 3353), True, 'import numpy as np\n'), ((1006, 1018), 'numpy.cov', 'np.cov', (['aa.T'], {}), '(aa.T)\n', (1012, 1018), True, 'import numpy as np\n'), ((1034, 1046), 'numpy.cov', 'np.cov', (['bb.T'], {}), '(bb.T)\n', (1040, 1046), True, 'import numpy as np\n'), ((1109, 1120), 'numpy.diag', 'np.diag', (['Sk'], {}), '(Sk)\n', (1116, 1120), True, 'import numpy as np\n'), ((1191, 1207), 'numpy.linalg.inv', 'np.linalg.inv', (['D'], {}), '(D)\n', (1204, 1207), True, 'import numpy as np\n'), ((3107, 3142), 'numpy.append', 'np.append', (['median_matrix', 'subnumber'], {}), '(median_matrix, subnumber)\n', (3116, 3142), True, 'import numpy as np\n'), ((831, 846), 'numpy.ones', 'np.ones', (['(p, p)'], {}), '((p, p))\n', (838, 846), True, 'import numpy as np\n'), ((1593, 1605), 'numpy.array', 'np.array', (['XX'], {}), '(XX)\n', (1601, 1605), True, 'import numpy as np\n'), ((1384, 1397), 'cmath.sqrt', 'cmath.sqrt', (['p'], {}), '(p)\n', (1394, 1397), False, 'import cmath\n'), ((1429, 1442), 'cmath.sqrt', 'cmath.sqrt', (['p'], {}), '(p)\n', (1439, 1442), False, 'import cmath\n')] |
import unittest
import numpy as np
from gradient_descent.Adam import Adam
class TestAdamClass(unittest.TestCase):
    """Unit tests for the Adam gradient-descent optimizer."""

    def setUp(self):
        """Create an Adam optimizer minimizing f(x) = 4x**2 starting at x_t = 10."""

        def f(x):
            """Objective function f(x) = 4x**2."""
            return 4*x**2

        def df(x):
            """Gradient df(x) = 8x of the objective."""
            return 8*x

        self.adam = Adam(f, df, x_t=10, learning_rate=0.1,
                         max_iterations=1000, tolerance=1e-6,
                         n_history_points=1000, beta_1=0.9, beta_2=0.999)

    def test_initizialization(self):
        """Constructor arguments must be stored on the instance and the
        internal first/second moment estimates must start at zero."""
        self.assertEqual(self.adam.x_t, 10, 'incorrect initial value of x_t')
        self.assertEqual(self.adam.learning_rate, 0.1,
                         'incorrect value of learning_rate')
        self.assertEqual(self.adam.max_iterations, 1000,
                         'incorrect value of max_iterations')
        self.assertEqual(self.adam.tolerance, 1e-6,
                         'incorrect value of tolerance')
        self.assertEqual(self.adam.n_iterations, 0,
                         'incorrect value of n_iterations')
        np.testing.assert_array_equal(self.adam.convergence_points,
                                      np.array([None]*1000),
                                      'incorrect initialization of array \
                                      convergence_points')
        self.assertEqual(self.adam.beta_1, 0.9,
                         'incorrect initialization of beta_1')
        self.assertEqual(self.adam.beta_2, 0.999,
                         'incorrect initialization of beta_2')
        self.assertEqual(self.adam._Adam__m_t, 0,
                         'incorrect initialization of m_t')
        self.assertEqual(self.adam._Adam__m_t_1, 0,
                         'incorrect initialization of m_t_1')
        self.assertEqual(self.adam._Adam__v_t, 0,
                         'incorrect initialization of v_t')
        self.assertEqual(self.adam._Adam__v_t_1, 0,
                         'incorrect initialization of v_t_1')

    def _expected_step(self, test_x_t):
        """Recompute the expected Adam update lr * m_hat / (sqrt(v_hat) + eps)
        from the optimizer's current internal state for gradient df(test_x_t)."""
        epsilon = 1e-8
        m_t = self.adam._Adam__m_t
        v_t = self.adam._Adam__v_t
        m_t_1 = self.adam.beta_1*m_t + (1 - self.adam.beta_1) \
            * self.adam.df(test_x_t)
        v_t_1 = self.adam.beta_2*v_t + (1 - self.adam.beta_2) \
            * self.adam.df(test_x_t)**2
        m_hat_t = m_t_1/(1 - self.adam.beta_1**self.adam.n_iterations)
        v_hat_t = v_t_1/(1 - self.adam.beta_2**self.adam.n_iterations)
        return self.adam.learning_rate * m_hat_t/(np.sqrt(v_hat_t) + epsilon)

    def test_update_parameter(self):
        """_update_parameter must return the Adam step computed from the
        bias-corrected first and second moment estimates.

        Bug fix: the failure message used to be passed positionally to
        assertAlmostEqual, where the third positional argument is `places`;
        a genuine mismatch would then raise TypeError (round() with a string)
        instead of reporting a clean assertion failure.  It is now passed
        as msg=.
        """
        self.adam.n_iterations = 1
        # Testing for x_t = 10
        expected = self._expected_step(10)
        self.assertAlmostEqual(self.adam._update_parameter(10), expected,
                               msg='incorrect return of _update_parameter')
        # Testing for x_t = 3 (internal moments have advanced after the
        # first call, so the expectation is recomputed from current state)
        expected = self._expected_step(3)
        self.assertAlmostEqual(self.adam._update_parameter(3), expected,
                               msg='incorrect return of _update_parameter')

    def test_optimization(self):
        """fit() must converge close to the minimum (x = 0) of 4x**2 and
        record at least one iteration."""
        self.assertLessEqual(self.adam.fit(), 1e-4,
                             'Failed to converge to zero for the function: \
                             4x**2')
        self.assertGreaterEqual(self.adam.n_iterations, 1,
                                "n_iterations wasn't properly updated")
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"numpy.sqrt",
"numpy.array",
"gradient_descent.Adam.Adam"
] | [((4744, 4759), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4757, 4759), False, 'import unittest\n'), ((724, 853), 'gradient_descent.Adam.Adam', 'Adam', (['f', 'df'], {'x_t': '(10)', 'learning_rate': '(0.1)', 'max_iterations': '(1000)', 'tolerance': '(1e-06)', 'n_history_points': '(1000)', 'beta_1': '(0.9)', 'beta_2': '(0.999)'}), '(f, df, x_t=10, learning_rate=0.1, max_iterations=1000, tolerance=1e-06,\n n_history_points=1000, beta_1=0.9, beta_2=0.999)\n', (728, 853), False, 'from gradient_descent.Adam import Adam\n'), ((1701, 1724), 'numpy.array', 'np.array', (['([None] * 1000)'], {}), '([None] * 1000)\n', (1709, 1724), True, 'import numpy as np\n'), ((3393, 3409), 'numpy.sqrt', 'np.sqrt', (['v_hat_t'], {}), '(v_hat_t)\n', (3400, 3409), True, 'import numpy as np\n'), ((4155, 4171), 'numpy.sqrt', 'np.sqrt', (['v_hat_t'], {}), '(v_hat_t)\n', (4162, 4171), True, 'import numpy as np\n')] |
__description__ = \
"""
Find the eyes of people in a frame and then give them glowing orbs instead
of normal eyes.
"""
__author__ = "<NAME>"
__date__ = "2018-12-19"
import pyfx
from .base import Effect
import numpy as np
class GlowingEyes(Effect):
"""
Give people in a collection of frames glowing eyes. The eyes are found
along the entirety of the video clip, and then interpolated for gaps where
the eyes were missed. As a result, this clas has a huge .bake() call with
a lot of options.
Waypoint properties
eye_scalar: float, how big to make the eyes relative to the size of the
eye found by dlib.
"""
def __init__(self,workspace):
self._default_waypoint = {"eye_scalar":1.0}
super().__init__(workspace)
self._baked = False
def bake(self,
training_data=None,
max_time_gap=5,
p_cutoff=0.9,
real_cutoff=100,
min_time_visible=5,
smooth_window_len=0):
"""
Prep for the glowing eyes effect by finding eyes over the course of the
video clip.
training_data: dlib face training data. If None, use the default model
max_time_gap: maximum time gap over which a facial feature is not seen
that can be interpolated.
max_time_gap: maximum time over which two similar faces are considered
the same without observing the face at intermediate times
p_cutoff: minimum probability at which two faces are considered the same
when comparing a newly found face to a collection of previously
identified faces.
real_cutoff: maximum Euclidian distance between two faces for which they
are considered the same
min_time_visible: do not return any face stacks in which the minimum time
seen is less than min_time_visible.
smooth_window_len: how much to smooth the interpolated trajectories.
"""
self._human_faces = pyfx.processors.HumanFaces(self._workspace,
training_data,
max_time_gap,
p_cutoff,
real_cutoff,
min_time_visible)
self._human_faces.bake()
self._eye_sprite = pyfx.visuals.sprites.GlowingParticle(radius=4)
self._left_eye_coord = {}
self._right_eye_coord = {}
for f in self._human_faces.face_stacks:
r = f.get_centroid("right_eye")
rt = r[0]
rc = r[1][0]
for i in range(len(rt)):
# make sure radius does not end up negaitve due to numerical
# error
if rc[i,2] < 0: rc[i,2] = 0.0
# Right eye coordinates (x, y, r)
to_write = (rc[i,1],rc[i,0],rc[i,2])
try:
self._right_eye_coord[rt[i]].append(to_write)
except KeyError:
self._right_eye_coord[rt[i]] = [to_write]
l = f.get_centroid("left_eye")
lt = l[0]
lc = l[1][0]
for i in range(len(lt)):
# make sure radius does not end up negaitve due to numerical
# error
if lc[i,2] < 0: lc[i,2] = 0.0
# Left eye coordinates (x, y, r)
to_write = (lc[i,1],lc[i,0],lc[i,2])
try:
self._left_eye_coord[lt[i]].append(to_write)
except KeyError:
self._left_eye_coord[lt[i]] = [to_write]
self._interpolate_waypoints(smooth_window_len)
# make sure interpolated eye scalar does not end up negative
self.eye_scalar[self.eye_scalar < 0] = 0.0
self._baked = True
def render(self,img):
"""Composite glowing-eye sprites onto `img` for the workspace's current time.

Looks up the eye coordinates recorded for time `t` by the baking pass
(each entry is an (x, y, radius) tuple keyed by time), draws one glowing
particle per eye into a transparent overlay, then alpha-composites the
overlay onto the input frame. If either eye dict has no entry for `t`
(no face seen at that time), that side is silently skipped.
"""
t = self._workspace.current_time
# Lazily run the face-detection/baking pass on first render.
if not self._baked:
self.bake()
# Transparent scratch image the sprites are drawn into; same shape as the frame.
tmp_img = np.zeros((img.shape),dtype=np.uint8)
try:
left_eyes = self._left_eye_coord[t]
for eye in left_eyes:
# eye[2] is the baked radius; scale it by the per-frame eye_scalar envelope.
self._eye_sprite.radius = eye[2]*self.eye_scalar[t]
self._eye_sprite.write_to_image(eye[:2],tmp_img)
except KeyError:
# No left eye recorded for this time point; draw nothing.
pass
try:
right_eyes = self._right_eye_coord[t]
for eye in right_eyes:
self._eye_sprite.radius = eye[2]*self.eye_scalar[t]
self._eye_sprite.write_to_image(eye[:2],tmp_img)
except KeyError:
# No right eye recorded for this time point; draw nothing.
pass
return pyfx.util.alpha_composite(img,tmp_img)
| [
"pyfx.util.alpha_composite",
"pyfx.processors.HumanFaces",
"numpy.zeros",
"pyfx.visuals.sprites.GlowingParticle"
] | [((2086, 2203), 'pyfx.processors.HumanFaces', 'pyfx.processors.HumanFaces', (['self._workspace', 'training_data', 'max_time_gap', 'p_cutoff', 'real_cutoff', 'min_time_visible'], {}), '(self._workspace, training_data, max_time_gap,\n p_cutoff, real_cutoff, min_time_visible)\n', (2112, 2203), False, 'import pyfx\n'), ((2536, 2582), 'pyfx.visuals.sprites.GlowingParticle', 'pyfx.visuals.sprites.GlowingParticle', ([], {'radius': '(4)'}), '(radius=4)\n', (2572, 2582), False, 'import pyfx\n'), ((4173, 4208), 'numpy.zeros', 'np.zeros', (['img.shape'], {'dtype': 'np.uint8'}), '(img.shape, dtype=np.uint8)\n', (4181, 4208), True, 'import numpy as np\n'), ((4771, 4810), 'pyfx.util.alpha_composite', 'pyfx.util.alpha_composite', (['img', 'tmp_img'], {}), '(img, tmp_img)\n', (4796, 4810), False, 'import pyfx\n')] |
import numpy as np
import matplotlib.pyplot as plt
from numpy import loadtxt

# Per-image inference times (seconds) recorded on a Jetson Nano, one value per line.
elapsed = loadtxt("SaveFiles/JetsonNano/test_HN2_32.txt", delimiter=" ", unpack=False)

# Derive the number of iterations from the data itself instead of hard-coding 500:
# if the log file holds a different number of samples, a fixed range(500) would
# make plt.plot(x, y) fail with a dimension mismatch.
iterations = len(elapsed)
x = range(iterations)
y = elapsed

plt.figure(figsize=(8, 8))
plt.plot(x, y)
plt.xlabel("Image")
plt.ylabel("Seconds")
plt.title('HN2 fp32 Processing Time /per Image')
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((104, 180), 'numpy.loadtxt', 'loadtxt', (['"""SaveFiles/JetsonNano/test_HN2_32.txt"""'], {'delimiter': '""" """', 'unpack': '(False)'}), "('SaveFiles/JetsonNano/test_HN2_32.txt', delimiter=' ', unpack=False)\n", (111, 180), False, 'from numpy import loadtxt\n'), ((214, 240), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (224, 240), True, 'import matplotlib.pyplot as plt\n'), ((241, 255), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (249, 255), True, 'import matplotlib.pyplot as plt\n'), ((256, 275), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Image"""'], {}), "('Image')\n", (266, 275), True, 'import matplotlib.pyplot as plt\n'), ((276, 297), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Seconds"""'], {}), "('Seconds')\n", (286, 297), True, 'import matplotlib.pyplot as plt\n'), ((298, 346), 'matplotlib.pyplot.title', 'plt.title', (['"""HN2 fp32 Processing Time /per Image"""'], {}), "('HN2 fp32 Processing Time /per Image')\n", (307, 346), True, 'import matplotlib.pyplot as plt\n'), ((348, 358), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (356, 358), True, 'import matplotlib.pyplot as plt\n')] |
import sys
sys.path.insert(1, '/data/s2675544/git/neural_deprojection/')
sys.path.insert(1, '/home/matthijs/git/neural_deprojection/')
from neural_deprojection.graph_net_utils import AbstractModule, \
histogramdd, efficient_nn_index
from graph_nets import blocks
from graph_nets.modules import SelfAttention
from sonnet.src import utils, once
from graph_nets.utils_tf import fully_connect_graph_static, concat
import numpy as np
import tensorflow as tf
import sonnet as snt
from graph_nets.graphs import GraphsTuple
from graph_nets.utils_tf import fully_connect_graph_dynamic
from neural_deprojection.graph_net_utils import AbstractModule, gaussian_loss_function, \
reconstruct_fields_from_gaussians
import tensorflow_probability as tfp
class MultiHeadLinear(AbstractModule):
"""Linear module, optionally including bias.

Applies `num_heads` independent linear maps to the same input, producing one
output of size `output_size` per head (used below for attention K/Q/V).
"""
def __init__(self,
output_size: int,
num_heads: int = 1,
with_bias: bool = True,
w_init=None,
b_init=None,
name=None):
"""Constructs a `Linear` module.
Args:
output_size: Output dimensionality.
num_heads: Number of independent linear maps (heads). Default `1`.
with_bias: Whether to include bias parameters. Default `True`.
w_init: Optional initializer for the weights. By default the weights are
initialized truncated random normal values with a standard deviation of
`1 / sqrt(input_feature_size)`, which is commonly used when the inputs
are zero centered (see https://arxiv.org/abs/1502.03167v3).
b_init: Optional initializer for the bias. By default the bias is
initialized to zero.
name: Name of the module.
"""
super(MultiHeadLinear, self).__init__(name=name)
self.output_size = output_size
self.with_bias = with_bias
self.w_init = w_init
self.num_heads = num_heads
if with_bias:
self.b_init = b_init if b_init is not None else snt.initializers.Zeros()
elif b_init is not None:
raise ValueError("When not using a bias the b_init must be None.")
# @once.once ensures the variables are created exactly once, on first call.
@once.once
def _initialize(self, inputs: tf.Tensor):
"""Constructs parameters used by this module."""
utils.assert_minimum_rank(inputs, 2)
input_size = inputs.shape[-1]
if input_size is None: # Can happen inside an @tf.function.
raise ValueError("Input size must be specified at module build time.")
self.input_size = input_size
if self.w_init is None:
# See https://arxiv.org/abs/1502.03167v3.
stddev = 1 / tf.math.sqrt(self.input_size * 1.0)
self.w_init = snt.initializers.TruncatedNormal(stddev=stddev)
# Weight tensor: one [input_size, output_size] matrix per head.
self.w = tf.Variable(
self.w_init([self.num_heads, self.input_size, self.output_size], inputs.dtype),
name="w")
if self.with_bias:
self.b = tf.Variable(
self.b_init([self.num_heads, self.output_size], inputs.dtype), name="b")
def _build(self, inputs: tf.Tensor) -> tf.Tensor:
"""Applies all heads to `inputs`; returns [num_nodes, num_heads, output_size]."""
self._initialize(inputs)
# [num_nodes, node_size].[num_heads, node_size, output_size] -> [num_nodes, num_heads, output_size]
outputs = tf.einsum('ns,hso->nho', inputs, self.w, optimize='optimal')
# outputs = tf.matmul(inputs, self.w)
if self.with_bias:
outputs = tf.add(outputs, self.b)
return outputs
class RelationNetwork(AbstractModule):
    """Implementation of a Relation Network.

    See https://arxiv.org/abs/1706.01427 for more details.

    The global and edges features of the input graph are not used, and are
    allowed to be `None` (the receivers and senders properties must be present).
    The output graph has updated, non-`None`, globals.
    """

    def __init__(self,
                 edge_model_fn,
                 global_model_fn,
                 reducer=tf.math.unsorted_segment_mean,
                 use_globals=False,
                 name="relation_network"):
        """Initializes the RelationNetwork module.

        Args:
          edge_model_fn: A callable that will be passed to EdgeBlock to perform
            per-edge computations. The callable must return a Sonnet module (or
            equivalent; see EdgeBlock for details).
          global_model_fn: A callable that will be passed to GlobalBlock to
            perform per-global computations. The callable must return a Sonnet
            module (or equivalent; see GlobalBlock for details).
          reducer: Reducer to be used by GlobalBlock to aggregate edges.
            Defaults to tf.math.unsorted_segment_mean.
          use_globals: Whether the edge and global blocks also consume the
            input graph's existing globals. Default `False`.
          name: The module name.
        """
        super(RelationNetwork, self).__init__(name=name)
        # Edges are recomputed from receiver and sender node features only.
        self._edge_block = blocks.EdgeBlock(
            edge_model_fn=edge_model_fn,
            use_edges=False,
            use_receiver_nodes=True,
            use_sender_nodes=True,
            use_globals=use_globals)
        # Globals are recomputed from the aggregated (reduced) edges only.
        self._global_block = blocks.GlobalBlock(
            global_model_fn=global_model_fn,
            use_edges=True,
            use_nodes=False,
            use_globals=use_globals,
            edges_reducer=reducer)

    def _build(self, graph):
        """Connects the RelationNetwork.

        Args:
          graph: A `graphs.GraphsTuple` containing `Tensor`s, except for the
            edges and global properties which may be `None`.

        Returns:
          A `graphs.GraphsTuple` with updated globals.

        Raises:
          ValueError: If any of `graph.nodes`, `graph.receivers` or
            `graph.senders` is `None`.
        """
        edge_block = self._edge_block(graph)
        output_graph = self._global_block(edge_block)
        return output_graph
# TODO: give option to feed position in the core network
class EncodeProcessDecode_E(AbstractModule):
"""Full encode-process-decode model.
The model we explore includes three components:
- An "Encoder" graph net, which independently encodes the edge, node, and
global attributes (does not compute relations etc.).
- A "Core" graph net, which performs N rounds of processing (message-passing)
steps. The input to the Core is the concatenation of the Encoder's output
and the previous output of the Core (labeled "Hidden(t)" below, where "t" is
the processing step).
- A "Decoder" graph net, which independently decodes the edge, node, and
global attributes (does not compute relations etc.), on each message-passing
step.
Hidden(t) Hidden(t+1)
| ^
*---------* | *------* | *---------*
| | | | | | | |
Input --->| Encoder | *->| Core |--*->| Decoder |---> Output(t)
| |---->| | | |
*---------* *------* *---------*
"""
def __init__(self,
encoder,
core,
decoder,
name="EncodeProcessDecode_E"):
super(EncodeProcessDecode_E, self).__init__(name=name)
self._encoder = encoder
self._core = core
self._decoder = decoder
def _build(self, input_graph, num_processing_steps, positions):
# `positions` is threaded through encoder, every core step and decoder so the
# first three node features can be re-pinned to the particle coordinates.
latent_graph = self._encoder(input_graph, positions)
# tf.while_loop is used instead of the (commented-out) Python loop below so
# the repeated core application works inside a traced/compiled TF graph.
# for _ in range(num_processing_steps):
# latent_graph = self._core(latent_graph)
# state = (counter, latent_graph)
_, latent_graph = tf.while_loop(cond=lambda const, state: const < num_processing_steps,
body=lambda const, state: (const+1, self._core(state, positions)),
loop_vars=(tf.constant(0), latent_graph))
return self._decoder(latent_graph, positions)
class EncodeProcessDecode_D(AbstractModule):
"""Full encode-process-decode model.
The model we explore includes three components:
- An "Encoder" graph net, which independently encodes the edge, node, and
global attributes (does not compute relations etc.).
- A "Core" graph net, which performs N rounds of processing (message-passing)
steps. The input to the Core is the concatenation of the Encoder's output
and the previous output of the Core (labeled "Hidden(t)" below, where "t" is
the processing step).
- A "Decoder" graph net, which independently decodes the edge, node, and
global attributes (does not compute relations etc.), on each message-passing
step.
Hidden(t) Hidden(t+1)
| ^
*---------* | *------* | *---------*
| | | | | | | |
Input --->| Encoder | *->| Core |--*->| Decoder |---> Output(t)
| |---->| | | |
*---------* *------* *---------*
"""
def __init__(self,
encoder,
core,
decoder,
name="EncodeProcessDecode_D"):
super(EncodeProcessDecode_D, self).__init__(name=name)
self._encoder = encoder
self._core = core
self._decoder = decoder
def _build(self, input_graph, num_processing_steps, positions):
latent_graph = self._encoder(input_graph, positions)
# Same while_loop pattern as EncodeProcessDecode_E: apply the core
# `num_processing_steps` times in a graph-mode-friendly loop.
_, latent_graph = tf.while_loop(cond=lambda const, state: const < num_processing_steps,
body=lambda const, state: (const+1, self._core(state, positions)),
loop_vars=(tf.constant(0), latent_graph))
# Unlike EncodeProcessDecode_E, the decoder here is NOT given `positions`.
return self._decoder(latent_graph)
class CoreNetwork(AbstractModule):
"""
Core network which can be used in the EncodeProcessDecode network. Consists of a (full) graph network block
and a self attention block.
"""
def __init__(self,
num_heads,
multi_head_output_size,
input_node_size,
name=None):
super(CoreNetwork, self).__init__(name=name)
self.num_heads = num_heads
self.multi_head_output_size = multi_head_output_size
# Projects the concatenated multi-head output back to the input node size
# so the residual addition below is shape-compatible.
self.output_linear = snt.Linear(output_size=input_node_size)
self.FFN = snt.nets.MLP([32, input_node_size], activate_final=False) # Feed forward network
# NOTE(review): this lambda is not used inside this class — presumably used
# (or left over) from elsewhere; confirm before removing.
self.normalization = lambda x: (x - tf.reduce_mean(x)) / tf.math.reduce_std(x)
self.ln1 = snt.LayerNorm(axis=1, eps=1e-6, create_scale=True, create_offset=True)
self.ln2 = snt.LayerNorm(axis=1, eps=1e-6, create_scale=True, create_offset=True)
self.v_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads) # values
self.k_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads) # keys
self.q_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads) # queries
self.self_attention = SelfAttention()
def _build(self, latent, positions=None):
"""Transformer-style update of the node features of `latent`:
multi-head self-attention over the graph's edges, residual + layer norm,
feed-forward network, residual + layer norm."""
node_values = self.v_linear(latent.nodes)
node_keys = self.k_linear(latent.nodes)
node_queries = self.q_linear(latent.nodes)
attended_latent = self.self_attention(node_values=node_values,
node_keys=node_keys,
node_queries=node_queries,
attention_graph=latent)
# Flatten the per-head outputs: [n, heads, d] -> [n, heads*d].
output_nodes = tf.reshape(attended_latent.nodes, (-1, self.num_heads * self.multi_head_output_size))
output_nodes = self.ln1(self.output_linear(output_nodes) + latent.nodes)
output_nodes = self.ln2(self.FFN(output_nodes)+output_nodes)
output_graph = latent.replace(nodes=output_nodes)
if positions is not None:
# Re-pin the first three node features to the (fixed) particle positions.
prepend_nodes = tf.concat([positions, output_graph.nodes[:, 3:]], axis=1)
output_graph = output_graph.replace(nodes=prepend_nodes)
return output_graph
class EncoderNetwork(AbstractModule):
"""
Encoder network that updates the graph to viable input for the Core network.
Contains a node block to update the edges and a relation network to generate edges and globals.
"""
def __init__(self,
edge_model_fn,
node_model_fn,
global_model_fn,
name=None):
super(EncoderNetwork, self).__init__(name=name)
# Node block re-encodes each node independently from its own features only.
self.node_block = blocks.NodeBlock(node_model_fn,
use_received_edges=False,
use_sent_edges=False,
use_nodes=True,
use_globals=False)
# Relation network then produces edges and globals from the encoded nodes.
self.relation_network = RelationNetwork(edge_model_fn=edge_model_fn,
global_model_fn=global_model_fn)
def _build(self, input_graph, positions):
latent = self.node_block(input_graph)
if positions is not None:
# Re-pin the first three node features to the particle positions.
prepend_nodes = tf.concat([positions, latent.nodes[:, 3:]], axis=1)
latent = latent.replace(nodes=prepend_nodes)
output = self.relation_network(latent)
return output
class DecoderNetwork(AbstractModule):
"""
Encoder network that updates the graph to viable input for the Core network.
Contains a node block to update the edges and a relation network to generate edges and globals.
"""
def __init__(self,
node_model_fn,
name=None):
super(DecoderNetwork, self).__init__(name=name)
# Decodes nodes purely from the graph's globals (broadcast to every node).
self.node_block = blocks.NodeBlock(node_model_fn,
use_received_edges=False,
use_sent_edges=False,
use_nodes=False,
use_globals=True)
def _build(self, input_graph, positions):
# n_node is overridden so the node block broadcasts the globals to one
# node per decode position.
output = self.node_block(input_graph.replace(n_node=tf.constant([positions.shape[0]], dtype=tf.int32)))
# NOTE(review): `_replace` (namedtuple) and `replace` (GraphsTuple helper)
# are mixed here; edges are set to a placeholder scalar constant.
output = output._replace(edges=tf.constant(1.))
if positions is not None:
# Re-pin the first three node features to the decode positions.
prepend_nodes = tf.concat([positions, output.nodes[:, 3:]], axis=1)
output = output.replace(nodes=prepend_nodes)
return output
class Model(AbstractModule):
"""Model inherits from AbstractModule, which contains a __call__ function which executes a _build function
that is to be specified in the child class. So for example:
model = Model(), then model() returns the output of _build()
AbstractModule inherits from snt.Module, which has useful functions that can return the (trainable) variables,
so the Model class has this functionality as well
An instance of the RelationNetwork class also inherits from AbstractModule,
so it also executes its _build() function when called and it can return its (trainable) variables
A RelationNetwork contains an edge block and a global block:
The edge block generally uses the edge, receiver, sender and global attributes of the input graph
to calculate the new edges.
In our case we currently only use the receiver and sender attributes to calculate the edges.
The global block generally uses the aggregated edge, aggregated node and the global attributes of the input graph
to calculate the new globals.
In our case we currently only use the aggregated edge attributes to calculate the new globals.
As input the RelationNetwork needs two (neural network) functions:
one to calculate the new edges from receiver and sender nodes
and one to calculate the globals from the aggregated edges.
The new edges will be a vector with size 16 (i.e. the output of the first function in the RelationNetwork)
The new globals will also be a vector with size 16 (i.e. the output of the second function in the RelationNetwork)
The image_cnn downscales the image (currently from 4880x4880 to 35x35) and encodes the image in 16 channels.
So we (currently) go from (4880,4880,1) to (35,35,16)
"""
def __init__(self,
mlp_size=16,
cluster_encoded_size=10,
num_heads=10,
core_steps=10, name=None):
super(Model, self).__init__(name=name)
# Encoder half: graph -> globals (the latent "summary" of the point cloud).
self.epd_encoder = EncodeProcessDecode_E(encoder=EncoderNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True, activation=tf.nn.leaky_relu),
node_model_fn=lambda: snt.Linear(cluster_encoded_size),
global_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True, activation=tf.nn.leaky_relu)),
core=CoreNetwork(num_heads=num_heads,
multi_head_output_size=cluster_encoded_size,
input_node_size=cluster_encoded_size),
decoder=EncoderNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True, activation=tf.nn.leaky_relu),
node_model_fn=lambda: snt.Linear(cluster_encoded_size),
global_model_fn=lambda: snt.nets.MLP([32, 32, 64], activate_final=True, activation=tf.nn.leaky_relu)))
# Decoder half: globals + sampled positions -> per-position node properties.
self.epd_decoder = EncodeProcessDecode_D(encoder=DecoderNetwork(node_model_fn=lambda: snt.nets.MLP([32, 32, cluster_encoded_size], activate_final=True, activation=tf.nn.leaky_relu)),
core=CoreNetwork(num_heads=num_heads,
multi_head_output_size=cluster_encoded_size,
input_node_size=cluster_encoded_size),
decoder=snt.Sequential([RelationNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True, activation=tf.nn.leaky_relu),
global_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True, activation=tf.nn.leaky_relu)),
blocks.NodeBlock(
node_model_fn=lambda: snt.nets.MLP(
[cluster_encoded_size-3], activate_final=True, activation=tf.nn.leaky_relu),
use_received_edges=True,
use_sent_edges=True,
use_nodes=True,
use_globals=True)
])
)
self._core_steps = core_steps
@property
def step(self):
# NOTE(review): `_step` is never initialized in __init__, so reading `step`
# before assigning it raises AttributeError, not the ValueError below.
if self._step is None:
raise ValueError("Need to set step idx variable. model.step = epoch")
return self._step
@step.setter
def step(self, value):
self._step = value
def _build(self, batch, *args, **kwargs):
"""Encode the input graph, then decode properties at freshly sampled
positions; logs before/after histogram images and stds to tf.summary.
Returns (decoded_graph, nn_index) where nn_index maps each decode
position to its nearest input position."""
graph = batch
# del img
# del c
# First three node features are the particle positions.
positions = graph.nodes[:, :3]
for i in range(3, 10):
# Log a normalized 2D (x, y) histogram of each input property.
image_before, _ = histogramdd(positions[:, :2], bins=50, weights=graph.nodes[:, i])
image_before -= tf.reduce_min(image_before)
image_before /= tf.reduce_max(image_before)
tf.summary.image(f"{i}_xy_image_before", image_before[None, :, :, None], step=self.step)
tf.summary.scalar(f"properties{i}_std_before", tf.math.reduce_std(graph.nodes[:,i]), step=self.step)
encoded_graph = self.epd_encoder(graph, self._core_steps, positions)
encoded_graph = encoded_graph._replace(nodes=None, edges=None, receivers=None, senders=None)  # only pass through globals for sure
number_of_nodes = 1000
# Sample decode positions uniformly inside the input's bounding box.
decode_positions = tf.random.uniform(shape=(number_of_nodes, 3),
minval=tf.reduce_min(positions, axis=0),
maxval=tf.reduce_max(positions, axis=0))
# distance_matrix = util.pairwise_square_distance_matrix(positions, decode_positions, 1) # [10000, 1000]
# nn_index = tf.argmin(distance_matrix, axis=0) # [1000]
nn_index = efficient_nn_index(query_positions=decode_positions, positions=positions)
encoded_graph = encoded_graph._replace(nodes=decode_positions)
encoded_graph = fully_connect_graph_static(encoded_graph)  # TODO: only works if batch_size=1, might need to use dynamic
# NOTE(review): `n_edges` and `p` are computed but unused in this method.
n_edges = encoded_graph.edges.shape[0]
p = 2 * np.log(number_of_nodes) / number_of_nodes
encoded_graph = encoded_graph._replace(nodes=None)
decoded_graph = self.epd_decoder(encoded_graph, self._core_steps, decode_positions)
for i in range(7):
# Log the same style of histogram for each decoded property.
image_after, _ = histogramdd(decode_positions[:, :2], bins=50, weights=decoded_graph.nodes[:, i])
image_after -= tf.reduce_min(image_after)
image_after /= tf.reduce_max(image_after)
tf.summary.image(f"{i+3}_xy_image_after", image_after[None, :, :, None], step=self.step)
tf.summary.scalar(f"properties{i+3}_std_after", tf.math.reduce_std(decoded_graph.nodes[:,i]), step=self.step)
return decoded_graph, nn_index
| [
"tensorflow.einsum",
"sonnet.LayerNorm",
"tensorflow.reshape",
"tensorflow.math.reduce_std",
"sonnet.initializers.Zeros",
"graph_nets.utils_tf.fully_connect_graph_static",
"sonnet.nets.MLP",
"neural_deprojection.graph_net_utils.histogramdd",
"tensorflow.reduce_max",
"graph_nets.blocks.GlobalBlock"... | [((12, 73), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""/data/s2675544/git/neural_deprojection/"""'], {}), "(1, '/data/s2675544/git/neural_deprojection/')\n", (27, 73), False, 'import sys\n'), ((74, 135), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""/home/matthijs/git/neural_deprojection/"""'], {}), "(1, '/home/matthijs/git/neural_deprojection/')\n", (89, 135), False, 'import sys\n'), ((2261, 2297), 'sonnet.src.utils.assert_minimum_rank', 'utils.assert_minimum_rank', (['inputs', '(2)'], {}), '(inputs, 2)\n', (2286, 2297), False, 'from sonnet.src import utils, once\n'), ((3260, 3320), 'tensorflow.einsum', 'tf.einsum', (['"""ns,hso->nho"""', 'inputs', 'self.w'], {'optimize': '"""optimal"""'}), "('ns,hso->nho', inputs, self.w, optimize='optimal')\n", (3269, 3320), True, 'import tensorflow as tf\n'), ((4843, 4982), 'graph_nets.blocks.EdgeBlock', 'blocks.EdgeBlock', ([], {'edge_model_fn': 'edge_model_fn', 'use_edges': '(False)', 'use_receiver_nodes': '(True)', 'use_sender_nodes': '(True)', 'use_globals': 'use_globals'}), '(edge_model_fn=edge_model_fn, use_edges=False,\n use_receiver_nodes=True, use_sender_nodes=True, use_globals=use_globals)\n', (4859, 4982), False, 'from graph_nets import blocks\n'), ((5070, 5206), 'graph_nets.blocks.GlobalBlock', 'blocks.GlobalBlock', ([], {'global_model_fn': 'global_model_fn', 'use_edges': '(True)', 'use_nodes': '(False)', 'use_globals': 'use_globals', 'edges_reducer': 'reducer'}), '(global_model_fn=global_model_fn, use_edges=True,\n use_nodes=False, use_globals=use_globals, edges_reducer=reducer)\n', (5088, 5206), False, 'from graph_nets import blocks\n'), ((10233, 10272), 'sonnet.Linear', 'snt.Linear', ([], {'output_size': 'input_node_size'}), '(output_size=input_node_size)\n', (10243, 10272), True, 'import sonnet as snt\n'), ((10292, 10349), 'sonnet.nets.MLP', 'snt.nets.MLP', (['[32, input_node_size]'], {'activate_final': '(False)'}), '([32, input_node_size], 
activate_final=False)\n', (10304, 10349), True, 'import sonnet as snt\n'), ((10480, 10551), 'sonnet.LayerNorm', 'snt.LayerNorm', ([], {'axis': '(1)', 'eps': '(1e-06)', 'create_scale': '(True)', 'create_offset': '(True)'}), '(axis=1, eps=1e-06, create_scale=True, create_offset=True)\n', (10493, 10551), True, 'import sonnet as snt\n'), ((10570, 10641), 'sonnet.LayerNorm', 'snt.LayerNorm', ([], {'axis': '(1)', 'eps': '(1e-06)', 'create_scale': '(True)', 'create_offset': '(True)'}), '(axis=1, eps=1e-06, create_scale=True, create_offset=True)\n', (10583, 10641), True, 'import sonnet as snt\n'), ((10992, 11007), 'graph_nets.modules.SelfAttention', 'SelfAttention', ([], {}), '()\n', (11005, 11007), False, 'from graph_nets.modules import SelfAttention\n'), ((11508, 11598), 'tensorflow.reshape', 'tf.reshape', (['attended_latent.nodes', '(-1, self.num_heads * self.multi_head_output_size)'], {}), '(attended_latent.nodes, (-1, self.num_heads * self.\n multi_head_output_size))\n', (11518, 11598), True, 'import tensorflow as tf\n'), ((12489, 12608), 'graph_nets.blocks.NodeBlock', 'blocks.NodeBlock', (['node_model_fn'], {'use_received_edges': '(False)', 'use_sent_edges': '(False)', 'use_nodes': '(True)', 'use_globals': '(False)'}), '(node_model_fn, use_received_edges=False, use_sent_edges=\n False, use_nodes=True, use_globals=False)\n', (12505, 12608), False, 'from graph_nets import blocks\n'), ((13673, 13792), 'graph_nets.blocks.NodeBlock', 'blocks.NodeBlock', (['node_model_fn'], {'use_received_edges': '(False)', 'use_sent_edges': '(False)', 'use_nodes': '(False)', 'use_globals': '(True)'}), '(node_model_fn, use_received_edges=False, use_sent_edges=\n False, use_nodes=False, use_globals=True)\n', (13689, 13792), False, 'from graph_nets import blocks\n'), ((20907, 20980), 'neural_deprojection.graph_net_utils.efficient_nn_index', 'efficient_nn_index', ([], {'query_positions': 'decode_positions', 'positions': 'positions'}), '(query_positions=decode_positions, 
positions=positions)\n', (20925, 20980), False, 'from neural_deprojection.graph_net_utils import AbstractModule, histogramdd, efficient_nn_index\n'), ((21076, 21117), 'graph_nets.utils_tf.fully_connect_graph_static', 'fully_connect_graph_static', (['encoded_graph'], {}), '(encoded_graph)\n', (21102, 21117), False, 'from graph_nets.utils_tf import fully_connect_graph_static, concat\n'), ((2701, 2748), 'sonnet.initializers.TruncatedNormal', 'snt.initializers.TruncatedNormal', ([], {'stddev': 'stddev'}), '(stddev=stddev)\n', (2733, 2748), True, 'import sonnet as snt\n'), ((3416, 3439), 'tensorflow.add', 'tf.add', (['outputs', 'self.b'], {}), '(outputs, self.b)\n', (3422, 3439), True, 'import tensorflow as tf\n'), ((11864, 11921), 'tensorflow.concat', 'tf.concat', (['[positions, output_graph.nodes[:, 3:]]'], {'axis': '(1)'}), '([positions, output_graph.nodes[:, 3:]], axis=1)\n', (11873, 11921), True, 'import tensorflow as tf\n'), ((13090, 13141), 'tensorflow.concat', 'tf.concat', (['[positions, latent.nodes[:, 3:]]'], {'axis': '(1)'}), '([positions, latent.nodes[:, 3:]], axis=1)\n', (13099, 13141), True, 'import tensorflow as tf\n'), ((14237, 14288), 'tensorflow.concat', 'tf.concat', (['[positions, output.nodes[:, 3:]]'], {'axis': '(1)'}), '([positions, output.nodes[:, 3:]], axis=1)\n', (14246, 14288), True, 'import tensorflow as tf\n'), ((19819, 19884), 'neural_deprojection.graph_net_utils.histogramdd', 'histogramdd', (['positions[:, :2]'], {'bins': '(50)', 'weights': 'graph.nodes[:, i]'}), '(positions[:, :2], bins=50, weights=graph.nodes[:, i])\n', (19830, 19884), False, 'from neural_deprojection.graph_net_utils import AbstractModule, histogramdd, efficient_nn_index\n'), ((19913, 19940), 'tensorflow.reduce_min', 'tf.reduce_min', (['image_before'], {}), '(image_before)\n', (19926, 19940), True, 'import tensorflow as tf\n'), ((19969, 19996), 'tensorflow.reduce_max', 'tf.reduce_max', (['image_before'], {}), '(image_before)\n', (19982, 19996), True, 'import tensorflow as 
tf\n'), ((20009, 20101), 'tensorflow.summary.image', 'tf.summary.image', (['f"""{i}_xy_image_before"""', 'image_before[None, :, :, None]'], {'step': 'self.step'}), "(f'{i}_xy_image_before', image_before[None, :, :, None],\n step=self.step)\n", (20025, 20101), True, 'import tensorflow as tf\n'), ((21494, 21579), 'neural_deprojection.graph_net_utils.histogramdd', 'histogramdd', (['decode_positions[:, :2]'], {'bins': '(50)', 'weights': 'decoded_graph.nodes[:, i]'}), '(decode_positions[:, :2], bins=50, weights=decoded_graph.nodes[:, i]\n )\n', (21505, 21579), False, 'from neural_deprojection.graph_net_utils import AbstractModule, histogramdd, efficient_nn_index\n'), ((21602, 21628), 'tensorflow.reduce_min', 'tf.reduce_min', (['image_after'], {}), '(image_after)\n', (21615, 21628), True, 'import tensorflow as tf\n'), ((21656, 21682), 'tensorflow.reduce_max', 'tf.reduce_max', (['image_after'], {}), '(image_after)\n', (21669, 21682), True, 'import tensorflow as tf\n'), ((21695, 21789), 'tensorflow.summary.image', 'tf.summary.image', (['f"""{i + 3}_xy_image_after"""', 'image_after[None, :, :, None]'], {'step': 'self.step'}), "(f'{i + 3}_xy_image_after', image_after[None, :, :, None],\n step=self.step)\n", (21711, 21789), True, 'import tensorflow as tf\n'), ((1997, 2021), 'sonnet.initializers.Zeros', 'snt.initializers.Zeros', ([], {}), '()\n', (2019, 2021), True, 'import sonnet as snt\n'), ((2639, 2674), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['(self.input_size * 1.0)'], {}), '(self.input_size * 1.0)\n', (2651, 2674), True, 'import tensorflow as tf\n'), ((10439, 10460), 'tensorflow.math.reduce_std', 'tf.math.reduce_std', (['x'], {}), '(x)\n', (10457, 10460), True, 'import tensorflow as tf\n'), ((14158, 14174), 'tensorflow.constant', 'tf.constant', (['(1.0)'], {}), '(1.0)\n', (14169, 14174), True, 'import tensorflow as tf\n'), ((20158, 20195), 'tensorflow.math.reduce_std', 'tf.math.reduce_std', (['graph.nodes[:, i]'], {}), '(graph.nodes[:, i])\n', (20176, 20195), True, 
'import tensorflow as tf\n'), ((20587, 20619), 'tensorflow.reduce_min', 'tf.reduce_min', (['positions'], {'axis': '(0)'}), '(positions, axis=0)\n', (20600, 20619), True, 'import tensorflow as tf\n'), ((20673, 20705), 'tensorflow.reduce_max', 'tf.reduce_max', (['positions'], {'axis': '(0)'}), '(positions, axis=0)\n', (20686, 20705), True, 'import tensorflow as tf\n'), ((21244, 21267), 'numpy.log', 'np.log', (['number_of_nodes'], {}), '(number_of_nodes)\n', (21250, 21267), True, 'import numpy as np\n'), ((21844, 21889), 'tensorflow.math.reduce_std', 'tf.math.reduce_std', (['decoded_graph.nodes[:, i]'], {}), '(decoded_graph.nodes[:, i])\n', (21862, 21889), True, 'import tensorflow as tf\n'), ((7783, 7797), 'tensorflow.constant', 'tf.constant', (['(0)'], {}), '(0)\n', (7794, 7797), True, 'import tensorflow as tf\n'), ((9626, 9640), 'tensorflow.constant', 'tf.constant', (['(0)'], {}), '(0)\n', (9637, 9640), True, 'import tensorflow as tf\n'), ((10418, 10435), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x'], {}), '(x)\n', (10432, 10435), True, 'import tensorflow as tf\n'), ((14067, 14116), 'tensorflow.constant', 'tf.constant', (['[positions.shape[0]]'], {'dtype': 'tf.int32'}), '([positions.shape[0]], dtype=tf.int32)\n', (14078, 14116), True, 'import tensorflow as tf\n'), ((16469, 16543), 'sonnet.nets.MLP', 'snt.nets.MLP', (['[mlp_size]'], {'activate_final': '(True)', 'activation': 'tf.nn.leaky_relu'}), '([mlp_size], activate_final=True, activation=tf.nn.leaky_relu)\n', (16481, 16543), True, 'import sonnet as snt\n'), ((16637, 16669), 'sonnet.Linear', 'snt.Linear', (['cluster_encoded_size'], {}), '(cluster_encoded_size)\n', (16647, 16669), True, 'import sonnet as snt\n'), ((16765, 16839), 'sonnet.nets.MLP', 'snt.nets.MLP', (['[mlp_size]'], {'activate_final': '(True)', 'activation': 'tf.nn.leaky_relu'}), '([mlp_size], activate_final=True, activation=tf.nn.leaky_relu)\n', (16777, 16839), True, 'import sonnet as snt\n'), ((17231, 17305), 'sonnet.nets.MLP', 'snt.nets.MLP', 
(['[mlp_size]'], {'activate_final': '(True)', 'activation': 'tf.nn.leaky_relu'}), '([mlp_size], activate_final=True, activation=tf.nn.leaky_relu)\n', (17243, 17305), True, 'import sonnet as snt\n'), ((17399, 17431), 'sonnet.Linear', 'snt.Linear', (['cluster_encoded_size'], {}), '(cluster_encoded_size)\n', (17409, 17431), True, 'import sonnet as snt\n'), ((17527, 17603), 'sonnet.nets.MLP', 'snt.nets.MLP', (['[32, 32, 64]'], {'activate_final': '(True)', 'activation': 'tf.nn.leaky_relu'}), '([32, 32, 64], activate_final=True, activation=tf.nn.leaky_relu)\n', (17539, 17603), True, 'import sonnet as snt\n'), ((17701, 17799), 'sonnet.nets.MLP', 'snt.nets.MLP', (['[32, 32, cluster_encoded_size]'], {'activate_final': '(True)', 'activation': 'tf.nn.leaky_relu'}), '([32, 32, cluster_encoded_size], activate_final=True,\n activation=tf.nn.leaky_relu)\n', (17713, 17799), True, 'import sonnet as snt\n'), ((18204, 18278), 'sonnet.nets.MLP', 'snt.nets.MLP', (['[mlp_size]'], {'activate_final': '(True)', 'activation': 'tf.nn.leaky_relu'}), '([mlp_size], activate_final=True, activation=tf.nn.leaky_relu)\n', (18216, 18278), True, 'import sonnet as snt\n'), ((18391, 18465), 'sonnet.nets.MLP', 'snt.nets.MLP', (['[mlp_size]'], {'activate_final': '(True)', 'activation': 'tf.nn.leaky_relu'}), '([mlp_size], activate_final=True, activation=tf.nn.leaky_relu)\n', (18403, 18465), True, 'import sonnet as snt\n'), ((18654, 18749), 'sonnet.nets.MLP', 'snt.nets.MLP', (['[cluster_encoded_size - 3]'], {'activate_final': '(True)', 'activation': 'tf.nn.leaky_relu'}), '([cluster_encoded_size - 3], activate_final=True, activation=tf\n .nn.leaky_relu)\n', (18666, 18749), True, 'import sonnet as snt\n')] |
from math import log,exp,sqrt
import numpy as np
# Two-group (child/adult) SIR-style setup with children split by infector class.
#
#                               Child     Adult
# Basic contact matrix C = Child [ C_cc   C_ca ]
#                         Adult [ C_ac   C_aa ]
#
# N_i     = number of people in group i (i=c or a)
# C_{ij}  = number of people from group j encountered by an individual in group i,
#           so C_ca/C_ac = N_a/N_c.
#
# Children are tracked by who infected them:
#   I_c1 = children infected by children, I_c2 = children infected by adults.
# I_c1, I_c2, S_c, I_a, S_a are absolute numbers.
#
# dI_c1/dt = S_c*C_cc*(beta_cc1.I_c1+beta_cc2.I_c2)/N_c
# dI_c2/dt = S_c*C_ca*(beta_ca.I_a)/N_a
# dI_a/dt  = S_a*(C_ac*(beta_ac1.I_c1+beta_ac2.I_c2)/N_c+C_aa*beta_aa.I_a/N_a)
#
# beta_ij = P(j not isolating | infected) * Transmissibility(j) * Susceptibility(i)
#           (i = c,a; j = c1,c2,a)
#
# These combine into the derived contact matrix
#
#          c1              c2              a
# D = c1 [ C_cc.beta_cc1   C_cc.beta_cc2                 ]
#     c2 [                                 C_ca.beta_ca  ]
#     a  [ C_ac.beta_ac1   C_ac.beta_ac2   C_aa.beta_aa  ]
#
# with force of infection [F_c1, F_c2, F_a]' = D . [I_c1/N_c, I_c2/N_c, I_a/N_a]'
# and dynamics
#   d/dt [I_c1, I_c2, I_a]' = [S_c.F_c1, S_c.F_c2, S_a.F_a]' - gamma.[I_c1, I_c2, I_a]'
#   d/dt [S_c, S_a]'        = -[S_c.(F_c1+F_c2), S_a.F_a]'
#
# Index order is (c, a) for groups and (c1, c2, a) for infector classes.
######################################################################################
# PARAMETERS
# Population sizes (children, adults) for England:
# https://explore-education-statistics.service.gov.uk/find-statistics/school-pupils-and-their-characteristics
N = [ 8.9e6, 57.8e6 ]
# Guesstimates (not to be relied on)
suscep = np.array([ 0.25, 0.25 ])        # susceptibility by group (c, a)
transm = np.array([ 0.15, 0.15, 0.25 ])  # transmissibility by class (c1, c2, a)
nonisolate = [ 0.5, 0.2, 0.5]          # P(not isolating | infected) by class
# Basic contact matrix; the -1 is a placeholder overwritten just below so that
# C_ac stays consistent with C_ca via the population-size relation.
C = np.array([[ 8, 3],
[ -1, 3]], dtype=float)
C[1,0]=C[0,1]*N[0]/N[1]
# beta[i, j] = transm[j] * suscep[i] * nonisolate[j]
beta = np.array([[transm[j]*suscep[i]*nonisolate[j] for j in range(3)]
                 for i in range(2)])
# Initial infected numbers; ONS infection survey, 18 Sep shows ~0.3% prevalence
# (children split evenly between the two infector classes).
I = np.array([ 0.0015*N[0], 0.0015*N[0], 0.003*N[1]])
S = np.array([ 0.85*N[0], 0.85*N[1]])
gamma = 0.1 # recovery rate
#######################################################################################
D = np.array([[C[0,0]*beta[0,0], C[0,0]*beta[0,1], 0],
[0, 0, C[0,1]*beta[0,2]],
[C[1,0]*beta[1,0], C[1,0]*beta[1,1], C[1,1]*beta[1,2]]])
NN=np.array([N[0], N[0], N[1]])
print('"Child1" means Children infected by children')
print('"Child2" means Children infected by adults')
print()
print("Susceptibilities (child, adult):",suscep)
print("Transmissibilities (child1, child2, adult):",transm)
print("Non-isolation probabilities (child1, child2, adult):",nonisolate)
print()
print("Simple contact matrix:")
print(C)
print()
print("Derived contact matrix:")
print(D)
print()
print("I_c1 = Children infected by children, as a proportion of all children")
print("I_c1 = Children infected by adults, as a proportion of all children")
print("I_a = Infected adults, as a proportion of all adults")
print()
print(" Day I_c1 %% I_c2 %% I_a %%")
subdiv=1000
delta=1/subdiv
days=60
for step in range(days*subdiv+1):
if step%subdiv==0: print("%4d"%(step//subdiv),' '.join("%10.6f"%(x*100) for x in I/NN))
F=np.matmul(D,I/NN)
T=[S[0], S[0], S[1]]*F
I+=delta*(T-gamma*I)
S-=delta*np.array([T[0]+T[1], T[2]])
| [
"numpy.zeros",
"numpy.array",
"numpy.matmul"
] | [((1950, 1972), 'numpy.array', 'np.array', (['[0.25, 0.25]'], {}), '([0.25, 0.25])\n', (1958, 1972), True, 'import numpy as np\n'), ((1984, 2012), 'numpy.array', 'np.array', (['[0.15, 0.15, 0.25]'], {}), '([0.15, 0.15, 0.25])\n', (1992, 2012), True, 'import numpy as np\n'), ((2095, 2135), 'numpy.array', 'np.array', (['[[8, 3], [-1, 3]]'], {'dtype': 'float'}), '([[8, 3], [-1, 3]], dtype=float)\n', (2103, 2135), True, 'import numpy as np\n'), ((2183, 2199), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (2191, 2199), True, 'import numpy as np\n'), ((2346, 2400), 'numpy.array', 'np.array', (['[0.0015 * N[0], 0.0015 * N[0], 0.003 * N[1]]'], {}), '([0.0015 * N[0], 0.0015 * N[0], 0.003 * N[1]])\n', (2354, 2400), True, 'import numpy as np\n'), ((2401, 2437), 'numpy.array', 'np.array', (['[0.85 * N[0], 0.85 * N[1]]'], {}), '([0.85 * N[0], 0.85 * N[1]])\n', (2409, 2437), True, 'import numpy as np\n'), ((2558, 2723), 'numpy.array', 'np.array', (['[[C[0, 0] * beta[0, 0], C[0, 0] * beta[0, 1], 0], [0, 0, C[0, 1] * beta[0, \n 2]], [C[1, 0] * beta[1, 0], C[1, 0] * beta[1, 1], C[1, 1] * beta[1, 2]]]'], {}), '([[C[0, 0] * beta[0, 0], C[0, 0] * beta[0, 1], 0], [0, 0, C[0, 1] *\n beta[0, 2]], [C[1, 0] * beta[1, 0], C[1, 0] * beta[1, 1], C[1, 1] *\n beta[1, 2]]])\n', (2566, 2723), True, 'import numpy as np\n'), ((2754, 2782), 'numpy.array', 'np.array', (['[N[0], N[0], N[1]]'], {}), '([N[0], N[0], N[1]])\n', (2762, 2782), True, 'import numpy as np\n'), ((3632, 3652), 'numpy.matmul', 'np.matmul', (['D', '(I / NN)'], {}), '(D, I / NN)\n', (3641, 3652), True, 'import numpy as np\n'), ((3709, 3738), 'numpy.array', 'np.array', (['[T[0] + T[1], T[2]]'], {}), '([T[0] + T[1], T[2]])\n', (3717, 3738), True, 'import numpy as np\n')] |
import numpy as np
import rowan
from numpy.linalg import norm
from ef.config.component import ConfigComponent
from ef.util.serializable_h5 import SerializableH5
__all__ = ['Shape', 'Box', 'Cylinder', 'Tube', 'Sphere', 'Cone']
class Shape(ConfigComponent, SerializableH5):
    """Abstract base class for 3D shape primitives.

    Concrete shapes implement drawing, a vectorized point-membership test,
    and uniform random sampling of interior points.
    """

    def visualize(self, visualizer, **kwargs):
        raise NotImplementedError()

    def are_positions_inside(self, positions):
        raise NotImplementedError()

    def generate_uniform_random_posititons(self, random_state, n):
        # NOTE: method name kept as-is (historical misspelling) because
        # subclasses and callers reference it.
        raise NotImplementedError()

    def generate_uniform_random_position(self, random_state):
        """Sample one interior point: the first row of a 1-point batch."""
        batch = self.generate_uniform_random_posititons(random_state, 1)
        return batch[0]
def rotation_from_z(vector):
    """
    Find a quaternion that rotates the z-axis onto a given vector.

    :param vector: Any non-zero 3-component vector
    :return: Array of length 4 with the rotation quaternion (w, x, y, z)
    """
    cos2 = (vector / norm(vector))[2]
    cos = np.sqrt((1 + cos2) / 2)
    sin = np.sqrt((1 - cos2) / 2)
    axis = np.cross((0, 0, 1), vector)
    axis_len = norm(axis)
    if axis_len == 0:
        # Degenerate case: `vector` is (anti)parallel to z, so the cross
        # product vanishes and the old axis normalization produced NaNs.
        if cos2 > 0:
            return np.array([1.0, 0.0, 0.0, 0.0])  # identity rotation
        return np.array([0.0, 1.0, 0.0, 0.0])  # pi rotation about x
    vector_component = (axis / axis_len) * sin
    return np.concatenate(([cos], vector_component))
class Box(Shape):
    """Axis-aligned box defined by its minimal corner and edge lengths."""

    def __init__(self, origin=(0, 0, 0), size=(1, 1, 1)):
        # builtin `float` instead of the alias `np.float`, which is removed
        # in numpy >= 1.24.
        self.origin = np.array(origin, float)
        self.size = np.array(size, float)

    def visualize(self, visualizer, **kwargs):
        visualizer.draw_box(self.size, self.origin, **kwargs)

    def are_positions_inside(self, positions):
        """Vectorized membership test for points of shape (..., 3)."""
        return np.logical_and(np.all(positions >= self.origin, axis=-1),
                              np.all(positions <= self.origin + self.size, axis=-1))

    def generate_uniform_random_posititons(self, random_state, n):
        """Draw `n` points uniformly inside the box; returns shape (n, 3)."""
        return random_state.uniform(self.origin, self.origin + self.size, (n, 3))
class Cylinder(Shape):
    """Right circular cylinder between `start` and `end` with radius `radius`."""

    def __init__(self, start=(0, 0, 0), end=(1, 0, 0), radius=1):
        # builtin `float` instead of the alias `np.float`, which is removed
        # in numpy >= 1.24.
        self.start = np.array(start, float)
        self.end = np.array(end, float)
        self.r = float(radius)
        self._rotation = rotation_from_z(self.end - self.start)

    def visualize(self, visualizer, **kwargs):
        visualizer.draw_cylinder(self.start, self.end, self.r, **kwargs)

    def are_positions_inside(self, positions):
        """Vectorized membership test for points of shape (..., 3)."""
        pointvec = positions - self.start
        axisvec = self.end - self.start
        axis = norm(axisvec)
        unit_axisvec = axisvec / axis
        # for one-point case, dot would return a scalar, so it's cast to array explicitly
        projection = np.asarray(np.dot(pointvec, unit_axisvec))
        perp_to_axis = norm(pointvec - unit_axisvec[np.newaxis] * projection[..., np.newaxis], axis=-1)
        result = np.logical_and.reduce([0 <= projection, projection <= axis, perp_to_axis <= self.r])
        return result

    def generate_uniform_random_posititons(self, random_state, n):
        """Draw `n` uniform points inside the cylinder.

        Radius uses the sqrt transform so the density is uniform over the
        disc; the z-axis-aligned sample is then rotated into place.
        """
        r = np.sqrt(random_state.uniform(0.0, 1.0, n)) * self.r
        phi = random_state.uniform(0.0, 2.0 * np.pi, n)
        x = r * np.cos(phi)
        y = r * np.sin(phi)
        z = random_state.uniform(0.0, norm(self.end - self.start), n)
        points = np.stack((x, y, z), -1)
        return rowan.rotate(self._rotation, points) + self.start
class Tube(Shape):
    """Hollow cylinder (annular cross-section) between `start` and `end`."""

    def __init__(self, start=(0, 0, 0), end=(1, 0, 0), inner_radius=1, outer_radius=2):
        # builtin `float` instead of the alias `np.float`, which is removed
        # in numpy >= 1.24.
        self.start = np.array(start, float)
        self.end = np.array(end, float)
        self.r = float(inner_radius)
        self.R = float(outer_radius)
        self._rotation = rotation_from_z(self.end - self.start)

    def visualize(self, visualizer, **kwargs):
        visualizer.draw_tube(self.start, self.end, self.r, self.R, **kwargs)

    def are_positions_inside(self, positions):
        """Vectorized membership test for points of shape (..., 3)."""
        pointvec = positions - self.start
        axisvec = self.end - self.start
        axis = norm(axisvec)
        unit_axisvec = axisvec / axis
        # for one-point case, dot would return a scalar, so it's cast to array explicitly
        projection = np.asarray(np.dot(pointvec, unit_axisvec))
        perp_to_axis = norm(pointvec - unit_axisvec[np.newaxis] * projection[..., np.newaxis], axis=-1)
        return np.logical_and.reduce(
            [0 <= projection, projection <= axis, self.r <= perp_to_axis, perp_to_axis <= self.R])

    def generate_uniform_random_posititons(self, random_state, n):
        """Draw `n` uniform points inside the annulus cross-section.

        For a uniform density, r = R*sqrt(U) requires U uniform on
        [(r/R)**2, 1]; the previous lower bound r/R skewed the density and
        never produced radii in [r, sqrt(r*R)).
        """
        r = np.sqrt(random_state.uniform((self.r / self.R) ** 2, 1.0, n)) * self.R
        phi = random_state.uniform(0.0, 2.0 * np.pi, n)
        x = r * np.cos(phi)
        y = r * np.sin(phi)
        z = random_state.uniform(0.0, norm(self.end - self.start), n)
        points = np.stack((x, y, z), -1)
        return rowan.rotate(self._rotation, points) + self.start
class Sphere(Shape):
    """Ball of radius `radius` centered at `origin`."""

    def __init__(self, origin=(0, 0, 0), radius=1):
        self.origin = np.array(origin)
        self.r = float(radius)

    def visualize(self, visualizer, **kwargs):
        visualizer.draw_sphere(self.origin, self.r, **kwargs)

    def are_positions_inside(self, positions):
        return norm(positions - self.origin, axis=-1) <= self.r

    def generate_uniform_random_posititons(self, random_state, n):
        """Rejection-sample `n` uniform points inside the ball.

        The proposal cube spans origin +/- r; the previous code drew from
        [origin, origin + r] only, i.e. a single octant of the ball.  Also
        accept a batch with exactly `n` hits (was `> n`).
        """
        while True:
            p = random_state.uniform(-1, 1, (n * 2, 3)) * self.r + self.origin
            p = p.compress(self.are_positions_inside(p), 0)
            if len(p) >= n:
                return p[:n]
class Cone(Shape):
    # NOTE(review): this class looks unfinished.  `start` defaults to a
    # 4-component tuple (likely a typo), and `self.end` is never assigned in
    # __init__ even though `visualize` reads it -- calling visualize() on a
    # default-constructed Cone raises AttributeError.  Confirm the intended
    # constructor signature before relying on this class.
    def __init__(self, start=(0, 0, 0, 1),
                 start_radii=(1, 2), end_radii=(3, 4)):
        self.start = np.array(start, np.float)
        self.start_radii = np.array(start_radii, np.float)
        self.end_radii = np.array(end_radii, np.float)

    def visualize(self, visualizer, **kwargs):
        visualizer.draw_cone(self.start, self.end,
                             self.start_radii, self.end_radii, **kwargs)

    # TODO: def are_positions_inside(self, point)
| [
"numpy.stack",
"rowan.rotate",
"numpy.cross",
"numpy.logical_and.reduce",
"numpy.all",
"numpy.sin",
"numpy.array",
"numpy.linalg.norm",
"numpy.cos",
"numpy.dot",
"numpy.concatenate",
"numpy.sqrt"
] | [((954, 977), 'numpy.sqrt', 'np.sqrt', (['((1 + cos2) / 2)'], {}), '((1 + cos2) / 2)\n', (961, 977), True, 'import numpy as np\n'), ((988, 1011), 'numpy.sqrt', 'np.sqrt', (['((1 - cos2) / 2)'], {}), '((1 - cos2) / 2)\n', (995, 1011), True, 'import numpy as np\n'), ((1023, 1050), 'numpy.cross', 'np.cross', (['(0, 0, 1)', 'vector'], {}), '((0, 0, 1), vector)\n', (1031, 1050), True, 'import numpy as np\n'), ((1111, 1152), 'numpy.concatenate', 'np.concatenate', (['([cos], vector_component)'], {}), '(([cos], vector_component))\n', (1125, 1152), True, 'import numpy as np\n'), ((1253, 1279), 'numpy.array', 'np.array', (['origin', 'np.float'], {}), '(origin, np.float)\n', (1261, 1279), True, 'import numpy as np\n'), ((1300, 1324), 'numpy.array', 'np.array', (['size', 'np.float'], {}), '(size, np.float)\n', (1308, 1324), True, 'import numpy as np\n'), ((1903, 1928), 'numpy.array', 'np.array', (['start', 'np.float'], {}), '(start, np.float)\n', (1911, 1928), True, 'import numpy as np\n'), ((1948, 1971), 'numpy.array', 'np.array', (['end', 'np.float'], {}), '(end, np.float)\n', (1956, 1971), True, 'import numpy as np\n'), ((2333, 2346), 'numpy.linalg.norm', 'norm', (['axisvec'], {}), '(axisvec)\n', (2337, 2346), False, 'from numpy.linalg import norm\n'), ((2562, 2647), 'numpy.linalg.norm', 'norm', (['(pointvec - unit_axisvec[np.newaxis] * projection[..., np.newaxis])'], {'axis': '(-1)'}), '(pointvec - unit_axisvec[np.newaxis] * projection[..., np.newaxis], axis=-1\n )\n', (2566, 2647), False, 'from numpy.linalg import norm\n'), ((2660, 2748), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['[0 <= projection, projection <= axis, perp_to_axis <= self.r]'], {}), '([0 <= projection, projection <= axis, perp_to_axis <=\n self.r])\n', (2681, 2748), True, 'import numpy as np\n'), ((3098, 3121), 'numpy.stack', 'np.stack', (['(x, y, z)', '(-1)'], {}), '((x, y, z), -1)\n', (3106, 3121), True, 'import numpy as np\n'), ((3317, 3342), 'numpy.array', 'np.array', (['start', 
'np.float'], {}), '(start, np.float)\n', (3325, 3342), True, 'import numpy as np\n'), ((3362, 3385), 'numpy.array', 'np.array', (['end', 'np.float'], {}), '(end, np.float)\n', (3370, 3385), True, 'import numpy as np\n'), ((3794, 3807), 'numpy.linalg.norm', 'norm', (['axisvec'], {}), '(axisvec)\n', (3798, 3807), False, 'from numpy.linalg import norm\n'), ((4023, 4108), 'numpy.linalg.norm', 'norm', (['(pointvec - unit_axisvec[np.newaxis] * projection[..., np.newaxis])'], {'axis': '(-1)'}), '(pointvec - unit_axisvec[np.newaxis] * projection[..., np.newaxis], axis=-1\n )\n', (4027, 4108), False, 'from numpy.linalg import norm\n'), ((4119, 4231), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['[0 <= projection, projection <= axis, self.r <= perp_to_axis, perp_to_axis <=\n self.R]'], {}), '([0 <= projection, projection <= axis, self.r <=\n perp_to_axis, perp_to_axis <= self.R])\n', (4140, 4231), True, 'import numpy as np\n'), ((4584, 4607), 'numpy.stack', 'np.stack', (['(x, y, z)', '(-1)'], {}), '((x, y, z), -1)\n', (4592, 4607), True, 'import numpy as np\n'), ((4770, 4786), 'numpy.array', 'np.array', (['origin'], {}), '(origin)\n', (4778, 4786), True, 'import numpy as np\n'), ((5463, 5488), 'numpy.array', 'np.array', (['start', 'np.float'], {}), '(start, np.float)\n', (5471, 5488), True, 'import numpy as np\n'), ((5516, 5547), 'numpy.array', 'np.array', (['start_radii', 'np.float'], {}), '(start_radii, np.float)\n', (5524, 5547), True, 'import numpy as np\n'), ((5573, 5602), 'numpy.array', 'np.array', (['end_radii', 'np.float'], {}), '(end_radii, np.float)\n', (5581, 5602), True, 'import numpy as np\n'), ((927, 939), 'numpy.linalg.norm', 'norm', (['vector'], {}), '(vector)\n', (931, 939), False, 'from numpy.linalg import norm\n'), ((1082, 1092), 'numpy.linalg.norm', 'norm', (['axis'], {}), '(axis)\n', (1086, 1092), False, 'from numpy.linalg import norm\n'), ((1513, 1554), 'numpy.all', 'np.all', (['(positions >= self.origin)'], {'axis': '(-1)'}), '(positions >= 
self.origin, axis=-1)\n', (1519, 1554), True, 'import numpy as np\n'), ((1586, 1639), 'numpy.all', 'np.all', (['(positions <= self.origin + self.size)'], {'axis': '(-1)'}), '(positions <= self.origin + self.size, axis=-1)\n', (1592, 1639), True, 'import numpy as np\n'), ((2507, 2537), 'numpy.dot', 'np.dot', (['pointvec', 'unit_axisvec'], {}), '(pointvec, unit_axisvec)\n', (2513, 2537), True, 'import numpy as np\n'), ((2971, 2982), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (2977, 2982), True, 'import numpy as np\n'), ((2999, 3010), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (3005, 3010), True, 'import numpy as np\n'), ((3049, 3076), 'numpy.linalg.norm', 'norm', (['(self.end - self.start)'], {}), '(self.end - self.start)\n', (3053, 3076), False, 'from numpy.linalg import norm\n'), ((3137, 3173), 'rowan.rotate', 'rowan.rotate', (['self._rotation', 'points'], {}), '(self._rotation, points)\n', (3149, 3173), False, 'import rowan\n'), ((3968, 3998), 'numpy.dot', 'np.dot', (['pointvec', 'unit_axisvec'], {}), '(pointvec, unit_axisvec)\n', (3974, 3998), True, 'import numpy as np\n'), ((4457, 4468), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (4463, 4468), True, 'import numpy as np\n'), ((4485, 4496), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (4491, 4496), True, 'import numpy as np\n'), ((4535, 4562), 'numpy.linalg.norm', 'norm', (['(self.end - self.start)'], {}), '(self.end - self.start)\n', (4539, 4562), False, 'from numpy.linalg import norm\n'), ((4623, 4659), 'rowan.rotate', 'rowan.rotate', (['self._rotation', 'points'], {}), '(self._rotation, points)\n', (4635, 4659), False, 'import rowan\n'), ((4991, 5029), 'numpy.linalg.norm', 'norm', (['(positions - self.origin)'], {'axis': '(-1)'}), '(positions - self.origin, axis=-1)\n', (4995, 5029), False, 'from numpy.linalg import norm\n')] |
# NumPy slicing tutorial script: mirrors Python-list slicing on ndarrays,
# then demonstrates broadcast assignment into slices, 2-D slicing, the
# view-vs-copy distinction, and fancy (integer-array) indexing.  The
# expected console output is shown in the comment after each print().
import numpy as np
# --- 1-D: plain Python list slicing, for comparison ---
l = list(range(10))
print(l)
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
print(l[4:8])
# [4, 5, 6, 7]
print(l[-5:-2])
# [5, 6, 7]
print(l[::-1])
# [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
# --- 1-D ndarray slicing works the same way ---
a = np.arange(10)
print(a)
# [0 1 2 3 4 5 6 7 8 9]
print(a[4:8])
# [4 5 6 7]
print(a[-5:-2])
# [5 6 7]
print(a[::-1])
# [9 8 7 6 5 4 3 2 1 0]
# --- assigning into a slice broadcasts a scalar or matches the shape ---
a[3:6] = 100
print(a)
# [  0   1   2 100 100 100   6   7   8   9]
a[3:6] = [100, 200, 300]
print(a)
# [  0   1   2 100 200 300   6   7   8   9]
# a[3:6] = [100, 200, 300, 400]
# ValueError: cannot copy sequence with size 4 to array axis with dimension 3
a = np.arange(10)
print(a)
# [0 1 2 3 4 5 6 7 8 9]
print(a[2:8:2])
# [2 4 6]
a[2:8:2] = 100
print(a)
# [  0   1 100   3 100   5 100   7   8   9]
a[2:8:2] = [100, 200, 300]
print(a)
# [  0   1 100   3 200   5 300   7   8   9]
# --- 2-D arrays: row/column slicing; note scalar index drops a dimension ---
a = np.arange(12).reshape((3, 4))
print(a)
# [[ 0  1  2  3]
#  [ 4  5  6  7]
#  [ 8  9 10 11]]
print(a[1:, 1:3])
# [[ 5  6]
#  [ 9 10]]
print(a[1:, :])
# [[ 4  5  6  7]
#  [ 8  9 10 11]]
print(a[1:])
# [[ 4  5  6  7]
#  [ 8  9 10 11]]
print(a[1])
# [4 5 6 7]
print(a[1].shape)
# (4,)
print(a[1:2])
# [[4 5 6 7]]
print(a[1:2].shape)
# (1, 4)
print(a[:, 1:3])
# [[ 1  2]
#  [ 5  6]
#  [ 9 10]]
print(a[:, 1])
# [1 5 9]
print(a[:, 1].shape)
# (3,)
print(a[:, 1:2])
# [[1]
#  [5]
#  [9]]
print(a[:, 1:2].shape)
# (3, 1)
# --- assigning into 2-D slices ---
a = np.arange(12).reshape((3, 4))
print(a)
# [[ 0  1  2  3]
#  [ 4  5  6  7]
#  [ 8  9 10 11]]
print(a[1:, 1:3])
# [[ 5  6]
#  [ 9 10]]
a[1:, 1:3] = 100
print(a)
# [[  0   1   2   3]
#  [  4 100 100   7]
#  [  8 100 100  11]]
a[1:, 1:3] = [100, 200]
print(a)
# [[  0   1   2   3]
#  [  4 100 200   7]
#  [  8 100 200  11]]
a[1:, 1:3] = [[100, 200], [300, 400]]
print(a)
# [[  0   1   2   3]
#  [  4 100 200   7]
#  [  8 300 400  11]]
a = np.arange(12).reshape((3, 4))
print(a)
# [[ 0  1  2  3]
#  [ 4  5  6  7]
#  [ 8  9 10 11]]
print(a[1:, ::2])
# [[ 4  6]
#  [ 8 10]]
a[1:, ::2] = 100
print(a)
# [[  0   1   2   3]
#  [100   5 100   7]
#  [100   9 100  11]]
a[1:, ::2] = [100, 200]
print(a)
# [[  0   1   2   3]
#  [100   5 200   7]
#  [100   9 200  11]]
a[1:, ::2] = [[100, 200], [300, 400]]
print(a)
# [[  0   1   2   3]
#  [100   5 200   7]
#  [300   9 400  11]]
# --- basic slices are VIEWS: writing through the slice mutates `a` ---
a = np.arange(12).reshape((3, 4))
print(a)
# [[ 0  1  2  3]
#  [ 4  5  6  7]
#  [ 8  9 10 11]]
a_slice = a[1:, 1:3]
print(a_slice)
# [[ 5  6]
#  [ 9 10]]
a_slice[0, 0] = 100
print(a_slice)
# [[100   6]
#  [  9  10]]
print(a)
# [[  0   1   2   3]
#  [  4 100   6   7]
#  [  8   9  10  11]]
# --- .copy() detaches the slice from the original array ---
a = np.arange(12).reshape((3, 4))
print(a)
# [[ 0  1  2  3]
#  [ 4  5  6  7]
#  [ 8  9 10 11]]
a_slice_copy = a[1:, 1:3].copy()
print(a_slice_copy)
# [[ 5  6]
#  [ 9 10]]
a_slice_copy[0, 0] = 100
print(a_slice_copy)
# [[100   6]
#  [  9  10]]
print(a)
# [[ 0  1  2  3]
#  [ 4  5  6  7]
#  [ 8  9 10 11]]
# --- fancy (integer-array) indexing, mixed with a slice ---
a = np.arange(12).reshape((3, 4))
print(a)
# [[ 0  1  2  3]
#  [ 4  5  6  7]
#  [ 8  9 10 11]]
print(a[[0, 2], 1:3])
# [[ 1  2]
#  [ 9 10]]
a[[0, 2], 1:3] = 100
print(a)
# [[  0 100 100   3]
#  [  4   5   6   7]
#  [  8 100 100  11]]
a[[0, 2], 1:3] = [100, 200]
print(a)
# [[  0 100 200   3]
#  [  4   5   6   7]
#  [  8 100 200  11]]
a[[0, 2], 1:3] = [[100, 200], [300, 400]]
print(a)
# [[  0 100 200   3]
#  [  4   5   6   7]
#  [  8 300 400  11]]
# --- fancy indexing yields a COPY: writing to it leaves `a` unchanged ---
a_subset = a[[0, 2], 1:3]
print(a_subset)
# [[100 200]
#  [300 400]]
a_subset[0, 0] = -1
print(a_subset)
# [[ -1 200]
#  [300 400]]
print(a)
# [[  0 100 200   3]
#  [  4   5   6   7]
#  [  8 300 400  11]]
| [
"numpy.arange"
] | [((195, 208), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (204, 208), True, 'import numpy as np\n'), ((598, 611), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (607, 611), True, 'import numpy as np\n'), ((827, 840), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (836, 840), True, 'import numpy as np\n'), ((1356, 1369), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (1365, 1369), True, 'import numpy as np\n'), ((1795, 1808), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (1804, 1808), True, 'import numpy as np\n'), ((2234, 2247), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (2243, 2247), True, 'import numpy as np\n'), ((2527, 2540), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (2536, 2540), True, 'import numpy as np\n'), ((2835, 2848), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (2844, 2848), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
# Create helix:
def make_helix(n, theta_max=8 * np.pi):
    """Return `n` sample points on a helix as an (n, 3) array.

    x runs linearly from 0 to theta_max / (8*pi); y and z trace a circle of
    radius 0.5 centered at (0.5, 0.5), so the default covers the unit cube.

    :param n: number of sample points
    :param theta_max: total winding angle (default 8*pi, i.e. four turns,
        which preserves the original behaviour)
    """
    theta = np.linspace(0, theta_max, n)
    x = theta / (8 * np.pi)  # normalization kept fixed for back-compat
    y = 0.5 * np.sin(theta) + 0.5
    z = 0.5 * np.cos(theta) + 0.5
    return np.stack((x, y, z)).T
def main():
    """Sample a helix, dump the points to a text file and scatter-plot them."""
    # Figure with an attached 3D axis
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    n = 100
    data = make_helix(n)

    # Persist the sampled points as a tab-separated table
    with open('input-helix.txt', 'w') as f:
        f.write("x\ty\tz\t# points describing a helix\n")
        for px, py, pz in data:
            f.write(f"{px}\t{py}\t{pz}\n")

    # One scatter call per point (kept from the original behaviour)
    for px, py, pz in data:
        # ax.scatter(data[0][i], data[1][i], data[2][i])
        ax.scatter(px, py, pz)

    ax.set_xlabel('X Axis')
    ax.set_ylabel('Y Axis')
    ax.set_zlabel('Z Axis')
    plt.show()
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"numpy.stack",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.linspace",
"numpy.cos"
] | [((167, 195), 'numpy.linspace', 'np.linspace', (['(0)', 'theta_max', 'n'], {}), '(0, theta_max, n)\n', (178, 195), True, 'import numpy as np\n'), ((297, 316), 'numpy.stack', 'np.stack', (['(x, y, z)'], {}), '((x, y, z))\n', (305, 316), True, 'import numpy as np\n'), ((396, 408), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (406, 408), True, 'import matplotlib.pyplot as plt\n'), ((1080, 1090), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1088, 1090), True, 'import matplotlib.pyplot as plt\n'), ((237, 250), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (243, 250), True, 'import numpy as np\n'), ((264, 277), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (270, 277), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from chainer import testing
from chainer.testing import attr
from chainercv.datasets import coco_instance_segmentation_label_names
from chainercv.datasets import COCOInstanceSegmentationDataset
from chainercv.utils import assert_is_instance_segmentation_dataset
try:
import pycocotools # NOQA
_available = True
except ImportError:
_available = False
def _create_paramters():
    """Build the parameter grid for the dataset tests: every (split, year)
    combination -- plus the two 2014-only extra splits -- crossed with every
    combination of the use/return flags.  (Function name kept as-is.)
    """
    combos = testing.product(
        {'split': ['train', 'val'], 'year': ['2014', '2017']})
    combos.extend([
        {'split': 'minival', 'year': '2014'},
        {'split': 'valminusminival', 'year': '2014'},
    ])
    flags = testing.product({
        'use_crowded': [False, True],
        'return_crowded': [False, True],
        'return_area': [False, True],
    })
    return testing.product_dict(combos, flags)
# One TestCase configuration is generated per parameter dict produced above;
# the dicts' keys become attributes (self.split, self.year, ...).
@testing.parameterize(*_create_paramters())
class TestCOCOInstanceSegmentationDataset(unittest.TestCase):

    def setUp(self):
        # Build the dataset under test from the injected parameters.
        self.dataset = COCOInstanceSegmentationDataset(
            split=self.split, year=self.year,
            use_crowded=self.use_crowded, return_crowded=self.return_crowded,
            return_area=self.return_area)

    @attr.slow
    @unittest.skipUnless(_available, 'pycocotools is not installed')
    def test_coco_instance_segmentation_dataset(self):
        """Check generic dataset conformance plus the optional area/crowded
        fields on 10 randomly chosen examples each."""
        assert_is_instance_segmentation_dataset(
            self.dataset,
            len(coco_instance_segmentation_label_names),
            n_example=10)

        if self.return_area:
            # `area` must be a float32 vector with one entry per mask instance.
            for _ in range(10):
                i = np.random.randint(0, len(self.dataset))
                _, mask, _, area = self.dataset[i][:4]
                self.assertIsInstance(area, np.ndarray)
                self.assertEqual(area.dtype, np.float32)
                self.assertEqual(area.shape, (mask.shape[0],))

        if self.return_crowded:
            # `crowded` is appended last; one boolean flag per instance.
            for _ in range(10):
                i = np.random.randint(0, len(self.dataset))
                example = self.dataset[i]
                crowded = example[-1]
                mask = example[1]
                self.assertIsInstance(crowded, np.ndarray)
                # NOTE(review): `np.bool` is an alias removed in numpy>=1.24;
                # this line raises AttributeError there (left unchanged here).
                self.assertEqual(crowded.dtype, np.bool)
                self.assertEqual(crowded.shape, (mask.shape[0],))
                if not self.use_crowded:
                    # Without use_crowded no crowded instances may be
                    # returned, so every flag must be False.
                    np.testing.assert_equal(crowded, 0)


testing.run_module(__name__, __file__)
| [
"chainer.testing.product",
"chainercv.datasets.COCOInstanceSegmentationDataset",
"unittest.skipUnless",
"numpy.testing.assert_equal",
"chainer.testing.run_module",
"chainer.testing.product_dict"
] | [((2448, 2486), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (2466, 2486), False, 'from chainer import testing\n'), ((447, 517), 'chainer.testing.product', 'testing.product', (["{'split': ['train', 'val'], 'year': ['2014', '2017']}"], {}), "({'split': ['train', 'val'], 'year': ['2014', '2017']})\n", (462, 517), False, 'from chainer import testing\n'), ((685, 800), 'chainer.testing.product', 'testing.product', (["{'use_crowded': [False, True], 'return_crowded': [False, True],\n 'return_area': [False, True]}"], {}), "({'use_crowded': [False, True], 'return_crowded': [False, \n True], 'return_area': [False, True]})\n", (700, 800), False, 'from chainer import testing\n'), ((834, 888), 'chainer.testing.product_dict', 'testing.product_dict', (['split_years', 'use_and_return_args'], {}), '(split_years, use_and_return_args)\n', (854, 888), False, 'from chainer import testing\n'), ((1297, 1360), 'unittest.skipUnless', 'unittest.skipUnless', (['_available', '"""pycocotools is not installed"""'], {}), "(_available, 'pycocotools is not installed')\n", (1316, 1360), False, 'import unittest\n'), ((1077, 1246), 'chainercv.datasets.COCOInstanceSegmentationDataset', 'COCOInstanceSegmentationDataset', ([], {'split': 'self.split', 'year': 'self.year', 'use_crowded': 'self.use_crowded', 'return_crowded': 'self.return_crowded', 'return_area': 'self.return_area'}), '(split=self.split, year=self.year,\n use_crowded=self.use_crowded, return_crowded=self.return_crowded,\n return_area=self.return_area)\n', (1108, 1246), False, 'from chainercv.datasets import COCOInstanceSegmentationDataset\n'), ((2410, 2445), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['crowded', '(0)'], {}), '(crowded, 0)\n', (2433, 2445), True, 'import numpy as np\n')] |
import numpy as np
import copy
from profilehooks import profile
import cv2
import time
class Segmentor_grab:
def __init__(self, img):
self.img = copy.copy(img)
self.mask_color = (1, 255, 255)
@profile
def segment(self, rect):
start = time.time()
mask = np.zeros(self.img.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
# rect = (165, 125, 200, 200)
cv2.grabCut(self.img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)
mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
img = self.img * mask2[:, :, np.newaxis]
end = time.time()
return end - start, img
| [
"cv2.grabCut",
"numpy.zeros",
"copy.copy",
"time.time",
"numpy.where"
] | [((161, 175), 'copy.copy', 'copy.copy', (['img'], {}), '(img)\n', (170, 175), False, 'import copy\n'), ((275, 286), 'time.time', 'time.time', ([], {}), '()\n', (284, 286), False, 'import time\n'), ((302, 340), 'numpy.zeros', 'np.zeros', (['self.img.shape[:2]', 'np.uint8'], {}), '(self.img.shape[:2], np.uint8)\n', (310, 340), True, 'import numpy as np\n'), ((360, 389), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (368, 389), True, 'import numpy as np\n'), ((409, 438), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (417, 438), True, 'import numpy as np\n'), ((485, 564), 'cv2.grabCut', 'cv2.grabCut', (['self.img', 'mask', 'rect', 'bgdModel', 'fgdModel', '(5)', 'cv2.GC_INIT_WITH_RECT'], {}), '(self.img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)\n', (496, 564), False, 'import cv2\n'), ((702, 713), 'time.time', 'time.time', ([], {}), '()\n', (711, 713), False, 'import time\n'), ((581, 622), 'numpy.where', 'np.where', (['((mask == 2) | (mask == 0))', '(0)', '(1)'], {}), '((mask == 2) | (mask == 0), 0, 1)\n', (589, 622), True, 'import numpy as np\n')] |
#%%
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import scipy as sp
from scipy import stats
import matplotlib.patches as mpl_patches
#%%
#Read CSV file in the folder
# Load the source table and write an unchanged copy next to it.
data = pd.read_csv('chart1.csv')
data.to_csv('new_chart.csv', index=False)
print(type(max(data['Observed'])))
#%%
# Show the column names so the user can pick axes:
print(list(data))
# Choose x and y axis depending on the headers
xaxis = input('Choose x axis: ')
xlabel = input('Input x axis label: ')
yaxis = input('Choose y axis: ')
ylabel = input('Input y axis label: ')
# Input title
title = input('Input plots\'s title: ')
# Choose whether to show the trendline ('Yes' enables it)
trendline = input('Do you want to plot the trendline (Yes/No)?: ')
#%%
# Change aspect ratio of the graph (inches)
width = int(input('Change width aspect of the graph: '))
height = int(input('Change height aspect of the graph: '))
# NOTE(review): `grid` stays a STRING ('True'/'False'); consumers must
# convert it to a boolean themselves.
grid = input('With or without grid (True/False): ')
#%%
# Export file name
name = input("Save file as: ")
# Choose file type and dpi values:
print("Available file type: jpg, png, svg.")
filetype= input('Choose export file type: ')
print("Recommended dpi values: 300, 600, 1200.")
dpi = int(input('Choose desired dpi: '))
#%%
def make_scatter_plot():
    """Scatter-plot the chosen columns, optionally with a least-squares
    trendline and a legend box showing its equation and R^2, then save.

    Reads the module-level configuration gathered above (data, xaxis, yaxis,
    xlabel, ylabel, title, trendline, width, height, grid, name, filetype,
    dpi).
    """
    fig, ax = plt.subplots(figsize=(width, height))
    plt.scatter(data[xaxis], data[yaxis], s=2, c='r', marker='*')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.tight_layout()
    # `grid` arrives as the string 'True'/'False'; any non-empty string is
    # truthy, so the old plt.grid(grid) always enabled the grid.  Convert.
    plt.grid(str(grid).strip().lower() == 'true')

    # Add trendline if requested
    if trendline == 'Yes':
        z = np.polyfit(data[xaxis], data[yaxis], 1)
        p = np.poly1d(z)
        # Calculate linear regression statistics with scipy.
        # NOTE(review): R^2 is computed from the fixed 'Observed'/'Simulated'
        # columns while the fit above uses the chosen axes -- confirm intent.
        slope, intercept, r_value, p_value, std_err = sp.stats.linregress(
            data['Observed'], data['Simulated'])
        r = r_value ** 2
        plt.plot(data[xaxis], p(data[xaxis]), "k-")

        # Two invisible handles let plain text lines appear in the legend box.
        handles = [mpl_patches.Rectangle((0, 0), 1, 1, fc="white", ec="white",
                                         lw=0, alpha=0)] * 2
        # Insert an explicit '+' for a non-negative intercept; the previous
        # duplicated branches both dropped the sign (e.g. "y=2.000x3.000").
        sign = '+' if z[1] >= 0 else ''
        labels = ['$y=%.3fx%s%.3f$' % (z[0], sign, z[1]),
                  '$R^{2}$ = ' + str('%.5f' % r)]
        # Suppress the blank space of the empty handle symbol and the padding
        # between symbol and label via handlelength/handletextpad.
        plt.legend(handles, labels, loc='best', fontsize='small',
                   fancybox=True, framealpha=0.7,
                   handlelength=0, handletextpad=0)

    fig.savefig(name + '.' + filetype, format=filetype, dpi=dpi)


make_scatter_plot()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.tight_layout",
"numpy.poly1d",
"numpy.polyfit",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"matplotlib.patches.Rectangle",
"scipy.stats.linregress",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotli... | [((210, 235), 'pandas.read_csv', 'pd.read_csv', (['"""chart1.csv"""'], {}), "('chart1.csv')\n", (221, 235), True, 'import pandas as pd\n'), ((1276, 1313), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (1288, 1313), True, 'import matplotlib.pyplot as plt\n'), ((1318, 1379), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data[xaxis]', 'data[yaxis]'], {'s': '(2)', 'c': '"""r"""', 'marker': '"""*"""'}), "(data[xaxis], data[yaxis], s=2, c='r', marker='*')\n", (1329, 1379), True, 'import matplotlib.pyplot as plt\n'), ((1384, 1402), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (1394, 1402), True, 'import matplotlib.pyplot as plt\n'), ((1408, 1426), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (1418, 1426), True, 'import matplotlib.pyplot as plt\n'), ((1432, 1448), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1441, 1448), True, 'import matplotlib.pyplot as plt\n'), ((1454, 1472), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1470, 1472), True, 'import matplotlib.pyplot as plt\n'), ((1478, 1492), 'matplotlib.pyplot.grid', 'plt.grid', (['grid'], {}), '(grid)\n', (1486, 1492), True, 'import matplotlib.pyplot as plt\n'), ((1567, 1606), 'numpy.polyfit', 'np.polyfit', (['data[xaxis]', 'data[yaxis]', '(1)'], {}), '(data[xaxis], data[yaxis], 1)\n', (1577, 1606), True, 'import numpy as np\n'), ((1620, 1632), 'numpy.poly1d', 'np.poly1d', (['z'], {}), '(z)\n', (1629, 1632), True, 'import numpy as np\n'), ((1743, 1799), 'scipy.stats.linregress', 'sp.stats.linregress', (["data['Observed']", "data['Simulated']"], {}), "(data['Observed'], data['Simulated'])\n", (1762, 1799), True, 'import scipy as sp\n'), ((2610, 2735), 'matplotlib.pyplot.legend', 'plt.legend', (['handles', 'labels'], {'loc': '"""best"""', 'fontsize': '"""small"""', 'fancybox': '(True)', 'framealpha': '(0.7)', 'handlelength': 
'(0)', 'handletextpad': '(0)'}), "(handles, labels, loc='best', fontsize='small', fancybox=True,\n framealpha=0.7, handlelength=0, handletextpad=0)\n", (2620, 2735), True, 'import matplotlib.pyplot as plt\n'), ((3560, 3685), 'matplotlib.pyplot.legend', 'plt.legend', (['handles', 'labels'], {'loc': '"""best"""', 'fontsize': '"""small"""', 'fancybox': '(True)', 'framealpha': '(0.7)', 'handlelength': '(0)', 'handletextpad': '(0)'}), "(handles, labels, loc='best', fontsize='small', fancybox=True,\n framealpha=0.7, handlelength=0, handletextpad=0)\n", (3570, 3685), True, 'import matplotlib.pyplot as plt\n'), ((2052, 2126), 'matplotlib.patches.Rectangle', 'mpl_patches.Rectangle', (['(0, 0)', '(1)', '(1)'], {'fc': '"""white"""', 'ec': '"""white"""', 'lw': '(0)', 'alpha': '(0)'}), "((0, 0), 1, 1, fc='white', ec='white', lw=0, alpha=0)\n", (2073, 2126), True, 'import matplotlib.patches as mpl_patches\n'), ((3002, 3076), 'matplotlib.patches.Rectangle', 'mpl_patches.Rectangle', (['(0, 0)', '(1)', '(1)'], {'fc': '"""white"""', 'ec': '"""white"""', 'lw': '(0)', 'alpha': '(0)'}), "((0, 0), 1, 1, fc='white', ec='white', lw=0, alpha=0)\n", (3023, 3076), True, 'import matplotlib.patches as mpl_patches\n')] |
#!/usr/bin/env python
import numpy as np
from mpi4py import MPI

# Communicator group
comm = MPI.COMM_WORLD
# Number of processes in the communicator group
size = comm.Get_size()
# Get the rank of the current process in the communicator group
rank = comm.Get_rank()

# Initialize the local row and the gathered table.  np.zeros defaults to
# float64, so the buffers must be sent as MPI.DOUBLE -- the previous
# declaration of MPI.INT mismatched the numpy dtype and corrupted the
# gathered values.
row = np.zeros(size)
table = np.zeros((size, size))

# Each process computes its local values: entry i is i * rank
for i in range(size):
    row[i] = i * rank

# Print table in each process before gathering
print(f'Process {rank} table before Allgather: {table}\n')

# Gathering occurs: every process receives every row
comm.Allgather([row, MPI.DOUBLE], [table, MPI.DOUBLE])

# Print table in each process after gathering
print(f'Process {rank} table after Allgather: {table}\n')
| [
"numpy.zeros"
] | [((305, 319), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (313, 319), True, 'import numpy as np\n'), ((328, 350), 'numpy.zeros', 'np.zeros', (['(size, size)'], {}), '((size, size))\n', (336, 350), True, 'import numpy as np\n')] |
import SimpleITK as sitk
import numpy as np
import tensorflow as tf
from medpy.metric import hd,asd
from config.Defines import Get_Name_By_Index
from dirutil.helper import get_name_wo_suffix
from evaluate.metric import calculate_binary_dice, neg_jac, print_mean_and_std
from excelutil.output2excel import outpu2excel
from tfop import utils as util, layers as layer, losses as loss
from tfop.losses import restore_loss
from learn2reg.challenge_sampler import CHallengeSampler
from learn2reg.loss import NVISimilarity
from learn2reg.sampler import MMSampler
from model.base_model import BaseModelV2
from sitkImageIO.itkdatawriter import sitk_write_lab,sitk_write_images,sitk_write_labs
class MMReg_base(BaseModelV2):
    """Bidirectional multi-moving-image registration model (base class).

    Two moving images (mv1, mv2) are each registered to a shared fixed
    image by separate sub-networks that predict both a forward dense
    displacement field (moving -> fixed) and an inverse one
    (fixed -> moving).  Training combines a multi-scale NVI similarity
    loss, a cycle-consistency (restoration) loss, a bending-energy
    regulariser and a consistency term between the two warped results.
    """

    def __init__(self, sess, args):
        BaseModelV2.__init__(self, sess, args)
        self.train_sampler = MMSampler(self.args, 'train')
        self.validate_sampler = MMSampler(self.args, 'validate')
        self.minibatch_size = self.args.batch_size
        # Cubic volumes: one edge length replicated for all three axes.
        self.image_size = [self.args.image_size, self.args.image_size, self.args.image_size]
        self.grid_ref = util.get_reference_grid(self.image_size)
        self.is_train = (args.phase == 'train')
        self.build_network()
        self.summary()

    def warp_image(self, input_, ddf):
        """Resample ``input_`` at the reference grid displaced by ``ddf``."""
        return util.resample_linear(input_, self.grid_ref + ddf)

    def _regnet(self, mv_img, mv_lab, fix_img, fix_lab, reuse=False, scop_name="shared_regnet"):
        """Build one registration sub-network under variable scope ``scop_name``.

        Returns ``(ddf_MV_FIX, ddf_FIX_MV, w_mv_img, w_mv_lab, r_mv_img,
        w_fix_img, w_fix_lab, r_fix_img)`` — the forward/inverse DDFs, the
        warped moving image/label, the cycle-restored moving image, the
        warped fixed image/label and the cycle-restored fixed image.
        """
        input_layer = tf.concat([layer.resize_volume(mv_img, self.image_size), fix_img], axis=4)
        ddf_levels = [0, 1, 2, 3, 4]
        self.num_channel_initial = self.args.num_channel_initial
        nc = [int(self.num_channel_initial * (2 ** i)) for i in range(5)]
        min_level = min(ddf_levels)
        with tf.variable_scope(scop_name, reuse=reuse):
            # Encoder: four downsampling residual blocks.
            h0, hc0 = layer.downsample_resnet_block(self.is_train, input_layer, 2, nc[0], k_conv0=[7, 7, 7], name='local_down_0')
            h1, hc1 = layer.downsample_resnet_block(self.is_train, h0, nc[0], nc[1], name='local_down_1')
            h2, hc2 = layer.downsample_resnet_block(self.is_train, h1, nc[1], nc[2], name='local_down_2')
            h3, hc3 = layer.downsample_resnet_block(self.is_train, h2, nc[2], nc[3], name='local_down_3')
            # Bottleneck + decoder with skip connections.
            hm = [layer.conv3_block(self.is_train, h3, nc[3], nc[4], name='local_deep_4')]
            hm += [layer.upsample_resnet_block(self.is_train, hm[0], hc3, nc[4], nc[3], name='local_up_3')] if min_level < 4 else []
            hm += [layer.upsample_resnet_block(self.is_train, hm[1], hc2, nc[3], nc[2], name='local_up_2')] if min_level < 3 else []
            hm += [layer.upsample_resnet_block(self.is_train, hm[2], hc1, nc[2], nc[1], name='local_up_1')] if min_level < 2 else []
            hm += [layer.upsample_resnet_block(self.is_train, hm[3], hc0, nc[1], nc[0], name='local_up_0')] if min_level < 1 else []
            # Sum per-level DDF contributions into the forward field ...
            ddf_list = [layer.ddf_summand(hm[4 - idx], nc[idx], self.image_size, name='ddf1_sum_%d' % idx) for idx in ddf_levels]
            ddf_list = tf.stack(ddf_list, axis=5)
            ddf_MV_FIX = tf.reduce_sum(ddf_list, axis=5)
            # ... and the inverse field.
            ddf_list2 = [layer.ddf_summand(hm[4 - idx], nc[idx], self.image_size, name='ddf2_sum_%d' % idx) for idx in ddf_levels]
            ddf_list2 = tf.stack(ddf_list2, axis=5)
            ddf_FIX_MV = tf.reduce_sum(ddf_list2, axis=5)
        w_mv_img = self.warp_image(mv_img, ddf_MV_FIX)
        w_mv_lab = self.warp_image(mv_lab, ddf_MV_FIX)
        r_mv_img = self.warp_image(w_mv_img, ddf_FIX_MV)    # restored moving image (cycle)
        w_fix_img = self.warp_image(fix_img, ddf_FIX_MV)
        w_fix_lab = self.warp_image(fix_lab, ddf_FIX_MV)
        r_fix_img = self.warp_image(w_fix_img, ddf_MV_FIX)  # restored fixed image (cycle)
        return ddf_MV_FIX, ddf_FIX_MV, w_mv_img, w_mv_lab, r_mv_img, w_fix_img, w_fix_lab, r_fix_img

    def cal_nvi_loss(self, w_mv_img, i_fix_img, w_fix_img, i_mv_img):
        """Symmetric multi-scale NVI similarity loss for one forward/inverse pair."""
        nvi_loss_1 = self.multiScaleNVILoss(w_mv_img, i_fix_img)
        nvi_loss_2 = self.multiScaleNVILoss(w_fix_img, i_mv_img)
        return nvi_loss_1 + nvi_loss_2

    def consis_loss(self, i_mv_img, r_mv_img, i_fix_img, r_fix_img):
        """Cycle-consistency: images warped forward then back must match the inputs."""
        return restore_loss(i_mv_img, r_mv_img) + restore_loss(i_fix_img, r_fix_img)

    def bend_loss(self, ddf_mv_f, ddf_f_mv):
        """Bending-energy smoothness regulariser over both displacement fields."""
        ddf1_bend = tf.reduce_mean(loss.local_displacement_energy(ddf_mv_f, 'bending', 1))
        ddf2_bend = tf.reduce_mean(loss.local_displacement_energy(ddf_f_mv, 'bending', 1))
        return ddf1_bend + ddf2_bend

    def multiScaleNVILoss(self, warped_mv1_img, input_FIX_image):
        """Average NVI similarity over scales 1, 2 and 3."""
        scales = [1, 2, 3]
        grad_loss = 0
        for s in scales:
            grad_loss = grad_loss + NVISimilarity(warped_mv1_img, input_FIX_image, s)
        return grad_loss / len(scales)

    def train(self):
        """Run the optimisation loop with periodic validation and checkpointing."""
        self.is_train = True
        init_op = tf.global_variables_initializer()
        self.sess.run(init_op)
        self.writer = tf.summary.FileWriter(self.args.log_dir, self.sess.graph)
        self.saver = tf.train.Saver()
        for glob_step in range(self.args.iteration):
            mv_img1s, mv_lab1s, mv_img2s, mv_lab2s, fix_imgs, fix_labs = self.train_sampler.next_sample()
            trainFeed = self.create_feed_dict(mv_img1s, mv_lab1s, mv_img2s, mv_lab2s, fix_imgs, fix_labs, is_aug=True)
            _, nv_loss, cyc_consis, bend, multi_consis, summary = self.sess.run(
                [self.train_op, self.nvi_loss, self.cycle_consistent, self.ddf_bend, self.multi_consis, self.summary_all],
                feed_dict=trainFeed)
            self.writer.add_summary(summary, glob_step)
            self.logger.debug("step %d: nv_loss=%f,cyc_consis=%f,bend=%f,multi_consis=%f" % (glob_step, nv_loss, cyc_consis, bend, multi_consis))
            if np.mod(glob_step, self.args.print_freq) == 1:
                self.validate_set()
            if np.mod(glob_step, self.args.save_freq) == 1:
                self.save(self.args.checkpoint_dir, glob_step)

    def summary(self):
        """Register scalar/image summaries and merge them into ``summary_all``."""
        tf.summary.scalar("nvi_loss_1", self.nvi1)
        tf.summary.scalar("nvi_loss_2", self.nvi2)
        tf.summary.scalar("ddf1_bend", self.bend1)
        tf.summary.scalar("ddf2_bend", self.bend2)
        tf.summary.scalar('multi_consis', self.multi_consis)
        tf.summary.scalar("cycle_consis", self.cycle_consistent)
        # tf.summary.scalar("anti_folding_loss", self.anti_folding_loss)
        # Mid-volume (slice 48) cross-sections of each image tensor.
        tf.summary.image("fix_img", tf.expand_dims(self.i_fix_img[:, :, 48, :, 0], -1))
        tf.summary.image("warped_fix_img", tf.expand_dims(self.w_fix1_img[:, :, 48, :, 0], -1))
        tf.summary.image("mv1_img", tf.expand_dims(self.i_mv1_img[:, :, 48, :, 0], -1))
        tf.summary.image("warped_mv1_img", tf.expand_dims(self.w_mv1_img[:, :, 48, :, 0], -1))
        tf.summary.image("mv2_img", tf.expand_dims(self.i_mv2_img[:, :, 48, :, 0], -1))
        tf.summary.image("warped_mv2_img", tf.expand_dims(self.w_mv2_img[:, :, 48, :, 0], -1))
        self.summary_all = tf.summary.merge_all()

    def sample(self, iter, write_img=False):
        """Evaluate one validation pair; optionally write images to disk.

        Returns ``(dice_before_reg1, dice_before_reg2, warp_mv1_dice,
        warp_mv2_dice, mv1_asd, mv2_asd, neg_jac_mv1, neg_jac_mv2)``.
        """
        p_img_mv1s, p_lab_mv1s, p_img_mv2s, p_lab_mv2s, p_img_fixs, p_lab_fixs = self.validate_sampler.get_data_path()
        img_mv1s, lab_mv1s, img_mv2s, lab_mv2s, img_fixs, lab_fixs = self.validate_sampler.get_batch_data(p_img_mv1s, p_lab_mv1s, p_img_mv2s, p_lab_mv2s, p_img_fixs, p_lab_fixs)
        trainFeed = self.create_feed_dict(img_mv1s, lab_mv1s, img_mv2s, lab_mv2s, img_fixs, lab_fixs, is_aug=False)
        warped_mv1_lab, warped_mv2_lab, input_mv_lab1, input_mv_lab2, input_fix_lab = self.sess.run(
            [self.w_mv1_lab, self.w_mv2_lab, self.i_mv1_lab, self.i_mv2_lab, self.i_fix_lab], feed_dict=trainFeed)
        if write_img:
            sitk_write_labs(warped_mv1_lab, None, self.args.sample_dir, '%d_warped_mv1_lab' % (iter))
            # BUGFIX: previously wrote warped_mv1_lab again under the mv2 name.
            sitk_write_labs(warped_mv2_lab, None, self.args.sample_dir, '%d_warped_mv2_lab' % (iter))
            sitk_write_labs(input_fix_lab, None, self.args.sample_dir, '%d_fixe_lab' % (iter))
            warped_mv1_img, warped_mv2_img, input_fix_img, input_mv1_img, input_mv2_img, i_mv1_lab, i_mv2_lab = self.sess.run(
                [self.w_mv1_img, self.w_mv2_img, self.i_fix_img, self.i_mv1_img, self.i_mv2_img, self.i_mv1_lab, self.i_mv2_lab],
                feed_dict=trainFeed)
            sitk_write_images(warped_mv1_img, None, self.args.sample_dir, '%d_warped_mv1_img' % (iter))
            sitk_write_images(input_mv1_img, None, self.args.sample_dir, '%d_input_mv1_img' % (iter))
            sitk_write_labs(i_mv1_lab, None, self.args.sample_dir, '%d_input_mv1_lab' % (iter))
            sitk_write_images(warped_mv2_img, None, self.args.sample_dir, '%d_warped_mv2_img' % (iter))
            sitk_write_images(input_mv2_img, None, self.args.sample_dir, '%d_input_mv2_img' % (iter))
            sitk_write_labs(i_mv2_lab, None, self.args.sample_dir, '%d_input_mv2_lab' % (iter))
            sitk_write_images(input_fix_img, None, self.args.sample_dir, '%d_fixe_img' % (iter))
        dice_before_reg1 = calculate_binary_dice(input_mv_lab1, input_fix_lab)
        dice_before_reg2 = calculate_binary_dice(input_mv_lab2, input_fix_lab)
        warp_mv1_dice = calculate_binary_dice(warped_mv1_lab, input_fix_lab)
        warp_mv2_dice = calculate_binary_dice(warped_mv2_lab, input_fix_lab)
        # Reference image supplies the physical voxel spacing for the ASD metric.
        para = sitk.ReadImage(p_lab_fixs[0])
        mv1_hd = asd(np.squeeze(warped_mv1_lab[0, ...]), np.squeeze(input_fix_lab[0, ...]), voxelspacing=para.GetSpacing())
        # BUGFIX: mv2 surface distance is measured against the fixed label
        # (previously compared against input_mv_lab1 by mistake).
        mv2_hd = asd(np.squeeze(warped_mv2_lab[0, ...]), np.squeeze(input_fix_lab[0, ...]), voxelspacing=para.GetSpacing())
        # BUGFIX: fetch the mv2 forward DDF (previously fetched self.ddf_f_mv1,
        # the inverse DDF of mv1, so the mv2 Jacobian count was wrong).
        ddf_mv1_f, ddf_mv2_f = self.sess.run([self.ddf_mv1_f, self.ddf_mv2_f], feed_dict=trainFeed)
        _, _, neg_ddf_mv1_f = neg_jac(ddf_mv1_f[0, ...])
        _, _, neg_ddf_mv2_f = neg_jac(ddf_mv2_f[0, ...])
        self.logger.debug("test_step %d: before_reg_dice=%f, mv1_dice =%f , mv2_dice=%f, mv1_hd=%f, mv2_hd=%f neg_jac %d %d" % (iter, dice_before_reg1, warp_mv1_dice, warp_mv2_dice, mv1_hd, mv2_hd, neg_ddf_mv1_f, neg_ddf_mv2_f))
        return dice_before_reg1, dice_before_reg2, warp_mv1_dice, warp_mv2_dice, mv1_hd, mv2_hd, neg_ddf_mv1_f, neg_ddf_mv2_f

    def validate(self):
        """Restore the latest checkpoint and evaluate the whole validation set."""
        self.is_train = False
        init_op = tf.global_variables_initializer()
        self.saver = tf.train.Saver()
        self.sess.run(init_op)
        if self.load(self.args.checkpoint_dir):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")
        self.validate_set(True)

    def validate_set(self, write_img=False):
        """Evaluate every validation pair, export metrics to Excel and print stats."""
        res = {'mv1_dice': [], 'mv1_asd': [], 'mv2_dice': [], 'mv2_asd': [], 'bf_reg1': [], 'bf_reg2': []}
        for i in range(self.validate_sampler.nb_pairs):
            _bf_reg1, _bf_reg2, _mv1_dice, _mv2_dice, _mv1_hd, _mv2_hd, _neg_ddf1, _neg_ddf2 = self.sample(i, write_img)
            res["mv1_dice"].append(_mv1_dice)
            res["mv2_dice"].append(_mv2_dice)
            res["mv1_asd"].append(_mv1_hd)
            res["mv2_asd"].append(_mv2_hd)
            res["bf_reg1"].append(_bf_reg1)
            res["bf_reg2"].append(_bf_reg2)
        print(Get_Name_By_Index(self.args.component))
        print("=============%s================" % (self.args.mode))
        for itr in ['mv1_dice', 'mv2_dice', 'mv1_asd', 'mv2_asd', 'bf_reg1', 'bf_reg2']:
            print(itr)
            outpu2excel(self.args.res_excel, self.args.MODEL_ID + "_" + itr, res[itr])
            print_mean_and_std(res[itr], itr)

    def test(self):
        """Register every atlas to every target of the challenge set and write warped labels."""
        self.is_train = False
        init_op = tf.global_variables_initializer()
        self.saver = tf.train.Saver()
        self.sess.run(init_op)
        if self.load(self.args.checkpoint_dir):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")
        csample = CHallengeSampler(self.args, self.is_train)
        for atlas_ind in range(csample.len_mv):
            for tgt_ind in range(csample.len_fix):
                fix_imgs, fix_labs, mv_imgs, mv_labs = csample.get_batch_data([atlas_ind], [tgt_ind])
                # NOTE(review): this call passes only four data arguments while
                # create_feed_dict expects six, and self.warped_MV_label is not
                # defined in this class -- test() appears to target an older
                # network version; confirm before relying on it.
                trainFeed = self.create_feed_dict(fix_imgs, fix_labs, mv_imgs, mv_labs, is_aug=False)
                warp_mv_img, warp_mv_label = self.sess.run([self.w_mv1_img, self.warped_MV_label], feed_dict=trainFeed)
                p_ata = csample.img_mv[atlas_ind]
                p_tgt = csample.img_fix[tgt_ind]
                outputdir = self.args.test_dir + "/atlas_%s/" % (get_name_wo_suffix(p_ata))
                name = get_name_wo_suffix(p_tgt).replace('image', 'label')
                sitk_write_lab(warp_mv_label[0, ...], sitk.ReadImage(p_tgt), outputdir, name)

    def create_feed_dict(self, mv_img1s, mv_lab1s, mv_img2s, mv_lab2s, fix_imgs, fix_labs, is_aug=False):
        """Build the placeholder feed dict.

        Random affine transforms are fed only when ``is_aug`` is True;
        otherwise identity transforms disable augmentation (validation/test).
        """
        if is_aug:
            fixed_affine = util.random_transform_generator(self.args.batch_size)
            moving_affine1 = util.random_transform_generator(self.args.batch_size, 0.1)
            moving_affine2 = util.random_transform_generator(self.args.batch_size, 0.1)
        else:
            fixed_affine = util.initial_transform_generator(self.args.batch_size)
            moving_affine1 = util.initial_transform_generator(self.args.batch_size)
            moving_affine2 = util.initial_transform_generator(self.args.batch_size)
        return {self.ph_mv_img1: mv_img1s,
                self.ph_mv_lab1: mv_lab1s,
                self.ph_mv_img2: mv_img2s,
                self.ph_mv_lab2: mv_lab2s,
                self.ph_fix_img: fix_imgs,
                self.ph_fix_lab: fix_labs,
                self.ph_fixed_affine: fixed_affine,
                self.ph_moving_affine1: moving_affine1,
                self.ph_moving_affine2: moving_affine2,
                }
class MMReg(MMReg_base):
    """Concrete registration model: builds the TF graph for two moving images
    registered to one fixed image by two independent sub-networks."""

    def build_network(self):
        """Create placeholders, augmentation ops, both registration
        sub-networks and the combined training objective."""
        self.global_step = tf.Variable(0, trainable=False)
        # Exponentially decayed learning rate, stepped every decay_freq iterations.
        self.learning_rate = tf.train.exponential_decay(self.args.lr, self.global_step, self.args.decay_freq, 0.96,staircase=True)
        # input
        self.ph_mv_img1 = tf.placeholder(tf.float32, [self.args.batch_size] + self.image_size + [1])
        self.ph_mv_lab1 = tf.placeholder(tf.float32, [self.args.batch_size] + self.image_size + [1])
        self.ph_mv_img2 = tf.placeholder(tf.float32, [self.args.batch_size] + self.image_size + [1])
        self.ph_mv_lab2 = tf.placeholder(tf.float32, [self.args.batch_size] + self.image_size + [1])
        self.ph_fix_img = tf.placeholder(tf.float32, [self.args.batch_size] + self.image_size + [1])
        self.ph_fix_lab = tf.placeholder(tf.float32, [self.args.batch_size] + self.image_size + [1])
        self.ph_moving_affine1 = tf.placeholder(tf.float32, [self.args.batch_size] + [1, 12]) # augmentation affine: a 4x4 matrix whose last row is fixed at 0 0 0 1, so only 12 free parameters
        self.ph_moving_affine2 = tf.placeholder(tf.float32, [self.args.batch_size] + [1, 12]) # augmentation affine: a 4x4 matrix whose last row is fixed at 0 0 0 1, so only 12 free parameters
        self.ph_fixed_affine = tf.placeholder(tf.float32, [self.args.batch_size] + [1,12])
        # data augmentation: apply the (possibly identity) affine transforms
        self.i_mv1_img, self.i_mv1_lab=util.augment_3Ddata_by_affine(self.ph_mv_img1, self.ph_mv_lab1, self.ph_moving_affine1)
        self.i_mv2_img, self.i_mv2_lab=util.augment_3Ddata_by_affine(self.ph_mv_img2, self.ph_mv_lab2, self.ph_moving_affine2)
        self.i_fix_img, self.i_fix_lab=util.augment_3Ddata_by_affine(self.ph_fix_img, self.ph_fix_lab, self.ph_fixed_affine)
        # Two registration sub-networks in separate variable scopes, each
        # producing forward/inverse DDFs plus warped and cycle-restored volumes.
        self.ddf_mv1_f, self.ddf_f_mv1, self.w_mv1_img,self.w_mv1_lab ,self.r_mv1_img, self.w_fix1_img,self.w_fix1_lab, self.r_fix1_img=self._regnet(self.i_mv1_img,self.i_mv1_lab,self.i_fix_img,self.i_fix_lab,scop_name="regA")
        self.ddf_mv2_f, self.ddf_f_mv2, self.w_mv2_img,self.w_mv2_lab ,self.r_mv2_img, self.w_fix2_img,self.w_fix2_lab, self.r_fix2_img=self._regnet(self.i_mv2_img,self.i_mv2_lab,self.i_fix_img,self.i_fix_lab,scop_name='reg_b')
        # Bending-energy smoothness penalty on all four displacement fields.
        self.bend1=self.bend_loss(self.ddf_f_mv1,self.ddf_mv1_f)
        self.bend2=self.bend_loss(self.ddf_f_mv2,self.ddf_mv2_f)
        self.ddf_bend=self.bend1+self.bend2
        # Cycle-consistency: forward-then-inverse warping must restore the input.
        self.cyc_consis1=self.consis_loss(self.i_mv1_img, self.r_mv1_img, self.i_fix_img, self.r_fix1_img)
        self.cyc_consis2=self.consis_loss(self.i_mv2_img, self.r_mv2_img, self.i_fix_img, self.r_fix2_img)
        self.cycle_consistent = self.cyc_consis1 + self.cyc_consis2
        # Translation of the note below: "This duplicates nvi1+nvi2 further
        # down, because nvi1 drives w_mv1_img towards i_fix_img and nvi2
        # drives w_mv2_img towards i_fix_img; equivalent to
        # w_mv1_img == w_mv2_img."
        '''
        #这个和后面的nvil+nvi2重复,因为nvi1会让w_mv1_img和i_fix_img相同,而nvi2会让w_mv2_img和i_fix_img相同.
        等效于w_mv1_img==w_mv2_img
        '''
        # self.consis=restore_loss(self.w_mv1_img, self.w_mv2_img)
        # self.multi_consis=tf.reduce_mean(loss.multi_scale_loss(self.w_mv1_lab, self.w_mv2_lab, 'dice', [0, 1, 2, 4]))
        # Cross-network consistency: map each warped moving image back through
        # the other network's inverse field and compare with the other input.
        _warp_mv1_mv2=self.warp_image(self.w_mv1_img,self.ddf_f_mv2)
        _warp_mv2_mv1=self.warp_image(self.w_mv2_img,self.ddf_f_mv1)
        self.multi_consis = self.cal_nvi_loss(self.i_mv1_img, _warp_mv2_mv1, self.i_mv2_img, _warp_mv1_mv2)
        # NVI similarity between warped and target volumes for both networks.
        self.nvi1=self.cal_nvi_loss(self.w_mv1_img, self.i_fix_img, self.w_fix1_img, self.i_mv1_img)
        self.nvi2=self.cal_nvi_loss(self.w_mv2_img, self.i_fix_img, self.w_fix2_img, self.i_mv2_img)
        self.nvi_loss= self.nvi1 + self.nvi2
        # self.anti_folding_loss = self.args.lambda_anti* (loss.anti_folding(self.ddf_mv1_f) + loss.anti_folding(self.ddf_f_mv1))
        # Total objective: similarity + weighted regularisers.
        self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(
            self.nvi_loss
            + self.args.lambda_bend*self.ddf_bend
            +self.args.lambda_cycle_consis*self.cycle_consistent
            +self.args.lambda_multi_consis*self.multi_consis,
            global_step=self.global_step)
        self.logger.debug("build network finish")
| [
"tensorflow.reduce_sum",
"tfop.losses.local_displacement_energy",
"excelutil.output2excel.outpu2excel",
"tfop.layers.upsample_resnet_block",
"learn2reg.loss.NVISimilarity",
"tensorflow.Variable",
"evaluate.metric.print_mean_and_std",
"tfop.utils.random_transform_generator",
"tfop.layers.ddf_summand"... | [((760, 798), 'model.base_model.BaseModelV2.__init__', 'BaseModelV2.__init__', (['self', 'sess', 'args'], {}), '(self, sess, args)\n', (780, 798), False, 'from model.base_model import BaseModelV2\n'), ((829, 858), 'learn2reg.sampler.MMSampler', 'MMSampler', (['self.args', '"""train"""'], {}), "(self.args, 'train')\n", (838, 858), False, 'from learn2reg.sampler import MMSampler\n'), ((891, 923), 'learn2reg.sampler.MMSampler', 'MMSampler', (['self.args', '"""validate"""'], {}), "(self.args, 'validate')\n", (900, 923), False, 'from learn2reg.sampler import MMSampler\n'), ((1092, 1132), 'tfop.utils.get_reference_grid', 'util.get_reference_grid', (['self.image_size'], {}), '(self.image_size)\n', (1115, 1132), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((1353, 1402), 'tfop.utils.resample_linear', 'util.resample_linear', (['input_', '(self.grid_ref + ddf)'], {}), '(input_, self.grid_ref + ddf)\n', (1373, 1402), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((4929, 4962), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4960, 4962), True, 'import tensorflow as tf\n'), ((5016, 5073), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self.args.log_dir', 'self.sess.graph'], {}), '(self.args.log_dir, self.sess.graph)\n', (5037, 5073), True, 'import tensorflow as tf\n'), ((5095, 5111), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5109, 5111), True, 'import tensorflow as tf\n'), ((6079, 6121), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""nvi_loss_1"""', 'self.nvi1'], {}), "('nvi_loss_1', self.nvi1)\n", (6096, 6121), True, 'import tensorflow as tf\n'), ((6129, 6171), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""nvi_loss_2"""', 'self.nvi2'], {}), "('nvi_loss_2', self.nvi2)\n", (6146, 6171), True, 'import tensorflow as tf\n'), ((6180, 6222), 'tensorflow.summary.scalar', 'tf.summary.scalar', 
(['"""ddf1_bend"""', 'self.bend1'], {}), "('ddf1_bend', self.bend1)\n", (6197, 6222), True, 'import tensorflow as tf\n'), ((6231, 6273), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""ddf2_bend"""', 'self.bend2'], {}), "('ddf2_bend', self.bend2)\n", (6248, 6273), True, 'import tensorflow as tf\n'), ((6281, 6333), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""multi_consis"""', 'self.multi_consis'], {}), "('multi_consis', self.multi_consis)\n", (6298, 6333), True, 'import tensorflow as tf\n'), ((6342, 6398), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""cycle_consis"""', 'self.cycle_consistent'], {}), "('cycle_consis', self.cycle_consistent)\n", (6359, 6398), True, 'import tensorflow as tf\n'), ((7047, 7069), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (7067, 7069), True, 'import tensorflow as tf\n'), ((9026, 9077), 'evaluate.metric.calculate_binary_dice', 'calculate_binary_dice', (['input_mv_lab1', 'input_fix_lab'], {}), '(input_mv_lab1, input_fix_lab)\n', (9047, 9077), False, 'from evaluate.metric import calculate_binary_dice, neg_jac, print_mean_and_std\n'), ((9105, 9156), 'evaluate.metric.calculate_binary_dice', 'calculate_binary_dice', (['input_mv_lab2', 'input_fix_lab'], {}), '(input_mv_lab2, input_fix_lab)\n', (9126, 9156), False, 'from evaluate.metric import calculate_binary_dice, neg_jac, print_mean_and_std\n'), ((9179, 9231), 'evaluate.metric.calculate_binary_dice', 'calculate_binary_dice', (['warped_mv1_lab', 'input_fix_lab'], {}), '(warped_mv1_lab, input_fix_lab)\n', (9200, 9231), False, 'from evaluate.metric import calculate_binary_dice, neg_jac, print_mean_and_std\n'), ((9254, 9306), 'evaluate.metric.calculate_binary_dice', 'calculate_binary_dice', (['warped_mv2_lab', 'input_fix_lab'], {}), '(warped_mv2_lab, input_fix_lab)\n', (9275, 9306), False, 'from evaluate.metric import calculate_binary_dice, neg_jac, print_mean_and_std\n'), ((9321, 9350), 'SimpleITK.ReadImage', 'sitk.ReadImage', 
(['p_lab_fixs[0]'], {}), '(p_lab_fixs[0])\n', (9335, 9350), True, 'import SimpleITK as sitk\n'), ((9711, 9737), 'evaluate.metric.neg_jac', 'neg_jac', (['ddf_mv1_f[0, ...]'], {}), '(ddf_mv1_f[0, ...])\n', (9718, 9737), False, 'from evaluate.metric import calculate_binary_dice, neg_jac, print_mean_and_std\n'), ((9763, 9789), 'evaluate.metric.neg_jac', 'neg_jac', (['ddf_mv2_f[0, ...]'], {}), '(ddf_mv2_f[0, ...])\n', (9770, 9789), False, 'from evaluate.metric import calculate_binary_dice, neg_jac, print_mean_and_std\n'), ((10199, 10232), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (10230, 10232), True, 'import tensorflow as tf\n'), ((10254, 10270), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (10268, 10270), True, 'import tensorflow as tf\n'), ((11681, 11714), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (11712, 11714), True, 'import tensorflow as tf\n'), ((11736, 11752), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (11750, 11752), True, 'import tensorflow as tf\n'), ((11944, 11986), 'learn2reg.challenge_sampler.CHallengeSampler', 'CHallengeSampler', (['self.args', 'self.is_train'], {}), '(self.args, self.is_train)\n', (11960, 11986), False, 'from learn2reg.challenge_sampler import CHallengeSampler\n'), ((14292, 14323), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (14303, 14323), True, 'import tensorflow as tf\n'), ((14353, 14460), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['self.args.lr', 'self.global_step', 'self.args.decay_freq', '(0.96)'], {'staircase': '(True)'}), '(self.args.lr, self.global_step, self.args.\n decay_freq, 0.96, staircase=True)\n', (14379, 14460), True, 'import tensorflow as tf\n'), ((14498, 14572), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([self.args.batch_size] + self.image_size + [1])'], {}), '(tf.float32, 
[self.args.batch_size] + self.image_size + [1])\n', (14512, 14572), True, 'import tensorflow as tf\n'), ((14599, 14673), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([self.args.batch_size] + self.image_size + [1])'], {}), '(tf.float32, [self.args.batch_size] + self.image_size + [1])\n', (14613, 14673), True, 'import tensorflow as tf\n'), ((14700, 14774), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([self.args.batch_size] + self.image_size + [1])'], {}), '(tf.float32, [self.args.batch_size] + self.image_size + [1])\n', (14714, 14774), True, 'import tensorflow as tf\n'), ((14801, 14875), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([self.args.batch_size] + self.image_size + [1])'], {}), '(tf.float32, [self.args.batch_size] + self.image_size + [1])\n', (14815, 14875), True, 'import tensorflow as tf\n'), ((14902, 14976), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([self.args.batch_size] + self.image_size + [1])'], {}), '(tf.float32, [self.args.batch_size] + self.image_size + [1])\n', (14916, 14976), True, 'import tensorflow as tf\n'), ((15003, 15077), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([self.args.batch_size] + self.image_size + [1])'], {}), '(tf.float32, [self.args.batch_size] + self.image_size + [1])\n', (15017, 15077), True, 'import tensorflow as tf\n'), ((15112, 15172), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([self.args.batch_size] + [1, 12])'], {}), '(tf.float32, [self.args.batch_size] + [1, 12])\n', (15126, 15172), True, 'import tensorflow as tf\n'), ((15251, 15311), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([self.args.batch_size] + [1, 12])'], {}), '(tf.float32, [self.args.batch_size] + [1, 12])\n', (15265, 15311), True, 'import tensorflow as tf\n'), ((15388, 15448), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([self.args.batch_size] + [1, 12])'], {}), '(tf.float32, [self.args.batch_size] + [1, 12])\n', (15402, 
15448), True, 'import tensorflow as tf\n'), ((15515, 15607), 'tfop.utils.augment_3Ddata_by_affine', 'util.augment_3Ddata_by_affine', (['self.ph_mv_img1', 'self.ph_mv_lab1', 'self.ph_moving_affine1'], {}), '(self.ph_mv_img1, self.ph_mv_lab1, self.\n ph_moving_affine1)\n', (15544, 15607), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((15642, 15734), 'tfop.utils.augment_3Ddata_by_affine', 'util.augment_3Ddata_by_affine', (['self.ph_mv_img2', 'self.ph_mv_lab2', 'self.ph_moving_affine2'], {}), '(self.ph_mv_img2, self.ph_mv_lab2, self.\n ph_moving_affine2)\n', (15671, 15734), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((15769, 15859), 'tfop.utils.augment_3Ddata_by_affine', 'util.augment_3Ddata_by_affine', (['self.ph_fix_img', 'self.ph_fix_lab', 'self.ph_fixed_affine'], {}), '(self.ph_fix_img, self.ph_fix_lab, self.\n ph_fixed_affine)\n', (15798, 15859), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((1819, 1860), 'tensorflow.variable_scope', 'tf.variable_scope', (['scop_name'], {'reuse': 'reuse'}), '(scop_name, reuse=reuse)\n', (1836, 1860), True, 'import tensorflow as tf\n'), ((1883, 1995), 'tfop.layers.downsample_resnet_block', 'layer.downsample_resnet_block', (['self.is_train', 'input_layer', '(2)', 'nc[0]'], {'k_conv0': '[7, 7, 7]', 'name': '"""local_down_0"""'}), "(self.is_train, input_layer, 2, nc[0], k_conv0\n =[7, 7, 7], name='local_down_0')\n", (1912, 1995), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((2012, 2100), 'tfop.layers.downsample_resnet_block', 'layer.downsample_resnet_block', (['self.is_train', 'h0', 'nc[0]', 'nc[1]'], {'name': '"""local_down_1"""'}), "(self.is_train, h0, nc[0], nc[1], name=\n 'local_down_1')\n", (2041, 2100), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((2118, 2206), 'tfop.layers.downsample_resnet_block', 'layer.downsample_resnet_block', (['self.is_train', 'h1', 'nc[1]', 
'nc[2]'], {'name': '"""local_down_2"""'}), "(self.is_train, h1, nc[1], nc[2], name=\n 'local_down_2')\n", (2147, 2206), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((2224, 2312), 'tfop.layers.downsample_resnet_block', 'layer.downsample_resnet_block', (['self.is_train', 'h2', 'nc[2]', 'nc[3]'], {'name': '"""local_down_3"""'}), "(self.is_train, h2, nc[2], nc[3], name=\n 'local_down_3')\n", (2253, 2312), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((3080, 3106), 'tensorflow.stack', 'tf.stack', (['ddf_list'], {'axis': '(5)'}), '(ddf_list, axis=5)\n', (3088, 3106), True, 'import tensorflow as tf\n'), ((3132, 3163), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['ddf_list'], {'axis': '(5)'}), '(ddf_list, axis=5)\n', (3145, 3163), True, 'import tensorflow as tf\n'), ((3320, 3347), 'tensorflow.stack', 'tf.stack', (['ddf_list2'], {'axis': '(5)'}), '(ddf_list2, axis=5)\n', (3328, 3347), True, 'import tensorflow as tf\n'), ((3373, 3405), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['ddf_list2'], {'axis': '(5)'}), '(ddf_list2, axis=5)\n', (3386, 3405), True, 'import tensorflow as tf\n'), ((4195, 4227), 'tfop.losses.restore_loss', 'restore_loss', (['i_mv_img', 'r_mv_img'], {}), '(i_mv_img, r_mv_img)\n', (4207, 4227), False, 'from tfop.losses import restore_loss\n'), ((4230, 4264), 'tfop.losses.restore_loss', 'restore_loss', (['i_fix_img', 'r_fix_img'], {}), '(i_fix_img, r_fix_img)\n', (4242, 4264), False, 'from tfop.losses import restore_loss\n'), ((4393, 4447), 'tfop.losses.local_displacement_energy', 'loss.local_displacement_energy', (['ddf_mv_f', '"""bending"""', '(1)'], {}), "(ddf_mv_f, 'bending', 1)\n", (4423, 4447), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((4484, 4538), 'tfop.losses.local_displacement_energy', 'loss.local_displacement_energy', (['ddf_f_mv', '"""bending"""', '(1)'], {}), "(ddf_f_mv, 'bending', 1)\n", (4514, 4538), True, 'from tfop import utils as util, layers as 
layer, losses as loss\n'), ((6508, 6558), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.i_fix_img[:, :, 48, :, 0]', '(-1)'], {}), '(self.i_fix_img[:, :, 48, :, 0], -1)\n', (6522, 6558), True, 'import tensorflow as tf\n'), ((6603, 6654), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.w_fix1_img[:, :, 48, :, 0]', '(-1)'], {}), '(self.w_fix1_img[:, :, 48, :, 0], -1)\n', (6617, 6654), True, 'import tensorflow as tf\n'), ((6692, 6742), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.i_mv1_img[:, :, 48, :, 0]', '(-1)'], {}), '(self.i_mv1_img[:, :, 48, :, 0], -1)\n', (6706, 6742), True, 'import tensorflow as tf\n'), ((6787, 6837), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.w_mv1_img[:, :, 48, :, 0]', '(-1)'], {}), '(self.w_mv1_img[:, :, 48, :, 0], -1)\n', (6801, 6837), True, 'import tensorflow as tf\n'), ((6875, 6925), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.i_mv2_img[:, :, 48, :, 0]', '(-1)'], {}), '(self.i_mv2_img[:, :, 48, :, 0], -1)\n', (6889, 6925), True, 'import tensorflow as tf\n'), ((6970, 7020), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.w_mv2_img[:, :, 48, :, 0]', '(-1)'], {}), '(self.w_mv2_img[:, :, 48, :, 0], -1)\n', (6984, 7020), True, 'import tensorflow as tf\n'), ((7756, 7848), 'sitkImageIO.itkdatawriter.sitk_write_labs', 'sitk_write_labs', (['warped_mv1_lab', 'None', 'self.args.sample_dir', "('%d_warped_mv1_lab' % iter)"], {}), "(warped_mv1_lab, None, self.args.sample_dir, \n '%d_warped_mv1_lab' % iter)\n", (7771, 7848), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((7858, 7950), 'sitkImageIO.itkdatawriter.sitk_write_labs', 'sitk_write_labs', (['warped_mv1_lab', 'None', 'self.args.sample_dir', "('%d_warped_mv2_lab' % iter)"], {}), "(warped_mv1_lab, None, self.args.sample_dir, \n '%d_warped_mv2_lab' % iter)\n", (7873, 7950), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((7960, 8045), 
'sitkImageIO.itkdatawriter.sitk_write_labs', 'sitk_write_labs', (['input_fix_lab', 'None', 'self.args.sample_dir', "('%d_fixe_lab' % iter)"], {}), "(input_fix_lab, None, self.args.sample_dir, '%d_fixe_lab' % iter\n )\n", (7975, 8045), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((8309, 8403), 'sitkImageIO.itkdatawriter.sitk_write_images', 'sitk_write_images', (['warped_mv1_img', 'None', 'self.args.sample_dir', "('%d_warped_mv1_img' % iter)"], {}), "(warped_mv1_img, None, self.args.sample_dir, \n '%d_warped_mv1_img' % iter)\n", (8326, 8403), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((8413, 8505), 'sitkImageIO.itkdatawriter.sitk_write_images', 'sitk_write_images', (['input_mv1_img', 'None', 'self.args.sample_dir', "('%d_input_mv1_img' % iter)"], {}), "(input_mv1_img, None, self.args.sample_dir, \n '%d_input_mv1_img' % iter)\n", (8430, 8505), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((8515, 8600), 'sitkImageIO.itkdatawriter.sitk_write_labs', 'sitk_write_labs', (['i_mv1_lab', 'None', 'self.args.sample_dir', "('%d_input_mv1_lab' % iter)"], {}), "(i_mv1_lab, None, self.args.sample_dir, '%d_input_mv1_lab' %\n iter)\n", (8530, 8600), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((8611, 8705), 'sitkImageIO.itkdatawriter.sitk_write_images', 'sitk_write_images', (['warped_mv2_img', 'None', 'self.args.sample_dir', "('%d_warped_mv2_img' % iter)"], {}), "(warped_mv2_img, None, self.args.sample_dir, \n '%d_warped_mv2_img' % iter)\n", (8628, 8705), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((8715, 8807), 'sitkImageIO.itkdatawriter.sitk_write_images', 'sitk_write_images', (['input_mv2_img', 'None', 'self.args.sample_dir', "('%d_input_mv2_img' % iter)"], {}), "(input_mv2_img, None, 
self.args.sample_dir, \n '%d_input_mv2_img' % iter)\n", (8732, 8807), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((8817, 8902), 'sitkImageIO.itkdatawriter.sitk_write_labs', 'sitk_write_labs', (['i_mv2_lab', 'None', 'self.args.sample_dir', "('%d_input_mv2_lab' % iter)"], {}), "(i_mv2_lab, None, self.args.sample_dir, '%d_input_mv2_lab' %\n iter)\n", (8832, 8902), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((8913, 8999), 'sitkImageIO.itkdatawriter.sitk_write_images', 'sitk_write_images', (['input_fix_img', 'None', 'self.args.sample_dir', "('%d_fixe_img' % iter)"], {}), "(input_fix_img, None, self.args.sample_dir, '%d_fixe_img' %\n iter)\n", (8930, 8999), False, 'from sitkImageIO.itkdatawriter import sitk_write_lab, sitk_write_images, sitk_write_labs\n'), ((9370, 9404), 'numpy.squeeze', 'np.squeeze', (['warped_mv1_lab[0, ...]'], {}), '(warped_mv1_lab[0, ...])\n', (9380, 9404), True, 'import numpy as np\n'), ((9404, 9437), 'numpy.squeeze', 'np.squeeze', (['input_fix_lab[0, ...]'], {}), '(input_fix_lab[0, ...])\n', (9414, 9437), True, 'import numpy as np\n'), ((9488, 9522), 'numpy.squeeze', 'np.squeeze', (['warped_mv2_lab[0, ...]'], {}), '(warped_mv2_lab[0, ...])\n', (9498, 9522), True, 'import numpy as np\n'), ((9522, 9555), 'numpy.squeeze', 'np.squeeze', (['input_mv_lab1[0, ...]'], {}), '(input_mv_lab1[0, ...])\n', (9532, 9555), True, 'import numpy as np\n'), ((11264, 11302), 'config.Defines.Get_Name_By_Index', 'Get_Name_By_Index', (['self.args.component'], {}), '(self.args.component)\n', (11281, 11302), False, 'from config.Defines import Get_Name_By_Index\n'), ((11491, 11565), 'excelutil.output2excel.outpu2excel', 'outpu2excel', (['self.args.res_excel', "(self.args.MODEL_ID + '_' + itr)", 'res[itr]'], {}), "(self.args.res_excel, self.args.MODEL_ID + '_' + itr, res[itr])\n", (11502, 11565), False, 'from excelutil.output2excel import outpu2excel\n'), 
((11578, 11611), 'evaluate.metric.print_mean_and_std', 'print_mean_and_std', (['res[itr]', 'itr'], {}), '(res[itr], itr)\n', (11596, 11611), False, 'from evaluate.metric import calculate_binary_dice, neg_jac, print_mean_and_std\n'), ((13187, 13240), 'tfop.utils.random_transform_generator', 'util.random_transform_generator', (['self.args.batch_size'], {}), '(self.args.batch_size)\n', (13218, 13240), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((13287, 13345), 'tfop.utils.random_transform_generator', 'util.random_transform_generator', (['self.args.batch_size', '(0.1)'], {}), '(self.args.batch_size, 0.1)\n', (13318, 13345), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((13392, 13450), 'tfop.utils.random_transform_generator', 'util.random_transform_generator', (['self.args.batch_size', '(0.1)'], {}), '(self.args.batch_size, 0.1)\n', (13423, 13450), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((1530, 1574), 'tfop.layers.resize_volume', 'layer.resize_volume', (['mv_img', 'self.image_size'], {}), '(mv_img, self.image_size)\n', (1549, 1574), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((2326, 2397), 'tfop.layers.conv3_block', 'layer.conv3_block', (['self.is_train', 'h3', 'nc[3]', 'nc[4]'], {'name': '"""local_deep_4"""'}), "(self.is_train, h3, nc[3], nc[4], name='local_deep_4')\n", (2343, 2397), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((2951, 3037), 'tfop.layers.ddf_summand', 'layer.ddf_summand', (['hm[4 - idx]', 'nc[idx]', 'self.image_size'], {'name': "('ddf1_sum_%d' % idx)"}), "(hm[4 - idx], nc[idx], self.image_size, name='ddf1_sum_%d' %\n idx)\n", (2968, 3037), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((3190, 3276), 'tfop.layers.ddf_summand', 'layer.ddf_summand', (['hm[4 - idx]', 'nc[idx]', 'self.image_size'], {'name': "('ddf2_sum_%d' % idx)"}), "(hm[4 - idx], nc[idx], 
self.image_size, name='ddf2_sum_%d' %\n idx)\n", (3207, 3276), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((4775, 4824), 'learn2reg.loss.NVISimilarity', 'NVISimilarity', (['warped_mv1_img', 'input_FIX_image', 's'], {}), '(warped_mv1_img, input_FIX_image, s)\n', (4788, 4824), False, 'from learn2reg.loss import NVISimilarity\n'), ((5801, 5840), 'numpy.mod', 'np.mod', (['glob_step', 'self.args.print_freq'], {}), '(glob_step, self.args.print_freq)\n', (5807, 5840), True, 'import numpy as np\n'), ((5939, 5977), 'numpy.mod', 'np.mod', (['glob_step', 'self.args.save_freq'], {}), '(glob_step, self.args.save_freq)\n', (5945, 5977), True, 'import numpy as np\n'), ((13890, 13944), 'tfop.utils.initial_transform_generator', 'util.initial_transform_generator', (['self.args.batch_size'], {}), '(self.args.batch_size)\n', (13922, 13944), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((13995, 14049), 'tfop.utils.initial_transform_generator', 'util.initial_transform_generator', (['self.args.batch_size'], {}), '(self.args.batch_size)\n', (14027, 14049), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((14100, 14154), 'tfop.utils.initial_transform_generator', 'util.initial_transform_generator', (['self.args.batch_size'], {}), '(self.args.batch_size)\n', (14132, 14154), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((17748, 17790), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate'], {}), '(self.learning_rate)\n', (17770, 17790), True, 'import tensorflow as tf\n'), ((2418, 2510), 'tfop.layers.upsample_resnet_block', 'layer.upsample_resnet_block', (['self.is_train', 'hm[0]', 'hc3', 'nc[4]', 'nc[3]'], {'name': '"""local_up_3"""'}), "(self.is_train, hm[0], hc3, nc[4], nc[3], name=\n 'local_up_3')\n", (2445, 2510), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((2550, 2642), 'tfop.layers.upsample_resnet_block', 
'layer.upsample_resnet_block', (['self.is_train', 'hm[1]', 'hc2', 'nc[3]', 'nc[2]'], {'name': '"""local_up_2"""'}), "(self.is_train, hm[1], hc2, nc[3], nc[2], name=\n 'local_up_2')\n", (2577, 2642), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((2682, 2774), 'tfop.layers.upsample_resnet_block', 'layer.upsample_resnet_block', (['self.is_train', 'hm[2]', 'hc1', 'nc[2]', 'nc[1]'], {'name': '"""local_up_1"""'}), "(self.is_train, hm[2], hc1, nc[2], nc[1], name=\n 'local_up_1')\n", (2709, 2774), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((2814, 2906), 'tfop.layers.upsample_resnet_block', 'layer.upsample_resnet_block', (['self.is_train', 'hm[3]', 'hc0', 'nc[1]', 'nc[0]'], {'name': '"""local_up_0"""'}), "(self.is_train, hm[3], hc0, nc[1], nc[0], name=\n 'local_up_0')\n", (2841, 2906), True, 'from tfop import utils as util, layers as layer, losses as loss\n'), ((12711, 12732), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['p_tgt'], {}), '(p_tgt)\n', (12725, 12732), True, 'import SimpleITK as sitk\n'), ((12559, 12584), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['p_ata'], {}), '(p_ata)\n', (12577, 12584), False, 'from dirutil.helper import get_name_wo_suffix\n'), ((12607, 12632), 'dirutil.helper.get_name_wo_suffix', 'get_name_wo_suffix', (['p_tgt'], {}), '(p_tgt)\n', (12625, 12632), False, 'from dirutil.helper import get_name_wo_suffix\n')] |
import sys
sys.path.insert(0, '/media/sidk/Data/sidk/Research/OP3/')
from rlkit.torch.monet.monet import MonetVAE
from rlkit.torch.conv_networks import BroadcastCNN
import rlkit.torch.monet.monet as monet
from rlkit.torch.monet.unet import UNet
from rlkit.torch.monet.monet_trainer import MonetTrainer
import rlkit.torch.pytorch_util as ptu
from rlkit.pythonplusplus import identity
from rlkit.launchers.launcher_util import run_experiment
from rlkit.core import logger
import numpy as np
import h5py
def load_dataset(data_path, train=True):
    """Load image data from an HDF5 file as a numpy array.

    Parameters
    ----------
    data_path : str
        Path to the HDF5 file. Files whose path contains 'clevr' store a
        flat 'features' dataset; all other files store features under
        'training' / 'test' groups.
    train : bool
        For non-clevr files, select the 'training' (True) or 'test' (False)
        split. Ignored for clevr files.

    Returns
    -------
    np.ndarray
        For clevr files: the raw 'features' array.
        Otherwise: uint8 images of shape (N, 3, 64, 64).
    """
    # Use a context manager so the file handle is always closed
    # (the original code opened the file and never closed it).
    with h5py.File(data_path, 'r') as hdf5_file:  # RV: Data file
        if 'clevr' in data_path:
            # np.array copies the dataset out before the file closes.
            return np.array(hdf5_file['features'])
        if train:
            feats = np.array(hdf5_file['training']['features'])
        else:
            feats = np.array(hdf5_file['test']['features'])
    # Features are stored flattened; presumably scaled to [0, 1] since they
    # are multiplied by 255 below — TODO confirm against the data generator.
    data = feats.reshape((-1, 64, 64, 3))
    data = (data * 255).astype(np.uint8)
    # (N, 64, 64, 3) -> (N, 3, 64, 64): channel-first layout.
    data = np.swapaxes(data, 1, 3)
    return data
def train_vae(variant):
    """Train a MONet VAE on the two-ball dataset, periodically saving samples.

    :param variant: dict with 'vae_kwargs', 'algo_kwargs', 'num_epochs' and
        'save_period' entries (see the __main__ block for the expected shape).
        Mutated in place: architecture, decoder activation and decoder class
        are injected into variant['vae_kwargs'] below.
    """
    #train_path = '/home/jcoreyes/objects/rlkit/examples/monet/clevr_train_10000.hdf5'
    #test_path = '/home/jcoreyes/objects/rlkit/examples/monet/clevr_test.hdf5'
    # NOTE(review): hard-coded absolute paths — this only runs on the author's machine.
    train_path = '/home/jcoreyes/objects/RailResearch/DataGeneration/ColorTwoBallSmall.h5'
    test_path = '/home/jcoreyes/objects/RailResearch/DataGeneration/ColorTwoBallSmall.h5'
    train_data = load_dataset(train_path, train=True)
    test_data = load_dataset(test_path, train=False)
    # Flatten each image to one feature vector per sample.
    train_data = train_data.reshape((train_data.shape[0], -1))
    test_data = test_data.reshape((test_data.shape[0], -1))
    #logger.save_extra_data(info)
    logger.get_snapshot_dir()
    # Inject the fixed model components into the user-supplied kwargs.
    variant['vae_kwargs']['architecture'] = monet.imsize64_monet_architecture #monet.imsize84_monet_architecture
    variant['vae_kwargs']['decoder_output_activation'] = identity
    variant['vae_kwargs']['decoder_class'] = BroadcastCNN
    # Attention network with 4 input channels and a single output mask
    # (presumably RGB + scope channel — confirm against MonetVAE).
    attention_net = UNet(in_channels=4, n_classes=1, up_mode='upsample', depth=3,
                         padding=True)
    m = MonetVAE(
        **variant['vae_kwargs'],
        attention_net=attention_net
    )
    m.to(ptu.device)
    t = MonetTrainer(train_data, test_data, m,
                       **variant['algo_kwargs'])
    save_period = variant['save_period']
    for epoch in range(variant['num_epochs']):
        # Save reconstructions/samples only every `save_period` epochs.
        should_save_imgs = (epoch % save_period == 0)
        t.train_epoch(epoch)
        t.test_epoch(
            epoch,
            save_reconstruction=should_save_imgs,
        )
        if should_save_imgs:
            t.dump_samples(epoch)
    # Persist the trained model alongside the experiment logs.
    logger.save_extra_data(m, 'vae.pkl', mode='pickle')
if __name__ == "__main__":
    # Hyperparameters: `vae_kwargs` configure the MonetVAE model,
    # `algo_kwargs` configure the MonetTrainer optimizer/losses.
    variant = dict(
        vae_kwargs = dict(
            imsize=64,
            representation_size=16,
            input_channels=4,
            decoder_distribution='gaussian_identity_variance'
        ),
        algo_kwargs = dict(
            beta=0.5,
            gamma=0.5,
            batch_size=16,
            lr=1e-4,
            log_interval=0,
        ),
        num_epochs=1500,
        algorithm='VAE',
        save_period=5,  # epochs between reconstruction/sample dumps
    )
    run_experiment(
        train_vae,
        exp_prefix='vae-clevr',
        mode='here_no_doodad',
        variant=variant,
        use_gpu=True,  # Turn on if you have a GPU
    )
| [
"rlkit.core.logger.save_extra_data",
"h5py.File",
"rlkit.torch.monet.unet.UNet",
"sys.path.insert",
"rlkit.torch.monet.monet.MonetVAE",
"numpy.array",
"numpy.swapaxes",
"rlkit.core.logger.get_snapshot_dir",
"rlkit.torch.monet.monet_trainer.MonetTrainer",
"rlkit.launchers.launcher_util.run_experime... | [((11, 68), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/media/sidk/Data/sidk/Research/OP3/"""'], {}), "(0, '/media/sidk/Data/sidk/Research/OP3/')\n", (26, 68), False, 'import sys\n'), ((560, 585), 'h5py.File', 'h5py.File', (['data_path', '"""r"""'], {}), "(data_path, 'r')\n", (569, 585), False, 'import h5py\n'), ((1639, 1664), 'rlkit.core.logger.get_snapshot_dir', 'logger.get_snapshot_dir', ([], {}), '()\n', (1662, 1664), False, 'from rlkit.core import logger\n'), ((1923, 1998), 'rlkit.torch.monet.unet.UNet', 'UNet', ([], {'in_channels': '(4)', 'n_classes': '(1)', 'up_mode': '"""upsample"""', 'depth': '(3)', 'padding': '(True)'}), "(in_channels=4, n_classes=1, up_mode='upsample', depth=3, padding=True)\n", (1927, 1998), False, 'from rlkit.torch.monet.unet import UNet\n'), ((2032, 2094), 'rlkit.torch.monet.monet.MonetVAE', 'MonetVAE', ([], {'attention_net': 'attention_net'}), "(**variant['vae_kwargs'], attention_net=attention_net)\n", (2040, 2094), False, 'from rlkit.torch.monet.monet import MonetVAE\n'), ((2147, 2211), 'rlkit.torch.monet.monet_trainer.MonetTrainer', 'MonetTrainer', (['train_data', 'test_data', 'm'], {}), "(train_data, test_data, m, **variant['algo_kwargs'])\n", (2159, 2211), False, 'from rlkit.torch.monet.monet_trainer import MonetTrainer\n'), ((2574, 2625), 'rlkit.core.logger.save_extra_data', 'logger.save_extra_data', (['m', '"""vae.pkl"""'], {'mode': '"""pickle"""'}), "(m, 'vae.pkl', mode='pickle')\n", (2596, 2625), False, 'from rlkit.core import logger\n'), ((3109, 3216), 'rlkit.launchers.launcher_util.run_experiment', 'run_experiment', (['train_vae'], {'exp_prefix': '"""vae-clevr"""', 'mode': '"""here_no_doodad"""', 'variant': 'variant', 'use_gpu': '(True)'}), "(train_vae, exp_prefix='vae-clevr', mode='here_no_doodad',\n variant=variant, use_gpu=True)\n", (3123, 3216), False, 'from rlkit.launchers.launcher_util import run_experiment\n'), ((647, 678), 'numpy.array', 'np.array', 
(["hdf5_file['features']"], {}), "(hdf5_file['features'])\n", (655, 678), True, 'import numpy as np\n'), ((951, 974), 'numpy.swapaxes', 'np.swapaxes', (['data', '(1)', '(3)'], {}), '(data, 1, 3)\n', (962, 974), True, 'import numpy as np\n'), ((727, 770), 'numpy.array', 'np.array', (["hdf5_file['training']['features']"], {}), "(hdf5_file['training']['features'])\n", (735, 770), True, 'import numpy as np\n'), ((805, 844), 'numpy.array', 'np.array', (["hdf5_file['test']['features']"], {}), "(hdf5_file['test']['features'])\n", (813, 844), True, 'import numpy as np\n')] |
import collections
import multiprocessing as mp
import multiprocessing.pool
import functools
import numpy as np
from buzzard._actors.message import Msg
from buzzard._actors.pool_job import ProductionJobWaiting, PoolJobWorking
class ActorComputer(object):
    """Actor that takes care of scheduling computations by using user's `compute_array` function"""

    def __init__(self, raster):
        """Create the computer actor for one recipe raster.

        :param raster: back-end raster object exposing `computation_pool`,
            `compute_array`, `debug_mngr`, `uid`, `dtype` and band count.
        """
        self._raster = raster
        self._alive = True
        computation_pool = raster.computation_pool
        if computation_pool is not None:
            # Addresses of the shared waiting/working room actors for this pool.
            self._waiting_room_address = '/Pool{}/WaitingRoom'.format(id(computation_pool))
            self._working_room_address = '/Pool{}/WorkingRoom'.format(id(computation_pool))
            if isinstance(computation_pool, mp.pool.ThreadPool):
                self._same_address_space = True
            elif isinstance(computation_pool, mp.pool.Pool):
                self._same_address_space = False
            else: # pragma: no cover
                assert False, 'Type should be checked in facade'
        # Jobs waiting for a pool token, grouped by the query that spawned them.
        self._waiting_jobs_per_query = collections.defaultdict(set)
        self._working_jobs = set()
        # Footprints whose computation was already launched/performed; used to
        # avoid computing the same footprint twice.
        self._performed_computations = set() # type: Set[Footprint]
        self.address = '/Raster{}/Computer'.format(self._raster.uid)

    @property
    def alive(self):
        # False once `receive_die` was processed.
        return self._alive

    # ******************************************************************************************* **
    def receive_compute_this_array(self, qi, compute_idx):
        """Receive message: Start making this array"""
        msgs = []
        if self._raster.computation_pool is None:
            # No pool configured: compute synchronously, inside the scheduler.
            work = self._create_work_job(qi, compute_idx)
            compute_fp = qi.cache_computation.list_of_compute_fp[compute_idx]
            if compute_fp not in self._performed_computations:
                res = work.func()
                res = self._normalize_user_result(compute_fp, res)
                self._raster.debug_mngr.event('object_allocated', res)
                self._performed_computations.add(compute_fp)
                msgs += self._commit_work_result(work, res)
        else:
            # Pool configured: queue the job in the waiting room until a token is granted.
            wait = Wait(self, qi, compute_idx)
            self._waiting_jobs_per_query[qi].add(wait)
            msgs += [Msg(self._waiting_room_address, 'schedule_job', wait)]
        return msgs

    def receive_token_to_working_room(self, job, token):
        """Receive message: A pool token was granted to this waiting job."""
        msgs = []
        self._waiting_jobs_per_query[job.qi].remove(job)
        if len(self._waiting_jobs_per_query[job.qi]) == 0:
            # Drop the empty set so cancel/die don't iterate stale queries.
            del self._waiting_jobs_per_query[job.qi]

        work = self._create_work_job(job.qi, job.compute_idx)
        compute_fp = job.qi.cache_computation.list_of_compute_fp[job.compute_idx]
        if compute_fp not in self._performed_computations:
            msgs += [Msg(self._working_room_address, 'launch_job_with_token', work, token)]
            self._performed_computations.add(compute_fp)
            self._working_jobs.add(work)
        else:
            # Footprint already being computed: return the unused token to the pool.
            msgs += [Msg(self._working_room_address, 'salvage_token', token)]

        return msgs

    def receive_job_done(self, job, result):
        """Receive message: A pool job finished; normalize and forward its result."""
        result = self._normalize_user_result(job.compute_fp, result)
        self._raster.debug_mngr.event('object_allocated', result)
        self._working_jobs.remove(job)
        return self._commit_work_result(job, result)

    def receive_cancel_this_query(self, qi):
        """Receive message: One query was dropped

        Parameters
        ----------
        qi: _actors.cached.query_infos.QueryInfos
        """
        msgs = []
        # Unschedule the waiting jobs of that query; jobs already in the
        # working room are left to finish.
        for job in self._waiting_jobs_per_query[qi]:
            msgs += [Msg(self._waiting_room_address, 'unschedule_job', job)]
        del self._waiting_jobs_per_query[qi]
        return msgs

    def receive_die(self):
        """Receive message: The raster was killed"""
        assert self._alive
        self._alive = False

        msgs = []
        # Unschedule everything still waiting and cancel everything running.
        msgs += [
            Msg(self._waiting_room_address, 'unschedule_job', job)
            for jobs in self._waiting_jobs_per_query.values()
            for job in jobs
        ]
        self._waiting_jobs_per_query.clear()
        msgs += [
            Msg(self._working_room_address, 'cancel_job', job)
            for job in self._working_jobs
        ]
        self._working_jobs.clear()
        # Drop the raster reference so it can be garbage collected.
        self._raster = None
        return msgs

    # ******************************************************************************************* **
    def _create_work_job(self, qi, compute_idx):
        # Build a working-room job; its constructor gathers the primitive
        # arrays from the query's queues (see `Work.__init__`).
        return Work(
            self, qi, compute_idx,
        )

    def _commit_work_result(self, work_job, res):
        # Forward the computed array to the accumulator actor for merging.
        return [Msg('ComputationAccumulator', 'combine_this_array', work_job.compute_fp, res)]

    def _normalize_user_result(self, compute_fp, res):
        """Validate and coerce the output of the user's `compute_array`.

        Ensures `res` is an ndarray whose first two dims match
        `compute_fp.shape` and whose band count matches the raster, then
        casts it to the raster's dtype (no copy if already correct).
        """
        if not isinstance(res, np.ndarray): # pragma: no cover
            raise ValueError("Result of recipe's `compute_array` have type {}, it should be ndarray".format(
                type(res)
            ))
        res = np.atleast_3d(res)
        y, x, c = res.shape
        if (y, x) != tuple(compute_fp.shape): # pragma: no cover
            raise ValueError("Result of recipe's `compute_array` have shape `{}`, should start with {}".format(
                res.shape,
                compute_fp.shape,
            ))
        if c != len(self._raster): # pragma: no cover
            raise ValueError("Result of recipe's `compute_array` have shape `{}`, should have {} bands".format(
                res.shape,
                len(self._raster),
            ))
        res = res.astype(self._raster.dtype, copy=False)
        return res
# ******************************************************************************************* **
class Wait(ProductionJobWaiting):
    """Waiting-room job for one `compute_array` call of one query."""

    def __init__(self, actor, qi, compute_idx):
        self.qi = qi
        self.compute_idx = compute_idx
        cache_computation = qi.cache_computation
        fp = cache_computation.list_of_compute_fp[compute_idx]
        # Priority is driven by the earliest production step needing this footprint.
        min_prod_idx = cache_computation.dict_of_min_prod_idx_per_compute_fp[fp]
        super().__init__(actor.address, qi, min_prod_idx, 4, fp)
class Work(PoolJobWorking):
    """Working-room job wrapping one call to the user's `compute_array`.

    Constructing the job consumes one item from every primitive queue of the
    query and increments `collected_count`, so jobs must be created in
    compute order — the assert below enforces it.
    """
    def __init__(self, actor, qi, compute_idx):
        qicc = qi.cache_computation
        assert qicc.collected_count == compute_idx, (qicc.collected_count, compute_idx)
        compute_fp = qicc.list_of_compute_fp[compute_idx]
        self.compute_fp = compute_fp

        # Pull the primitive arrays/footprints collected for this compute step.
        primitive_arrays = {}
        primitive_footprints = {}
        for prim_name, queue in qicc.primitive_queue_per_primitive.items():
            primitive_arrays[prim_name] = queue.get_nowait()
            primitive_footprints[prim_name] = qicc.primitive_fps_per_primitive[prim_name][compute_idx]
        qicc.collected_count += 1
        if actor._raster.computation_pool is None or actor._same_address_space:
            # Same address space: the facade proxy can be handed to `compute_array`.
            func = functools.partial(
                actor._raster.compute_array,
                compute_fp,
                primitive_footprints,
                primitive_arrays,
                actor._raster.facade_proxy
            )
        else:
            # Separate process: pass None instead of the proxy
            # (presumably the proxy cannot cross process boundaries — confirm).
            func = functools.partial(
                actor._raster.compute_array,
                compute_fp,
                primitive_footprints,
                primitive_arrays,
                None,
            )
        actor._raster.debug_mngr.event('object_allocated', func)
        super().__init__(actor.address, func)
| [
"collections.defaultdict",
"functools.partial",
"numpy.atleast_3d",
"buzzard._actors.message.Msg"
] | [((1087, 1115), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (1110, 1115), False, 'import collections\n'), ((5029, 5047), 'numpy.atleast_3d', 'np.atleast_3d', (['res'], {}), '(res)\n', (5042, 5047), True, 'import numpy as np\n'), ((3965, 4019), 'buzzard._actors.message.Msg', 'Msg', (['self._waiting_room_address', '"""unschedule_job"""', 'job'], {}), "(self._waiting_room_address, 'unschedule_job', job)\n", (3968, 4019), False, 'from buzzard._actors.message import Msg\n'), ((4196, 4246), 'buzzard._actors.message.Msg', 'Msg', (['self._working_room_address', '"""cancel_job"""', 'job'], {}), "(self._working_room_address, 'cancel_job', job)\n", (4199, 4246), False, 'from buzzard._actors.message import Msg\n'), ((4667, 4744), 'buzzard._actors.message.Msg', 'Msg', (['"""ComputationAccumulator"""', '"""combine_this_array"""', 'work_job.compute_fp', 'res'], {}), "('ComputationAccumulator', 'combine_this_array', work_job.compute_fp, res)\n", (4670, 4744), False, 'from buzzard._actors.message import Msg\n'), ((6868, 6998), 'functools.partial', 'functools.partial', (['actor._raster.compute_array', 'compute_fp', 'primitive_footprints', 'primitive_arrays', 'actor._raster.facade_proxy'], {}), '(actor._raster.compute_array, compute_fp,\n primitive_footprints, primitive_arrays, actor._raster.facade_proxy)\n', (6885, 6998), False, 'import functools\n'), ((7122, 7230), 'functools.partial', 'functools.partial', (['actor._raster.compute_array', 'compute_fp', 'primitive_footprints', 'primitive_arrays', 'None'], {}), '(actor._raster.compute_array, compute_fp,\n primitive_footprints, primitive_arrays, None)\n', (7139, 7230), False, 'import functools\n'), ((2267, 2320), 'buzzard._actors.message.Msg', 'Msg', (['self._waiting_room_address', '"""schedule_job"""', 'wait'], {}), "(self._waiting_room_address, 'schedule_job', wait)\n", (2270, 2320), False, 'from buzzard._actors.message import Msg\n'), ((2815, 2884), 'buzzard._actors.message.Msg', 'Msg', 
(['self._working_room_address', '"""launch_job_with_token"""', 'work', 'token'], {}), "(self._working_room_address, 'launch_job_with_token', work, token)\n", (2818, 2884), False, 'from buzzard._actors.message import Msg\n'), ((3019, 3074), 'buzzard._actors.message.Msg', 'Msg', (['self._working_room_address', '"""salvage_token"""', 'token'], {}), "(self._working_room_address, 'salvage_token', token)\n", (3022, 3074), False, 'from buzzard._actors.message import Msg\n'), ((3659, 3713), 'buzzard._actors.message.Msg', 'Msg', (['self._waiting_room_address', '"""unschedule_job"""', 'job'], {}), "(self._waiting_room_address, 'unschedule_job', job)\n", (3662, 3713), False, 'from buzzard._actors.message import Msg\n')] |
import argparse
import numpy as np
def main():
  """Copy a fixed subset of arrays from a results archive into a new,
  compressed .npz file.

  Command-line arguments:
    src_results_fn: path to the source results archive (numpy .npz)
    dest_results_fn: path where the reduced archive is written
  """
  parser = argparse.ArgumentParser(
    description='LOL HI THERE',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter
  )
  parser.add_argument('src_results_fn')
  parser.add_argument('dest_results_fn')
  args = parser.parse_args()

  # Keys to keep: scalars plus every clustrel_{posterior,evidence}_{rels,vids}.
  desired = ['seed', 'clusters', 'garbage']
  for A in ('posterior', 'evidence'):
    for B in ('rels', 'vids'):
      desired.append('clustrel_%s_%s' % (A, B))

  with open(args.src_results_fn, 'rb') as F:
    src_results = np.load(F, allow_pickle=True)
    # Extract while the file is still open: np.load on an .npz returns a
    # lazily-read NpzFile, so indexing it after the `with` block closed the
    # underlying file would raise (bug in the original code).
    dest_results = {K: src_results[K] for K in desired}
  np.savez_compressed(args.dest_results_fn, **dest_results)

if __name__ == '__main__':
  main()
| [
"numpy.savez_compressed",
"argparse.ArgumentParser",
"numpy.load"
] | [((59, 171), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""LOL HI THERE"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='LOL HI THERE', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (82, 171), False, 'import argparse\n'), ((603, 660), 'numpy.savez_compressed', 'np.savez_compressed', (['args.dest_results_fn'], {}), '(args.dest_results_fn, **dest_results)\n', (622, 660), True, 'import numpy as np\n'), ((515, 544), 'numpy.load', 'np.load', (['F'], {'allow_pickle': '(True)'}), '(F, allow_pickle=True)\n', (522, 544), True, 'import numpy as np\n')] |
import numpy as np
from scheduling.utils.bottleneck_assignment import solve_bap
from common.distance import distance
def solve_mbap(agents, seq_targets, already_spent_costs):
    """
    Solve the multi-level bottleneck assignment problem.
    A multi-level bottleneck assignment approach to the bus drivers' rostering problem
    :param agents: k agents, each agent is in a position
    :param seq_targets: two-dim array, T x k
    :param already_spent_costs: already spent costs of k agents
    :return: (min-max cost, assignment matrix) where the assignment is T x k;
        entry [i, j] is the index of the target assigned to agent j at level i
    """
    k = len(agents)
    T = len(seq_targets)
    opt_assignment = -1 * np.ones((T, k), dtype=int)
    opt_cost = 0
    # phase 1: greedy initialization — solve one bottleneck assignment per
    # level, carrying each agent's accumulated cost and position forward
    # print('phase 1...')
    pre_costs = already_spent_costs.copy()
    pre_pos = agents.copy()
    for i in range(T):
        c_i, assign_i = solve_bap(pre_pos, seq_targets[i], pre_costs)
        # the last level's bottleneck value is the cost of the full schedule
        opt_cost = c_i
        for j in range(k):
            cur_pos = seq_targets[i][assign_i[j]]
            # update opt_assignment
            opt_assignment[i, j] = assign_i[j]
            # update pre_costs (the cost from seq_targets[i-1] to seq_targets[i])
            pre_costs[j] += distance(pre_pos[j][1], pre_pos[j][0], cur_pos[1], cur_pos[0])
            # update agent pre_pos
            pre_pos[j] = cur_pos
    # print('phase 1 opt cost: ' + str(opt_cost))
    # phase 2: local improvement — re-optimize one level at a time (with its
    # neighbours fixed) and repeat until no level improves the bottleneck cost
    # print('phase 2...')
    unit_costs = cal_unit_costs(opt_assignment, agents, seq_targets)
    assignment_unstable = True
    while assignment_unstable:
        assignment_unstable = False
        for i in range(T):
            # costs of all arcs not touching level i, plus the sunk costs
            other_costs = cal_other_costs(i, unit_costs) + already_spent_costs
            if i == 0:
                pre_pos = agents.copy()
            else:
                for j in range(k):
                    pre_pos[j] = seq_targets[i - 1][opt_assignment[i - 1, j]]
            # positions at the next level (fixed), if level i is not the last
            later_pos = None
            if i != (T - 1):
                later_pos = []
                for j in range(k):
                    later_pos.append(seq_targets[i + 1][opt_assignment[i + 1, j]])
            c_i, assign_i = solve_bap(pre_pos, seq_targets[i], other_costs, later_pos)
            if c_i < opt_cost:
                # accept the improvement and refresh the affected unit costs
                assignment_unstable = True
                opt_cost = c_i
                for j in range(k):
                    opt_assignment[i, j] = assign_i[j]
                unit_costs = update_unit_costs(unit_costs, agents, seq_targets, opt_assignment, i)
                # print('phase 2 opt cost:' + str(opt_cost) + ', current cost:' + str(c_i))
    return opt_cost, opt_assignment
def cal_unit_costs(assignment, agents, seq_targets):
    """
    Compute the per-step travel cost of every agent under a full assignment.
    :param assignment: T x k matrix; assignment[i, j] is the index of the
        target served by agent j at level i
    :param agents: starting positions of the k agents
    :param seq_targets: two-dim array, T x k, target positions per level
    :return: k x T matrix of single-step distances
    """
    num_agents = len(agents)
    num_levels = len(seq_targets)
    unit_costs = np.zeros((num_agents, num_levels), dtype=float)
    for agent in range(num_agents):
        # Carry the previous position forward instead of re-indexing it.
        prev_loc = agents[agent]
        for level in range(num_levels):
            cur_loc = seq_targets[level][assignment[level, agent]]
            unit_costs[agent, level] = distance(prev_loc[1], prev_loc[0], cur_loc[1], cur_loc[0])
            prev_loc = cur_loc
    return unit_costs
def update_unit_costs(unit_costs, agents, seq_targets, assignment, changed_idx):
    """
    Refresh the per-step costs after the assignment at one level changed.
    Only the arc entering level `changed_idx` and (when it exists) the arc
    leaving it are affected, so only columns `changed_idx` and
    `changed_idx + 1` are recomputed.
    :param unit_costs: k x T matrix of per-step costs, modified in place
    :param agents: starting positions of the k agents
    :param seq_targets: two-dim array, T x k, target positions per level
    :param assignment: T x k matrix of assigned target indices
    :param changed_idx: index of the level whose assignment changed
    :return: the (in-place updated) unit_costs matrix
    """
    k = len(agents)
    T = len(seq_targets)
    for j in range(k):
        # arc into the changed level: from the previous position (or start)
        if changed_idx == 0:
            pre_loc = agents[j]
        else:
            pre_loc = seq_targets[changed_idx - 1][assignment[changed_idx - 1, j]]
        cur_loc = seq_targets[changed_idx][assignment[changed_idx, j]]
        unit_costs[j, changed_idx] = distance(pre_loc[1], pre_loc[0], cur_loc[1], cur_loc[0])
        # if the changed arc is not the last arc, recompute the next unit costs
        if changed_idx != (T - 1):
            pre_loc = cur_loc
            cur_loc = seq_targets[changed_idx + 1][assignment[changed_idx + 1, j]]
            unit_costs[j, changed_idx + 1] = distance(pre_loc[1], pre_loc[0], cur_loc[1], cur_loc[0])
    return unit_costs
def cal_other_costs(unfixed_idx, unit_costs):
    """
    Sum each agent's step costs, excluding the two arcs touched by the
    unfixed level (the arc into level `unfixed_idx` and the arc out of it).
    :param unfixed_idx: the unfixed assignment idx
    :param unit_costs: the each step cost of each agent (k x T)
    :return: other cost of agents except the unfixed idx
    """
    # cost of the arcs strictly before the unfixed level
    before = unit_costs[:, :unfixed_idx].sum(axis=1)
    # cost of the arcs strictly after the arc leaving the unfixed level
    after = unit_costs[:, unfixed_idx + 2:].sum(axis=1)
    return before + after
| [
"scheduling.utils.bottleneck_assignment.solve_bap",
"numpy.sum",
"numpy.zeros",
"numpy.ones",
"common.distance.distance"
] | [((2774, 2803), 'numpy.zeros', 'np.zeros', (['(k, T)'], {'dtype': 'float'}), '((k, T), dtype=float)\n', (2782, 2803), True, 'import numpy as np\n'), ((672, 698), 'numpy.ones', 'np.ones', (['(T, k)'], {'dtype': 'int'}), '((T, k), dtype=int)\n', (679, 698), True, 'import numpy as np\n'), ((887, 932), 'scheduling.utils.bottleneck_assignment.solve_bap', 'solve_bap', (['pre_pos', 'seq_targets[i]', 'pre_costs'], {}), '(pre_pos, seq_targets[i], pre_costs)\n', (896, 932), False, 'from scheduling.utils.bottleneck_assignment import solve_bap\n'), ((3580, 3636), 'common.distance.distance', 'distance', (['pre_loc[1]', 'pre_loc[0]', 'cur_loc[1]', 'cur_loc[0]'], {}), '(pre_loc[1], pre_loc[0], cur_loc[1], cur_loc[0])\n', (3588, 3636), False, 'from common.distance import distance\n'), ((4368, 4412), 'numpy.sum', 'np.sum', (['unit_costs[:, 0:unfixed_idx]'], {'axis': '(1)'}), '(unit_costs[:, 0:unfixed_idx], axis=1)\n', (4374, 4412), True, 'import numpy as np\n'), ((4415, 4462), 'numpy.sum', 'np.sum', (['unit_costs[:, unfixed_idx + 2:]'], {'axis': '(1)'}), '(unit_costs[:, unfixed_idx + 2:], axis=1)\n', (4421, 4462), True, 'import numpy as np\n'), ((1226, 1288), 'common.distance.distance', 'distance', (['pre_pos[j][1]', 'pre_pos[j][0]', 'cur_pos[1]', 'cur_pos[0]'], {}), '(pre_pos[j][1], pre_pos[j][0], cur_pos[1], cur_pos[0])\n', (1234, 1288), False, 'from common.distance import distance\n'), ((2180, 2238), 'scheduling.utils.bottleneck_assignment.solve_bap', 'solve_bap', (['pre_pos', 'seq_targets[i]', 'other_costs', 'later_pos'], {}), '(pre_pos, seq_targets[i], other_costs, later_pos)\n', (2189, 2238), False, 'from scheduling.utils.bottleneck_assignment import solve_bap\n'), ((3084, 3140), 'common.distance.distance', 'distance', (['pre_loc[1]', 'pre_loc[0]', 'cur_loc[1]', 'cur_loc[0]'], {}), '(pre_loc[1], pre_loc[0], cur_loc[1], cur_loc[0])\n', (3092, 3140), False, 'from common.distance import distance\n'), ((3910, 3966), 'common.distance.distance', 'distance', (['pre_loc[1]', 
'pre_loc[0]', 'cur_loc[1]', 'cur_loc[0]'], {}), '(pre_loc[1], pre_loc[0], cur_loc[1], cur_loc[0])\n', (3918, 3966), False, 'from common.distance import distance\n')] |
# -*- coding: utf-8 -*-
"""
@Author: <NAME>
"""
import math as mt
import numpy as np
import pytest
import raman.utilities as ut
from numpy import testing as npt
from collections import namedtuple
from operator import attrgetter
@pytest.mark.parametrize("num_channels", [1, 10])
def test_compute_power_spectrum(num_channels):
    """compute_power_spectrum must slice the WDM band into 50 GHz chunks and
    append the Raman pumps' power, frequency, direction and bandwidth."""
    # WDM comb parameters
    delta_f = 50e9
    pch = 1e-3
    roll_off = 0.1
    symbol_rate = 32e9
    start_f = 191.0e12
    wdm_band = num_channels * delta_f
    stop_f = start_f + wdm_band
    frequency_slice_size = 50e9
    # Counter-propagating Raman pumps (direction -1)
    pump_pow = [0.450, 0.400]
    pump_freq = [204.0e12, 206.3e12]
    pump_direction = [-1, -1]
    pump_bandwidth = [1e6, 1e6]
    num_pumps = len(pump_pow)
    # Lightweight stand-ins for the project's spectral-information objects
    spectral_information = namedtuple('SpectralInformation', 'carriers')
    raman_pump_information = namedtuple('SpectralInformation', 'raman_pumps')
    channel = namedtuple('Channel', 'channel_number frequency baud_rate roll_off power')
    power = namedtuple('Power', 'signal nonlinear_interference amplified_spontaneous_emission')
    pump = namedtuple('RamanPump', 'pump_number power frequency propagation_direction pump_bandwidth')
    carriers = tuple(channel(1 + ii, start_f + (delta_f * ii), symbol_rate, roll_off, power(pch, 0, 0))
                     for ii in range(0, num_channels))
    pumps = tuple(pump(1 + ii, pump_pow[ii], pump_freq[ii], pump_direction[ii], pump_bandwidth[ii])
                  for ii in range(0, num_pumps))
    spec_info = spectral_information(carriers=carriers)
    raman_pump_info = raman_pump_information(raman_pumps=pumps)
    pow_array, f_array, propagation_direction, noise_bandwidth_array = ut.compute_power_spectrum(spec_info, raman_pump_info)
    # Computing expected values for wdm channels
    n_slices = mt.ceil(wdm_band / frequency_slice_size)
    # Each slice carries the fraction of channel power falling inside it;
    # a partial last slice carries proportionally less.
    pow_slice = pch * frequency_slice_size / delta_f
    pow_last_slice = (wdm_band / frequency_slice_size) % 1 * pow_slice
    pow_array_test = np.ones(n_slices) * pow_slice
    if pow_last_slice:
        pow_array_test[-1] = pow_last_slice
    f_array_test = np.arange(start_f, stop_f, frequency_slice_size)
    propagation_direction_test = np.ones(num_channels)
    channels_noise_bw_test = np.ones(num_channels)*symbol_rate
    # Computing expected values channels + Raman pumps
    pow_array_test = np.append(pow_array_test, pump_pow)
    f_array_test = np.append(f_array_test, pump_freq)
    propagation_direction_test = np.append(propagation_direction_test, pump_direction)
    noise_bandwidth_array_test = np.append(channels_noise_bw_test, pump_bandwidth)
    npt.assert_allclose(pow_array_test, pow_array, rtol=1e-6)
    npt.assert_allclose(f_array_test, f_array, rtol=1e-6)
    npt.assert_allclose(propagation_direction_test, propagation_direction, rtol=1e-6)
    npt.assert_allclose(noise_bandwidth_array_test, noise_bandwidth_array, rtol=1e-6)
@pytest.mark.parametrize("roll_off", [0, 0.5])
def test_raised_cosine_comb(roll_off):
    """The PSD of the comb equals pch / baud_rate at each carrier center and
    is zero halfway between adjacent carriers."""
    # Spectral parameters of a small 4-channel WDM comb
    num_channels = 4
    delta_f = 50e9
    symbol_rate = 32e9
    start_f = 193e12
    pch = 1e-3
    power = namedtuple('Power', 'signal nonlinear_interference amplified_spontaneous_emission')
    channel = namedtuple('Channel', 'channel_number frequency baud_rate roll_off power')
    carriers = tuple(
        channel(idx + 1, start_f + idx * delta_f, symbol_rate, roll_off, power(pch, 0, 0))
        for idx in range(num_channels)
    )
    # Evaluate on the carrier centers and on the mid-points between them.
    f_eval = np.array([start_f + idx * delta_f / 2 for idx in range(2 * num_channels)])
    psd = ut.raised_cosine_comb(f_eval, *carriers)
    # Alternating [center, mid-point] pattern: pch/Rs at centers, 0 between.
    expected_psd = np.array([pch / symbol_rate, 0.0] * num_channels)
    npt.assert_allclose(expected_psd, psd, rtol=1e-5)
"raman.utilities.compute_power_spectrum",
"math.ceil",
"numpy.testing.assert_allclose",
"raman.utilities.raised_cosine_comb",
"numpy.ones",
"numpy.append",
"numpy.arange",
"collections.namedtuple",
"numpy.array",
"pytest.mark.parametrize"
] | [((231, 279), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num_channels"""', '[1, 10]'], {}), "('num_channels', [1, 10])\n", (254, 279), False, 'import pytest\n'), ((2884, 2929), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""roll_off"""', '[0, 0.5]'], {}), "('roll_off', [0, 0.5])\n", (2907, 2929), False, 'import pytest\n'), ((717, 762), 'collections.namedtuple', 'namedtuple', (['"""SpectralInformation"""', '"""carriers"""'], {}), "('SpectralInformation', 'carriers')\n", (727, 762), False, 'from collections import namedtuple\n'), ((792, 840), 'collections.namedtuple', 'namedtuple', (['"""SpectralInformation"""', '"""raman_pumps"""'], {}), "('SpectralInformation', 'raman_pumps')\n", (802, 840), False, 'from collections import namedtuple\n'), ((855, 929), 'collections.namedtuple', 'namedtuple', (['"""Channel"""', '"""channel_number frequency baud_rate roll_off power"""'], {}), "('Channel', 'channel_number frequency baud_rate roll_off power')\n", (865, 929), False, 'from collections import namedtuple\n'), ((942, 1029), 'collections.namedtuple', 'namedtuple', (['"""Power"""', '"""signal nonlinear_interference amplified_spontaneous_emission"""'], {}), "('Power',\n 'signal nonlinear_interference amplified_spontaneous_emission')\n", (952, 1029), False, 'from collections import namedtuple\n'), ((1037, 1132), 'collections.namedtuple', 'namedtuple', (['"""RamanPump"""', '"""pump_number power frequency propagation_direction pump_bandwidth"""'], {}), "('RamanPump',\n 'pump_number power frequency propagation_direction pump_bandwidth')\n", (1047, 1132), False, 'from collections import namedtuple\n'), ((1660, 1713), 'raman.utilities.compute_power_spectrum', 'ut.compute_power_spectrum', (['spec_info', 'raman_pump_info'], {}), '(spec_info, raman_pump_info)\n', (1685, 1713), True, 'import raman.utilities as ut\n'), ((1779, 1819), 'math.ceil', 'mt.ceil', (['(wdm_band / frequency_slice_size)'], {}), '(wdm_band / frequency_slice_size)\n', (1786, 1819), 
True, 'import math as mt\n'), ((2083, 2131), 'numpy.arange', 'np.arange', (['start_f', 'stop_f', 'frequency_slice_size'], {}), '(start_f, stop_f, frequency_slice_size)\n', (2092, 2131), True, 'import numpy as np\n'), ((2166, 2187), 'numpy.ones', 'np.ones', (['num_channels'], {}), '(num_channels)\n', (2173, 2187), True, 'import numpy as np\n'), ((2328, 2363), 'numpy.append', 'np.append', (['pow_array_test', 'pump_pow'], {}), '(pow_array_test, pump_pow)\n', (2337, 2363), True, 'import numpy as np\n'), ((2383, 2417), 'numpy.append', 'np.append', (['f_array_test', 'pump_freq'], {}), '(f_array_test, pump_freq)\n', (2392, 2417), True, 'import numpy as np\n'), ((2451, 2504), 'numpy.append', 'np.append', (['propagation_direction_test', 'pump_direction'], {}), '(propagation_direction_test, pump_direction)\n', (2460, 2504), True, 'import numpy as np\n'), ((2538, 2587), 'numpy.append', 'np.append', (['channels_noise_bw_test', 'pump_bandwidth'], {}), '(channels_noise_bw_test, pump_bandwidth)\n', (2547, 2587), True, 'import numpy as np\n'), ((2593, 2651), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['pow_array_test', 'pow_array'], {'rtol': '(1e-06)'}), '(pow_array_test, pow_array, rtol=1e-06)\n', (2612, 2651), True, 'from numpy import testing as npt\n'), ((2655, 2709), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['f_array_test', 'f_array'], {'rtol': '(1e-06)'}), '(f_array_test, f_array, rtol=1e-06)\n', (2674, 2709), True, 'from numpy import testing as npt\n'), ((2713, 2800), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['propagation_direction_test', 'propagation_direction'], {'rtol': '(1e-06)'}), '(propagation_direction_test, propagation_direction, rtol\n =1e-06)\n', (2732, 2800), True, 'from numpy import testing as npt\n'), ((2799, 2886), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['noise_bandwidth_array_test', 'noise_bandwidth_array'], {'rtol': '(1e-06)'}), '(noise_bandwidth_array_test, noise_bandwidth_array, rtol\n 
=1e-06)\n', (2818, 2886), True, 'from numpy import testing as npt\n'), ((3102, 3189), 'collections.namedtuple', 'namedtuple', (['"""Power"""', '"""signal nonlinear_interference amplified_spontaneous_emission"""'], {}), "('Power',\n 'signal nonlinear_interference amplified_spontaneous_emission')\n", (3112, 3189), False, 'from collections import namedtuple\n'), ((3200, 3274), 'collections.namedtuple', 'namedtuple', (['"""Channel"""', '"""channel_number frequency baud_rate roll_off power"""'], {}), "('Channel', 'channel_number frequency baud_rate roll_off power')\n", (3210, 3274), False, 'from collections import namedtuple\n'), ((3537, 3577), 'raman.utilities.raised_cosine_comb', 'ut.raised_cosine_comb', (['f_eval', '*carriers'], {}), '(f_eval, *carriers)\n', (3558, 3577), True, 'import raman.utilities as ut\n'), ((3598, 3610), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3606, 3610), True, 'import numpy as np\n'), ((3725, 3775), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected_psd', 'psd'], {'rtol': '(1e-05)'}), '(expected_psd, psd, rtol=1e-05)\n', (3744, 3775), True, 'from numpy import testing as npt\n'), ((1966, 1983), 'numpy.ones', 'np.ones', (['n_slices'], {}), '(n_slices)\n', (1973, 1983), True, 'import numpy as np\n'), ((2217, 2238), 'numpy.ones', 'np.ones', (['num_channels'], {}), '(num_channels)\n', (2224, 2238), True, 'import numpy as np\n'), ((3672, 3719), 'numpy.append', 'np.append', (['expected_psd', '[pch / symbol_rate, 0]'], {}), '(expected_psd, [pch / symbol_rate, 0])\n', (3681, 3719), True, 'import numpy as np\n')] |
import os
import numpy as np
import tensorflow as tf
from librosa import effects
from tacotron.models import create_model
from tacotron.utils.text import text_to_sequence, sequence_to_text
from tacotron.utils import plot
from datasets import audio
from datetime import datetime
import sounddevice as sd
import pyaudio
import wave
from infolog import log
class Synthesizer:
def load(self, checkpoint_path, hparams, gta=False, model_name='Tacotron'):
log('Constructing model: %s' % model_name)
inputs = tf.placeholder(tf.int32, [1, None], 'inputs')
input_lengths = tf.placeholder(tf.int32, [1], 'input_lengths')
targets = tf.placeholder(tf.float32, [1, None, hparams.num_mels], 'mel_targets')
with tf.variable_scope('model') as scope:
self.model = create_model(model_name, hparams)
if gta:
self.model.initialize(inputs, input_lengths, targets, gta=gta)
else:
self.model.initialize(inputs, input_lengths)
self.mel_outputs = self.model.mel_outputs
self.alignment = self.model.alignments[0]
self.gta = gta
self._hparams = hparams
log('Loading checkpoint: %s' % checkpoint_path)
self.session = tf.Session()
self.session.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(self.session, checkpoint_path)
# mel_filename = synth.synthesize(text, i+1, eval_dir, log_dir, None)
def synthesize(self, text, index, out_dir, log_dir, mel_filename):
hparams = self._hparams
cleaner_names = [x.strip() for x in hparams.cleaners.split(',')]
seq = text_to_sequence(text, cleaner_names)
print(text)
print(seq)
text_converted = sequence_to_text(seq)
print(text_converted)
feed_dict = {
self.model.inputs: [np.asarray(seq, dtype=np.int32)],
self.model.input_lengths: np.asarray([len(seq)], dtype=np.int32),
}
if self.gta:
# feed_dict[self.model.mel_targets] = np.load(mel_filename).reshape(1, -1, 80)
feed_dict[self.model.mel_targets] = np.load(mel_filename).reshape(1, -1, 40)
if self.gta or not hparams.predict_linear:
mels, alignment = self.session.run([self.mel_outputs, self.alignment], feed_dict=feed_dict)
else:
linear, mels, alignment = self.session.run([self.linear_outputs, self.mel_outputs, self.alignment],
feed_dict=feed_dict)
linear = linear.reshape(-1, hparams.num_freq)
mels = mels.reshape(-1, hparams.num_mels) # Thanks to @imdatsolak for pointing this out
# convert checkpoint to frozen model
minimal_graph = tf.graph_util.convert_variables_to_constants(self.session, self.session.graph_def,
["model/inference/add"])
tf.train.write_graph(minimal_graph, '.', 'inference_model.pb', as_text=False)
npy_data = mels.reshape((-1,))
f32name = os.path.join(out_dir, 'mels/feature-{}.f32'.format(index)) # by jiang
npy_data.tofile(f32name) # by jiang
return
| [
"numpy.load",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"numpy.asarray",
"tensorflow.Session",
"tensorflow.variable_scope",
"infolog.log",
"tensorflow.placeholder",
"tensorflow.train.write_graph",
"tensorflow.graph_util.convert_variables_to_constants",
"tacotron.utils.... | [((454, 496), 'infolog.log', 'log', (["('Constructing model: %s' % model_name)"], {}), "('Constructing model: %s' % model_name)\n", (457, 496), False, 'from infolog import log\n'), ((508, 553), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[1, None]', '"""inputs"""'], {}), "(tf.int32, [1, None], 'inputs')\n", (522, 553), True, 'import tensorflow as tf\n'), ((572, 618), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[1]', '"""input_lengths"""'], {}), "(tf.int32, [1], 'input_lengths')\n", (586, 618), True, 'import tensorflow as tf\n'), ((631, 701), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[1, None, hparams.num_mels]', '"""mel_targets"""'], {}), "(tf.float32, [1, None, hparams.num_mels], 'mel_targets')\n", (645, 701), True, 'import tensorflow as tf\n'), ((1069, 1116), 'infolog.log', 'log', (["('Loading checkpoint: %s' % checkpoint_path)"], {}), "('Loading checkpoint: %s' % checkpoint_path)\n", (1072, 1116), False, 'from infolog import log\n'), ((1134, 1146), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1144, 1146), True, 'import tensorflow as tf\n'), ((1211, 1227), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1225, 1227), True, 'import tensorflow as tf\n'), ((1517, 1554), 'tacotron.utils.text.text_to_sequence', 'text_to_sequence', (['text', 'cleaner_names'], {}), '(text, cleaner_names)\n', (1533, 1554), False, 'from tacotron.utils.text import text_to_sequence, sequence_to_text\n'), ((1601, 1622), 'tacotron.utils.text.sequence_to_text', 'sequence_to_text', (['seq'], {}), '(seq)\n', (1617, 1622), False, 'from tacotron.utils.text import text_to_sequence, sequence_to_text\n'), ((2459, 2571), 'tensorflow.graph_util.convert_variables_to_constants', 'tf.graph_util.convert_variables_to_constants', (['self.session', 'self.session.graph_def', "['model/inference/add']"], {}), "(self.session, self.session.\n graph_def, ['model/inference/add'])\n", (2503, 2571), True, 'import tensorflow 
as tf\n'), ((2587, 2664), 'tensorflow.train.write_graph', 'tf.train.write_graph', (['minimal_graph', '"""."""', '"""inference_model.pb"""'], {'as_text': '(False)'}), "(minimal_graph, '.', 'inference_model.pb', as_text=False)\n", (2607, 2664), True, 'import tensorflow as tf\n'), ((709, 735), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {}), "('model')\n", (726, 735), True, 'import tensorflow as tf\n'), ((762, 795), 'tacotron.models.create_model', 'create_model', (['model_name', 'hparams'], {}), '(model_name, hparams)\n', (774, 795), False, 'from tacotron.models import create_model\n'), ((1166, 1199), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1197, 1199), True, 'import tensorflow as tf\n'), ((1686, 1717), 'numpy.asarray', 'np.asarray', (['seq'], {'dtype': 'np.int32'}), '(seq, dtype=np.int32)\n', (1696, 1717), True, 'import numpy as np\n'), ((1930, 1951), 'numpy.load', 'np.load', (['mel_filename'], {}), '(mel_filename)\n', (1937, 1951), True, 'import numpy as np\n')] |
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ingest.
"""
import collections
import math
import os
import tempfile
from absl import flags
from absl.testing import absltest
import numpy as np
import scipy.io.wavfile
import scipy.signal
from telluride_decoding import ingest
import tensorflow.compat.v2 as tf
class IngestTest(absltest.TestCase):
def setUp(self):
super(IngestTest, self).setUp()
self._test_dir = os.path.join(
flags.FLAGS.test_srcdir, '__main__',
'test_data/')
def test_brain_signal(self):
# Test to make sure fix_offset works with 1d signals.
test_name = 'test_name'
test_source = 'test_source'
test_sr = 4
test_data = np.arange(10)
s = ingest.BrainSignal(test_name, test_data, test_sr, test_source)
self.assertEqual(s.name, test_name)
self.assertEqual(s.data_type, test_source)
self.assertEqual(s.sr, test_sr)
self.assertTrue(np.all(np.reshape(test_data, (-1, 1)) == s.signal))
s.fix_offset(1)
self.assertEqual(s.signal[0], 4)
self.assertEqual(s.signal[-1], 9)
# Test to make sure fix_offset works with 2d signals.
s = ingest.BrainSignal('test', np.reshape(np.arange(20), (10, -1)), 4)
s.fix_offset(1)
self.assertLen(s.signal.shape, 2)
self.assertEqual(s.signal[0, 0], 8)
self.assertEqual(s.signal[0, 1], 9)
# Test some of the parameter checking.
with self.assertRaises(ValueError):
s = ingest.BrainSignal(42, test_data, test_sr, test_source)
def test_memory_brain_data_file(self):
one_data = np.arange(10) + 100
two_data = np.arange(10) + 200
channel_data = {'one': one_data,
'two': two_data}
test_sr = 4
df = ingest.MemoryBrainDataFile(channel_data, test_sr)
self.assertEqual(set(df.signal_names), set(channel_data.keys()))
self.assertEqual(df.signal_fs('one'), test_sr)
self.assertEqual(df.signal_fs('two'), test_sr)
self.assertTrue(np.all(df.signal_values('one') == one_data))
self.assertTrue(np.all(df.signal_values('two') == two_data))
def test_brain_data_file_edf_ingest(self):
test_file_name = 'sample.edf'
data_file = ingest.EdfBrainDataFile(test_file_name)
self.assertEqual(data_file.filename, test_file_name)
data_file.load_all_data(self._test_dir)
self.assertLen(data_file.signal_names, 103)
test_channel_name = u'Snore' # Just a random channel
self.assertIn(test_channel_name, data_file.signal_names)
self.assertEqual(data_file.signal_fs('TRIG'), 512.0)
self.assertEqual(data_file.signal_values('TRIG').shape[0], 33792)
def test_brain_trial(self):
trial_name = 'meg/subj01_1ksamples.wav'
trial = ingest.BrainTrial(trial_name)
self.assertEqual(trial.trial_name, trial_name.split('.')[0])
trial.load_sound(trial_name, sound_dir=self._test_dir)
brain_data_file_object = ingest.EdfBrainDataFile('sample.edf')
trial.load_brain_data(self._test_dir, brain_data_file_object)
print(trial.summary_string)
summary = trial.summary_string()
self.assertIn('103 EEG channels', summary)
self.assertIn('with 66s of eeg data', summary)
self.assertIn('1.00006s of audio data', summary)
channels = [c for c in trial.iterate_brain_channels()]
self.assertLen(channels, 103)
self.assertIn('TRIG', [c.name for c in channels])
scalp_channel_names = ('TRIG, Fp2, F3, F4, F7, F8, C3, C4, T7, T8, P3, P4, '
'P7, P8, O1, O2').split(', ')
self.assertNotIn('eeg', trial.model_features)
trial.assemble_brain_data(scalp_channel_names)
self.assertIn('eeg', trial.model_features)
self.assertLen(trial.model_features['eeg'][0, :],
len(scalp_channel_names))
print('trial.model_features channel 0:',
trial.model_features['eeg'][:100, 0])
print('trial.model_features channel last:',
trial.model_features['eeg'][:100, -1])
tf_dir = tempfile.mkdtemp()
tf.io.gfile.makedirs(os.path.join(tf_dir, 'meg'))
tf_file = trial.write_data_as_tfrecords(tf_dir)
feature_dict = ingest.discover_feature_shapes(tf_file)
print('Feature dict is:', feature_dict)
self.assertIn('eeg', feature_dict)
self.assertEqual(feature_dict['eeg'].shape, [len(scalp_channel_names),])
(_, errors) = ingest.count_tfrecords(tf_file)
self.assertEqual(errors, 0)
with self.assertRaises(ValueError):
trial.assemble_brain_data('TRIG, FOO, BAR')
with self.assertRaises(ValueError):
trial.assemble_brain_data(['TRIG', 'TRIG', 'F3'])
def test_mean_std(self):
a = np.random.randn(3, 5)
b = np.random.randn(3, 5)
data_list = [a, b]
mean, std = ingest.find_mean_std(data_list, columnwise=False)
both_arrays = np.concatenate((np.reshape(a, (-1,)),
np.reshape(b, (-1,))), axis=0)
self.assertAlmostEqual(mean, np.mean(both_arrays))
self.assertAlmostEqual(std, np.std(both_arrays))
data_list = [ingest.normalize_data(a, mean, std),
ingest.normalize_data(b, mean, std)]
mean, std = ingest.find_mean_std(data_list)
self.assertAlmostEqual(mean, 0.0)
self.assertAlmostEqual(std, 1.0)
def test_mean_std_columnwise(self):
a = np.random.randn(3, 5)
b = np.random.randn(3, 5)
data_list = [a, b]
mean, std = ingest.find_mean_std(data_list, columnwise=True)
both_arrays = np.concatenate((a, b), axis=0)
true_mean = np.mean(both_arrays, axis=0, keepdims=True)
true_std = np.std(both_arrays, axis=0, keepdims=True)
np.testing.assert_allclose(true_mean[0], mean[0])
np.testing.assert_allclose(true_std[0], std[0])
data_list = [ingest.normalize_data(a, mean, std),
ingest.normalize_data(b, mean, std)]
mean, std = ingest.find_mean_std(data_list, columnwise=True)
np.testing.assert_allclose(mean[0], np.zeros_like(mean[0]), atol=1e-8)
np.testing.assert_allclose(std[0], np.ones_like(std[0]))
def test_find_temporal_offset_via_linear_regression(self):
test_shift = 1.3
audio_times = np.arange(0, 5, 1)
eeg_times = audio_times + test_shift
eeg_times[0] = math.pi # Screw up time for first data point
estimated_time, _ = ingest.find_temporal_offset_via_linear_regression(
audio_times, eeg_times)
self.assertAlmostEqual(estimated_time, test_shift, places=5)
def test_find_temporal_offset_via_histogram(self):
# Generate a bunch of random triggers, shift them, and see if the histogram
# algorithm produces the right answer.
num_triggers = 10
test_shift = 1.42
atriggers = np.random.random(num_triggers)
etriggers = atriggers + test_shift
atriggers[0] = math.pi
num_triggers = 10
atriggers = np.random.random(num_triggers)
etriggers = atriggers + 1.42
mode = ingest.find_temporal_offset_via_mode_histogram(atriggers, etriggers,
fs=100)
self.assertAlmostEqual(mode, test_shift, delta=0.01)
def test_remove_close_times(self):
trigger_width = 0.1
onsets = np.array([1.1, 2.4, 3.5, 6.7, 25.8, 30.4, 87.2, 90.2])
offsets = onsets + trigger_width
times = np.sort(np.concatenate((onsets, offsets)))
times = ingest.remove_close_times(times, min_time=trigger_width*2)
self.assertEqual(np.sum(onsets-times), 0)
def test_brain_experiment(self):
one_data = np.arange(10) + 100
two_data = np.arange(10) + 200
channel_data = {'one': one_data,
'two': two_data}
test_sr = 4
df = ingest.MemoryBrainDataFile(channel_data, test_sr)
sound_filename = 'subj01_1ksamples.wav'
trial_name = ingest.BrainExperiment.delete_suffix(sound_filename, '.wav')
trial_dict = {trial_name: [sound_filename, df]}
test_dir = os.path.join(self._test_dir, 'meg')
experiment = ingest.BrainExperiment(trial_dict,
test_dir, test_dir)
experiment.load_all_data(test_dir, test_dir)
summary = experiment.summary()
self.assertIn('Found 1 trials', summary)
self.assertIn('Trial subj01_1ksamples: 2 EEG channels with 2.5s of '
'eeg data', summary)
experiment.z_score_all_data()
def test_brain_memory_experiment(self):
fs = 16000
audio_len = 2*fs
audio_data = np.random.randn(audio_len, 1)
frame_sr = 100
eeg_len = 2*frame_sr
channel_one = np.arange(eeg_len) # Use ints for easier debugging
channel_two = np.arange(eeg_len) + 200
eeg_data = collections.OrderedDict((('C1', channel_one),
('C2', channel_two)))
df = ingest.MemoryBrainDataFile(eeg_data, frame_sr)
trial_two_name = 'trial_2'
experiment_dict = {trial_two_name:
[{'audio_data': audio_data, 'audio_sr': fs}, df],
}
experiment = ingest.BrainExperiment(experiment_dict,
self._test_dir, self._test_dir,
frame_rate=frame_sr)
self.assertTrue(experiment)
experiment.load_all_data(self._test_dir, self._test_dir)
summary = experiment.summary()
self.assertIn('Found 1 trials', summary)
self.assertIn('Trial trial_2: 2 EEG channels with 2s of eeg data', summary)
for trial in experiment.iterate_trials():
trial.assemble_brain_data(list(eeg_data.keys()))
# Master copy of EEG data has moved from brain_data to model_features dict
brain_data = trial.model_features['eeg']
self.assertEqual(brain_data.shape, (eeg_len, 2))
tmp_dir = flags.FLAGS.test_tmpdir or '/tmp'
all_ingested_files = experiment.write_all_data(tmp_dir)
self.assertLen(all_ingested_files, 1)
tf_file = os.path.join(tmp_dir, trial_two_name + '.tfrecords')
(_, error) = ingest.count_tfrecords(tf_file)
self.assertEqual(error, 0)
file_data = ingest.read_tfrecords(tf_file)
self.assertIn('eeg', file_data)
np.testing.assert_allclose(file_data['eeg'],
np.hstack((np.reshape(channel_one[:eeg_len],
(-1, 1)),
np.reshape(channel_two[:eeg_len],
(-1, 1)))))
# Test like above, but include the eeg offset correction.
def test_brain_memory_experiment2(self):
fs = 16000
audio_len = fs
audio_data = np.random.randn(audio_len, 1)
frame_sr = 100
channel_one = np.arange(2*frame_sr) # Use ints for easier debugging
channel_two = np.arange(2*frame_sr) + 200
eeg_data = collections.OrderedDict((('C1', channel_one),
('C2', channel_two)))
df = ingest.MemoryBrainDataFile(eeg_data, frame_sr)
trial_two_name = 'trial_2'
experiment_dict = {trial_two_name:
[{'audio_data': audio_data, 'audio_sr': fs}, df],
}
experiment = ingest.BrainExperiment(experiment_dict,
self._test_dir, self._test_dir,
frame_rate=frame_sr)
self.assertTrue(experiment)
experiment.load_all_data(self._test_dir, self._test_dir)
summary = experiment.summary()
self.assertIn('Found 1 trials', summary)
self.assertIn('Trial trial_2: 2 EEG channels with 2s of eeg data', summary)
for trial in experiment.iterate_trials():
trial.fix_eeg_offset(1.0)
trial.assemble_brain_data(list(eeg_data.keys()))
# Master copy of EEG data has moved from brain_data to model_features dict
brain_data = trial.model_features['eeg']
# Now the eeg size is shorter, due to fix_eeg_offset above.
self.assertEqual(brain_data.shape, (frame_sr, 2))
tmp_dir = '/tmp'
all_ingested_files = experiment.write_all_data(tmp_dir)
self.assertLen(all_ingested_files, 1)
tf_file = os.path.join(tmp_dir, trial_two_name + '.tfrecords')
(_, error) = ingest.count_tfrecords(tf_file)
self.assertEqual(error, 0)
file_data = ingest.read_tfrecords(tf_file)
print('Read in data and found keys:', list(file_data.keys()))
self.assertIn('eeg', file_data)
np.testing.assert_allclose(file_data['eeg'],
np.hstack((np.reshape(channel_one[frame_sr:],
(-1, 1)),
np.reshape(channel_two[frame_sr:],
(-1, 1)))))
def test_local_file_copy(self):
sound_filename = 'tapestry.wav'
full_filename = os.path.join(self._test_dir, sound_filename)
with ingest.LocalCopy(full_filename) as fn:
sound_fs, sound_data = scipy.io.wavfile.read(fn)
self.assertEqual(sound_fs, 16000)
self.assertEqual(sound_data.shape[0], 50381)
def test_tfrecord_transform(self):
num_samples = 5
trial_name = 'Trial 01'
positive_data = np.reshape(np.arange(num_samples, dtype=np.float32),
(-1, 1))
negative_data = np.reshape(-np.arange(num_samples, dtype=np.float32),
(-1, 1))
brain_trial = ingest.BrainTrial(trial_name)
brain_trial.add_model_feature('positive', positive_data)
brain_trial.add_model_feature('negative', negative_data)
tf_dir = os.path.join(os.environ.get('TMPDIR') or '/tmp',
'tfrecord_transform')
tf.io.gfile.makedirs(tf_dir)
brain_trial.write_data_as_tfrecords(tf_dir)
input_file = trial_name + '.tfrecords'
file_data = ingest.read_tfrecords(os.path.join(tf_dir, input_file))
self.assertCountEqual(('positive', 'negative'), file_data.keys())
np.testing.assert_equal(file_data['positive'], positive_data)
np.testing.assert_equal(file_data['negative'], negative_data)
new_trial_name = 'New ' + trial_name
new_file = ingest.transform_tfrecords(os.path.join(tf_dir, input_file),
tf_dir, new_trial_name,
[lambda d: ('two', 2*d['positive']),
])
self.assertEqual(new_file, os.path.join(tf_dir,
new_trial_name + '.tfrecords'))
file_data = ingest.read_tfrecords(new_file)
self.assertCountEqual(('positive', 'negative', 'two'), file_data.keys())
np.testing.assert_equal(file_data['positive'], positive_data)
np.testing.assert_equal(file_data['negative'], negative_data)
np.testing.assert_equal(file_data['two'], 2*positive_data)
if __name__ == '__main__':
tf.compat.v1.enable_v2_behavior()
absltest.main()
| [
"absl.testing.absltest.main",
"numpy.sum",
"tensorflow.compat.v2.io.gfile.makedirs",
"telluride_decoding.ingest.BrainTrial",
"numpy.mean",
"numpy.arange",
"telluride_decoding.ingest.LocalCopy",
"os.path.join",
"telluride_decoding.ingest.BrainExperiment.delete_suffix",
"telluride_decoding.ingest.di... | [((15245, 15278), 'tensorflow.compat.v2.compat.v1.enable_v2_behavior', 'tf.compat.v1.enable_v2_behavior', ([], {}), '()\n', (15276, 15278), True, 'import tensorflow.compat.v2 as tf\n'), ((15281, 15296), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (15294, 15296), False, 'from absl.testing import absltest\n'), ((1051, 1114), 'os.path.join', 'os.path.join', (['flags.FLAGS.test_srcdir', '"""__main__"""', '"""test_data/"""'], {}), "(flags.FLAGS.test_srcdir, '__main__', 'test_data/')\n", (1063, 1114), False, 'import os\n'), ((1314, 1327), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1323, 1327), True, 'import numpy as np\n'), ((1336, 1398), 'telluride_decoding.ingest.BrainSignal', 'ingest.BrainSignal', (['test_name', 'test_data', 'test_sr', 'test_source'], {}), '(test_name, test_data, test_sr, test_source)\n', (1354, 1398), False, 'from telluride_decoding import ingest\n'), ((2323, 2372), 'telluride_decoding.ingest.MemoryBrainDataFile', 'ingest.MemoryBrainDataFile', (['channel_data', 'test_sr'], {}), '(channel_data, test_sr)\n', (2349, 2372), False, 'from telluride_decoding import ingest\n'), ((2770, 2809), 'telluride_decoding.ingest.EdfBrainDataFile', 'ingest.EdfBrainDataFile', (['test_file_name'], {}), '(test_file_name)\n', (2793, 2809), False, 'from telluride_decoding import ingest\n'), ((3294, 3323), 'telluride_decoding.ingest.BrainTrial', 'ingest.BrainTrial', (['trial_name'], {}), '(trial_name)\n', (3311, 3323), False, 'from telluride_decoding import ingest\n'), ((3478, 3515), 'telluride_decoding.ingest.EdfBrainDataFile', 'ingest.EdfBrainDataFile', (['"""sample.edf"""'], {}), "('sample.edf')\n", (3501, 3515), False, 'from telluride_decoding import ingest\n'), ((4541, 4559), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (4557, 4559), False, 'import tempfile\n'), ((4686, 4725), 'telluride_decoding.ingest.discover_feature_shapes', 'ingest.discover_feature_shapes', (['tf_file'], {}), 
'(tf_file)\n', (4716, 4725), False, 'from telluride_decoding import ingest\n'), ((4905, 4936), 'telluride_decoding.ingest.count_tfrecords', 'ingest.count_tfrecords', (['tf_file'], {}), '(tf_file)\n', (4927, 4936), False, 'from telluride_decoding import ingest\n'), ((5192, 5213), 'numpy.random.randn', 'np.random.randn', (['(3)', '(5)'], {}), '(3, 5)\n', (5207, 5213), True, 'import numpy as np\n'), ((5222, 5243), 'numpy.random.randn', 'np.random.randn', (['(3)', '(5)'], {}), '(3, 5)\n', (5237, 5243), True, 'import numpy as np\n'), ((5283, 5332), 'telluride_decoding.ingest.find_mean_std', 'ingest.find_mean_std', (['data_list'], {'columnwise': '(False)'}), '(data_list, columnwise=False)\n', (5303, 5332), False, 'from telluride_decoding import ingest\n'), ((5687, 5718), 'telluride_decoding.ingest.find_mean_std', 'ingest.find_mean_std', (['data_list'], {}), '(data_list)\n', (5707, 5718), False, 'from telluride_decoding import ingest\n'), ((5841, 5862), 'numpy.random.randn', 'np.random.randn', (['(3)', '(5)'], {}), '(3, 5)\n', (5856, 5862), True, 'import numpy as np\n'), ((5871, 5892), 'numpy.random.randn', 'np.random.randn', (['(3)', '(5)'], {}), '(3, 5)\n', (5886, 5892), True, 'import numpy as np\n'), ((5932, 5980), 'telluride_decoding.ingest.find_mean_std', 'ingest.find_mean_std', (['data_list'], {'columnwise': '(True)'}), '(data_list, columnwise=True)\n', (5952, 5980), False, 'from telluride_decoding import ingest\n'), ((5999, 6029), 'numpy.concatenate', 'np.concatenate', (['(a, b)'], {'axis': '(0)'}), '((a, b), axis=0)\n', (6013, 6029), True, 'import numpy as np\n'), ((6046, 6089), 'numpy.mean', 'np.mean', (['both_arrays'], {'axis': '(0)', 'keepdims': '(True)'}), '(both_arrays, axis=0, keepdims=True)\n', (6053, 6089), True, 'import numpy as np\n'), ((6105, 6147), 'numpy.std', 'np.std', (['both_arrays'], {'axis': '(0)', 'keepdims': '(True)'}), '(both_arrays, axis=0, keepdims=True)\n', (6111, 6147), True, 'import numpy as np\n'), ((6152, 6201), 
'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['true_mean[0]', 'mean[0]'], {}), '(true_mean[0], mean[0])\n', (6178, 6201), True, 'import numpy as np\n'), ((6206, 6253), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['true_std[0]', 'std[0]'], {}), '(true_std[0], std[0])\n', (6232, 6253), True, 'import numpy as np\n'), ((6378, 6426), 'telluride_decoding.ingest.find_mean_std', 'ingest.find_mean_std', (['data_list'], {'columnwise': '(True)'}), '(data_list, columnwise=True)\n', (6398, 6426), False, 'from telluride_decoding import ingest\n'), ((6664, 6682), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(1)'], {}), '(0, 5, 1)\n', (6673, 6682), True, 'import numpy as np\n'), ((6814, 6887), 'telluride_decoding.ingest.find_temporal_offset_via_linear_regression', 'ingest.find_temporal_offset_via_linear_regression', (['audio_times', 'eeg_times'], {}), '(audio_times, eeg_times)\n', (6863, 6887), False, 'from telluride_decoding import ingest\n'), ((7199, 7229), 'numpy.random.random', 'np.random.random', (['num_triggers'], {}), '(num_triggers)\n', (7215, 7229), True, 'import numpy as np\n'), ((7334, 7364), 'numpy.random.random', 'np.random.random', (['num_triggers'], {}), '(num_triggers)\n', (7350, 7364), True, 'import numpy as np\n'), ((7409, 7485), 'telluride_decoding.ingest.find_temporal_offset_via_mode_histogram', 'ingest.find_temporal_offset_via_mode_histogram', (['atriggers', 'etriggers'], {'fs': '(100)'}), '(atriggers, etriggers, fs=100)\n', (7455, 7485), False, 'from telluride_decoding import ingest\n'), ((7676, 7730), 'numpy.array', 'np.array', (['[1.1, 2.4, 3.5, 6.7, 25.8, 30.4, 87.2, 90.2]'], {}), '([1.1, 2.4, 3.5, 6.7, 25.8, 30.4, 87.2, 90.2])\n', (7684, 7730), True, 'import numpy as np\n'), ((7835, 7895), 'telluride_decoding.ingest.remove_close_times', 'ingest.remove_close_times', (['times'], {'min_time': '(trigger_width * 2)'}), '(times, min_time=trigger_width * 2)\n', (7860, 7895), False, 'from telluride_decoding import 
ingest\n'), ((8145, 8194), 'telluride_decoding.ingest.MemoryBrainDataFile', 'ingest.MemoryBrainDataFile', (['channel_data', 'test_sr'], {}), '(channel_data, test_sr)\n', (8171, 8194), False, 'from telluride_decoding import ingest\n'), ((8256, 8316), 'telluride_decoding.ingest.BrainExperiment.delete_suffix', 'ingest.BrainExperiment.delete_suffix', (['sound_filename', '""".wav"""'], {}), "(sound_filename, '.wav')\n", (8292, 8316), False, 'from telluride_decoding import ingest\n'), ((8384, 8419), 'os.path.join', 'os.path.join', (['self._test_dir', '"""meg"""'], {}), "(self._test_dir, 'meg')\n", (8396, 8419), False, 'import os\n'), ((8437, 8491), 'telluride_decoding.ingest.BrainExperiment', 'ingest.BrainExperiment', (['trial_dict', 'test_dir', 'test_dir'], {}), '(trial_dict, test_dir, test_dir)\n', (8459, 8491), False, 'from telluride_decoding import ingest\n'), ((8903, 8932), 'numpy.random.randn', 'np.random.randn', (['audio_len', '(1)'], {}), '(audio_len, 1)\n', (8918, 8932), True, 'import numpy as np\n'), ((8996, 9014), 'numpy.arange', 'np.arange', (['eeg_len'], {}), '(eeg_len)\n', (9005, 9014), True, 'import numpy as np\n'), ((9108, 9175), 'collections.OrderedDict', 'collections.OrderedDict', (["(('C1', channel_one), ('C2', channel_two))"], {}), "((('C1', channel_one), ('C2', channel_two)))\n", (9131, 9175), False, 'import collections\n'), ((9225, 9271), 'telluride_decoding.ingest.MemoryBrainDataFile', 'ingest.MemoryBrainDataFile', (['eeg_data', 'frame_sr'], {}), '(eeg_data, frame_sr)\n', (9251, 9271), False, 'from telluride_decoding import ingest\n'), ((9461, 9557), 'telluride_decoding.ingest.BrainExperiment', 'ingest.BrainExperiment', (['experiment_dict', 'self._test_dir', 'self._test_dir'], {'frame_rate': 'frame_sr'}), '(experiment_dict, self._test_dir, self._test_dir,\n frame_rate=frame_sr)\n', (9483, 9557), False, 'from telluride_decoding import ingest\n'), ((10335, 10387), 'os.path.join', 'os.path.join', (['tmp_dir', "(trial_two_name + '.tfrecords')"], {}), 
"(tmp_dir, trial_two_name + '.tfrecords')\n", (10347, 10387), False, 'import os\n'), ((10406, 10437), 'telluride_decoding.ingest.count_tfrecords', 'ingest.count_tfrecords', (['tf_file'], {}), '(tf_file)\n', (10428, 10437), False, 'from telluride_decoding import ingest\n'), ((10486, 10516), 'telluride_decoding.ingest.read_tfrecords', 'ingest.read_tfrecords', (['tf_file'], {}), '(tf_file)\n', (10507, 10516), False, 'from telluride_decoding import ingest\n'), ((11038, 11067), 'numpy.random.randn', 'np.random.randn', (['audio_len', '(1)'], {}), '(audio_len, 1)\n', (11053, 11067), True, 'import numpy as np\n'), ((11106, 11129), 'numpy.arange', 'np.arange', (['(2 * frame_sr)'], {}), '(2 * frame_sr)\n', (11115, 11129), True, 'import numpy as np\n'), ((11224, 11291), 'collections.OrderedDict', 'collections.OrderedDict', (["(('C1', channel_one), ('C2', channel_two))"], {}), "((('C1', channel_one), ('C2', channel_two)))\n", (11247, 11291), False, 'import collections\n'), ((11341, 11387), 'telluride_decoding.ingest.MemoryBrainDataFile', 'ingest.MemoryBrainDataFile', (['eeg_data', 'frame_sr'], {}), '(eeg_data, frame_sr)\n', (11367, 11387), False, 'from telluride_decoding import ingest\n'), ((11577, 11673), 'telluride_decoding.ingest.BrainExperiment', 'ingest.BrainExperiment', (['experiment_dict', 'self._test_dir', 'self._test_dir'], {'frame_rate': 'frame_sr'}), '(experiment_dict, self._test_dir, self._test_dir,\n frame_rate=frame_sr)\n', (11599, 11673), False, 'from telluride_decoding import ingest\n'), ((12523, 12575), 'os.path.join', 'os.path.join', (['tmp_dir', "(trial_two_name + '.tfrecords')"], {}), "(tmp_dir, trial_two_name + '.tfrecords')\n", (12535, 12575), False, 'import os\n'), ((12594, 12625), 'telluride_decoding.ingest.count_tfrecords', 'ingest.count_tfrecords', (['tf_file'], {}), '(tf_file)\n', (12616, 12625), False, 'from telluride_decoding import ingest\n'), ((12674, 12704), 'telluride_decoding.ingest.read_tfrecords', 'ingest.read_tfrecords', (['tf_file'], {}), 
'(tf_file)\n', (12695, 12704), False, 'from telluride_decoding import ingest\n'), ((13230, 13274), 'os.path.join', 'os.path.join', (['self._test_dir', 'sound_filename'], {}), '(self._test_dir, sound_filename)\n', (13242, 13274), False, 'import os\n'), ((13796, 13825), 'telluride_decoding.ingest.BrainTrial', 'ingest.BrainTrial', (['trial_name'], {}), '(trial_name)\n', (13813, 13825), False, 'from telluride_decoding import ingest\n'), ((14063, 14091), 'tensorflow.compat.v2.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['tf_dir'], {}), '(tf_dir)\n', (14083, 14091), True, 'import tensorflow.compat.v2 as tf\n'), ((14330, 14391), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["file_data['positive']", 'positive_data'], {}), "(file_data['positive'], positive_data)\n", (14353, 14391), True, 'import numpy as np\n'), ((14396, 14457), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["file_data['negative']", 'negative_data'], {}), "(file_data['negative'], negative_data)\n", (14419, 14457), True, 'import numpy as np\n'), ((14910, 14941), 'telluride_decoding.ingest.read_tfrecords', 'ingest.read_tfrecords', (['new_file'], {}), '(new_file)\n', (14931, 14941), False, 'from telluride_decoding import ingest\n'), ((15024, 15085), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["file_data['positive']", 'positive_data'], {}), "(file_data['positive'], positive_data)\n", (15047, 15085), True, 'import numpy as np\n'), ((15090, 15151), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["file_data['negative']", 'negative_data'], {}), "(file_data['negative'], negative_data)\n", (15113, 15151), True, 'import numpy as np\n'), ((15156, 15216), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["file_data['two']", '(2 * positive_data)'], {}), "(file_data['two'], 2 * positive_data)\n", (15179, 15216), True, 'import numpy as np\n'), ((2056, 2111), 'telluride_decoding.ingest.BrainSignal', 'ingest.BrainSignal', (['(42)', 'test_data', 'test_sr', 
'test_source'], {}), '(42, test_data, test_sr, test_source)\n', (2074, 2111), False, 'from telluride_decoding import ingest\n'), ((2169, 2182), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2178, 2182), True, 'import numpy as np\n'), ((2204, 2217), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2213, 2217), True, 'import numpy as np\n'), ((4585, 4612), 'os.path.join', 'os.path.join', (['tf_dir', '"""meg"""'], {}), "(tf_dir, 'meg')\n", (4597, 4612), False, 'import os\n'), ((5487, 5507), 'numpy.mean', 'np.mean', (['both_arrays'], {}), '(both_arrays)\n', (5494, 5507), True, 'import numpy as np\n'), ((5541, 5560), 'numpy.std', 'np.std', (['both_arrays'], {}), '(both_arrays)\n', (5547, 5560), True, 'import numpy as np\n'), ((5580, 5615), 'telluride_decoding.ingest.normalize_data', 'ingest.normalize_data', (['a', 'mean', 'std'], {}), '(a, mean, std)\n', (5601, 5615), False, 'from telluride_decoding import ingest\n'), ((5634, 5669), 'telluride_decoding.ingest.normalize_data', 'ingest.normalize_data', (['b', 'mean', 'std'], {}), '(b, mean, std)\n', (5655, 5669), False, 'from telluride_decoding import ingest\n'), ((6271, 6306), 'telluride_decoding.ingest.normalize_data', 'ingest.normalize_data', (['a', 'mean', 'std'], {}), '(a, mean, std)\n', (6292, 6306), False, 'from telluride_decoding import ingest\n'), ((6325, 6360), 'telluride_decoding.ingest.normalize_data', 'ingest.normalize_data', (['b', 'mean', 'std'], {}), '(b, mean, std)\n', (6346, 6360), False, 'from telluride_decoding import ingest\n'), ((6467, 6489), 'numpy.zeros_like', 'np.zeros_like', (['mean[0]'], {}), '(mean[0])\n', (6480, 6489), True, 'import numpy as np\n'), ((6541, 6561), 'numpy.ones_like', 'np.ones_like', (['std[0]'], {}), '(std[0])\n', (6553, 6561), True, 'import numpy as np\n'), ((7788, 7821), 'numpy.concatenate', 'np.concatenate', (['(onsets, offsets)'], {}), '((onsets, offsets))\n', (7802, 7821), True, 'import numpy as np\n'), ((7915, 7937), 'numpy.sum', 'np.sum', (['(onsets - 
times)'], {}), '(onsets - times)\n', (7921, 7937), True, 'import numpy as np\n'), ((7991, 8004), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (8000, 8004), True, 'import numpy as np\n'), ((8026, 8039), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (8035, 8039), True, 'import numpy as np\n'), ((9068, 9086), 'numpy.arange', 'np.arange', (['eeg_len'], {}), '(eeg_len)\n', (9077, 9086), True, 'import numpy as np\n'), ((11181, 11204), 'numpy.arange', 'np.arange', (['(2 * frame_sr)'], {}), '(2 * frame_sr)\n', (11190, 11204), True, 'import numpy as np\n'), ((13284, 13315), 'telluride_decoding.ingest.LocalCopy', 'ingest.LocalCopy', (['full_filename'], {}), '(full_filename)\n', (13300, 13315), False, 'from telluride_decoding import ingest\n'), ((13582, 13622), 'numpy.arange', 'np.arange', (['num_samples'], {'dtype': 'np.float32'}), '(num_samples, dtype=np.float32)\n', (13591, 13622), True, 'import numpy as np\n'), ((14222, 14254), 'os.path.join', 'os.path.join', (['tf_dir', 'input_file'], {}), '(tf_dir, input_file)\n', (14234, 14254), False, 'import os\n'), ((14542, 14574), 'os.path.join', 'os.path.join', (['tf_dir', 'input_file'], {}), '(tf_dir, input_file)\n', (14554, 14574), False, 'import os\n'), ((14797, 14848), 'os.path.join', 'os.path.join', (['tf_dir', "(new_trial_name + '.tfrecords')"], {}), "(tf_dir, new_trial_name + '.tfrecords')\n", (14809, 14848), False, 'import os\n'), ((1795, 1808), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (1804, 1808), True, 'import numpy as np\n'), ((5367, 5387), 'numpy.reshape', 'np.reshape', (['a', '(-1,)'], {}), '(a, (-1,))\n', (5377, 5387), True, 'import numpy as np\n'), ((5423, 5443), 'numpy.reshape', 'np.reshape', (['b', '(-1,)'], {}), '(b, (-1,))\n', (5433, 5443), True, 'import numpy as np\n'), ((13696, 13736), 'numpy.arange', 'np.arange', (['num_samples'], {'dtype': 'np.float32'}), '(num_samples, dtype=np.float32)\n', (13705, 13736), True, 'import numpy as np\n'), ((13975, 13999), 'os.environ.get', 
'os.environ.get', (['"""TMPDIR"""'], {}), "('TMPDIR')\n", (13989, 13999), False, 'import os\n'), ((1549, 1579), 'numpy.reshape', 'np.reshape', (['test_data', '(-1, 1)'], {}), '(test_data, (-1, 1))\n', (1559, 1579), True, 'import numpy as np\n'), ((10645, 10687), 'numpy.reshape', 'np.reshape', (['channel_one[:eeg_len]', '(-1, 1)'], {}), '(channel_one[:eeg_len], (-1, 1))\n', (10655, 10687), True, 'import numpy as np\n'), ((10784, 10826), 'numpy.reshape', 'np.reshape', (['channel_two[:eeg_len]', '(-1, 1)'], {}), '(channel_two[:eeg_len], (-1, 1))\n', (10794, 10826), True, 'import numpy as np\n'), ((12899, 12942), 'numpy.reshape', 'np.reshape', (['channel_one[frame_sr:]', '(-1, 1)'], {}), '(channel_one[frame_sr:], (-1, 1))\n', (12909, 12942), True, 'import numpy as np\n'), ((13039, 13082), 'numpy.reshape', 'np.reshape', (['channel_two[frame_sr:]', '(-1, 1)'], {}), '(channel_two[frame_sr:], (-1, 1))\n', (13049, 13082), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Built-in imports
import sys
# Own imports
import baxter_essentials.baxter_class as bc
# General module imports
import numpy as np
import rospy
import baxter_interface
from std_msgs.msg import (
String
)
from geometry_msgs.msg import (
Pose
)
from sensor_msgs.msg import (
JointState
)
from baxter_core_msgs.msg import (
JointCommand
)
class NodeProportionalControlFromFaceCoordinates:
    """
    ROS Node that subscribes to the face_coordinates publisher and enables the
    baxter_interface control method to apply a proportional-control action
    command to move the Baxter's right limb to specific position-orientation.

    :param rospy_rate: integer that defines the frequency for the ROS nodes.
    """

    # Right-limb joint names in the order expected by the JointCommand
    # message; shared by both publishing methods to avoid duplication.
    RIGHT_JOINT_NAMES = [
        "right_s0",
        "right_s1",
        "right_e0",
        "right_e1",
        "right_w0",
        "right_w1",
        "right_w2"
    ]

    def __init__(self, rospy_rate):
        self.define_rotation_matrix()
        self.rate = rospy.Rate(rospy_rate)
        # Initial right limb matrix as "default" value
        self.tm_w0_tool = bc.BaxterClass().TM_right_limb_home
        # Initial current_position_vector as "default" value
        self.current_position_vector = np.array([0, 0, 0]).reshape((3, 1))
        # Joint states are unknown until the first /robot/joint_states
        # message arrives; publish_current_joint_angles checks for this
        # (previously the attribute did not exist and the control loop
        # crashed with AttributeError before the first callback).
        self.joint_states = None
        _fsm_sub = rospy.Subscriber(
            'user/fsm',
            String,
            self.update_fsm_callback,
            queue_size=1
        )
        self.state = "stop"
        _face_coordinates_sub = rospy.Subscriber(
            'user/face_coordinates',
            Pose,
            self.update_coordinates_callback,
            queue_size=1
        )
        _joint_states_sub = rospy.Subscriber(
            '/robot/joint_states',
            JointState,
            self.joint_states_callback,
            queue_size=1
        )
        self._pub_joint_control_values = rospy.Publisher(
            'user/joint_control_values',
            JointCommand,
            queue_size=1
        )

    def update_fsm_callback(self, std_string):
        """
        Receive the callback function from the node that publishes the fsm as
        a "String" std_msgs. This enables the node to keep updating the
        Finite State Machine values for executing the "open_loop_control".

        :param std_string: current fsm message with a standard
            "String" format from "std_msgs.msg".
        """
        self.state = std_string.data
        print(self.state)

    def joint_states_callback(self, event):
        """
        Callback to get current joint_states angles for Baxter robot.

        :param event: "JointState" message from the "/robot/joint_states"
            topic; its position vector is re-mapped per limb below.
        """
        baxter_angles = event.position
        # Map the flat joint_states vector to each limb's
        # s0, s1, e0, e1, w0, w1, w2 order.
        self.joint_states = {
            'right': [
                baxter_angles[11],
                baxter_angles[12],
                baxter_angles[9],
                baxter_angles[10],
                baxter_angles[13],
                baxter_angles[14],
                baxter_angles[15]
            ],
            'left': [
                baxter_angles[4],
                baxter_angles[5],
                baxter_angles[2],
                baxter_angles[3],
                baxter_angles[6],
                baxter_angles[7],
                baxter_angles[8]
            ]
        }

    def define_rotation_matrix(self):
        """
        This method defines a constant attribute for the right limb correct
        orientation in the feeding process. This matrix was found by empiric
        experiments with Baxter.
        """
        self.ROTATION_MATRIX = np.array(
            [
                [-0.04483493, 0.99897278, -0.00657433],
                [-0.15247979, -0.01334699, -0.98821646],
                [-0.98728909, -0.04330416, 0.15292157]
            ]
        )

    def update_coordinates_callback(self, geometry_pose):
        """
        Receive the callback function from the node that publishes the
        face_coordinates as a "Pose" geometry message, and refresh the target
        transformation matrix used by the proportional-control algorithms.

        :param geometry_pose: current face_coordinates message with a standard
            "Pose" format from "geometry_msgs.msg".
        """
        self.current_position_vector = np.array(
            [
                geometry_pose.position.x,
                geometry_pose.position.y,
                geometry_pose.position.z
            ]
        ).reshape((3, 1))
        self.tm_w0_tool = self.create_tm_structure_from_pose_and_rotation()
        print(self.tm_w0_tool)

    def create_tm_structure_from_pose_and_rotation(self):
        """
        Create a Homogeneous Transformation Matrix from a rotation matrix and
        a position vector.

        :returns: transformation matrix numpy array of size (4x4).
        """
        # Create a matrix (3x4) from rotation matrix and position vector
        tm_top_part = np.concatenate(
            [self.ROTATION_MATRIX, self.current_position_vector], 1
        )
        # Append the homogeneous bottom row [0, 0, 0, 1]
        lower_part_array = np.array([[0, 0, 0, 1]])
        return np.concatenate([tm_top_part, lower_part_array], 0)

    def update_control_joint_values(self):
        """
        Compute the joint values for Baxter's right limb from the current
        target transformation matrix via inverse kinematics.
        """
        # Get current joint values from Baxter right limb
        b1 = bc.BaxterClass()
        self.control_joints_values = b1.ipk(self.tm_w0_tool, 'right', 'up')

    def execute_control(self):
        """
        Execute main control loop for Baxter's right arm.
        """
        while not rospy.is_shutdown():
            if (self.state == "open_loop"):
                # A zero x-coordinate means no face has been detected yet.
                if (self.current_position_vector[0] != 0):
                    print("Face detected")
                    self.update_control_joint_values()
                    self.publish_control_joint_commands()
                    # self.rate.sleep()
                else:
                    print("Face NOT detected")
            else:
                self.publish_current_joint_angles()

    def _build_right_limb_command(self, joint_values):
        """
        Build a POSITION_MODE 'JointCommand' message for the right limb.

        :param joint_values: list of 7 joint angles in RIGHT_JOINT_NAMES order.
        :returns: populated 'JointCommand' message.
        """
        cmd = JointCommand()
        cmd.mode = JointCommand.POSITION_MODE
        cmd.names = list(self.RIGHT_JOINT_NAMES)
        cmd.command = joint_values
        return cmd

    def publish_control_joint_commands(self):
        """
        Publish 'JointCommand' topic with the desired joint-values for each of
        Baxter's right limb based on the open loop control.
        """
        cmd = self._build_right_limb_command(self.control_joints_values)
        self._pub_joint_control_values.publish(cmd)

    def publish_current_joint_angles(self):
        """
        Publish 'JointCommand' topic with the current joint-values for each of
        Baxter's right limb based on the open loop control.
        """
        # Nothing valid to echo until the first /robot/joint_states callback.
        if self.joint_states is None:
            return
        cmd = self._build_right_limb_command(self.joint_states["right"])
        self._pub_joint_control_values.publish(cmd)
def main():
    """Entry point: initialize the ROS node and run the control loop."""
    print("Initializing node... ")
    rospy.init_node('open_loop_control')
    control_node = NodeProportionalControlFromFaceCoordinates(100)
    control_node.execute_control()
    return 0


if __name__ == '__main__':
    sys.exit(main())
| [
"rospy.Subscriber",
"rospy.Publisher",
"rospy.Rate",
"baxter_essentials.baxter_class.BaxterClass",
"rospy.is_shutdown",
"numpy.array",
"rospy.init_node",
"baxter_core_msgs.msg.JointCommand",
"numpy.concatenate"
] | [((7243, 7279), 'rospy.init_node', 'rospy.init_node', (['"""open_loop_control"""'], {}), "('open_loop_control')\n", (7258, 7279), False, 'import rospy\n'), ((857, 879), 'rospy.Rate', 'rospy.Rate', (['rospy_rate'], {}), '(rospy_rate)\n', (867, 879), False, 'import rospy\n'), ((1155, 1231), 'rospy.Subscriber', 'rospy.Subscriber', (['"""user/fsm"""', 'String', 'self.update_fsm_callback'], {'queue_size': '(1)'}), "('user/fsm', String, self.update_fsm_callback, queue_size=1)\n", (1171, 1231), False, 'import rospy\n'), ((1351, 1451), 'rospy.Subscriber', 'rospy.Subscriber', (['"""user/face_coordinates"""', 'Pose', 'self.update_coordinates_callback'], {'queue_size': '(1)'}), "('user/face_coordinates', Pose, self.\n update_coordinates_callback, queue_size=1)\n", (1367, 1451), False, 'import rospy\n'), ((1534, 1632), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/robot/joint_states"""', 'JointState', 'self.joint_states_callback'], {'queue_size': '(1)'}), "('/robot/joint_states', JointState, self.\n joint_states_callback, queue_size=1)\n", (1550, 1632), False, 'import rospy\n'), ((1728, 1800), 'rospy.Publisher', 'rospy.Publisher', (['"""user/joint_control_values"""', 'JointCommand'], {'queue_size': '(1)'}), "('user/joint_control_values', JointCommand, queue_size=1)\n", (1743, 1800), False, 'import rospy\n'), ((3388, 3523), 'numpy.array', 'np.array', (['[[-0.04483493, 0.99897278, -0.00657433], [-0.15247979, -0.01334699, -\n 0.98821646], [-0.98728909, -0.04330416, 0.15292157]]'], {}), '([[-0.04483493, 0.99897278, -0.00657433], [-0.15247979, -0.01334699,\n -0.98821646], [-0.98728909, -0.04330416, 0.15292157]])\n', (3396, 3523), True, 'import numpy as np\n'), ((4780, 4851), 'numpy.concatenate', 'np.concatenate', (['[self.ROTATION_MATRIX, self.current_position_vector]', '(1)'], {}), '([self.ROTATION_MATRIX, self.current_position_vector], 1)\n', (4794, 4851), True, 'import numpy as np\n'), ((4966, 4990), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', 
(4974, 4990), True, 'import numpy as np\n'), ((5007, 5057), 'numpy.concatenate', 'np.concatenate', (['[tm_top_part, lower_part_array]', '(0)'], {}), '([tm_top_part, lower_part_array], 0)\n', (5021, 5057), True, 'import numpy as np\n'), ((5338, 5354), 'baxter_essentials.baxter_class.BaxterClass', 'bc.BaxterClass', ([], {}), '()\n', (5352, 5354), True, 'import baxter_essentials.baxter_class as bc\n'), ((6246, 6260), 'baxter_core_msgs.msg.JointCommand', 'JointCommand', ([], {}), '()\n', (6258, 6260), False, 'from baxter_core_msgs.msg import JointCommand\n'), ((6829, 6843), 'baxter_core_msgs.msg.JointCommand', 'JointCommand', ([], {}), '()\n', (6841, 6843), False, 'from baxter_core_msgs.msg import JointCommand\n'), ((962, 978), 'baxter_essentials.baxter_class.BaxterClass', 'bc.BaxterClass', ([], {}), '()\n', (976, 978), True, 'import baxter_essentials.baxter_class as bc\n'), ((5563, 5582), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (5580, 5582), False, 'import rospy\n'), ((1099, 1118), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (1107, 1118), True, 'import numpy as np\n'), ((4133, 4226), 'numpy.array', 'np.array', (['[geometry_pose.position.x, geometry_pose.position.y, geometry_pose.position.z]'], {}), '([geometry_pose.position.x, geometry_pose.position.y, geometry_pose\n .position.z])\n', (4141, 4226), True, 'import numpy as np\n')] |
# Created by Martin.cz
# Copyright (c) <NAME>. All rights reserved.

import numpy
import pero
import perrot

# Sample the sine function over two full periods.
x_data = numpy.linspace(-2 * numpy.pi, 2 * numpy.pi, 100)
y_data = numpy.sin(x_data)

# Express the x-axis in multiples of pi for nicer labels.
x_data /= numpy.pi

# Build the plot with axis titles.
plot = perrot.plot.Plot(
    x_axis_title="pi",
    y_axis_title="f(x)")

# Create the profile series and attach it.
series = perrot.plot.Profile(
    x=x_data,
    y=y_data,
    base=0,
    title="sin(x)",
    steps=pero.LINE_STEP.NONE,
    marker_line_color="white",
    show_area=True)

plot.plot(series)

# Fit the view to the data and display it.
plot.zoom()
plot.view("Profile Series")
| [
"perrot.plot.Profile",
"numpy.sin",
"numpy.linspace",
"perrot.plot.Plot"
] | [((135, 183), 'numpy.linspace', 'numpy.linspace', (['(-2 * numpy.pi)', '(2 * numpy.pi)', '(100)'], {}), '(-2 * numpy.pi, 2 * numpy.pi, 100)\n', (149, 183), False, 'import numpy\n'), ((193, 210), 'numpy.sin', 'numpy.sin', (['x_data'], {}), '(x_data)\n', (202, 210), False, 'import numpy\n'), ((250, 306), 'perrot.plot.Plot', 'perrot.plot.Plot', ([], {'x_axis_title': '"""pi"""', 'y_axis_title': '"""f(x)"""'}), "(x_axis_title='pi', y_axis_title='f(x)')\n", (266, 306), False, 'import perrot\n'), ((339, 477), 'perrot.plot.Profile', 'perrot.plot.Profile', ([], {'x': 'x_data', 'y': 'y_data', 'base': '(0)', 'title': '"""sin(x)"""', 'steps': 'pero.LINE_STEP.NONE', 'marker_line_color': '"""white"""', 'show_area': '(True)'}), "(x=x_data, y=y_data, base=0, title='sin(x)', steps=pero.\n LINE_STEP.NONE, marker_line_color='white', show_area=True)\n", (358, 477), False, 'import perrot\n')] |
import cv2
import os
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import colorsys
from Utils.GeometryUtils import compute_two_points_angle, get_coordinates_of_rotated_box
# Directory containing this module; used to locate the bundled font file.
current_directory = os.path.dirname(__file__)
# Bundled TrueType font file name (a CJK-capable font shipped with the code).
candidate_font = '田氏颜体大字库2.0.ttf'
# Font instance used for all text annotations drawn via PIL (fixed 21 px size).
annotate_font = ImageFont.truetype(os.path.join(current_directory, candidate_font), size=21)
def generate_colors(_color_num):
    """
    Generate a list of visually distinct colors.

    Args:
        _color_num: number of colors to produce
    Returns: list of [b, g, r] colors with channels in 0-255; hues repeat
        after 50 entries, so at most 50 distinct colors exist.
    """
    hues = [(m_index % 50) * 0.02 for m_index in range(_color_num)]
    rgb_triplets = [colorsys.hsv_to_rgb(m_hue, 1, 1) for m_hue in hues]
    return [[int(b * 255), int(g * 255), int(r * 255)] for (r, g, b) in rgb_triplets]
def annotation_multi_horizon_line_on_image(_img, _y_list, _line_color, _line_thickness=4):
    """
    Draw one semi-transparent horizontal line per y value.

    :param _img: image to annotate
    :param _y_list: row coordinates of the horizontal lines
    :param _line_color: line color (OpenCV convention)
    :param _line_thickness: line thickness
    :return: copy of _img with the lines blended in at 50% opacity
    """
    to_return_img = _img.copy()
    for m_y in _y_list:
        # Span the full image width: shape[1] is the width. (The original
        # used shape[0], the height, which truncates or overshoots the line
        # on non-square images.)
        cv2.line(to_return_img, (0, m_y), (_img.shape[1] - 1, m_y), _line_color, thickness=_line_thickness)
    return cv2.addWeighted(to_return_img, 0.5, _img, 0.5, 0)
def annotation_horizon_line_on_image(_img, _y, _line_color, _line_thickness=4):
    """Draw a single semi-transparent horizontal line at row _y (see the multi-line variant)."""
    return annotation_multi_horizon_line_on_image(_img, [_y], _line_color, _line_thickness=_line_thickness)
def annotate_bounding_box_on_image(_img, _boxes, _specific_color, _with_index=False, _thickness=4):
    """
    Draw axis-aligned bounding boxes on a copy of the image.

    :param _img: image to annotate
    :param _boxes: boxes as (x1, y1, x2, y2) pixel coordinates
    :param _specific_color: box color (OpenCV convention)
    :param _with_index: when True, print each box's index near its top-left corner
    :param _thickness: box outline thickness
    :return: annotated copy of the image
    """
    annotated = _img.copy()
    for m_index, m_box in enumerate(_boxes):
        top_left = (m_box[0], m_box[1])
        bottom_right = (m_box[2], m_box[3])
        cv2.rectangle(annotated, top_left, bottom_right, _specific_color,
                      thickness=_thickness)
        if _with_index:
            cv2.putText(annotated,
                        f'{m_index}',
                        (m_box[0] + 5, m_box[1] + 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 1,
                        _specific_color
                        )
    return annotated
def annotate_circle_on_image(_to_annotate_image, _points, _specific_color, _radius=8, _thickness=2):
    """
    Draw circles on the image in place.

    Args:
        _to_annotate_image: image to annotate (modified in place)
        _points: circle centers as (x, y) pairs normalized to [0, 1]
        _specific_color: circle color (OpenCV convention)
        _radius: circle radius in pixels (OpenCV convention)
        _thickness: outline thickness (OpenCV convention)
    """
    height, width = _to_annotate_image.shape[:2]
    for m_point in _points:
        # Scale the normalized coordinates to pixel positions.
        center = (int(m_point[0] * width), int(m_point[1] * height))
        cv2.circle(_to_annotate_image, center, _radius, _specific_color,
                   thickness=_thickness)
def annotate_polygon_on_image(_img, _polygon, _specific_color, _is_transparent=True):
    """
    Annotate a filled polygon region on the image.

    :param _img: image to annotate
    :param _polygon: polygon vertices; a plain list is treated as normalized
        coordinates and scaled to pixel positions
    :param _specific_color: fill color
    :param _is_transparent: when True, blend the fill at 50% opacity
    :return: annotated copy of the image
    """
    height, width = _img.shape[:2]
    filled = _img.copy()
    if isinstance(_polygon, list):
        _polygon = (np.array(_polygon) * (width, height)).astype(np.int32)
    cv2.fillPoly(filled, [_polygon, ], _specific_color)
    if not _is_transparent:
        return filled
    return cv2.addWeighted(filled, 0.5, _img, 0.5, 0)
def __annotation_text_on_image(_img, _text_start_position, _text_color, _text):
    """Render (possibly multiline) text onto the image via PIL and return a numpy array."""
    pil_image = Image.fromarray(_img)
    drawer = ImageDraw.Draw(pil_image)
    drawer.multiline_text(_text_start_position, _text, fill=_text_color, font=annotate_font)
    return np.asarray(pil_image)
def annotation_angle_on_image(_img, _start_point, _middle_point, _end_point, _line_color, _text_color, _angle):
    """
    Draw an angle marker (two rays, an arc and the angle value) on the image.

    :param _img: image to annotate
    :param _start_point: start point (clockwise)
    :param _middle_point: vertex of the angle
    :param _end_point: end point (clockwise)
    :param _line_color: line color
    :param _text_color: text color
    :param _angle: angle value to display
    :return: annotated copy of the image
    """
    to_return_img = _img.copy()
    # Two rays from the vertex through the start and end points, plus a
    # small green dot on the vertex itself.
    cv2.line(to_return_img, (_start_point[0], _start_point[1]), (_middle_point[0], _middle_point[1]), _line_color, 2)
    cv2.line(to_return_img, (_middle_point[0], _middle_point[1]), (_end_point[0], _end_point[1]), _line_color, 2)
    cv2.circle(to_return_img, (_middle_point[0], _middle_point[1]), 3, (0, 255, 0), 3)
    # Absolute direction (degrees) of each ray as seen from the vertex.
    angle_1 = compute_two_points_angle(_middle_point, _start_point)
    angle_2 = compute_two_points_angle(_middle_point, _end_point)
    start_angle = 0
    if angle_2 < angle_1:
        # The sweep crosses the zero-degree direction: rotate the ellipse
        # frame by angle_1 so the arc still runs start ray -> end ray.
        angle_2 = angle_2 + 360 - angle_1
        start_angle = angle_1
        angle_1 = 0
    # Arc of radius 15 px between the two rays.
    cv2.ellipse(to_return_img, (_middle_point[0], _middle_point[1]), (15, 15), start_angle, angle_1, angle_2,
                _line_color, 2)
    # Print the numeric angle just beside the vertex.
    to_return_img = __annotation_text_on_image(to_return_img, (_middle_point[0] + 5, _middle_point[1] + 5),
                                               _text_color, str(_angle))
    return to_return_img
def annotation_multi_horizon_width(_img, _y, _x_list, _line_color, _text_color, _text_list,
                                   _thickness=1,
                                   _with_arrow=True):
    """
    Annotate several horizontal width measurements along one row.

    :param _img: image to annotate
    :param _y: row (height) at which the measurement lines are drawn
    :param _x_list: x coordinates of all segment endpoints (len(_text_list) + 1 values)
    :param _line_color: line color (bgr)
    :param _text_color: text color (bgr)
    :param _text_list: text to display for each segment
    :param _thickness: line thickness
    :param _with_arrow: whether each segment is drawn with arrowheads on both ends
    :return: annotated image
    """
    assert len(_x_list) - 1 == len(_text_list), '线段数与字符串数不匹配'
    to_return_img = _img.copy()
    # Drawing plan:
    # 1. one double-headed (or plain) line per segment
    # 2. the label text for each segment
    # 3. a short vertical tick at every endpoint
    for m_index, (m_start_x, m_end_x, m_text) in enumerate(zip(_x_list[:-1], _x_list[1:], _text_list)):
        if _with_arrow:
            # Draw twice, once per direction, to get an arrowhead at each end.
            cv2.arrowedLine(to_return_img, (m_start_x, _y), (m_end_x, _y), _line_color, thickness=_thickness)
            cv2.arrowedLine(to_return_img, (m_end_x, _y), (m_start_x, _y), _line_color, thickness=_thickness)
        else:
            # NOTE(review): the second call repaints the same segment (a plain
            # line has no direction); looks copy-pasted from the arrow branch
            # and is visually a no-op.
            cv2.line(to_return_img, (m_start_x, _y), (m_end_x, _y), _line_color, thickness=_thickness)
            cv2.line(to_return_img, (m_end_x, _y), (m_start_x, _y), _line_color, thickness=_thickness)
        # Label at the left end of the segment, alternating above/below the
        # line so adjacent labels do not overlap.
        text_start_x = m_start_x
        text_start_y = _y + (10 if m_index % 2 == 0 else -annotate_font.size - 10)
        to_return_img = __annotation_text_on_image(to_return_img, (text_start_x, text_start_y), _text_color, m_text)
    for m_x in _x_list:
        # End ticks: short vertical dashes at every endpoint.
        cv2.line(to_return_img, (m_x, _y - 12), (m_x, _y + 12), _line_color, thickness=_thickness)
    return to_return_img
def annotation_horizon_width(_img, _y, _start_x, _end_x, _line_color, _text_color, _text):
    """
    Annotate a single horizontal width measurement.

    :param _img: image to annotate
    :param _y: row (height) at which the measurement line is drawn
    :param _start_x: starting x
    :param _end_x: ending x
    :param _line_color: line color (bgr)
    :param _text_color: text color (bgr)
    :param _text: text to display
    :return: annotated image
    """
    # Delegate to the multi-segment variant with a single segment.
    return annotation_multi_horizon_width(_img, _y, [_start_x, _end_x], _line_color, _text_color, [_text])
def annotation_multi_vertical_height(_img, _x, _y_list, _line_color, _text_color, _text_list,
                                     _thickness=1,
                                     _with_arrow=True):
    """
    Annotate several vertical height measurements along one column.

    :param _img: image to annotate
    :param _x: column (width) at which the measurement lines are drawn
    :param _y_list: y coordinates of all segment endpoints (len(_text_list) + 1 values)
    :param _line_color: line color (bgr)
    :param _text_color: text color (bgr)
    :param _text_list: text to display for each segment
    :param _thickness: line thickness
    :param _with_arrow: whether each segment is drawn with arrowheads on both ends
    :return: annotated image
    """
    assert len(_y_list) - 1 == len(_text_list), '线段数与字符串数不匹配'
    to_return_img = _img.copy()
    # Drawing plan:
    # 1. one double-headed (or plain) line per segment
    # 2. the label text for each segment
    # 3. a short horizontal tick at every endpoint
    for m_start_y, m_end_y, m_text in zip(_y_list[:-1], _y_list[1:], _text_list):
        if _with_arrow:
            # Draw twice, once per direction, to get an arrowhead at each end.
            cv2.arrowedLine(to_return_img, (_x, m_start_y), (_x, m_end_y), _line_color, thickness=_thickness)
            cv2.arrowedLine(to_return_img, (_x, m_end_y), (_x, m_start_y), _line_color, thickness=_thickness)
        else:
            # NOTE(review): the second call repaints the same segment (a plain
            # line has no direction); looks copy-pasted from the arrow branch
            # and is visually a no-op.
            cv2.line(to_return_img, (_x, m_start_y), (_x, m_end_y), _line_color, thickness=_thickness)
            cv2.line(to_return_img, (_x, m_end_y), (_x, m_start_y), _line_color, thickness=_thickness)
        # Label to the right of the line, vertically centered on the segment.
        text_start_x = _x + 10
        text_start_y = m_start_y + (m_end_y - m_start_y) // 2
        to_return_img = __annotation_text_on_image(to_return_img, (text_start_x, text_start_y), _text_color, m_text)
    for m_y in _y_list:
        # End ticks: short horizontal dashes at every endpoint.
        cv2.line(to_return_img, (_x - 12, m_y), (_x + 12, m_y), _line_color, thickness=_thickness)
    return to_return_img
def annotation_vertical_height(_img, _x, _start_y, _end_y, _line_color, _text_color, _text):
    """Annotate a single vertical height measurement (single-segment wrapper)."""
    segment_texts = [_text]
    return annotation_multi_vertical_height(_img, _x, [_start_y, _end_y], _line_color, _text_color, segment_texts)
def draw_rotated_bbox(_to_draw_image: np.ndarray, _rotated_box: dict, _color: tuple, _thickness: int):
    """
    Draw a rotated rectangle on the image in place.

    Args:
        _to_draw_image: image to annotate (modified in place)
        _rotated_box: rotated box dict (center_x, center_y, box_width, box_height, degree)
        _color: outline color (OpenCV convention)
        _thickness: outline thickness (OpenCV convention)
    """
    corner_points = get_coordinates_of_rotated_box(_to_draw_image, _rotated_box)
    cv2.polylines(_to_draw_image, [corner_points, ], True, _color, _thickness)
def annotate_segmentation(
        _to_draw_image,
        _segmentation_result,
        _background_index=0,
):
    """
    Overlay a segmentation result on the image.

    Args:
        _to_draw_image: image to annotate
        _segmentation_result: per-pixel class-label map
        _background_index: label treated as background (left unpainted)
    Returns: the image blended 50/50 with the colored class mask
    """
    h, w = _to_draw_image.shape[:2]
    if _to_draw_image.shape[:2] != _segmentation_result.shape[:2]:
        # Nearest-neighbour keeps label values intact when resizing.
        # BUG FIX: the original passed cv2.INTER_NEAREST positionally, which
        # lands in cv2.resize's optional `dst` parameter, so the flag was
        # never applied as the interpolation mode.
        _segmentation_result = cv2.resize(
            _segmentation_result, (w, h), interpolation=cv2.INTER_NEAREST)
    distinct_index = np.sort(np.unique(_segmentation_result), axis=None)
    candidate_colors = generate_colors(len(distinct_index))
    mask_result_image = _to_draw_image.copy()
    for m_index, m_candidate_color in zip(distinct_index.tolist(), candidate_colors):
        if m_index == _background_index:
            # Leave background pixels untouched.
            continue
        m_index_segment_result = _segmentation_result == m_index
        # Paint every pixel of this class with its color on all 3 channels.
        np.putmask(mask_result_image, np.repeat(m_index_segment_result[..., None], 3, axis=-1), m_candidate_color)
    add_weighted_result_image = cv2.addWeighted(_to_draw_image, 0.5, mask_result_image, 0.5, 0)
    return add_weighted_result_image
def annotate_detect_rotated_bbox_and_text_result(
        _to_draw_image,
        _rotated_box_list, _text_list,
        _box_color, _box_thickness
):
    """
    Draw rotated boxes and their recognized text on the image.

    Args:
        _to_draw_image: image to annotate
        _rotated_box_list: list of rotated boxes (normalized center coordinates)
        _text_list: list of text results, each carrying a 'text' field
        _box_color: box color
        _box_thickness: box outline thickness
    Returns: annotated copy of the image
    """
    annotated_image = _to_draw_image.copy()
    image_h, image_w = annotated_image.shape[:2]
    for m_box, m_text in zip(_rotated_box_list, _text_list):
        draw_rotated_bbox(annotated_image, m_box, _box_color, _box_thickness)
        # Place the recognized text at the box center (scaled to pixels).
        center_pixel = (int(m_box['center_x'] * image_w), int(m_box['center_y'] * image_h))
        annotated_image = __annotation_text_on_image(annotated_image,
                                                    center_pixel,
                                                    (0, 255, 0),
                                                    m_text['text'])
    return annotated_image
| [
"cv2.fillPoly",
"cv2.ellipse",
"cv2.rectangle",
"os.path.join",
"numpy.unique",
"cv2.line",
"os.path.dirname",
"Utils.GeometryUtils.compute_two_points_angle",
"PIL.ImageDraw.Draw",
"Utils.GeometryUtils.get_coordinates_of_rotated_box",
"cv2.resize",
"numpy.repeat",
"cv2.circle",
"numpy.asar... | [((210, 235), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (225, 235), False, 'import os\n'), ((305, 352), 'os.path.join', 'os.path.join', (['current_directory', 'candidate_font'], {}), '(current_directory, candidate_font)\n', (317, 352), False, 'import os\n'), ((1031, 1080), 'cv2.addWeighted', 'cv2.addWeighted', (['to_return_img', '(0.5)', '_img', '(0.5)', '(0)'], {}), '(to_return_img, 0.5, _img, 0.5, 0)\n', (1046, 1080), False, 'import cv2\n'), ((2980, 3036), 'cv2.fillPoly', 'cv2.fillPoly', (['to_return_img', '[_polygon]', '_specific_color'], {}), '(to_return_img, [_polygon], _specific_color)\n', (2992, 3036), False, 'import cv2\n'), ((3258, 3279), 'PIL.Image.fromarray', 'Image.fromarray', (['_img'], {}), '(_img)\n', (3273, 3279), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3300, 3323), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img_pil'], {}), '(img_pil)\n', (3314, 3323), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3444, 3463), 'numpy.asarray', 'np.asarray', (['img_pil'], {}), '(img_pil)\n', (3454, 3463), True, 'import numpy as np\n'), ((3899, 4017), 'cv2.line', 'cv2.line', (['to_return_img', '(_start_point[0], _start_point[1])', '(_middle_point[0], _middle_point[1])', '_line_color', '(2)'], {}), '(to_return_img, (_start_point[0], _start_point[1]), (_middle_point[\n 0], _middle_point[1]), _line_color, 2)\n', (3907, 4017), False, 'import cv2\n'), ((4017, 4131), 'cv2.line', 'cv2.line', (['to_return_img', '(_middle_point[0], _middle_point[1])', '(_end_point[0], _end_point[1])', '_line_color', '(2)'], {}), '(to_return_img, (_middle_point[0], _middle_point[1]), (_end_point[0\n ], _end_point[1]), _line_color, 2)\n', (4025, 4131), False, 'import cv2\n'), ((4131, 4217), 'cv2.circle', 'cv2.circle', (['to_return_img', '(_middle_point[0], _middle_point[1])', '(3)', '(0, 255, 0)', '(3)'], {}), '(to_return_img, (_middle_point[0], _middle_point[1]), 3, (0, 255,\n 0), 3)\n', (4141, 4217), False, 
'import cv2\n'), ((4228, 4281), 'Utils.GeometryUtils.compute_two_points_angle', 'compute_two_points_angle', (['_middle_point', '_start_point'], {}), '(_middle_point, _start_point)\n', (4252, 4281), False, 'from Utils.GeometryUtils import compute_two_points_angle, get_coordinates_of_rotated_box\n'), ((4296, 4347), 'Utils.GeometryUtils.compute_two_points_angle', 'compute_two_points_angle', (['_middle_point', '_end_point'], {}), '(_middle_point, _end_point)\n', (4320, 4347), False, 'from Utils.GeometryUtils import compute_two_points_angle, get_coordinates_of_rotated_box\n'), ((4490, 4615), 'cv2.ellipse', 'cv2.ellipse', (['to_return_img', '(_middle_point[0], _middle_point[1])', '(15, 15)', 'start_angle', 'angle_1', 'angle_2', '_line_color', '(2)'], {}), '(to_return_img, (_middle_point[0], _middle_point[1]), (15, 15),\n start_angle, angle_1, angle_2, _line_color, 2)\n', (4501, 4615), False, 'import cv2\n'), ((9048, 9108), 'Utils.GeometryUtils.get_coordinates_of_rotated_box', 'get_coordinates_of_rotated_box', (['_to_draw_image', '_rotated_box'], {}), '(_to_draw_image, _rotated_box)\n', (9078, 9108), False, 'from Utils.GeometryUtils import compute_two_points_angle, get_coordinates_of_rotated_box\n'), ((9113, 9186), 'cv2.polylines', 'cv2.polylines', (['_to_draw_image', '[rotated_points]', '(True)', '_color', '_thickness'], {}), '(_to_draw_image, [rotated_points], True, _color, _thickness)\n', (9126, 9186), False, 'import cv2\n'), ((10210, 10273), 'cv2.addWeighted', 'cv2.addWeighted', (['_to_draw_image', '(0.5)', 'mask_result_image', '(0.5)', '(0)'], {}), '(_to_draw_image, 0.5, mask_result_image, 0.5, 0)\n', (10225, 10273), False, 'import cv2\n'), ((620, 656), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['hue_value', '(1)', '(1)'], {}), '(hue_value, 1, 1)\n', (639, 656), False, 'import colorsys\n'), ((920, 1023), 'cv2.line', 'cv2.line', (['to_return_img', '(0, m_y)', '(_img.shape[0] - 1, m_y)', '_line_color'], {'thickness': '_line_thickness'}), '(to_return_img, (0, m_y), 
(_img.shape[0] - 1, m_y), _line_color,\n thickness=_line_thickness)\n', (928, 1023), False, 'import cv2\n'), ((3087, 3136), 'cv2.addWeighted', 'cv2.addWeighted', (['to_return_img', '(0.5)', '_img', '(0.5)', '(0)'], {}), '(to_return_img, 0.5, _img, 0.5, 0)\n', (3102, 3136), False, 'import cv2\n'), ((6343, 6437), 'cv2.line', 'cv2.line', (['to_return_img', '(m_x, _y - 12)', '(m_x, _y + 12)', '_line_color'], {'thickness': '_thickness'}), '(to_return_img, (m_x, _y - 12), (m_x, _y + 12), _line_color,\n thickness=_thickness)\n', (6351, 6437), False, 'import cv2\n'), ((8370, 8464), 'cv2.line', 'cv2.line', (['to_return_img', '(_x - 12, m_y)', '(_x + 12, m_y)', '_line_color'], {'thickness': '_thickness'}), '(to_return_img, (_x - 12, m_y), (_x + 12, m_y), _line_color,\n thickness=_thickness)\n', (8378, 8464), False, 'import cv2\n'), ((9611, 9670), 'cv2.resize', 'cv2.resize', (['_segmentation_result', '(w, h)', 'cv2.INTER_NEAREST'], {}), '(_segmentation_result, (w, h), cv2.INTER_NEAREST)\n', (9621, 9670), False, 'import cv2\n'), ((9700, 9731), 'numpy.unique', 'np.unique', (['_segmentation_result'], {}), '(_segmentation_result)\n', (9709, 9731), True, 'import numpy as np\n'), ((1478, 1593), 'cv2.rectangle', 'cv2.rectangle', (['to_return_img', '(m_box[0], m_box[1])', '(m_box[2], m_box[3])', '_specific_color'], {'thickness': '_thickness'}), '(to_return_img, (m_box[0], m_box[1]), (m_box[2], m_box[3]),\n _specific_color, thickness=_thickness)\n', (1491, 1593), False, 'import cv2\n'), ((5633, 5734), 'cv2.arrowedLine', 'cv2.arrowedLine', (['to_return_img', '(m_start_x, _y)', '(m_end_x, _y)', '_line_color'], {'thickness': '_thickness'}), '(to_return_img, (m_start_x, _y), (m_end_x, _y), _line_color,\n thickness=_thickness)\n', (5648, 5734), False, 'import cv2\n'), ((5743, 5844), 'cv2.arrowedLine', 'cv2.arrowedLine', (['to_return_img', '(m_end_x, _y)', '(m_start_x, _y)', '_line_color'], {'thickness': '_thickness'}), '(to_return_img, (m_end_x, _y), (m_start_x, _y), _line_color,\n 
thickness=_thickness)\n', (5758, 5844), False, 'import cv2\n'), ((5867, 5961), 'cv2.line', 'cv2.line', (['to_return_img', '(m_start_x, _y)', '(m_end_x, _y)', '_line_color'], {'thickness': '_thickness'}), '(to_return_img, (m_start_x, _y), (m_end_x, _y), _line_color,\n thickness=_thickness)\n', (5875, 5961), False, 'import cv2\n'), ((5970, 6064), 'cv2.line', 'cv2.line', (['to_return_img', '(m_end_x, _y)', '(m_start_x, _y)', '_line_color'], {'thickness': '_thickness'}), '(to_return_img, (m_end_x, _y), (m_start_x, _y), _line_color,\n thickness=_thickness)\n', (5978, 6064), False, 'import cv2\n'), ((7700, 7801), 'cv2.arrowedLine', 'cv2.arrowedLine', (['to_return_img', '(_x, m_start_y)', '(_x, m_end_y)', '_line_color'], {'thickness': '_thickness'}), '(to_return_img, (_x, m_start_y), (_x, m_end_y), _line_color,\n thickness=_thickness)\n', (7715, 7801), False, 'import cv2\n'), ((7810, 7911), 'cv2.arrowedLine', 'cv2.arrowedLine', (['to_return_img', '(_x, m_end_y)', '(_x, m_start_y)', '_line_color'], {'thickness': '_thickness'}), '(to_return_img, (_x, m_end_y), (_x, m_start_y), _line_color,\n thickness=_thickness)\n', (7825, 7911), False, 'import cv2\n'), ((7934, 8028), 'cv2.line', 'cv2.line', (['to_return_img', '(_x, m_start_y)', '(_x, m_end_y)', '_line_color'], {'thickness': '_thickness'}), '(to_return_img, (_x, m_start_y), (_x, m_end_y), _line_color,\n thickness=_thickness)\n', (7942, 8028), False, 'import cv2\n'), ((8037, 8131), 'cv2.line', 'cv2.line', (['to_return_img', '(_x, m_end_y)', '(_x, m_start_y)', '_line_color'], {'thickness': '_thickness'}), '(to_return_img, (_x, m_end_y), (_x, m_start_y), _line_color,\n thickness=_thickness)\n', (8045, 8131), False, 'import cv2\n'), ((10101, 10157), 'numpy.repeat', 'np.repeat', (['m_index_segment_result[..., None]', '(3)'], {'axis': '(-1)'}), '(m_index_segment_result[..., None], 3, axis=-1)\n', (10110, 10157), True, 'import numpy as np\n'), ((1660, 1784), 'cv2.putText', 'cv2.putText', (['to_return_img', 'f"""{m_box_index}"""', 
'(m_box[0] + 5, m_box[1] + 5)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '_specific_color'], {}), "(to_return_img, f'{m_box_index}', (m_box[0] + 5, m_box[1] + 5),\n cv2.FONT_HERSHEY_SIMPLEX, 1, _specific_color)\n", (1671, 1784), False, 'import cv2\n'), ((2930, 2948), 'numpy.array', 'np.array', (['_polygon'], {}), '(_polygon)\n', (2938, 2948), True, 'import numpy as np\n')] |
"""
Bayesian nonparametric samplers
"""
import numpy as np
# CRP samplers
def sample_CRP(N, alpha, d=0):
    """Draw N table assignments from a Pitman-Yor process via the
    Chinese Restaurant Process; the default d=0 reduces to a
    Dirichlet process.

    Parameters
    -----------
    N : scalar, integer
        number of samples to return
    alpha : scalar > -d
        concentration parameter
    d : float in [0,1)
        discount parameter

    Returns
    --------
    z : list
        list of integers in [0,K) sampled from the CRP
    """
    pi = [1]  # seating distribution; starts with a single (new) table
    z = []
    for n in range(N):
        # seat customer n according to the current distribution
        z.append(np.random.choice(len(pi), p=pi))
        # fixed: the original referenced an undefined name `z_py` here
        K = max(z) + 1
        # occupancy counts per table (bins centred on integers 0..K-1)
        counts, e = np.histogram(z, bins=np.arange(K + 1) - .5)
        # discounted counts plus the mass for opening a new table,
        # normalised to a probability distribution
        pi = np.append(counts - d, alpha + d * K) / (alpha + n + 1)
    return z
# IBP Sampler
def p_row(p):
    """Sample a binary vector whose i-th element is 1 with probability p[i].

    Arguments:
        p : array-like of probabilities in [0, 1]
    """
    return np.asarray([np.random.choice([1, 0], p=[p_i, 1 - p_i]) for p_i in p])


def sample_IBP(N, gamma):
    """Sample N rows from an Indian Buffet Process.

    Parameters
    -----------
    N : scalar, integer
        number of rows (customers) to sample
    gamma : float > 0
        concentration parameter

    Returns
    --------
    z : list of numpy arrays
        N binary vectors of (weakly) growing length sampled from the IBP
    """
    z = []
    # First customer samples Poisson(gamma) brand-new dishes.
    z_tmp = np.ones(np.random.poisson(gamma))
    m = np.zeros(z_tmp.shape)  # per-dish popularity counts
    z.append(z_tmp)
    for n in range(1, N):  # fixed: the original looped over an undefined D
        m += z_tmp
        # Probability of re-sampling each existing dish.
        p = m / (n + 1)
        # Number of brand-new dishes for this customer.
        new = np.random.poisson(gamma / n)
        z_tmp = np.concatenate((p_row(p), np.ones(new)))
        m = np.concatenate((m, np.zeros(new)))
        z.append(z_tmp)
    return z
| [
"numpy.zeros",
"numpy.ones",
"numpy.append",
"numpy.arange",
"numpy.random.poisson",
"numpy.random.choice"
] | [((1570, 1591), 'numpy.zeros', 'np.zeros', (['z_tmp.shape'], {}), '(z_tmp.shape)\n', (1578, 1591), True, 'import numpy as np\n'), ((1536, 1560), 'numpy.random.poisson', 'np.random.poisson', (['gamma'], {}), '(gamma)\n', (1553, 1560), True, 'import numpy as np\n'), ((1729, 1757), 'numpy.random.poisson', 'np.random.poisson', (['(gamma / n)'], {}), '(gamma / n)\n', (1746, 1757), True, 'import numpy as np\n'), ((846, 882), 'numpy.append', 'np.append', (['(counts - d)', '(alpha + d * K)'], {}), '(counts - d, alpha + d * K)\n', (855, 882), True, 'import numpy as np\n'), ((1053, 1095), 'numpy.random.choice', 'np.random.choice', (['[1, 0]'], {'p': '[p_i, 1 - p_i]'}), '([1, 0], p=[p_i, 1 - p_i])\n', (1069, 1095), True, 'import numpy as np\n'), ((1797, 1809), 'numpy.ones', 'np.ones', (['new'], {}), '(new)\n', (1804, 1809), True, 'import numpy as np\n'), ((1842, 1855), 'numpy.zeros', 'np.zeros', (['new'], {}), '(new)\n', (1850, 1855), True, 'import numpy as np\n'), ((728, 744), 'numpy.arange', 'np.arange', (['(K + 1)'], {}), '(K + 1)\n', (737, 744), True, 'import numpy as np\n')] |
"""
Here we put some functions that both parsers need
"""
import numpy as np
def log_add(logx, logy):
    """Return log(exp(logx) + exp(logy)) without leaving the log domain.

    Either argument may be None, meaning "no value yet"; the other
    argument is then returned unchanged, so a running sum can start
    from an empty accumulator.

    Arguments:
        logx : float or None
        logy : float or None
    """
    # None acts as the identity element of the sum.
    if logx is None:
        return logy
    if logy is None:
        return logx
    # Ensure logx is the larger of the two.
    if logy > logx:
        logx, logy = logy, logx
    # If logy is negligibly small relative to logx, ignore it.
    negdiff = logy - logx
    if negdiff < -30:
        return logx
    # Stay in the log domain via
    # log(X + Y) = log(X) + log(1 + exp(log(Y) - log(X)))
    return logx + np.log(1.0 + np.exp(negdiff))
##### PRINTING #######
def rule2string(lhs, rhs, isCopy=False):
    """Return the canonical string name of a rule.

    Used both for printing and as dictionary keys of rule
    probabilities.

    Arguments:
        lhs    : string
        rhs    : string list
        isCopy : boolean marking a copy rule; only used in
                 cky_constituent_copy
    """
    name = "%s->%s" % (lhs, ".".join(rhs))
    if isCopy:
        name = name + ".copy"
    return name


def grammar2string(grammar):
    """Render a grammar as a readable string, one rule per line.

    Arguments:
        grammar : a simple grammar with no probabilities and no isCopy
    """
    lines = []
    for lhs, rhss in grammar:
        for rhs in rhss:
            lines.append(rule2string(lhs, rhs) + "\n")
    return "".join(lines)
def print_chart(ch):
    """Print the non-empty cells of a chart (any list of lists of lists).

    For each cell that holds at least one item, prints its (row, col)
    coordinates and contents.  Used for the CKY chart; also works for
    the backpointers chart.

    Arguments:
        ch : list list list
    """
    print ("### Chart ###")
    for i, row in enumerate(ch):
        for j, cell in enumerate(row):
            if cell:
                print ("(%i,%i)" % (i, j), cell)
    print ("### end Chart ###")
def print_backpointers(ch):
    """Print the non-empty cells of a backpointers chart.

    Each cell ch[i][j] holds one sub-list per grammar rule; a cell is
    printed when any of those sub-lists is non-empty.
    """
    num_rules = len(ch[0][0])
    print ("### Backpointers ###")
    for i, row in enumerate(ch):
        for j, cell in enumerate(row):
            if any(len(cell[m]) > 0 for m in range(num_rules)):
                print ("(%i,%i)" % (i, j), cell)
    print ("### end Backpointers ###")
####### CHANGE THE GRAMMAR ##########
def make_rule_probs(g, log=False):
    """Build a dictionary mapping rule-name strings to log probabilities.

    Keys are produced by rule2string so they match the keys used by the
    parsers.  Right-hand sides may be (rhs, prob) pairs or
    (rhs, isCopy, prob) triples; entries of any other arity are skipped.
    When log is False the probabilities are log-transformed first.

    Arguments:
        g   : category * (category list * float) list list
              OR category * (category list * bool * float) list list
        log : Bool indicating whether probs are already log(p)
    """
    rule_probs = {}
    for lhs, rhss in g:
        for entry in rhss:
            if len(entry) == 2:
                rhs, p = entry
                key = rule2string(lhs, rhs)
            elif len(entry) == 3:
                rhs, is_copy, p = entry
                key = rule2string(lhs, rhs, is_copy)
            else:
                continue
            rule_probs[key] = p if log else np.log(p)
    return rule_probs
##### DEALING WITH OUTPUTS ######
def find_prob_from_parses(parses,ruleprobs,output_trees=False):
    """Sum the (log) probabilities of all parses of one sentence.

    Enumerates every parse, so it is only tractable for small examples;
    useful for verifying a faster, cached implementation.

    Arguments:
        parses       : list of parse trees
        ruleprobs    : dict mapping rule-name strings to log probs
                       (as built by make_rule_probs)
        output_trees : if True, also render each parse to
                       output/parseNNNNN.pdf

    NOTE(review): relies on a `rules_used` function that is not defined
    in this file -- presumably defined or imported elsewhere; confirm.
    """
    # Find the probability of a particular sentence from its parses.
    # I.e. this will be untractable for big guys but I can use it to
    # verify my faster implementation (using caching) on the smaller guys.
    parse_probs = []
    for i,parse in enumerate(parses):
        if output_trees:
            tree_to_pdf(parse,'output/parse%05i.pdf'%i)
        rules = rules_used(parse)
        log_ps = map(lambda x: ruleprobs[x],rules)
        parseprob = sum(log_ps) # the probability of the parse is simply the sum of the log probabilities of the rules used
        parse_probs.append( parseprob )
    # Add the probabilities of the parses, using a smart
    # bit of algebra to prevent underflow
    # (essentially what we are trying to compute is log(exp(X)+exp(Y))).
    total_prob = parse_probs[0]
    for logp in parse_probs[1:]:
        total_prob = log_add(total_prob,logp)
    return total_prob
def get_nodes_edges(tree, prefix=""):
    """Collect the (name, label) nodes and (parent, child) edges of a tree.

    Node names are built from the recursion path ("p" plus the child
    indices accumulated in *prefix*) so they are unique across the tree.
    Returns (root_name, nodes, edges) for later plotting.
    """
    rule, children = tree
    root = "p%s" % prefix
    nodes = [(root, rule)]
    edges = []
    for idx, subtree in enumerate(children):
        # Recurse, then take over the child's nodes and edges.
        child_root, child_nodes, child_edges = get_nodes_edges(subtree, "%i%s" % (idx, prefix))
        nodes.extend(child_nodes)
        edges.extend(child_edges)
        edges.append((root, child_root))
    return root, nodes, edges
def dot_output(tree):
    """Return Graphviz dot source describing *tree*."""
    _, nodes, edges = get_nodes_edges(tree)
    pieces = ["digraph tree {\n node [shape = none; height=0; width=0];\n edge [dir=none];"]
    pieces.extend("\t%s [label=\"%s\"]\n" % (name, label) for name, label in nodes)
    pieces.extend("\t%s -> %s\n" % (src, dst) for src, dst in edges)
    pieces.append("}\n")
    return "".join(pieces)
def _render_tree_with_dot(tree, fname, fmt):
    # Shared helper: write dot source to a scratch file and run Graphviz.
    outp = dot_output(tree)
    with open('.tmp.dot', 'w') as f:
        f.write(outp)
    import subprocess
    subprocess.call(['dot', '.tmp.dot', '-T' + fmt, '-o', fname])
    # subprocess.call(['rm','.tmp.dot']) # clean up my mess


def tree_to_pdf(tree, fname):
    """Render *tree* to the PDF file *fname* using Graphviz `dot`."""
    _render_tree_with_dot(tree, fname, 'pdf')
    return


def tree_to_png(tree, fname):
    """Render *tree* to the PNG file *fname* using Graphviz `dot`."""
    _render_tree_with_dot(tree, fname, 'png')
    return
| [
"numpy.log",
"subprocess.call",
"numpy.exp"
] | [((5866, 5924), 'subprocess.call', 'subprocess.call', (["['dot', '.tmp.dot', '-Tpdf', '-o', fname]"], {}), "(['dot', '.tmp.dot', '-Tpdf', '-o', fname])\n", (5881, 5924), False, 'import subprocess\n'), ((6192, 6250), 'subprocess.call', 'subprocess.call', (["['dot', '.tmp.dot', '-Tpng', '-o', fname]"], {}), "(['dot', '.tmp.dot', '-Tpng', '-o', fname])\n", (6207, 6250), False, 'import subprocess\n'), ((1039, 1054), 'numpy.exp', 'np.exp', (['negdiff'], {}), '(negdiff)\n', (1045, 1054), True, 'import numpy as np\n'), ((3275, 3284), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (3281, 3284), True, 'import numpy as np\n'), ((3431, 3440), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (3437, 3440), True, 'import numpy as np\n')] |
import os
import numpy as np
import shutil
import json
import tensorflow as tf
def create_lines_dataset(data_source,
                         preprocessor,
                         destination_folder='lines_dataset',
                         size=10000, train_fraction=0.6, val_fraction=0.2):
    """Build a preprocessed train/validation/test lines dataset.

    Pipeline: extract raw line images into a temporary subfolder, fit the
    preprocessor on the three splits, save its configuration to
    preprocessing.json, run it over every split's images, and copy the
    character table next to the splits.

    Arguments:
        data_source        : iterable yielding (image path or image, text) examples
        preprocessor       : object with fit/save/process methods
        destination_folder : root folder of the final dataset
        size               : number of examples to extract
        train_fraction     : probability an example lands in train
        val_fraction       : probability an example lands in validation
    """
    temp_folder = os.path.join(destination_folder, 'extracted_lines')
    extract_lines(data_source, temp_folder, size, train_fraction, val_fraction)
    train_path = os.path.join(temp_folder, 'train')
    val_path = os.path.join(temp_folder, 'validation')
    test_path = os.path.join(temp_folder, 'test')
    # Fit the preprocessor on all splits before transforming any images.
    preprocessor.fit(train_path, val_path, test_path)
    preprocessor_path = os.path.join(destination_folder, 'preprocessing.json')
    preprocessor.save(preprocessor_path)
    split_folders = ['train', 'validation', 'test']
    for folder in split_folders:
        src_dir = os.path.join(temp_folder, folder)
        dest_dir = os.path.join(destination_folder, folder)
        preprocess_images(src_dir, dest_dir, preprocessor)
    char_table_file_name = 'character_table.txt'
    char_table_src = os.path.join(temp_folder, char_table_file_name)
    char_table_dest = os.path.join(destination_folder, char_table_file_name)
    shutil.copyfile(char_table_src, char_table_dest)
def preprocess_images(source, destination, preprocessor):
    """Copy a split folder and preprocess every image in it in place.

    The old meta.json is removed and regenerated afterwards so it
    describes the processed images.
    """
    # Imported lazily; keras_htr is only needed for this step.
    from keras_htr.generators import CompiledDataset
    shutil.copytree(source, destination)
    ds = CompiledDataset(destination)
    for image_path, _ in ds:
        img = preprocessor.process(image_path)
        img.save(image_path)
    meta_path = os.path.join(destination, 'meta.json')
    os.remove(meta_path)
    create_meta_information(destination)
def extract_lines(data_source,
                  destination_folder='lines_dataset',
                  size=10000, train_fraction=0.6, val_fraction=0.2):
    """Extract up to *size* examples into train/validation/test folders.

    Each example's image is copied into its randomly-chosen split folder
    under a sequential name, the transcripts are written to a lines.txt
    per split, per-split meta information is generated, and a character
    table covering all splits is written to the destination folder.
    """
    dest_to_copier = {}   # split name -> FileCopier for that split folder
    dest_texts = {}       # split folder path -> list of transcripts
    num_created = 0
    example_generator = data_source.__iter__()
    for triple in split_examples(example_generator, size, train_fraction, val_fraction):
        folder_name, file_path, text = triple
        split_destination = os.path.join(destination_folder, folder_name)
        if folder_name not in dest_to_copier:
            dest_to_copier[folder_name] = FileCopier(split_destination)
        if split_destination not in dest_texts:
            dest_texts[split_destination] = []
        copier = dest_to_copier[folder_name]
        copier.copy_file(file_path)
        dest_texts[split_destination].append(text)
        num_created += 1
        if num_created % 500 == 0:
            # Periodic progress report.
            completed_percentage = num_created / float(size) * 100
            print('Created {} out of {} lines. {} % done'.format(
                num_created, size, completed_percentage)
            )
    # Write the transcripts and the meta information for every split.
    for split_folder in dest_texts.keys():
        lines_path = os.path.join(split_folder, 'lines.txt')
        with open(lines_path, 'w') as f:
            for line in dest_texts[split_folder]:
                f.write(line + '\n')
        print('Creating meta information for {} split folder'.format(split_folder))
        create_meta_information(split_folder)
    print('Creating a character table')
    split_folders = dest_texts.keys()
    char_table_lines = create_char_table(split_folders)
    char_table_path = os.path.join(destination_folder, 'character_table.txt')
    with open(char_table_path, 'w') as f:
        f.write(char_table_lines)
class FileCopier:
    """Copies files (or saves Pillow images) into a target folder.

    Destination files are named with a running counter (0.ext, 1.ext, ...)
    that continues from whatever is already present in the folder.
    """

    def __init__(self, folder):
        self._folder = folder
        if not os.path.exists(self._folder):
            os.makedirs(self._folder)
        # Resume numbering after any files already in the folder.
        self._num_copied = len(os.listdir(self._folder))

    def copy_file(self, obj):
        """Copy a path (str) or save a Pillow image; return the new path."""
        if type(obj) is str:
            # obj is a path to an image file; keep its extension.
            _, ext = os.path.splitext(obj)
            dest = os.path.join(self._folder, str(self._num_copied) + ext)
            shutil.copyfile(obj, dest)
        else:
            # obj is a Pillow image; save it as PNG.
            dest = os.path.join(self._folder, str(self._num_copied) + '.png')
            obj.save(dest)
        self._num_copied += 1
        return dest
def split_examples(example_generator, size, train_fraction=0.6, val_fraction=0.2):
    """Yield up to *size* examples, each prefixed with a random split name.

    Each yielded tuple is (split_name,) + example, where split_name is
    one of 'train', 'validation' or 'test', drawn with probabilities
    (train_fraction, val_fraction, 1 - train_fraction - val_fraction).
    """
    train_folder = 'train'
    val_folder = 'validation'
    test_folder = 'test'
    folders = [train_folder, val_folder, test_folder]
    # The split probabilities never change, so compute them once
    # instead of on every iteration.
    test_fraction = 1 - train_fraction - val_fraction
    pmf = [train_fraction, val_fraction, test_fraction]
    for count, example in enumerate(example_generator):
        # `>=` (not `>`) so exactly `size` examples are yielded;
        # the original off-by-one produced size + 1.
        if count >= size:
            break
        destination = np.random.choice(folders, p=pmf)
        yield (destination,) + example
def create_meta_information(dataset_path):
    """Write meta.json with image-size and text-length statistics.

    Loads every non-.txt file in *dataset_path* as an image, reads the
    transcripts from lines.txt, and stores min/max/average image widths
    and heights, the longest transcript length and the example count.
    """
    widths = []
    heights = []
    for fname in os.listdir(dataset_path):
        _, ext = os.path.splitext(fname)
        if ext != '.txt':
            # Anything that is not a text file is treated as an image.
            image_path = os.path.join(dataset_path, fname)
            image = tf.keras.preprocessing.image.load_img(image_path)
            widths.append(image.width)
            heights.append(image.height)
    lines_path = os.path.join(dataset_path, 'lines.txt')
    text_lengths = []
    with open(lines_path) as f:
        for row in f.readlines():
            line = row.rstrip('\n')
            text_lengths.append(len(line))
    # int() keeps the JSON free of non-serialisable numpy scalars.
    max_width = int(np.max(widths))
    max_height = int(np.max(heights))
    min_width = int(np.min(widths))
    min_height = int(np.min(heights))
    average_width = int(np.mean(widths))
    average_height = int(np.mean(heights))
    max_text_length = int(np.max(text_lengths))
    num_examples = len(widths)
    d = dict(max_width=max_width,
             max_height=max_height,
             min_width=min_width,
             min_height=min_height,
             average_width=average_width,
             average_height=average_height,
             max_text_length=max_text_length,
             num_examples=num_examples)
    s = json.dumps(d)
    meta_path = os.path.join(dataset_path, 'meta.json')
    with open(meta_path, 'w') as f:
        f.write(s)
def create_char_table(split_folders):
    """Collect every character used in the splits' lines.txt files.

    Returns the characters joined by newlines (order is unspecified,
    since they come from a set).
    """
    chars = set()
    for folder in split_folders:
        lines_path = os.path.join(folder, 'lines.txt')
        with open(lines_path) as f:
            for raw_line in f.readlines():
                chars.update(raw_line.rstrip())
    return '\n'.join(list(chars))
| [
"os.remove",
"os.makedirs",
"keras_htr.generators.CompiledDataset",
"os.path.exists",
"json.dumps",
"tensorflow.keras.preprocessing.image.load_img",
"numpy.max",
"numpy.min",
"numpy.mean",
"os.path.splitext",
"numpy.random.choice",
"shutil.copyfile",
"shutil.copytree",
"os.path.join",
"o... | [((313, 364), 'os.path.join', 'os.path.join', (['destination_folder', '"""extracted_lines"""'], {}), "(destination_folder, 'extracted_lines')\n", (325, 364), False, 'import os\n'), ((464, 498), 'os.path.join', 'os.path.join', (['temp_folder', '"""train"""'], {}), "(temp_folder, 'train')\n", (476, 498), False, 'import os\n'), ((514, 553), 'os.path.join', 'os.path.join', (['temp_folder', '"""validation"""'], {}), "(temp_folder, 'validation')\n", (526, 553), False, 'import os\n'), ((570, 603), 'os.path.join', 'os.path.join', (['temp_folder', '"""test"""'], {}), "(temp_folder, 'test')\n", (582, 603), False, 'import os\n'), ((683, 737), 'os.path.join', 'os.path.join', (['destination_folder', '"""preprocessing.json"""'], {}), "(destination_folder, 'preprocessing.json')\n", (695, 737), False, 'import os\n'), ((1108, 1155), 'os.path.join', 'os.path.join', (['temp_folder', 'char_table_file_name'], {}), '(temp_folder, char_table_file_name)\n', (1120, 1155), False, 'import os\n'), ((1178, 1232), 'os.path.join', 'os.path.join', (['destination_folder', 'char_table_file_name'], {}), '(destination_folder, char_table_file_name)\n', (1190, 1232), False, 'import os\n'), ((1238, 1286), 'shutil.copyfile', 'shutil.copyfile', (['char_table_src', 'char_table_dest'], {}), '(char_table_src, char_table_dest)\n', (1253, 1286), False, 'import shutil\n'), ((1405, 1441), 'shutil.copytree', 'shutil.copytree', (['source', 'destination'], {}), '(source, destination)\n', (1420, 1441), False, 'import shutil\n'), ((1452, 1480), 'keras_htr.generators.CompiledDataset', 'CompiledDataset', (['destination'], {}), '(destination)\n', (1467, 1480), False, 'from keras_htr.generators import CompiledDataset\n'), ((1603, 1641), 'os.path.join', 'os.path.join', (['destination', '"""meta.json"""'], {}), "(destination, 'meta.json')\n", (1615, 1641), False, 'import os\n'), ((1646, 1666), 'os.remove', 'os.remove', (['meta_path'], {}), '(meta_path)\n', (1655, 1666), False, 'import os\n'), ((3323, 3378), 
'os.path.join', 'os.path.join', (['destination_folder', '"""character_table.txt"""'], {}), "(destination_folder, 'character_table.txt')\n", (3335, 3378), False, 'import os\n'), ((4822, 4846), 'os.listdir', 'os.listdir', (['dataset_path'], {}), '(dataset_path)\n', (4832, 4846), False, 'import os\n'), ((5142, 5181), 'os.path.join', 'os.path.join', (['dataset_path', '"""lines.txt"""'], {}), "(dataset_path, 'lines.txt')\n", (5154, 5181), False, 'import os\n'), ((5984, 5997), 'json.dumps', 'json.dumps', (['d'], {}), '(d)\n', (5994, 5997), False, 'import json\n'), ((6014, 6053), 'os.path.join', 'os.path.join', (['dataset_path', '"""meta.json"""'], {}), "(dataset_path, 'meta.json')\n", (6026, 6053), False, 'import os\n'), ((884, 917), 'os.path.join', 'os.path.join', (['temp_folder', 'folder'], {}), '(temp_folder, folder)\n', (896, 917), False, 'import os\n'), ((937, 977), 'os.path.join', 'os.path.join', (['destination_folder', 'folder'], {}), '(destination_folder, folder)\n', (949, 977), False, 'import os\n'), ((2140, 2185), 'os.path.join', 'os.path.join', (['destination_folder', 'folder_name'], {}), '(destination_folder, folder_name)\n', (2152, 2185), False, 'import os\n'), ((2865, 2904), 'os.path.join', 'os.path.join', (['split_folder', '"""lines.txt"""'], {}), "(split_folder, 'lines.txt')\n", (2877, 2904), False, 'import os\n'), ((4653, 4685), 'numpy.random.choice', 'np.random.choice', (['folders'], {'p': 'pmf'}), '(folders, p=pmf)\n', (4669, 4685), True, 'import numpy as np\n'), ((4865, 4888), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (4881, 4888), False, 'import os\n'), ((5371, 5385), 'numpy.max', 'np.max', (['widths'], {}), '(widths)\n', (5377, 5385), True, 'import numpy as np\n'), ((5408, 5423), 'numpy.max', 'np.max', (['heights'], {}), '(heights)\n', (5414, 5423), True, 'import numpy as np\n'), ((5445, 5459), 'numpy.min', 'np.min', (['widths'], {}), '(widths)\n', (5451, 5459), True, 'import numpy as np\n'), ((5482, 5497), 'numpy.min', 
'np.min', (['heights'], {}), '(heights)\n', (5488, 5497), True, 'import numpy as np\n'), ((5523, 5538), 'numpy.mean', 'np.mean', (['widths'], {}), '(widths)\n', (5530, 5538), True, 'import numpy as np\n'), ((5565, 5581), 'numpy.mean', 'np.mean', (['heights'], {}), '(heights)\n', (5572, 5581), True, 'import numpy as np\n'), ((5609, 5629), 'numpy.max', 'np.max', (['text_lengths'], {}), '(text_lengths)\n', (5615, 5629), True, 'import numpy as np\n'), ((6221, 6254), 'os.path.join', 'os.path.join', (['folder', '"""lines.txt"""'], {}), "(folder, 'lines.txt')\n", (6233, 6254), False, 'import os\n'), ((3552, 3580), 'os.path.exists', 'os.path.exists', (['self._folder'], {}), '(self._folder)\n', (3566, 3580), False, 'import os\n'), ((3594, 3619), 'os.makedirs', 'os.makedirs', (['self._folder'], {}), '(self._folder)\n', (3605, 3619), False, 'import os\n'), ((3652, 3676), 'os.listdir', 'os.listdir', (['self._folder'], {}), '(self._folder)\n', (3662, 3676), False, 'import os\n'), ((3832, 3859), 'os.path.splitext', 'os.path.splitext', (['file_path'], {}), '(file_path)\n', (3848, 3859), False, 'import os\n'), ((3948, 3980), 'shutil.copyfile', 'shutil.copyfile', (['file_path', 'dest'], {}), '(file_path, dest)\n', (3963, 3980), False, 'import shutil\n'), ((4940, 4973), 'os.path.join', 'os.path.join', (['dataset_path', 'fname'], {}), '(dataset_path, fname)\n', (4952, 4973), False, 'import os\n'), ((4994, 5043), 'tensorflow.keras.preprocessing.image.load_img', 'tf.keras.preprocessing.image.load_img', (['image_path'], {}), '(image_path)\n', (5031, 5043), True, 'import tensorflow as tf\n')] |
import numpy as np
import sys, os
import argparse
import tabulate
from Utils.twokenize import *
from Utils.MyMetrics import *
from Utils.WordVecs import *
from Utils.Representations import *
from Utils.Datasets import *
from Utils.Semeval_2013_Dataset import *
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
def print_prediction(file, prediction):
    """Write one prediction per line to *file* (overwriting it)."""
    lines = [str(item) + '\n' for item in prediction]
    with open(file, 'w') as out:
        out.writelines(lines)
def get_best_C(Xtrain, ytrain, Xdev, ydev):
    """Grid-search the regularization strength C on the dev set.

    Trains a logistic regression for each candidate C and returns the C
    with the highest dev F1 together with that F1 score.
    """
    candidate_cs = [0.001, 0.0025, 0.005, 0.0075,
                    0.01, 0.025, 0.05, 0.075,
                    0.1, 0.25, 0.5, 0.75,
                    1, 2.5, 5, 7.5]
    labels = sorted(set(ytrain))
    binary = len(labels) == 2
    best_c = 0
    best_f1 = 0
    for i, c in enumerate(candidate_cs):
        sys.stdout.write('\rRunning cross-validation: {0} of {1}'.format(i+1, len(candidate_cs)))
        sys.stdout.flush()
        clf = LogisticRegression(C=c)
        clf.fit(Xtrain, ytrain)
        pred = clf.predict(Xdev)
        # Binary F1 with the positive class fixed at 1; micro-averaged
        # F1 over all labels otherwise.
        if binary:
            dev_f1 = f1_score(ydev, pred, pos_label=1)
        else:
            dev_f1 = f1_score(ydev, pred, labels=labels, average='micro')
        if dev_f1 > best_f1:
            best_f1 = dev_f1
            best_c = c
    print()
    print('Best F1 on dev data: {0:.3f}'.format(best_f1))
    print('Best C on dev data: {0}'.format(best_c))
    return best_c, best_f1
def test_embeddings(embedding_file, file_type):
    """
    Evaluate Tang et al. (2014) embeddings with a logistic-regression
    classification approach on a number of benchmark datasets.

    Arguments:
        embedding_file : path to the embedding file
        file_type      : embedding format ('word2vec' or glove style)

    Returns:
        (names, results, dim) where names lists the datasets plus an
        'overall' row, results holds [acc, precision, recall, f1] per
        row, and dim is the embedding dimensionality.
    """
    print('importing vectors...')
    vecs = WordVecs(embedding_file, file_type)
    dim = vecs.vector_size
    print('Importing datasets...')
    st_fine = Stanford_Sentiment_Dataset('datasets/stanford_sentanalysis',
                            None,
                            one_hot=False,
                            binary=False,
                            rep=words)
    st_binary = Stanford_Sentiment_Dataset('datasets/stanford_sentanalysis',
                              None,
                              one_hot=False,
                              binary=True,
                              rep=words)
    opener_dataset = General_Dataset('datasets/opener',
                                    vecs,
                                    one_hot=False,
                                    rep=words)
    sentube_auto_dataset = General_Dataset('datasets/SenTube/auto',
                                          vecs._w2idx, rep=words,
                                          binary=True,
                                          one_hot=False)
    sentube_tablets_dataset = General_Dataset('datasets/SenTube/tablets',
                                             vecs._w2idx, rep=words,
                                             binary=True,
                                             one_hot=False)
    semeval_dataset = Semeval_Dataset('datasets/semeval',
                                      vecs._w2idx, rep=words,
                                      one_hot=False)
    datasets = [st_fine, st_binary, opener_dataset,
                sentube_auto_dataset, sentube_tablets_dataset, semeval_dataset]
    names = ['sst_fine', 'sst_binary', 'opener',
             'sentube_auto', 'sentube_tablets', 'semeval']
    # Collect results here
    results = []
    for name, dataset in zip(names, datasets):
        print('Testing on {0}...'.format(name))
        # Convert each tokenised example to a single feature vector.
        Xtrain = np.array([conv_tweet(' '.join(t), vecs) for t in dataset._Xtrain])
        Xtest = np.array([conv_tweet(' '.join(t), vecs) for t in dataset._Xtest])
        Xdev = np.array([conv_tweet(' '.join(t), vecs) for t in dataset._Xdev])
        # get best parameters on dev set
        best_C, best_rate = get_best_C(Xtrain, dataset._ytrain,
                                       Xdev, dataset._ydev)
        clf = LogisticRegression(C=best_C)
        h = clf.fit(Xtrain, dataset._ytrain)
        pred = clf.predict(Xtest)
        predictions_file = "predictions/joint/" + name + '/pred.txt'
        print_prediction(predictions_file, pred)
        labels = sorted(set(dataset._ytrain))
        # Binary F1 for two-class datasets, micro-averaged otherwise.
        if len(labels) == 2:
            average = 'binary'
        else:
            average = 'micro'
        mm = MyMetrics(dataset._ytest, pred, one_hot=False, labels=labels, average=average)
        acc, precision, recall, f1 = mm.get_scores()
        results.append([acc, precision, recall, f1])
    # Append the unweighted mean over all datasets as an 'overall' row.
    results.append(list(np.array(results).mean(axis=0)))
    names.append('overall')
    return names, results, dim
def print_results(file, out_file, file_type):
    """Run the embedding evaluation and report the results table.

    When *out_file* is truthy the table is appended to that file,
    otherwise it is printed to stdout.
    """
    names, results, dim = test_embeddings(file, file_type)
    rows = [[name] + scores for name, scores in zip(names, results)]
    table = tabulate.tabulate(rows, headers=['dataset', 'acc', 'prec', 'rec', 'f1'], tablefmt='simple', floatfmt='.3f')
    if out_file:
        with open(out_file, 'a') as f:
            f.write('\n')
            f.write('+++Joint+++\n')
            f.write(table)
            f.write('\n')
    else:
        print()
        print('+++Joint+++')
        print(table)
def main(args):
    """Parse command-line options and run the embedding evaluation.

    Note: the *args* parameter is never read -- it is immediately
    shadowed by the parsed argparse result.
    """
    parser = argparse.ArgumentParser(
        description='test embeddings on a suite of datasets')
    parser.add_argument('-emb', help='location of embeddings',
                        default='embeddings/sswe-u-50.txt')
    parser.add_argument('-file_type', help='glove style embeddings or word2vec style: default is w2v',
                        default='word2vec')
    parser.add_argument('-output', help='output file for results', default='./results.txt')
    # NOTE(review): type=bool is an argparse pitfall -- any non-empty
    # string (including "False") parses as True; only the default is
    # reliably False.
    parser.add_argument('-printout', help='instead of printing to file, print to sysout',
                        type=bool, default=False)
    args = vars(parser.parse_args())
    embedding_file = args['emb']
    file_type = args['file_type']
    output = args['output']
    printout = args['printout']
    print('testing on %s' % embedding_file)
    if printout:
        # Print the results table to stdout instead of appending to a file.
        print_results(embedding_file, None, file_type)
    else:
        print_results(embedding_file, output, file_type)
if __name__ == '__main__':
    # Script entry point; main() ignores this argv and re-parses the
    # command line itself with argparse.
    args = sys.argv
    main(args)
| [
"argparse.ArgumentParser",
"sklearn.linear_model.LogisticRegression",
"sklearn.metrics.f1_score",
"sys.stdout.flush",
"tabulate.tabulate",
"numpy.array"
] | [((5018, 5135), 'tabulate.tabulate', 'tabulate.tabulate', (['table_data'], {'headers': "['dataset', 'acc', 'prec', 'rec', 'f1']", 'tablefmt': '"""simple"""', 'floatfmt': '""".3f"""'}), "(table_data, headers=['dataset', 'acc', 'prec', 'rec',\n 'f1'], tablefmt='simple', floatfmt='.3f')\n", (5035, 5135), False, 'import tabulate\n'), ((5419, 5496), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""test embeddings on a suite of datasets"""'}), "(description='test embeddings on a suite of datasets')\n", (5442, 5496), False, 'import argparse\n'), ((959, 977), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (975, 977), False, 'import sys, os\n'), ((993, 1016), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': 'c'}), '(C=c)\n', (1011, 1016), False, 'from sklearn.linear_model import LogisticRegression\n'), ((4129, 4157), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': 'best_C'}), '(C=best_C)\n', (4147, 4157), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1136, 1169), 'sklearn.metrics.f1_score', 'f1_score', (['ydev', 'pred'], {'pos_label': '(1)'}), '(ydev, pred, pos_label=1)\n', (1144, 1169), False, 'from sklearn.metrics import f1_score\n'), ((1205, 1257), 'sklearn.metrics.f1_score', 'f1_score', (['ydev', 'pred'], {'labels': 'labels', 'average': '"""micro"""'}), "(ydev, pred, labels=labels, average='micro')\n", (1213, 1257), False, 'from sklearn.metrics import f1_score\n'), ((4729, 4746), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (4737, 4746), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.