code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import torch
import numpy as np
from class_dataset import MyDataSet
from class_model import MyModel
from train_model import train_with_LBFGS
from test_model import test_error, test, plot
from torch import nn
import matplotlib.pyplot as plt
import scipy.stats
import torch.onnx
from matplotlib.pyplot import semilogy
torch.set_default_dtype(torch.float64)
m=5
N_train = int(np.power(2,m+3))
X_train = torch.linspace(0,1,N_train).unsqueeze(1)
y_train = X_train**2
means = X_train.mean(dim=0, keepdim=True)
stds = X_train.std(dim=0, keepdim=True)
X_train_normalized = (X_train - means) / stds
criterion = nn.MSELoss()
train_losses = []
norm_g = []
model = MyModel(1, 3, 1, m, 'xavier')
train_with_LBFGS(model, criterion, X_train, y_train, 1, 0, train_losses, norm_g, record_g = 1, verbose = False)
(mean_err, max_err) = test_error(model, 0, 1, npoints = int(np.power(2,m+2))+1, verbose = False)
maxlogerr = np.log2(max_err).numpy()
# print(norm_g)
norm_g_np = torch.cat(norm_g).numpy()
norm_g_np = norm_g_np/norm_g_np[0]
minid = norm_g_np.size-1
minval = norm_g_np[minid]
print(f"{minid},{minval}")
semilogy(list(range(len(norm_g_np))), norm_g_np, base=2)
# semilogy(list(range(len(norm_g))),[norm_g[i]/norm_g[0] for i in range(len(norm_g))])
plt.ylim((minval/10,10))
plt.annotate(f"({str(minid)}, {str(minval)}):{maxlogerr}",xytext=(0,0),xy=(minid,minval),textcoords='axes pixels')
plt.ylabel("norm(g[i])/norm(g[0])")
plt.xlabel("iteration")
plt.show() | [
"torch.nn.MSELoss",
"matplotlib.pyplot.show",
"train_model.train_with_LBFGS",
"matplotlib.pyplot.ylim",
"numpy.power",
"numpy.log2",
"torch.cat",
"torch.set_default_dtype",
"torch.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"class_model.MyModel"
] | [((331, 369), 'torch.set_default_dtype', 'torch.set_default_dtype', (['torch.float64'], {}), '(torch.float64)\n', (354, 369), False, 'import torch\n'), ((637, 649), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (647, 649), False, 'from torch import nn\n'), ((693, 722), 'class_model.MyModel', 'MyModel', (['(1)', '(3)', '(1)', 'm', '"""xavier"""'], {}), "(1, 3, 1, m, 'xavier')\n", (700, 722), False, 'from class_model import MyModel\n'), ((724, 835), 'train_model.train_with_LBFGS', 'train_with_LBFGS', (['model', 'criterion', 'X_train', 'y_train', '(1)', '(0)', 'train_losses', 'norm_g'], {'record_g': '(1)', 'verbose': '(False)'}), '(model, criterion, X_train, y_train, 1, 0, train_losses,\n norm_g, record_g=1, verbose=False)\n', (740, 835), False, 'from train_model import train_with_LBFGS\n'), ((1296, 1323), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(minval / 10, 10)'], {}), '((minval / 10, 10))\n', (1304, 1323), True, 'import matplotlib.pyplot as plt\n'), ((1438, 1473), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""norm(g[i])/norm(g[0])"""'], {}), "('norm(g[i])/norm(g[0])')\n", (1448, 1473), True, 'import matplotlib.pyplot as plt\n'), ((1475, 1498), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteration"""'], {}), "('iteration')\n", (1485, 1498), True, 'import matplotlib.pyplot as plt\n'), ((1508, 1518), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1516, 1518), True, 'import matplotlib.pyplot as plt\n'), ((396, 414), 'numpy.power', 'np.power', (['(2)', '(m + 3)'], {}), '(2, m + 3)\n', (404, 414), True, 'import numpy as np\n'), ((424, 453), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', 'N_train'], {}), '(0, 1, N_train)\n', (438, 453), False, 'import torch\n'), ((949, 965), 'numpy.log2', 'np.log2', (['max_err'], {}), '(max_err)\n', (956, 965), True, 'import numpy as np\n'), ((1006, 1023), 'torch.cat', 'torch.cat', (['norm_g'], {}), '(norm_g)\n', (1015, 1023), False, 'import torch\n'), ((899, 917), 'numpy.power', 'np.power', (['(2)', '(m + 
2)'], {}), '(2, m + 2)\n', (907, 917), True, 'import numpy as np\n')] |
'''
RegressionTree is tested by comparing its output to that of
sklearn.DecisionTreeRegressor. If equally good splits exist, the output
of these models is non-deterministic. This is mainly a problem when there
is a small amount of data in the nodes, so we test on a larger dataset
and limit the depth.
author: <NAME>
date: November 2017
'''
import unittest
import numpy as np
from .. import RegressionTree
from sklearn.tree import DecisionTreeRegressor
from .. datasets import load_als
class TestRegressionTree(unittest.TestCase):
def test_success(self):
'''Test for the same predictions at several shallow depths'''
xtr, ytr, xte, yte = load_als()
for d in range(1, 4):
with self.subTest(depth=d):
dt = DecisionTreeRegressor(max_depth=d)
dt.fit(xtr, ytr)
pred = dt.predict(xte)
mytree = RegressionTree(max_depth=d)
mytree.fit(xtr, ytr)
mypred = mytree.predict(xte)
self.assertTrue(np.allclose(pred, mypred)) | [
"numpy.allclose",
"sklearn.tree.DecisionTreeRegressor"
] | [((763, 797), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'max_depth': 'd'}), '(max_depth=d)\n', (784, 797), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((1037, 1062), 'numpy.allclose', 'np.allclose', (['pred', 'mypred'], {}), '(pred, mypred)\n', (1048, 1062), True, 'import numpy as np\n')] |
import numpy as np
from ..core.errors import InvalidConfigError
from .base import ExperimentDesign
from .random_design import RandomDesign
from pyDOE import lhs, doe_lhs
class LatinMixedDesign(ExperimentDesign):
"""
Latin experiment design modified to work with non-continuous variables.
Neglect bandit variables (simply do what LatinDesign does).
"""
def __init__(self, space):
if space.has_constraints():
raise InvalidConfigError('Sampling with constraints is not allowed by latin design')
super(LatinMixedDesign, self).__init__(space)
def get_samples(self, init_points_count, iterations = None, verbose = False):
samples = np.empty((init_points_count, self.space.dimensionality))
if iterations is None:
iterations = min(30, 2 * samples.shape[0])
def _lhs_discrete(dimensions, samples=None, iterations=5):
H = None
retries = 10
retry = 0
while H is None and retry < retries: # stop if not at par with expectations but the retries have been exhausted to avoid an infinite loop
maxdist = 0
for iteration in range(iterations):
if verbose is True:
print('[LHD-mv] Iteration:', iteration+1, 'of', iterations, '| Retry:', retry, 'of (max)', retries)
Hcandidate = _lhs_noRandom(dimensions, samples)
d = doe_lhs._pdist(Hcandidate)
if maxdist<np.min(d):
test_minimum_representation = _check_representation(Hcandidate, discrete_values, display_check = verbose)
if test_minimum_representation is True:
maxdist = np.min(d)
H = Hcandidate.copy()
retry += 1
H = _map_to_discrete_values(H, discrete_values)
return H
def _lhs_noRandom(dimensions, samples=None):
interval_starting_values = np.linspace(0, 1, samples, endpoint = False)
H = np.zeros((samples, dimensions))
for j in range(dimensions):
order = np.random.permutation(range(samples))
H[:, j] = interval_starting_values[order]
return H
def _map_to_discrete_values(H, discrete_values):
mappedH = np.full_like(H, 0)
dimensions = H.shape[1]
for dimension in range(dimensions):
numLevels = len(discrete_values[dimension])
levelindices = (H[:, dimension] * numLevels).astype(int)
for i, index in enumerate(levelindices):
mappedH[i, dimension] = discrete_values[dimension][index]
return mappedH
def _check_representation(H, discrete_values, minimum = None, display_check = False):
H = _map_to_discrete_values(H, discrete_values)
samples = H.shape[0]
dimensions = H.shape[1]
test = True
given_minimum = minimum
for dimension in range(dimensions):
levels = len(discrete_values[dimension])
unique, count = np.unique(H[:, dimension], return_counts = True)
if display_check is True:
unique_count_dict = {int(unique):str(count) + ' times' for unique, count in zip(unique, count)}
print('Discrete Dimension #{}'.format(dimension))
print('Dimension levels:', discrete_values[dimension])
print('Samples taken:', H[:, dimension])
print('Instance counts:', unique_count_dict, '\n')
if samples < levels:
continue #skip checking since it is futile
if given_minimum == None:
minimum = np.maximum(np.floor(0.8 * samples / levels), 1)
if (min(count) < minimum) or not np.all(np.isin(discrete_values[dimension], unique)):
test = False
break
return test
if self.space.has_discrete():
discrete_dimensions = self.space.get_discrete_dims()
discrete_values = self.space.get_discrete_values()
discrete_design = _lhs_discrete(len(discrete_dimensions), init_points_count, iterations)
samples[:, discrete_dimensions] = discrete_design
if self.space.has_continuous():
continuous_dimensions = self.space.get_continuous_dims()
continuous_bounds = self.space.get_continuous_bounds()
lower_bound = np.asarray(continuous_bounds)[:,0].reshape(1, len(continuous_bounds))
upper_bound = np.asarray(continuous_bounds)[:,1].reshape(1, len(continuous_bounds))
diff = upper_bound - lower_bound
X_design_aux_c = lhs(len(continuous_dimensions), init_points_count, criterion = 'maximin', iterations = iterations)
I = np.ones((X_design_aux_c.shape[0], 1))
continuous_design = np.dot(I, lower_bound) + X_design_aux_c * np.dot(I, diff)
samples[:, continuous_dimensions] = continuous_design
return samples | [
"numpy.isin",
"numpy.full_like",
"numpy.empty",
"numpy.floor",
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"numpy.min",
"numpy.linspace",
"numpy.dot",
"pyDOE.doe_lhs._pdist",
"numpy.unique"
] | [((719, 775), 'numpy.empty', 'np.empty', (['(init_points_count, self.space.dimensionality)'], {}), '((init_points_count, self.space.dimensionality))\n', (727, 775), True, 'import numpy as np\n'), ((2093, 2135), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'samples'], {'endpoint': '(False)'}), '(0, 1, samples, endpoint=False)\n', (2104, 2135), True, 'import numpy as np\n'), ((2155, 2186), 'numpy.zeros', 'np.zeros', (['(samples, dimensions)'], {}), '((samples, dimensions))\n', (2163, 2186), True, 'import numpy as np\n'), ((2467, 2485), 'numpy.full_like', 'np.full_like', (['H', '(0)'], {}), '(H, 0)\n', (2479, 2485), True, 'import numpy as np\n'), ((5137, 5174), 'numpy.ones', 'np.ones', (['(X_design_aux_c.shape[0], 1)'], {}), '((X_design_aux_c.shape[0], 1))\n', (5144, 5174), True, 'import numpy as np\n'), ((3311, 3357), 'numpy.unique', 'np.unique', (['H[:, dimension]'], {'return_counts': '(True)'}), '(H[:, dimension], return_counts=True)\n', (3320, 3357), True, 'import numpy as np\n'), ((5208, 5230), 'numpy.dot', 'np.dot', (['I', 'lower_bound'], {}), '(I, lower_bound)\n', (5214, 5230), True, 'import numpy as np\n'), ((1512, 1538), 'pyDOE.doe_lhs._pdist', 'doe_lhs._pdist', (['Hcandidate'], {}), '(Hcandidate)\n', (1526, 1538), False, 'from pyDOE import lhs, doe_lhs\n'), ((5250, 5265), 'numpy.dot', 'np.dot', (['I', 'diff'], {}), '(I, diff)\n', (5256, 5265), True, 'import numpy as np\n'), ((1571, 1580), 'numpy.min', 'np.min', (['d'], {}), '(d)\n', (1577, 1580), True, 'import numpy as np\n'), ((3988, 4020), 'numpy.floor', 'np.floor', (['(0.8 * samples / levels)'], {}), '(0.8 * samples / levels)\n', (3996, 4020), True, 'import numpy as np\n'), ((4778, 4807), 'numpy.asarray', 'np.asarray', (['continuous_bounds'], {}), '(continuous_bounds)\n', (4788, 4807), True, 'import numpy as np\n'), ((4875, 4904), 'numpy.asarray', 'np.asarray', (['continuous_bounds'], {}), '(continuous_bounds)\n', (4885, 4904), True, 'import numpy as np\n'), ((1817, 1826), 'numpy.min', 'np.min', 
(['d'], {}), '(d)\n', (1823, 1826), True, 'import numpy as np\n'), ((4082, 4125), 'numpy.isin', 'np.isin', (['discrete_values[dimension]', 'unique'], {}), '(discrete_values[dimension], unique)\n', (4089, 4125), True, 'import numpy as np\n')] |
"""
sonde.formats.greenspan
~~~~~~~~~~~~~~~~~
This module implements the Greenspan format
There are two main greenspan formats also
the files may be in ASCII or Excel (xls) format
The module attempts to autodetect the correct format
"""
from __future__ import absolute_import
import csv
import datetime
import os.path
import pkg_resources
import re
from StringIO import StringIO
import warnings
import xlrd
import numpy as np
import quantities as pq
from .. import sonde
from .. import quantities as sq
from ..timezones import cdt, cst
from .. import util
class BadDatafileError(IOError):
pass
class GreenspanDataset(sonde.BaseSondeDataset):
"""
Dataset object that represents the data contained in a greenspan txt
file. It accepts two optional parameters, `format` overides the
autodetect algorithm that tries to detect the format automatically
`tzinfo` is a datetime.tzinfo object that represents the timezone
of the timestamps in the binary file.
"""
def __init__(self, data_file, tzinfo=None, format_version=None):
self.file_format = 'greenspan'
self.manufacturer = 'greenspan'
self.data_file = data_file
self.format_version = format_version
self.default_tzinfo = tzinfo
super(GreenspanDataset, self).__init__(data_file)
def _read_data(self):
"""
Read the greenspan data file
"""
param_map = {'Temperature': 'water_temperature',
'EC': 'water_electrical_conductivity', # Double Check?
'EC Raw': 'water_electrical_conductivity',
'EC Norm': 'water_specific_conductance',
#'SpCond': 'water_specific_conductance???',
'Salinity': 'seawater_salinity',
#'DO % Sat': 'water_dissolved_oxygen_percent_saturation',
'DO': 'water_dissolved_oxygen_concentration',
'pH': 'water_ph',
'Pressure': 'water_depth_non_vented',
#'Level': 'water_depth_non_vented',
'Batt': 'instrument_battery_voltage',
'Battery': 'instrument_battery_voltage',
'TDS': 'water_total_dissolved_salts',
#'Redox': 'NotImplemented',
}
unit_map = {'deg_C': pq.degC,
'Celcius': pq.degC,
'Celsius': pq.degC,
'deg_F': pq.degF,
'deg_K': pq.degK,
'ft': sq.ftH2O,
'mS/cm': sq.mScm,
'mg/l': sq.mgl,
'm': sq.mH2O,
'Metres': sq.mH2O,
'pH': pq.dimensionless,
'ppm': sq.mgl,
'psu': sq.psu,
'us/cm': sq.uScm,
'uS/cm': sq.uScm,
'volts': pq.volt,
'Volts': pq.volt,
'volt': pq.volt,
}
greenspan_data = GreenspanReader(self.data_file, self.default_tzinfo)
# determine parameters provided and in what units
self.parameters = {}
self.data = {}
for parameter in greenspan_data.parameters:
try:
pcode = param_map[(parameter.name).strip()]
punit = unit_map[(parameter.unit).strip()]
#ignore params that have no data
if not np.all(np.isnan(parameter.data)):
self.parameters[pcode] = sonde.master_parameter_list[pcode]
self.data[param_map[parameter.name]] = parameter.data * \
punit
except KeyError:
warnings.warn('Un-mapped Parameter/Unit Type:\n'
'%s parameter name: "%s"\n'
'%s unit name: "%s"' %
(self.file_format, parameter.name,
self.file_format, parameter.unit),
Warning)
if (greenspan_data.format_version == '2.4.1') or \
(greenspan_data.format_version == '2.3.1'):
self.format_parameters = {
'converter_name': greenspan_data.converter_name,
'source_file_name': greenspan_data.source_file_name,
'target_file_name': greenspan_data.target_file_name,
'site_information': greenspan_data.site_information,
'firmware_version': greenspan_data.firmware_version,
'top_of_case': greenspan_data.top_of_case,
'raingage': greenspan_data.raingage,
'log_file_name': greenspan_data.source_file_name\
.split('\\')[-1].split('.')[0],
}
self.serial_number = greenspan_data.serial_number
self.site_name = greenspan_data.site_name
elif greenspan_data.format_version == 'block':
self.format_parameters = {
'header_lines': greenspan_data.header_lines,
}
self.dates = greenspan_data.dates
class GreenspanReader:
"""
A reader object that opens and reads a Hydrolab txt file.
`data_file` should be either a file path string or a file-like
object. It accepts one optional parameter, `tzinfo` is a
datetime.tzinfo object that represents the timezone of the
timestamps in the txt file.
"""
def __init__(self, data_file, tzinfo=None, format_version=None):
self.default_tzinfo = tzinfo
self.format_version = format_version
self.num_params = 0
self.parameters = []
self.data = {}
self.dates = []
self.xlrd_datemode = 0
if type(data_file) == str:
self.file_name = data_file
elif type(data_file) == file:
self.file_name = data_file.name
self.file_ext = self.file_name.split('.')[-1].lower()
temp_file_path = None
if self.file_ext == 'xls':
temp_file_path, self.xlrd_datemode = util.xls_to_csv(self.file_name)
file_buf = open(temp_file_path, 'rb')
else:
if type(data_file) == str:
file_buf = open(data_file)
elif type(data_file) == file:
file_buf = data_file
try:
if not self.format_version:
self.format_version = self.detect_format_version(file_buf)
self.read_greenspan(file_buf)
except:
raise
finally:
file_buf.close()
if temp_file_path:
os.remove(temp_file_path)
if tzinfo:
self.dates = [i.replace(tzinfo=tzinfo) for i in self.dates]
def detect_format_version(self, data_file):
"""
Reads first several lines of file and tries to autodetect
greenspan file format
expects a file object
"""
if type(data_file) == str:
warnings.warn('Expects File Object', Warning)
else:
fid = data_file
fid.seek(0)
hdr = fid.readline()
#file_ext = data_file.split('.')[-1]
#if file_ext == 'xls':
# self.file_type = 'excel'
# wb = xlrd.open_workbook(data_file)
# sh = wb.sheet_by_index(0)
# hdr = sh.row_values(0)
# del wb
#else:
# self.file_type = 'ascii'
# fid = open(data_file,'r')
# hdr = fid.readline()
# fid.close()
if 'Greenspan data converter .dll Version: 2. 4. 1' in hdr:
fmt = '2.4.1'
elif 'Greenspan data converter .dll Version: 2. 3. 1' in hdr:
fmt = '2.3.1'
elif '# GREENSPAN' in hdr:
fmt = 'block'
else:
fmt = 'unknown'
return fmt
def read_greenspan(self, data_file):
"""
Open and read a Greenspan file.
"""
if type(data_file) == str:
fid = open(data_file, 'r')
else:
fid = data_file
self.read_data(fid)
def read_data(self, fid):
"""
Read header information
"""
fid.seek(0)
if (self.format_version == '2.4.1') or \
(self.format_version == '2.3.1'):
self.converter_name = fid.readline().split(',')[1].rstrip('\r\n')
self.source_file_name = fid.readline().split(',')[2].rstrip('\r\n')
self.target_file_name = fid.readline().split(',')[2].rstrip('\r\n')
#from nose.tools import set_trace; set_trace()
fid.readline() # skip junk
self.site_name = fid.readline().split(',')[1].rstrip(' \r\n')
self.site_information = fid.readline().split(',')[1].rstrip(' \r\n')
self.instrument_type = fid.readline().split(',')[-1].rstrip(' \r\n')
self.serial_number = fid.readline().split(',')[1].rstrip('\x00\r\n')
self.firmware_version = fid.readline().split(',')[1].rstrip('\r\n')
self.top_of_case = fid.readline().split(',')[1].rstrip('\r\n')
self.raingage = fid.readline().split(',')[1].rstrip(' \r\n')
fid.readline()
#column 0,1,2 = 'Data', 'dd/mm/yyyy hh:mm:ss', 'Type/Comment'
#column [3:] = actual data
fields = fid.readline().rstrip('\r\n').split(',')
cols = range(len(fields))[3:]
params = fields[3:]
units = fid.readline().rstrip('\r\n').split(',')[3:]
# skip Channel Number line
fid.readline()
#read data
data_start = fid.tell()
datestr = [line.split(',')[1] for line in fid]
# xlrd reads in dates as floats, but excel isn't too
# careful about datatypes and depending on how the file
# has been handled, there's a chance that the dates have
# already been converted to strings
number_of_unique_dates = len(np.unique(np.array(
[util.possibly_corrupt_xls_date_to_datetime(
dt, self.xlrd_datemode)
for dt in datestr])))
self.dates = np.array((datetime.datetime(1900, 1, 1), ) \
* number_of_unique_dates)
for param, unit in zip(params, units):
param_name, unit_name = param.strip('()_'), unit.strip('()_')
# clean param & unit names
self.parameters.append(Parameter(param_name,
unit_name))
# initialize data dict with empty arrays
self.data[param_name] = np.array((np.nan,) * len(self.dates))
fid.seek(data_start)
data_count = -1
last_date = None
for line in fid:
line_split = line.split(',')
date = util.possibly_corrupt_xls_date_to_datetime(
line_split[1])
if date != last_date:
if last_date and date < last_date:
raise BadDatafileError(
"Non-sequential timestamps found in file '%s'. "
"This shouldn't happen!" % (fid.name,))
data_count += 1
self.dates[data_count] = date
for i, parameter in enumerate(self.parameters, start=3):
val = line_split[i].strip()
if date == last_date:
if np.isnan(self.data[parameter.name][data_count]):
if val != '':
self.data[parameter.name][data_count] = \
float(val)
elif val != '':
warnings.warn("Conflicting values for parameter "
"'%s' on date: %s" % (
parameter.name,
self.dates[data_count]), Warning)
self.data[parameter.name][data_count] = float(val)
else:
if val == '':
self.data[parameter.name][data_count] = np.nan
else:
self.data[parameter.name][data_count] = float(val)
last_date = date
for ii, parameter in enumerate(self.parameters):
self.parameters[ii].data = self.data[parameter.name]
elif self.format_version == 'block':
self.header_lines = []
self.header_lines.append(fid.readline())
buf = fid.readline()
self.header_lines.append(buf)
buf = buf.strip('# \r\n')
fmt = '%Y%m%d%H%M%S'
self.start_time = datetime.datetime.strptime(buf[0:14], fmt)
self.stop_time = datetime.datetime.strptime(buf[14:], fmt)
buf = fid.readline()
while buf:
self.header_lines.append(buf)
if buf[0] == 'T':
break
if buf[0:4] == 'C0 B':
self.num_params += 1
param = 'Batt'
unit = 'volts'
self.parameters.append(Parameter(param.strip('()_'),
unit.strip('()_')))
if buf[0:3] == '# C':
self.num_params += 1
unit, param = buf.split()[2:]
self.parameters.append(Parameter(param.strip('()_'),
unit.strip('()_')))
buf = fid.readline()
fmt = 'T%Y%m%d%H%M%S'
dates = []
data = []
row = None
prev_dt = None
while buf:
if buf[0] == 'T':
dt = datetime.datetime.strptime(buf.strip('\r\n'), fmt)
if dt != prev_dt:
prev_dt = dt
data.append(row)
dates.append(datetime.datetime.strptime(
buf.strip('\r\n'), fmt))
row = np.zeros(self.num_params)
row[:] = np.nan
elif buf[0] == 'D':
col = int(buf[1])
row[col] = float(buf.split()[1])
else:
self.header_lines.append(buf)
buf = fid.readline()
### TODO WORK OUT HOW TO APPEND data.append(row) correctly
#append last record to data
data.append(row)
#remove blank first record and convert to np.array
data = np.array(data[1:])
self.dates = np.array(dates)
for ii in range(self.num_params):
self.parameters[ii].data = data[:, ii]
else:
warnings.warn('Unknown Format Type', Warning)
raise
# if the serial number just contains numbers the cell holding
# it might be formatted as a number, in which case it gets
# read in with a trailing '.0'
if hasattr(self, 'serial_number') and \
self.serial_number.rfind('.0') == len(self.serial_number) - 2:
self.serial_number = self.serial_number[:-2]
class Parameter:
"""
Class that implements the a structure to return a parameters
name, unit and data
"""
def __init__(self, param_name, param_unit):
self.name = param_name
self.unit = param_unit
self.data = []
| [
"numpy.zeros",
"numpy.isnan",
"datetime.datetime",
"datetime.datetime.strptime",
"numpy.array",
"warnings.warn"
] | [((7109, 7154), 'warnings.warn', 'warnings.warn', (['"""Expects File Object"""', 'Warning'], {}), "('Expects File Object', Warning)\n", (7122, 7154), False, 'import warnings\n'), ((13059, 13101), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['buf[0:14]', 'fmt'], {}), '(buf[0:14], fmt)\n', (13085, 13101), False, 'import datetime\n'), ((13131, 13172), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['buf[14:]', 'fmt'], {}), '(buf[14:], fmt)\n', (13157, 13172), False, 'import datetime\n'), ((15007, 15025), 'numpy.array', 'np.array', (['data[1:]'], {}), '(data[1:])\n', (15015, 15025), True, 'import numpy as np\n'), ((15051, 15066), 'numpy.array', 'np.array', (['dates'], {}), '(dates)\n', (15059, 15066), True, 'import numpy as np\n'), ((15195, 15240), 'warnings.warn', 'warnings.warn', (['"""Unknown Format Type"""', 'Warning'], {}), "('Unknown Format Type', Warning)\n", (15208, 15240), False, 'import warnings\n'), ((3818, 4007), 'warnings.warn', 'warnings.warn', (['("""Un-mapped Parameter/Unit Type:\n%s parameter name: "%s"\n%s unit name: "%s\\""""\n % (self.file_format, parameter.name, self.file_format, parameter.unit))', 'Warning'], {}), '(\n """Un-mapped Parameter/Unit Type:\n%s parameter name: "%s"\n%s unit name: "%s\\""""\n % (self.file_format, parameter.name, self.file_format, parameter.unit),\n Warning)\n', (3831, 4007), False, 'import warnings\n'), ((3523, 3547), 'numpy.isnan', 'np.isnan', (['parameter.data'], {}), '(parameter.data)\n', (3531, 3547), True, 'import numpy as np\n'), ((10330, 10359), 'datetime.datetime', 'datetime.datetime', (['(1900)', '(1)', '(1)'], {}), '(1900, 1, 1)\n', (10347, 10359), False, 'import datetime\n'), ((11686, 11733), 'numpy.isnan', 'np.isnan', (['self.data[parameter.name][data_count]'], {}), '(self.data[parameter.name][data_count])\n', (11694, 11733), True, 'import numpy as np\n'), ((14475, 14500), 'numpy.zeros', 'np.zeros', (['self.num_params'], {}), '(self.num_params)\n', (14483, 14500), True, 
'import numpy as np\n'), ((12000, 12123), 'warnings.warn', 'warnings.warn', (['("Conflicting values for parameter \'%s\' on date: %s" % (parameter.name,\n self.dates[data_count]))', 'Warning'], {}), '("Conflicting values for parameter \'%s\' on date: %s" % (\n parameter.name, self.dates[data_count]), Warning)\n', (12013, 12123), False, 'import warnings\n')] |
#!/usr/bin/env python3
from mujoco_py import load_model_from_path, MjSim, MjViewer
from gym_kuka_mujoco.utils.kinematics import forwardKin, forwardKinJacobian, forwardKinSite, forwardKinJacobianSite
import mujoco_py
import matplotlib.pyplot as plt
import numpy as np
def trajectory_gen_joints(qd, tf, n, ti=0, dt=0.002, traj=None):
q_ref = [qd for _ in range(n_timesteps)]
qvel_ref = [np.array([0, 0, 0, 0, 0, 0, 0]) for _ in range(n_timesteps)]
if traj == 'spline':
q_d = q_ref[-1]
time = np.linspace(ti, tf, int((tf-ti)/dt))
q0 = np.zeros(7)
# q_ref = np.zeros((n, 7))
for i, t in enumerate(time):
q_ref[i] = 2*(q0-q_d)/tf**3 * t**3 - 3*(q0-q_d)/tf**2 * t**2 + q0
# qvel_ref[i] = 6*(q0-q_d)/tf**3 * t**2 - 6*(q0-q_d)/tf**2 * t
return q_ref, qvel_ref
def ctrl_independent_joints(error_q, error_v, error_q_int_ant):
dt = 0.002
error_q_int = (error_q + error_q_ant)*dt/2 + error_q_int_ant
error_q_int_ant = error_q_int
return np.dot(Kp, erro_q) + np.dot(Kd, erro_v) + np.dot(Ki, error_q_int)
def get_ctrl_action(controller, error_q, error_v, error_q_int_ant=0):
if controller == 'independent_joints':
u = ctrl_independent_joints(error_q, error_v, error_q_int_ant)
return u, error_q_int_ant
def get_pd_matrices(kp, ki=0, kd=0, lambda_H=0):
Kp = np.eye(7)
for i in range(7):
Kp[i, i] = kp * (7 - i)
Kd = np.eye(7)
for i in range(7):
if i == 6:
Kd[i, i] = Kp[i, i] ** 0.005 * (7 - i)
elif i == 4:
Kd[i, i] = Kp[i, i] ** 0.1 * (10 - i)
else:
Kd[i, i] = Kp[i, i] ** 0.25 * (10 - i)
Ki = np.eye(7)
for i in range(7):
Ki[i, i] = 0.9*Kp[i,i]*Kd[i,i]/lambda_H
return Kp, Kd, Ki
if __name__ == '__main__':
simulate = True
model = load_model_from_path(
"/home/glahr/mujoco_gym/gym-kuka-mujoco/gym_kuka_mujoco/envs/assets/full_kuka_all_joints.xml")
sim = MjSim(model)
if simulate:
viewer = MjViewer(sim)
tf = 1
n_timesteps = 3000
controller_type = 'independent_joints'
if controller_type == 'independent_joints':
kp = 10
lambda_H = 11
error_q_int_ant = 0
Kp, Kd, Ki = get_pd_matrices(kp=kp, ki=0, lambda_H=lambda_H)
k = 1
# qd = np.array([0, 0.461, 0, -0.817, 0, 0.69, 0])
# qd = np.array([0, 0, 0, 0, 0, 0, 0])
qd = np.array([0, 0, 0, -np.pi/2, -np.pi/2, 0, 0])
q_ref, qvel_ref = trajectory_gen_joints(qd, tf, n_timesteps, traj='step')
q_log = np.zeros((n_timesteps, 7))
time_log = np.zeros((n_timesteps, 1))
H = np.zeros(sim.model.nv * sim.model.nv)
eps = 0.05
mass_links = sim.model.body_mass[4:11]
name_body = [sim.model.body_id2name(i) for i in range(4, 11)]
name_tcp = sim.model.site_id2name(1)
name_ft_sensor = sim.model.site_id2name(2)
jac_shape = (3, sim.model.nv)
C = np.zeros(7)
sim.forward()
max_trace = 0
error_q_ant = 0
erro_q = q_ref[0] - sim.data.qpos
qd = np.array([0, 0, 0, 0, 0, 0, 0])
while True:
if (np.absolute(erro_q) < eps).all():
qd = np.array([0, 0, 3/2*np.pi/2, 0, 0, -np.pi/2, 0])
# print("tolerancia " + str(sim.data.time))
qpos = sim.data.qpos
qvel = sim.data.qvel
erro_q = q_ref[k] + qd - qpos
erro_v = qvel_ref[k] - qvel
# inertia matrix H
mujoco_py.functions.mj_fullM(sim.model, H, sim.data.qM)
H_ = H.reshape(sim.model.nv, sim.model.nv)
current_trace = np.trace(H_)
if current_trace > max_trace:
max_trace = current_trace
# internal forces: Coriolis + gravitational
C = sim.data.qfrc_bias
u, error_q_int_ant = get_ctrl_action(controller_type, erro_q, erro_v, error_q_int_ant)
sim.data.ctrl[:] = u
sim.step()
if simulate:
viewer.render()
k += 1
if k >= n_timesteps: # and os.getenv('TESTING') is not None:
break
q_log[k] = qpos
time_log[k] = sim.data.time
error_q_ant = erro_q
plt.plot(time_log, q_log)
plt.plot(time_log, [q_r for q_r in q_ref], 'k--')
plt.legend(['q'+str(i+1) for i in range(7)])
plt.show()
print("biggest trace = ", max_trace)
| [
"mujoco_py.MjSim",
"numpy.absolute",
"numpy.trace",
"mujoco_py.load_model_from_path",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.zeros",
"mujoco_py.functions.mj_fullM",
"numpy.array",
"numpy.dot",
"numpy.eye",
"mujoco_py.MjViewer"
] | [((1366, 1375), 'numpy.eye', 'np.eye', (['(7)'], {}), '(7)\n', (1372, 1375), True, 'import numpy as np\n'), ((1441, 1450), 'numpy.eye', 'np.eye', (['(7)'], {}), '(7)\n', (1447, 1450), True, 'import numpy as np\n'), ((1690, 1699), 'numpy.eye', 'np.eye', (['(7)'], {}), '(7)\n', (1696, 1699), True, 'import numpy as np\n'), ((1856, 1981), 'mujoco_py.load_model_from_path', 'load_model_from_path', (['"""/home/glahr/mujoco_gym/gym-kuka-mujoco/gym_kuka_mujoco/envs/assets/full_kuka_all_joints.xml"""'], {}), "(\n '/home/glahr/mujoco_gym/gym-kuka-mujoco/gym_kuka_mujoco/envs/assets/full_kuka_all_joints.xml'\n )\n", (1876, 1981), False, 'from mujoco_py import load_model_from_path, MjSim, MjViewer\n'), ((1992, 2004), 'mujoco_py.MjSim', 'MjSim', (['model'], {}), '(model)\n', (1997, 2004), False, 'from mujoco_py import load_model_from_path, MjSim, MjViewer\n'), ((2433, 2482), 'numpy.array', 'np.array', (['[0, 0, 0, -np.pi / 2, -np.pi / 2, 0, 0]'], {}), '([0, 0, 0, -np.pi / 2, -np.pi / 2, 0, 0])\n', (2441, 2482), True, 'import numpy as np\n'), ((2569, 2595), 'numpy.zeros', 'np.zeros', (['(n_timesteps, 7)'], {}), '((n_timesteps, 7))\n', (2577, 2595), True, 'import numpy as np\n'), ((2611, 2637), 'numpy.zeros', 'np.zeros', (['(n_timesteps, 1)'], {}), '((n_timesteps, 1))\n', (2619, 2637), True, 'import numpy as np\n'), ((2646, 2683), 'numpy.zeros', 'np.zeros', (['(sim.model.nv * sim.model.nv)'], {}), '(sim.model.nv * sim.model.nv)\n', (2654, 2683), True, 'import numpy as np\n'), ((2940, 2951), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (2948, 2951), True, 'import numpy as np\n'), ((3057, 3088), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0])\n', (3065, 3088), True, 'import numpy as np\n'), ((4140, 4165), 'matplotlib.pyplot.plot', 'plt.plot', (['time_log', 'q_log'], {}), '(time_log, q_log)\n', (4148, 4165), True, 'import matplotlib.pyplot as plt\n'), ((4170, 4219), 'matplotlib.pyplot.plot', 'plt.plot', (['time_log', '[q_r for q_r in 
q_ref]', '"""k--"""'], {}), "(time_log, [q_r for q_r in q_ref], 'k--')\n", (4178, 4219), True, 'import matplotlib.pyplot as plt\n'), ((4273, 4283), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4281, 4283), True, 'import matplotlib.pyplot as plt\n'), ((394, 425), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0])\n', (402, 425), True, 'import numpy as np\n'), ((569, 580), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (577, 580), True, 'import numpy as np\n'), ((1066, 1089), 'numpy.dot', 'np.dot', (['Ki', 'error_q_int'], {}), '(Ki, error_q_int)\n', (1072, 1089), True, 'import numpy as np\n'), ((2040, 2053), 'mujoco_py.MjViewer', 'MjViewer', (['sim'], {}), '(sim)\n', (2048, 2053), False, 'from mujoco_py import load_model_from_path, MjSim, MjViewer\n'), ((3444, 3499), 'mujoco_py.functions.mj_fullM', 'mujoco_py.functions.mj_fullM', (['sim.model', 'H', 'sim.data.qM'], {}), '(sim.model, H, sim.data.qM)\n', (3472, 3499), False, 'import mujoco_py\n'), ((3575, 3587), 'numpy.trace', 'np.trace', (['H_'], {}), '(H_)\n', (3583, 3587), True, 'import numpy as np\n'), ((1024, 1042), 'numpy.dot', 'np.dot', (['Kp', 'erro_q'], {}), '(Kp, erro_q)\n', (1030, 1042), True, 'import numpy as np\n'), ((1045, 1063), 'numpy.dot', 'np.dot', (['Kd', 'erro_v'], {}), '(Kd, erro_v)\n', (1051, 1063), True, 'import numpy as np\n'), ((3170, 3226), 'numpy.array', 'np.array', (['[0, 0, 3 / 2 * np.pi / 2, 0, 0, -np.pi / 2, 0]'], {}), '([0, 0, 3 / 2 * np.pi / 2, 0, 0, -np.pi / 2, 0])\n', (3178, 3226), True, 'import numpy as np\n'), ((3119, 3138), 'numpy.absolute', 'np.absolute', (['erro_q'], {}), '(erro_q)\n', (3130, 3138), True, 'import numpy as np\n')] |
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import math
import scipy.signal
from pylab import *
import cv2
from scipy.signal import convolve2d
import scipy.stats as st
# Test images loaded as int32 arrays; img2.jpg is converted to grayscale
# ('L'), the others keep their native channel layout.
image_house = np.array(Image.open("images/house2.jpg"),dtype='int32')
image_rectangle = np.array(Image.open("images/img2.jpg").convert('L'),dtype='int32')
image_carrelage = np.array(Image.open("images/carrelage_wikipedia.jpg"),dtype='int32')
image_jussieu = np.array(Image.open("images/Jussieu_wikipedia.jpg"),dtype='int32')
def affichage_14(affichages, titres=None):
    # list[Array|Image]*list[str] -> NoneType
    # Displays between 1 and 4 items side by side with their titles;
    # 2-D arrays are shown as grayscale images, 1-D data as curves.
    # parameters:
    # - list of things to display (between 1 and 4)
    # - list of titles (between 1 and 4, one per display) -- optional
    if not type(affichages) == type([]):
        affichages = [affichages]
    if titres is None:
        titres = ['', ] * len(affichages)
    if not type(titres) == type([]):
        titres = [titres]
    nb_affichages = len(affichages)
    if nb_affichages > 4 or nb_affichages < 1:
        raise ValueError('affichage_14 nécéssite 1 à 4 entrées en paramètre')
    if nb_affichages != len(titres):
        raise ValueError('affichage_14 nécéssite autant de titres que d\'affichages')
    courbes = False
    for i in range(0, nb_affichages):
        # subplot code "1 row / nb_affichages columns / position i+1"
        s = plt.subplot(101 + 10 * nb_affichages + i)
        s.set_title(titres[i])
        if len(affichages[i].shape) == 2 and affichages[i].shape[0] > 1 and affichages[i].shape[1] > 1:
            # genuinely 2-D: display as an image
            s.imshow(affichages[i], cmap="gray", interpolation='nearest', aspect='equal')
        else:
            # a single line of values: plot it as a curve
            plt.plot(affichages[i])
            courbes = True
    # enlarge the figure (more so when curves are present) for readability
    agrandissement_h = nb_affichages
    agrandissement_v = nb_affichages * 2 if courbes else nb_affichages
    params = plt.gcf()
    plSize = params.get_size_inches()
    params.set_size_inches((plSize[0] * agrandissement_v, plSize[1] * agrandissement_h))
    plt.show()
def module_affichage(module):
    """Rescale a DFT modulus so it is pleasant to display.

    The dominant peak (typically the DC component) is clipped down to the
    second-largest value, then the dynamic range is compressed.
    """
    display = np.array(module, dtype='float32')
    peak = np.where(display == np.max(display.flatten()))
    display[peak] = 0.0
    # with the peak zeroed out, the new maximum is the second-largest value
    display[peak] = np.max(display.flatten())
    # NOTE(review): sqrt is applied twice here, i.e. this returns the
    # fourth root of the clipped modulus (kept as in the original).
    return np.sqrt(np.sqrt(display))
def gradient(image):
    """ Array -> tuple[Array*Array]

    Return (dx, dy): horizontal and vertical Sobel derivative maps of
    `image`, computed with 'same'-sized 2-D convolutions.
    """
    kernel_x = np.array([[-1, 0, 1],
                         [-2, 0, 2],
                         [-1, 0, 1]])
    kernel_y = kernel_x.T
    return (convolve2d(image, kernel_x, "same"),
            convolve2d(image, kernel_y, "same"))
def noyau_gaussien(sigma):
    """ float -> Array

    Build a normalized 2-D Gaussian kernel covering +/- 3 sigma, as the
    outer product of a 1-D profile obtained from normal-CDF differences.
    """
    n_points = int(6 * sigma + 2)
    grid = np.linspace(-3 * sigma, 3 * sigma, n_points)
    profile = np.diff(st.norm.cdf(grid))
    kernel = np.outer(profile, profile)
    return kernel / kernel.sum()
def harris(image, sigma, kappa):
    """ Array*float*float->Array

    Harris corner response map: R = det(A) - kappa * trace(A)^2, where A
    is the Gaussian-smoothed structure tensor of the image gradients.
    """
    grad_x, grad_y = gradient(image)
    smoothing = noyau_gaussien(sigma)
    # structure tensor entries, smoothed by the Gaussian window
    a11 = convolve2d(grad_x * grad_x, smoothing, "same")
    a22 = convolve2d(grad_y * grad_y, smoothing, "same")
    a12 = convolve2d(grad_x * grad_y, smoothing, "same")
    determinant = a11 * a22 - a12 * a12
    trace = a11 + a22
    return determinant - kappa * trace ** 2
def maxlocal(image_harris, seuil):
    """ Array*float -> Array

    Non-maximum suppression: keep only responses above `seuil` that
    dominate their 3x3 neighbourhood. NOTE: mutates `image_harris` by
    zeroing out each accepted maximum's neighbourhood (as the original did).
    """
    height, width = image_harris.shape
    coin = np.zeros((height, width))
    for row in range(2, height - 2):
        for col in range(2, width - 2):
            centre = image_harris[row, col]
            neighbourhood = image_harris[row - 1:row + 2, col - 1:col + 2]
            if np.amax(neighbourhood) == centre and centre > seuil:
                coin[row, col] = centre
                # suppress the neighbourhood so nearby pixels cannot win too
                image_harris[row - 1:row + 2, col - 1:col + 2] = 0
    return np.array(coin)
def maxlocal_fast(image_harris, seuil):
    """Array*float -> Array

    Vectorised corner extraction: a Laplacian-like high-pass filter
    highlights local peaks, positions below `seuil` in the original map
    are discarded, and the surviving positions keep their Harris value.
    """
    highpass = np.array([[-1, -1, -1],
                         [-1, +8, -1],
                         [-1, -1, -1]])
    response = convolve2d(image_harris, highpass, "same")
    response = np.where(image_harris < seuil, 0, response)
    response = np.where(response > 0, image_harris, response)
    return response
def coord_maxlocal(image_extrema, seuil):
    """ Array*float -> list[list[int,int]]

    Return the coordinates of the strongest `seuil` percent of the
    positive responses in `image_extrema` (strongest last in the sort,
    so the tail of the sorted index list is returned).
    """
    print("max extrema ", np.amax(image_extrema))
    # coordinates of every pixel, ordered by increasing response value
    indecies = np.dstack(
        np.unravel_index(np.argsort(image_extrema.ravel()), (image_extrema.shape[0], image_extrema.shape[1])))
    indecies = indecies.squeeze()
    print("before", indecies.shape[0])
    # drop the leading block of non-positive responses
    for i in range(len(indecies)):
        if (image_extrema[indecies[i][0], indecies[i][1]] > 0):
            print("i", i)
            indecies = indecies[i:]
            break
    print("after", indecies.shape[0])
    truc = int(seuil * len(indecies) / 100)
    print("pourcentage", truc, len(indecies))
    if truc == 0:
        # bug fix: `indecies[-0:]` is the WHOLE array, so a percentage that
        # rounds down to zero used to return every candidate instead of none
        return indecies[:0]
    return indecies[-truc:]
# def test_harris(image, pourcent):
# R = harris(image, 1, 0.06)
# coin = maxlocal(R, 20000000)
# # print(len(coin))
# ind = coord_maxlocal(coin, pourcent)
# ind = np.array(ind)
# # print(ind[-3:])
# plt.figure(figsize=(10, 10))
#
# # plt.subplot(121)
#
# plt.imshow(image, cmap="gray")
#
# plt.axis('tight')
# plt.axis('off')
#
# plt.scatter(ind[:, 1], ind[:, 0], color="red", marker="o")
#
# plt.show()
#
if __name__ == "__main__":
    from time import sleep
    webcam = cv2.VideoCapture(0)
    sleep(2)  # give the camera time to warm up
    try:
        while True:
            check, frame = webcam.read()
            if not check:
                # no frame delivered; stop instead of crashing in cvtColor
                break
            image_gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            R = harris(image_gray, 1, 0.06)
            coin = maxlocal(R, 20000000)
            ind = np.array(coord_maxlocal(coin, 20))
            # draw the detected corners on the live frame
            for i in range(ind.shape[0]):
                cv2.circle(frame, (ind[i, 1], ind[i, 0]), 5, (0, 255, 0), -1)
            # bug fix: show the annotated camera frame, not the unrelated
            # static `image_house` test image
            cv2.imshow("Capturing", frame)
            # waitKey both refreshes the window and offers an exit ('q');
            # the original loop had no waitKey and no way to terminate
            if (cv2.waitKey(1) & 0xFF) == ord('q'):
                break
    finally:
        # bug fix: release the camera ONCE, after the loop -- the original
        # called release() inside `while True`, breaking every later read()
        webcam.release()
        print("Camera off.")
        print("Program ended.")
        cv2.destroyAllWindows()
"matplotlib.pyplot.subplot",
"numpy.outer",
"matplotlib.pyplot.show",
"cv2.circle",
"scipy.signal.convolve2d",
"matplotlib.pyplot.plot",
"cv2.cvtColor",
"cv2.destroyAllWindows",
"numpy.zeros",
"cv2.imshow",
"PIL.Image.open",
"cv2.VideoCapture",
"time.sleep",
"scipy.stats.norm.cdf",
"nump... | [((233, 264), 'PIL.Image.open', 'Image.open', (['"""images/house2.jpg"""'], {}), "('images/house2.jpg')\n", (243, 264), False, 'from PIL import Image\n'), ((394, 438), 'PIL.Image.open', 'Image.open', (['"""images/carrelage_wikipedia.jpg"""'], {}), "('images/carrelage_wikipedia.jpg')\n", (404, 438), False, 'from PIL import Image\n'), ((480, 522), 'PIL.Image.open', 'Image.open', (['"""images/Jussieu_wikipedia.jpg"""'], {}), "('images/Jussieu_wikipedia.jpg')\n", (490, 522), False, 'from PIL import Image\n'), ((2034, 2043), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2041, 2043), True, 'import matplotlib.pyplot as plt\n'), ((2178, 2188), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2186, 2188), True, 'import matplotlib.pyplot as plt\n'), ((2316, 2349), 'numpy.array', 'np.array', (['module'], {'dtype': '"""float32"""'}), "(module, dtype='float32')\n", (2324, 2349), True, 'import numpy as np\n'), ((2621, 2667), 'numpy.array', 'np.array', (['[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]'], {}), '([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])\n', (2629, 2667), True, 'import numpy as np\n'), ((2751, 2784), 'scipy.signal.convolve2d', 'convolve2d', (['image', 'sobelx', '"""same"""'], {}), "(image, sobelx, 'same')\n", (2761, 2784), False, 'from scipy.signal import convolve2d\n'), ((2795, 2828), 'scipy.signal.convolve2d', 'convolve2d', (['image', 'sobely', '"""same"""'], {}), "(image, sobely, 'same')\n", (2805, 2828), False, 'from scipy.signal import convolve2d\n'), ((3036, 3060), 'numpy.outer', 'np.outer', (['kern1d', 'kern1d'], {}), '(kern1d, kern1d)\n', (3044, 3060), True, 'import numpy as np\n'), ((3302, 3336), 'scipy.signal.convolve2d', 'convolve2d', (['(Ix * Ix)', 'gauss', '"""same"""'], {}), "(Ix * Ix, gauss, 'same')\n", (3312, 3336), False, 'from scipy.signal import convolve2d\n'), ((3348, 3382), 'scipy.signal.convolve2d', 'convolve2d', (['(Iy * Iy)', 'gauss', '"""same"""'], {}), "(Iy * Iy, gauss, 'same')\n", (3358, 3382), False, 'from scipy.signal 
import convolve2d\n'), ((3394, 3428), 'scipy.signal.convolve2d', 'convolve2d', (['(Ix * Iy)', 'gauss', '"""same"""'], {}), "(Ix * Iy, gauss, 'same')\n", (3404, 3428), False, 'from scipy.signal import convolve2d\n'), ((3651, 3679), 'numpy.zeros', 'np.zeros', (['image_harris.shape'], {}), '(image_harris.shape)\n', (3659, 3679), True, 'import numpy as np\n'), ((4033, 4047), 'numpy.array', 'np.array', (['coin'], {}), '(coin)\n', (4041, 4047), True, 'import numpy as np\n'), ((4163, 4215), 'numpy.array', 'np.array', (['[[-1, -1, -1], [-1, +8, -1], [-1, -1, -1]]'], {}), '([[-1, -1, -1], [-1, +8, -1], [-1, -1, -1]])\n', (4171, 4215), True, 'import numpy as np\n'), ((4278, 4318), 'scipy.signal.convolve2d', 'convolve2d', (['image_harris', 'filtre', '"""same"""'], {}), "(image_harris, filtre, 'same')\n", (4288, 4318), False, 'from scipy.signal import convolve2d\n'), ((5791, 5810), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (5807, 5810), False, 'import cv2\n'), ((5818, 5826), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (5823, 5826), False, 'from time import sleep\n'), ((6402, 6425), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6423, 6425), False, 'import cv2\n'), ((1450, 1491), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(101 + 10 * nb_affichages + i)'], {}), '(101 + 10 * nb_affichages + i)\n', (1461, 1491), True, 'import matplotlib.pyplot as plt\n'), ((3006, 3020), 'scipy.stats.norm.cdf', 'st.norm.cdf', (['x'], {}), '(x)\n', (3017, 3020), True, 'import scipy.stats as st\n'), ((4636, 4658), 'numpy.amax', 'np.amax', (['image_extrema'], {}), '(image_extrema)\n', (4643, 4658), True, 'import numpy as np\n'), ((5908, 5947), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2GRAY'], {}), '(frame, cv2.COLOR_RGB2GRAY)\n', (5920, 5947), False, 'import cv2\n'), ((6112, 6125), 'numpy.array', 'np.array', (['ind'], {}), '(ind)\n', (6120, 6125), True, 'import numpy as np\n'), ((6275, 6311), 'cv2.imshow', 'cv2.imshow', 
(['"""Capturing"""', 'image_house'], {}), "('Capturing', image_house)\n", (6285, 6311), False, 'import cv2\n'), ((308, 337), 'PIL.Image.open', 'Image.open', (['"""images/img2.jpg"""'], {}), "('images/img2.jpg')\n", (318, 337), False, 'from PIL import Image\n'), ((1856, 1879), 'matplotlib.pyplot.plot', 'plt.plot', (['affichages[i]'], {}), '(affichages[i])\n', (1864, 1879), True, 'import matplotlib.pyplot as plt\n'), ((6202, 6263), 'cv2.circle', 'cv2.circle', (['frame', '(ind[i, 1], ind[i, 0])', '(5)', '(0, 255, 0)', '(-1)'], {}), '(frame, (ind[i, 1], ind[i, 0]), 5, (0, 255, 0), -1)\n', (6212, 6263), False, 'import cv2\n'), ((3807, 3854), 'numpy.amax', 'np.amax', (['image_harris[i - 1:i + 2, j - 1:j + 2]'], {}), '(image_harris[i - 1:i + 2, j - 1:j + 2])\n', (3814, 3854), True, 'import numpy as np\n')] |
import numpy as np
import tensorly as tl
import tensorflow as tf
from tensorly.decomposition import parafac
def cp_decomposition_conv_layer(
        layers,
        rank=None
):
    """Replace a Conv2D layer by its CP (canonical polyadic) decomposition.

    Key arguments:
    layers -- sequence whose first element is the Keras Conv2D layer to
              decompose
    rank -- sequence whose first element is the CP rank to use

    Return:
    (new_layer, new_weights) as produced by from_tensor_to_layers
    """
    rank = rank[0]
    # switch backends BEFORE building the tensor so parafac runs on torch;
    # the original code built everything twice (once per backend) and
    # silently discarded the first, numpy-backend computation.
    tl.set_backend("pytorch")
    layer = layers[0]
    weights = np.asarray(layer.get_weights()[0])
    bias = layer.get_weights()[1] if layer.use_bias else None
    layer_data = tl.tensor(weights)
    # parafac returns (weights, factors); keep the four factor matrices
    vertical, horizontal, first, last = \
        parafac(layer_data, rank=rank, init='random')[1]
    new_layers = from_tensor_to_layers(
        [vertical, horizontal, first, last],
        layer,
        bias)
    return new_layers
def from_tensor_to_layers(
        tensors,
        layers,
        bias
):
    '''
    Build the four Keras layers that replace one Conv2D layer with its
    CP (canonical polyadic) decomposition, together with their weights.
    Key arguments:
    tensors -- [vertical, horizontal, first, last] factor matrices of the
               decomposed convolution kernel
    layers -- the original Conv2D layer being replaced
    bias -- bias vector of the original layer (appended to the weights
            only when layer.use_bias is True)
    Return:
    new_layer -- list of the four replacement layers
    new_weights -- matching weight tensors [F, V, H, L] (+ bias if any)
    '''
    layer = layers
    [vertical, horizontal, first, last] = tensors
    # 1x1 pointwise convolution: input channels -> rank
    pointwise_s_to_r_layer = tf.keras.layers.Conv2D(
        name=layer.name+"p1",
        filters=first.shape[1],
        kernel_size=[1, 1],
        padding="valid",
        use_bias=False, input_shape=layer.input_shape[1:])
    # depthwise convolution acting along the vertical axis only
    depthwise_vertical_layer = tf.keras.layers.DepthwiseConv2D(
        name=layer.name+"v",
        kernel_size=[vertical.shape[0], 1], strides=layer.strides,
        padding=layer.padding, dilation_rate=layer.dilation_rate,
        use_bias=False)
    # depthwise convolution acting along the horizontal axis only
    # NOTE(review): both depthwise layers reuse layer.strides, so the
    # original stride is applied twice in sequence -- confirm intended.
    depthwise_horizontal_layer = tf.keras.layers.DepthwiseConv2D(
        name=layer.name+"h",
        kernel_size=[1, horizontal.shape[0]], strides=layer.strides,
        padding=layer.padding, dilation_rate=layer.dilation_rate,
        use_bias=False)
    # 1x1 pointwise convolution: rank -> output channels; carries the
    # original bias flag and activation
    pointwise_r_to_t_layer = tf.keras.layers.Conv2D(
        name=layer.name+"p2",
        filters=last.shape[0],
        kernel_size=[1, 1], use_bias=layer.use_bias,
        padding="valid",
        activation=layer.activation)
    # This section assigns weights to the layers: each 2-D factor matrix
    # is expanded to a 4-D kernel tensor via expand_dims/transpose.
    H = tf.expand_dims(tf.expand_dims(
        horizontal, axis=0, name=None
    ), axis=2, name=None)
    H = np.transpose(H, (0, 1, 3, 2))
    V = tf.expand_dims(tf.expand_dims(
        vertical, axis=1, name=None
    ), axis=1, name=None)
    V = np.transpose(V, (0, 1, 3, 2))
    F = tf.expand_dims(tf.expand_dims(
        first, axis=0, name=None
    ), axis=0, name=None)
    L = tf.expand_dims(tf.expand_dims(
        np.transpose(last), axis=0, name=None
    ), axis=0, name=None)
    # weight order matches the layer order below (F, V, H, L)
    new_weights = [F, V, H, L]
    if layer.use_bias:
        new_weights.append(bias)
    new_layer = [
        pointwise_s_to_r_layer, depthwise_vertical_layer,
        depthwise_horizontal_layer, pointwise_r_to_t_layer]
    return new_layer, new_weights
| [
"tensorflow.keras.layers.Conv2D",
"numpy.transpose",
"tensorly.set_backend",
"tensorly.decomposition.parafac",
"tensorly.tensor",
"tensorflow.keras.layers.DepthwiseConv2D",
"tensorflow.expand_dims"
] | [((338, 356), 'tensorly.tensor', 'tl.tensor', (['weights'], {}), '(weights)\n', (347, 356), True, 'import tensorly as tl\n'), ((380, 405), 'tensorly.set_backend', 'tl.set_backend', (['"""pytorch"""'], {}), "('pytorch')\n", (394, 405), True, 'import tensorly as tl\n'), ((556, 574), 'tensorly.tensor', 'tl.tensor', (['weights'], {}), '(weights)\n', (565, 574), True, 'import tensorly as tl\n'), ((1209, 1376), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'name': "(layer.name + 'p1')", 'filters': 'first.shape[1]', 'kernel_size': '[1, 1]', 'padding': '"""valid"""', 'use_bias': '(False)', 'input_shape': 'layer.input_shape[1:]'}), "(name=layer.name + 'p1', filters=first.shape[1],\n kernel_size=[1, 1], padding='valid', use_bias=False, input_shape=layer.\n input_shape[1:])\n", (1231, 1376), True, 'import tensorflow as tf\n'), ((1519, 1715), 'tensorflow.keras.layers.DepthwiseConv2D', 'tf.keras.layers.DepthwiseConv2D', ([], {'name': "(layer.name + 'v')", 'kernel_size': '[vertical.shape[0], 1]', 'strides': 'layer.strides', 'padding': 'layer.padding', 'dilation_rate': 'layer.dilation_rate', 'use_bias': '(False)'}), "(name=layer.name + 'v', kernel_size=[\n vertical.shape[0], 1], strides=layer.strides, padding=layer.padding,\n dilation_rate=layer.dilation_rate, use_bias=False)\n", (1550, 1715), True, 'import tensorflow as tf\n'), ((1836, 2033), 'tensorflow.keras.layers.DepthwiseConv2D', 'tf.keras.layers.DepthwiseConv2D', ([], {'name': "(layer.name + 'h')", 'kernel_size': '[1, horizontal.shape[0]]', 'strides': 'layer.strides', 'padding': 'layer.padding', 'dilation_rate': 'layer.dilation_rate', 'use_bias': '(False)'}), "(name=layer.name + 'h', kernel_size=[1,\n horizontal.shape[0]], strides=layer.strides, padding=layer.padding,\n dilation_rate=layer.dilation_rate, use_bias=False)\n", (1867, 2033), True, 'import tensorflow as tf\n'), ((2151, 2319), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'name': "(layer.name + 'p2')", 'filters': 
'last.shape[0]', 'kernel_size': '[1, 1]', 'use_bias': 'layer.use_bias', 'padding': '"""valid"""', 'activation': 'layer.activation'}), "(name=layer.name + 'p2', filters=last.shape[0],\n kernel_size=[1, 1], use_bias=layer.use_bias, padding='valid',\n activation=layer.activation)\n", (2173, 2319), True, 'import tensorflow as tf\n'), ((2593, 2622), 'numpy.transpose', 'np.transpose', (['H', '(0, 1, 3, 2)'], {}), '(H, (0, 1, 3, 2))\n', (2605, 2622), True, 'import numpy as np\n'), ((2733, 2762), 'numpy.transpose', 'np.transpose', (['V', '(0, 1, 3, 2)'], {}), '(V, (0, 1, 3, 2))\n', (2745, 2762), True, 'import numpy as np\n'), ((625, 670), 'tensorly.decomposition.parafac', 'parafac', (['layer_data'], {'rank': 'rank', 'init': '"""random"""'}), "(layer_data, rank=rank, init='random')\n", (632, 670), False, 'from tensorly.decomposition import parafac\n'), ((2505, 2550), 'tensorflow.expand_dims', 'tf.expand_dims', (['horizontal'], {'axis': '(0)', 'name': 'None'}), '(horizontal, axis=0, name=None)\n', (2519, 2550), True, 'import tensorflow as tf\n'), ((2647, 2690), 'tensorflow.expand_dims', 'tf.expand_dims', (['vertical'], {'axis': '(1)', 'name': 'None'}), '(vertical, axis=1, name=None)\n', (2661, 2690), True, 'import tensorflow as tf\n'), ((2787, 2827), 'tensorflow.expand_dims', 'tf.expand_dims', (['first'], {'axis': '(0)', 'name': 'None'}), '(first, axis=0, name=None)\n', (2801, 2827), True, 'import tensorflow as tf\n'), ((2910, 2928), 'numpy.transpose', 'np.transpose', (['last'], {}), '(last)\n', (2922, 2928), True, 'import numpy as np\n')] |
# MIT License
#
# Copyright (c) 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from os.path import join, exists
import random
import cv2
import numpy as np
import numpy.random as npr
from tfmtcnn.utils.BBox import BBox
from tfmtcnn.utils.IoU import IoU
import datasets.constants as datasets_constants
from tfmtcnn.datasets.DatasetFactory import DatasetFactory
from tfmtcnn.datasets.Landmark import rotate
from tfmtcnn.datasets.Landmark import flip
from tfmtcnn.datasets.Landmark import randomShift
from tfmtcnn.datasets.Landmark import randomShiftWithArgument
class LandmarkDataset(object):
    """Generate facial-landmark training samples from an annotated dataset
    (CelebA, see _read): cropped face patches plus 5-point landmark
    coordinates normalized to the crop, written as JPEG files and a
    `landmark.txt` index under the target directory."""
    # target share of landmark samples in the overall generated dataset
    __landmark_ratio = datasets_constants.landmark_ratio
    def __init__(self, name='Landmark'):
        self._name = name
        self._clear()
    def _clear(self):
        """Reset to the 'no dataset read yet' state."""
        self._is_valid = False
        self._data = []
    @classmethod
    def landmark_file_name(cls, target_root_dir):
        """Return the path of the landmark index file under target_root_dir."""
        landmark_file_name = os.path.join(target_root_dir, 'landmark.txt')
        return (landmark_file_name)
    def is_valid(self):
        """True once a landmark dataset has been read successfully."""
        return (self._is_valid)
    def data(self):
        """Return the raw dataset ('images', 'bboxes', 'landmarks' keys)."""
        return (self._data)
    def _read(self, landmark_image_dir, landmark_file_name):
        """Load the annotated landmark dataset; return True on success."""
        self._clear()
        #landmark_dataset = DatasetFactory.landmark_dataset('LFWLandmark')
        landmark_dataset = DatasetFactory.landmark_dataset('CelebADataset')
        if (landmark_dataset.read(landmark_image_dir, landmark_file_name)):
            self._is_valid = True
            self._data = landmark_dataset.data()
        return (self._is_valid)
    def _can_generate_sample(self):
        # random gate: True with probability 1/2 (choices 2 or 3 of 0..3)
        return (random.choice([0, 1, 2, 3]) > 1)
    def generate(self, landmark_image_dir, landmark_file_name,
                 base_number_of_images, target_face_size, target_root_dir):
        """Crop, jitter and augment annotated faces; write samples to disk.
        For every annotated face: keep the ground-truth crop, then draw
        random crops around it and accept those whose IoU with the ground
        truth passes DatasetFactory.positive_IoU(); accepted crops are
        randomly mirrored/rotated. Landmarks are stored normalized to the
        crop. Returns True on success, False if the dataset cannot be read.
        """
        if (not self._read(landmark_image_dir, landmark_file_name)):
            return (False)
        image_file_names = self._data['images']
        ground_truth_boxes = self._data['bboxes']
        ground_truth_landmarks = self._data['landmarks']
        landmark_dir = os.path.join(target_root_dir, 'landmark')
        if (not os.path.exists(landmark_dir)):
            os.makedirs(landmark_dir)
        landmark_file = open(
            LandmarkDataset.landmark_file_name(target_root_dir), 'w')
        generated_landmark_images = 0
        processed_input_images = 0
        total_number_of_input_images = len(image_file_names)
        # per-image quota so the run yields roughly
        # base_number_of_images * __landmark_ratio samples in total
        needed_landmark_samples = int(
            (1.0 * base_number_of_images * LandmarkDataset.__landmark_ratio) /
            total_number_of_input_images)
        needed_landmark_samples = max(1, needed_landmark_samples)
        base_number_of_attempts = 500
        maximum_attempts = base_number_of_attempts * needed_landmark_samples
        for image_path, ground_truth_bounding_box, ground_truth_landmark in zip(
                image_file_names, ground_truth_boxes, ground_truth_landmarks):
            current_face_images = []
            current_face_landmarks = []
            image_path = image_path.replace("\\", '/')
            image = cv2.imread(image_path)
            if (image is None):
                continue
            image_height, image_width, image_channels = image.shape
            gt_box = np.array([
                ground_truth_bounding_box.left, ground_truth_bounding_box.top,
                ground_truth_bounding_box.right,
                ground_truth_bounding_box.bottom
            ])
            # ground-truth face crop, resized to the network input size
            f_face = image[ground_truth_bounding_box.
                           top:ground_truth_bounding_box.bottom +
                           1, ground_truth_bounding_box.
                           left:ground_truth_bounding_box.right + 1]
            f_face = cv2.resize(f_face, (target_face_size, target_face_size))
            landmark = np.zeros((5, 2))
            # landmarks normalized to the ground-truth box
            for index, one in enumerate(ground_truth_landmark):
                landmark_point = (
                    (one[0] - gt_box[0]) / (gt_box[2] - gt_box[0]),
                    (one[1] - gt_box[1]) / (gt_box[3] - gt_box[1]))
                landmark[index] = landmark_point
            current_face_images.append(f_face)
            current_face_landmarks.append(landmark.reshape(10))
            # buffer reused for the randomly jittered crops below
            landmark = np.zeros((5, 2))
            current_landmark_samples = 0
            number_of_attempts = 0
            # keep drawing random crops until the quota or budget is hit
            while ((current_landmark_samples < needed_landmark_samples)
                   and (number_of_attempts < maximum_attempts)):
                number_of_attempts += 1
                x1, y1, x2, y2 = gt_box
                ground_truth_width = x2 - x1 + 1
                ground_truth_height = y2 - y1 + 1
                if (x1 < 0) or (y1 < 0):
                    continue
                # random crop size and center offset around the face
                bounding_box_size = npr.randint(
                    int(min(ground_truth_width, ground_truth_height) * 0.8),
                    np.ceil(
                        1.25 * max(ground_truth_width, ground_truth_height)))
                delta_x = npr.randint(-ground_truth_width,
                                      ground_truth_width) * 0.2
                delta_y = npr.randint(-ground_truth_height,
                                      ground_truth_height) * 0.2
                nx1 = int(
                    max(
                        x1 + ground_truth_width / 2 - bounding_box_size / 2 +
                        delta_x, 0))
                ny1 = int(
                    max(
                        y1 + ground_truth_height / 2 - bounding_box_size / 2 +
                        delta_y, 0))
                nx2 = nx1 + int(bounding_box_size)
                ny2 = ny1 + int(bounding_box_size)
                if ((nx2 > image_width) or (ny2 > image_height)):
                    continue
                crop_box = np.array([nx1, ny1, nx2, ny2])
                cropped_im = image[ny1:ny2 + 1, nx1:nx2 + 1, :]
                resized_im = cv2.resize(cropped_im,
                                        (target_face_size, target_face_size))
                current_IoU = IoU(crop_box, np.expand_dims(gt_box, 0))
                # accept only crops overlapping the ground truth enough
                if (current_IoU >= DatasetFactory.positive_IoU()):
                    for index, one in enumerate(ground_truth_landmark):
                        landmark_point = ((one[0] - nx1) / bounding_box_size,
                                          (one[1] - ny1) / bounding_box_size)
                        landmark[index] = landmark_point
                    current_face_images.append(resized_im)
                    current_face_landmarks.append(landmark.reshape(10))
                    landmark = np.zeros((5, 2))
                    landmark_ = current_face_landmarks[-1].reshape(-1, 2)
                    bounding_box = BBox([nx1, ny1, nx2, ny2])
                    # mirror augmentation (fires with probability 1/2)
                    if (self._can_generate_sample()):
                        face_flipped, landmark_flipped = flip(
                            resized_im, landmark_)
                        face_flipped = cv2.resize(
                            face_flipped, (target_face_size, target_face_size))
                        #c*h*w
                        current_face_images.append(face_flipped)
                        current_face_landmarks.append(
                            landmark_flipped.reshape(10))
                    # rotate by +5 degrees, plus a flip of the rotation
                    if (self._can_generate_sample()):
                        face_rotated_by_alpha, landmark_rotated = rotate(
                            image, bounding_box,
                            bounding_box.reprojectLandmark(landmark_), 5)
                        #landmark_offset
                        landmark_rotated = bounding_box.projectLandmark(
                            landmark_rotated)
                        face_rotated_by_alpha = cv2.resize(
                            face_rotated_by_alpha,
                            (target_face_size, target_face_size))
                        current_face_images.append(face_rotated_by_alpha)
                        current_face_landmarks.append(
                            landmark_rotated.reshape(10))
                        #flip
                        face_flipped, landmark_flipped = flip(
                            face_rotated_by_alpha, landmark_rotated)
                        face_flipped = cv2.resize(
                            face_flipped, (target_face_size, target_face_size))
                        current_face_images.append(face_flipped)
                        current_face_landmarks.append(
                            landmark_flipped.reshape(10))
                    # rotate by -5 degrees, plus a flip of the rotation
                    if (self._can_generate_sample()):
                        face_rotated_by_alpha, landmark_rotated = rotate(
                            image, bounding_box,
                            bounding_box.reprojectLandmark(landmark_), -5)
                        landmark_rotated = bounding_box.projectLandmark(
                            landmark_rotated)
                        face_rotated_by_alpha = cv2.resize(
                            face_rotated_by_alpha,
                            (target_face_size, target_face_size))
                        current_face_images.append(face_rotated_by_alpha)
                        current_face_landmarks.append(
                            landmark_rotated.reshape(10))
                        face_flipped, landmark_flipped = flip(
                            face_rotated_by_alpha, landmark_rotated)
                        face_flipped = cv2.resize(
                            face_flipped, (target_face_size, target_face_size))
                        current_face_images.append(face_flipped)
                        current_face_landmarks.append(
                            landmark_flipped.reshape(10))
                # NOTE(review): re-materialized on every loop iteration;
                # could be hoisted out of the while loop
                current_image_array, current_landmark_array = np.asarray(
                    current_face_images), np.asarray(current_face_landmarks)
                for i in range(len(current_image_array)):
                    # discard samples with landmarks outside (0, 1)
                    if np.sum(np.where(current_landmark_array[i] <= 0, 1,
                                           0)) > 0:
                        continue
                    if np.sum(np.where(current_landmark_array[i] >= 1, 1,
                                           0)) > 0:
                        continue
                    if (current_landmark_samples < needed_landmark_samples):
                        cv2.imwrite(
                            join(landmark_dir,
                                 "%d.jpg" % (generated_landmark_images)),
                            current_image_array[i])
                        landmarks = map(str, list(current_landmark_array[i]))
                        landmark_file.write(
                            join(landmark_dir, "%d.jpg" %
                                 (generated_landmark_images)) + " -2 " +
                            " ".join(landmarks) + "\n")
                        generated_landmark_images += 1
                        current_landmark_samples += 1
                    else:
                        break
            processed_input_images = processed_input_images + 1
            if (processed_input_images % 5000 == 0):
                print('( %s / %s ) number of input images are processed.' %
                      (processed_input_images, total_number_of_input_images))
        landmark_file.close()
        return (True)
| [
"os.makedirs",
"tfmtcnn.datasets.DatasetFactory.DatasetFactory.positive_IoU",
"numpy.asarray",
"os.path.exists",
"random.choice",
"numpy.zeros",
"numpy.expand_dims",
"tfmtcnn.utils.BBox.BBox",
"cv2.imread",
"numpy.random.randint",
"numpy.array",
"tfmtcnn.datasets.Landmark.flip",
"numpy.where... | [((2055, 2100), 'os.path.join', 'os.path.join', (['target_root_dir', '"""landmark.txt"""'], {}), "(target_root_dir, 'landmark.txt')\n", (2067, 2100), False, 'import os\n'), ((2431, 2479), 'tfmtcnn.datasets.DatasetFactory.DatasetFactory.landmark_dataset', 'DatasetFactory.landmark_dataset', (['"""CelebADataset"""'], {}), "('CelebADataset')\n", (2462, 2479), False, 'from tfmtcnn.datasets.DatasetFactory import DatasetFactory\n'), ((3175, 3216), 'os.path.join', 'os.path.join', (['target_root_dir', '"""landmark"""'], {}), "(target_root_dir, 'landmark')\n", (3187, 3216), False, 'import os\n'), ((2725, 2752), 'random.choice', 'random.choice', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (2738, 2752), False, 'import random\n'), ((3233, 3261), 'os.path.exists', 'os.path.exists', (['landmark_dir'], {}), '(landmark_dir)\n', (3247, 3261), False, 'import os\n'), ((3276, 3301), 'os.makedirs', 'os.makedirs', (['landmark_dir'], {}), '(landmark_dir)\n', (3287, 3301), False, 'import os\n'), ((4196, 4218), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (4206, 4218), False, 'import cv2\n'), ((4366, 4510), 'numpy.array', 'np.array', (['[ground_truth_bounding_box.left, ground_truth_bounding_box.top,\n ground_truth_bounding_box.right, ground_truth_bounding_box.bottom]'], {}), '([ground_truth_bounding_box.left, ground_truth_bounding_box.top,\n ground_truth_bounding_box.right, ground_truth_bounding_box.bottom])\n', (4374, 4510), True, 'import numpy as np\n'), ((4836, 4892), 'cv2.resize', 'cv2.resize', (['f_face', '(target_face_size, target_face_size)'], {}), '(f_face, (target_face_size, target_face_size))\n', (4846, 4892), False, 'import cv2\n'), ((4916, 4932), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (4924, 4932), True, 'import numpy as np\n'), ((5353, 5369), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (5361, 5369), True, 'import numpy as np\n'), ((6878, 6908), 'numpy.array', 'np.array', (['[nx1, ny1, nx2, ny2]'], {}), 
'([nx1, ny1, nx2, ny2])\n', (6886, 6908), True, 'import numpy as np\n'), ((7002, 7062), 'cv2.resize', 'cv2.resize', (['cropped_im', '(target_face_size, target_face_size)'], {}), '(cropped_im, (target_face_size, target_face_size))\n', (7012, 7062), False, 'import cv2\n'), ((6095, 6147), 'numpy.random.randint', 'npr.randint', (['(-ground_truth_width)', 'ground_truth_width'], {}), '(-ground_truth_width, ground_truth_width)\n', (6106, 6147), True, 'import numpy.random as npr\n'), ((6218, 6272), 'numpy.random.randint', 'npr.randint', (['(-ground_truth_height)', 'ground_truth_height'], {}), '(-ground_truth_height, ground_truth_height)\n', (6229, 6272), True, 'import numpy.random as npr\n'), ((7148, 7173), 'numpy.expand_dims', 'np.expand_dims', (['gt_box', '(0)'], {}), '(gt_box, 0)\n', (7162, 7173), True, 'import numpy as np\n'), ((7211, 7240), 'tfmtcnn.datasets.DatasetFactory.DatasetFactory.positive_IoU', 'DatasetFactory.positive_IoU', ([], {}), '()\n', (7238, 7240), False, 'from tfmtcnn.datasets.DatasetFactory import DatasetFactory\n'), ((7692, 7708), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (7700, 7708), True, 'import numpy as np\n'), ((7818, 7844), 'tfmtcnn.utils.BBox.BBox', 'BBox', (['[nx1, ny1, nx2, ny2]'], {}), '([nx1, ny1, nx2, ny2])\n', (7822, 7844), False, 'from tfmtcnn.utils.BBox import BBox\n'), ((10947, 10978), 'numpy.asarray', 'np.asarray', (['current_face_images'], {}), '(current_face_images)\n', (10957, 10978), True, 'import numpy as np\n'), ((11001, 11035), 'numpy.asarray', 'np.asarray', (['current_face_landmarks'], {}), '(current_face_landmarks)\n', (11011, 11035), True, 'import numpy as np\n'), ((7985, 8012), 'tfmtcnn.datasets.Landmark.flip', 'flip', (['resized_im', 'landmark_'], {}), '(resized_im, landmark_)\n', (7989, 8012), False, 'from tfmtcnn.datasets.Landmark import flip\n'), ((8081, 8143), 'cv2.resize', 'cv2.resize', (['face_flipped', '(target_face_size, target_face_size)'], {}), '(face_flipped, (target_face_size, 
target_face_size))\n', (8091, 8143), False, 'import cv2\n'), ((8870, 8941), 'cv2.resize', 'cv2.resize', (['face_rotated_by_alpha', '(target_face_size, target_face_size)'], {}), '(face_rotated_by_alpha, (target_face_size, target_face_size))\n', (8880, 8941), False, 'import cv2\n'), ((9274, 9319), 'tfmtcnn.datasets.Landmark.flip', 'flip', (['face_rotated_by_alpha', 'landmark_rotated'], {}), '(face_rotated_by_alpha, landmark_rotated)\n', (9278, 9319), False, 'from tfmtcnn.datasets.Landmark import flip\n'), ((9388, 9450), 'cv2.resize', 'cv2.resize', (['face_flipped', '(target_face_size, target_face_size)'], {}), '(face_flipped, (target_face_size, target_face_size))\n', (9398, 9450), False, 'import cv2\n'), ((10126, 10197), 'cv2.resize', 'cv2.resize', (['face_rotated_by_alpha', '(target_face_size, target_face_size)'], {}), '(face_rotated_by_alpha, (target_face_size, target_face_size))\n', (10136, 10197), False, 'import cv2\n'), ((10500, 10545), 'tfmtcnn.datasets.Landmark.flip', 'flip', (['face_rotated_by_alpha', 'landmark_rotated'], {}), '(face_rotated_by_alpha, landmark_rotated)\n', (10504, 10545), False, 'from tfmtcnn.datasets.Landmark import flip\n'), ((10614, 10676), 'cv2.resize', 'cv2.resize', (['face_flipped', '(target_face_size, target_face_size)'], {}), '(face_flipped, (target_face_size, target_face_size))\n', (10624, 10676), False, 'import cv2\n'), ((11125, 11171), 'numpy.where', 'np.where', (['(current_landmark_array[i] <= 0)', '(1)', '(0)'], {}), '(current_landmark_array[i] <= 0, 1, 0)\n', (11133, 11171), True, 'import numpy as np\n'), ((11281, 11327), 'numpy.where', 'np.where', (['(current_landmark_array[i] >= 1)', '(1)', '(0)'], {}), '(current_landmark_array[i] >= 1, 1, 0)\n', (11289, 11327), True, 'import numpy as np\n'), ((11549, 11605), 'os.path.join', 'join', (['landmark_dir', "('%d.jpg' % generated_landmark_images)"], {}), "(landmark_dir, '%d.jpg' % generated_landmark_images)\n", (11553, 11605), False, 'from os.path import join, exists\n'), ((11845, 
11901), 'os.path.join', 'join', (['landmark_dir', "('%d.jpg' % generated_landmark_images)"], {}), "(landmark_dir, '%d.jpg' % generated_landmark_images)\n", (11849, 11901), False, 'from os.path import join, exists\n')] |
import numpy as np
import pandas
from sklearn.metrics import mean_squared_error
import math
from math import sqrt
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.pylab as plt
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import PIL
from PIL import ImageFilter
import cv2
import itertools
import random
import keras
import imutils
from imutils import paths
import os
from keras import optimizers
from keras.preprocessing.image import img_to_array
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras import callbacks
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D , UpSampling2D ,Conv2DTranspose
from keras import backend as K
# --- Load and filter the raw game data --------------------------------------
# A row is kept only when column 3 holds a usable numeric value (the sheet
# marks missing entries with '-').
ree = 0  # NOTE(review): never referenced again in this script
getter1 = pandas.read_csv('test-nfl-spread.csv')
getter2 = getter1.values
train = np.zeros([1, getter2.shape[1]])  # placeholder first row, dropped below
for i in range(len(getter2)):
    try:
        if not getter2[i, 3] == '-' and not math.isnan(float(getter2[i, 3])):
            train = np.vstack((train, getter2[i, :]))
    except (ValueError, TypeError):
        # float() failed on a non-numeric cell -> skip this row.
        pass
# Column layout used below: 3:21 feature columns, 23 target value,
# 30 betting spread, 31 cover label (+/-1), 1 team names.
X_trn_raw, y_trn = train[1:,3:21], train[1:,23]
spread = train[1:,30]
cover = train[1:,31]
names = train[1:,1]
y_trn = np.array([float(item) for item in y_trn])
y_trn = y_trn.astype(np.float32)
spread = np.array([float(item) for item in spread])
spread = spread.astype(np.float32)
cover = np.array([int(item) for item in cover])
# np.int was removed in NumPy 1.24; the builtin int is the exact equivalent.
cover = cover.astype(int)
# Convert every feature cell to float32.
X_trn_raw = np.array([[float(elm) for elm in row] for row in X_trn_raw])
X_trn_raw = X_trn_raw.astype(np.float32)
# Replace columns 1 and 10 by their differences with columns 0 and 9.
for i in range(len(X_trn_raw)):
    X_trn_raw[i, 1] = X_trn_raw[i, 0] - X_trn_raw[i, 1]
    X_trn_raw[i, 10] = X_trn_raw[i, 9] - X_trn_raw[i, 10]
pandas.DataFrame(X_trn_raw).to_csv("raw.csv")  # dump features for inspection
# Remap cover labels from {-1, 1} to {0, 1}.
for i in range(len(cover)):
    if cover[i] == -1:
        cover[i] = 0
# Truncate every feature to 3 decimal places.
X_trn_raw = (X_trn_raw * 1000).astype(int)
X_trn_raw = (X_trn_raw.astype(np.float32)/1000.0)
# Train/test split by fixed row ranges; the betting spread is appended to the
# feature matrix as one extra column.
X_tst_linear, spread_tst, cover_tst, names_tst = X_trn_raw[544:,:], spread[544:], cover[544:], names[544:]
X_trn_linear, spread_trn, cover_trn, names_trn = X_trn_raw[94:480,:], spread[94:480], cover[94:480], names[94:480]
X_tst_linear = np.concatenate((X_tst_linear, np.array([spread_tst]).T), axis=1)
X_trn_linear = np.concatenate((X_trn_linear, np.array([spread_trn]).T), axis=1)
# Pre-allocate buffers for the large-spread / small-spread partitions.
# They are over-sized here and trimmed to the filled counts further down.
X_tst_linear_large = np.zeros(X_tst_linear.shape)
X_tst_linear_small = np.zeros(X_tst_linear.shape)
spread_tst_large = np.zeros(spread_tst.shape)
spread_tst_small = np.zeros(spread_tst.shape)
cover_tst_large = np.zeros(cover_tst.shape)
cover_tst_small = np.zeros(cover_tst.shape)
# NOTE(review): np.ndarray(shape, str) creates a zero/one-character unicode
# dtype, so team names assigned into these arrays get truncated — confirm
# whether the names are actually used downstream.
names_tst_large = np.ndarray(names_tst.shape, str)
names_tst_small = np.ndarray(names_tst.shape, str)
X_trn_linear_large = np.zeros(X_trn_linear.shape)
X_trn_linear_small = np.zeros(X_trn_linear.shape)
spread_trn_large = np.zeros(spread_trn.shape)
spread_trn_small = np.zeros(spread_trn.shape)
cover_trn_large = np.zeros(cover_trn.shape)
cover_trn_small = np.zeros(cover_trn.shape)
names_trn_large = np.ndarray(names_trn.shape, str)
names_trn_small = np.ndarray(names_trn.shape, str)
# Split the test rows: games with |spread| >= 5 go into the "large" buffers,
# the rest into the "small" buffers.
large_tst_count = 0
small_tst_count = 0
tst_count = 0
while tst_count < len(spread_tst):
    if abs(spread_tst[tst_count]) >= 5:
        X_tst_linear_large[large_tst_count,:] = X_tst_linear[tst_count,:]
        spread_tst_large[large_tst_count] = spread_tst[tst_count]
        cover_tst_large[large_tst_count] = cover_tst[tst_count]
        names_tst_large[large_tst_count] = names_tst[tst_count]
        large_tst_count+=1
    else:
        X_tst_linear_small[small_tst_count,:] = X_tst_linear[tst_count,:]
        spread_tst_small[small_tst_count] = spread_tst[tst_count]
        cover_tst_small[small_tst_count] = cover_tst[tst_count]
        names_tst_small[small_tst_count] = names_tst[tst_count]
        small_tst_count+=1
    tst_count += 1
# Same |spread| >= 5 split for the training rows.
large_trn_count = 0
small_trn_count = 0
trn_count = 0
while trn_count < len(spread_trn):
    if abs(spread_trn[trn_count]) >= 5:
        X_trn_linear_large[large_trn_count,:] = X_trn_linear[trn_count,:]
        spread_trn_large[large_trn_count] = spread_trn[trn_count]
        cover_trn_large[large_trn_count] = cover_trn[trn_count]
        names_trn_large[large_trn_count] = names_trn[trn_count]
        large_trn_count+=1
    else:
        X_trn_linear_small[small_trn_count,:] = X_trn_linear[trn_count,:]
        spread_trn_small[small_trn_count] = spread_trn[trn_count]
        cover_trn_small[small_trn_count] = cover_trn[trn_count]
        names_trn_small[small_trn_count] = names_trn[trn_count]
        small_trn_count+=1
    trn_count += 1
# Trim every partition buffer down to the number of rows actually filled.
X_trn_linear_large = X_trn_linear_large[:large_trn_count,:]
spread_trn_large = spread_trn_large[:large_trn_count]
cover_trn_large = cover_trn_large[:large_trn_count]
names_trn_large = names_trn_large[:large_trn_count]
X_tst_linear_large = X_tst_linear_large[:large_tst_count,:]
spread_tst_large = spread_tst_large[:large_tst_count]
cover_tst_large = cover_tst_large[:large_tst_count]
names_tst_large = names_tst_large[:large_tst_count]
X_trn_linear_small = X_trn_linear_small[:small_trn_count,:]
spread_trn_small = spread_trn_small[:small_trn_count]
cover_trn_small = cover_trn_small[:small_trn_count]
names_trn_small = names_trn_small[:small_trn_count]
X_tst_linear_small = X_tst_linear_small[:small_tst_count,:]
spread_tst_small = spread_tst_small[:small_tst_count]
cover_tst_small = cover_tst_small[:small_tst_count]
names_tst_small = names_tst_small[:small_tst_count]
pandas.DataFrame(X_trn_linear).to_csv("trn_gen.csv")
# Sanity checks: training matrix and labels should be NaN-free and finite.
print(np.any(np.isnan(X_trn_linear)))
print(np.all(np.isfinite(X_trn_linear)))
print(np.any(np.isnan(cover_trn)))
print(np.all(np.isfinite(cover_trn)))
def create_model():
    """Build the fully-connected softmax classifier shared by all three
    cover-prediction models (large spread, small spread, general)."""
    net = Sequential()
    # Hidden stack: (units, dropout rate after the layer or None).
    for units, drop in ((128, 0.5), (1024, 0.5), (512, 0.5), (128, None)):
        net.add(Dense(units, activation='relu'))
        if drop is not None:
            net.add(Dropout(drop))
    # Two-way softmax output (cover / no cover).
    net.add(Dense(2, activation='softmax'))
    return net
# --- Training configuration --------------------------------------------------
batch_size = 128
epochs = 400
# Three identical classifiers: large-spread games, small-spread games, all games.
class_large = create_model()
class_small = create_model()
class_gen = create_model()
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# NOTE(review): MSE loss against a 2-unit softmax while the cover labels are
# a 1-D 0/1 array; to_categorical is imported but never used — confirm the
# labels shouldn't be one-hot encoded.
class_large.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy'])
class_small.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy'])
class_gen.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy'])
# Early stopping + best-checkpoint saving, one pair per model.
early_stopping_large=callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0, mode='min')
filepath_large="top_model_large.h5"
checkpoint_large = callbacks.ModelCheckpoint(filepath_large, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks_list_large = [early_stopping_large,checkpoint_large]
early_stopping_small=callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0, mode='min')
filepath_small="top_model_small.h5"
checkpoint_small = callbacks.ModelCheckpoint(filepath_small, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks_list_small = [early_stopping_small,checkpoint_small]
early_stopping_gen=callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0, mode='min')
filepath_gen="top_model_gen.h5"
checkpoint_gen = callbacks.ModelCheckpoint(filepath_gen, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks_list_gen = [early_stopping_gen,checkpoint_gen]
# Train each model with a 30% validation split.
class_large.fit(X_trn_linear_large, cover_trn_large,
          validation_split=.3,
          shuffle=True,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          callbacks=callbacks_list_large)
class_small.fit(X_trn_linear_small, cover_trn_small,
          validation_split=.3,
          shuffle=True,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          callbacks=callbacks_list_small)
class_gen.fit(X_trn_linear, cover_trn,
        validation_split=.3,
        shuffle=True,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        callbacks=callbacks_list_gen)
| [
"pandas.DataFrame",
"keras.optimizers.SGD",
"pandas.read_csv",
"keras.callbacks.ModelCheckpoint",
"keras.layers.Dropout",
"numpy.zeros",
"numpy.isfinite",
"numpy.isnan",
"keras.callbacks.EarlyStopping",
"keras.layers.Dense",
"numpy.array",
"keras.models.Sequential",
"numpy.ndarray",
"numpy... | [((896, 934), 'pandas.read_csv', 'pandas.read_csv', (['"""test-nfl-spread.csv"""'], {}), "('test-nfl-spread.csv')\n", (911, 934), False, 'import pandas\n'), ((968, 999), 'numpy.zeros', 'np.zeros', (['[1, getter2.shape[1]]'], {}), '([1, getter2.shape[1]])\n', (976, 999), True, 'import numpy as np\n'), ((2436, 2464), 'numpy.zeros', 'np.zeros', (['X_tst_linear.shape'], {}), '(X_tst_linear.shape)\n', (2444, 2464), True, 'import numpy as np\n'), ((2486, 2514), 'numpy.zeros', 'np.zeros', (['X_tst_linear.shape'], {}), '(X_tst_linear.shape)\n', (2494, 2514), True, 'import numpy as np\n'), ((2534, 2560), 'numpy.zeros', 'np.zeros', (['spread_tst.shape'], {}), '(spread_tst.shape)\n', (2542, 2560), True, 'import numpy as np\n'), ((2580, 2606), 'numpy.zeros', 'np.zeros', (['spread_tst.shape'], {}), '(spread_tst.shape)\n', (2588, 2606), True, 'import numpy as np\n'), ((2625, 2650), 'numpy.zeros', 'np.zeros', (['cover_tst.shape'], {}), '(cover_tst.shape)\n', (2633, 2650), True, 'import numpy as np\n'), ((2669, 2694), 'numpy.zeros', 'np.zeros', (['cover_tst.shape'], {}), '(cover_tst.shape)\n', (2677, 2694), True, 'import numpy as np\n'), ((2713, 2745), 'numpy.ndarray', 'np.ndarray', (['names_tst.shape', 'str'], {}), '(names_tst.shape, str)\n', (2723, 2745), True, 'import numpy as np\n'), ((2764, 2796), 'numpy.ndarray', 'np.ndarray', (['names_tst.shape', 'str'], {}), '(names_tst.shape, str)\n', (2774, 2796), True, 'import numpy as np\n'), ((2818, 2846), 'numpy.zeros', 'np.zeros', (['X_trn_linear.shape'], {}), '(X_trn_linear.shape)\n', (2826, 2846), True, 'import numpy as np\n'), ((2868, 2896), 'numpy.zeros', 'np.zeros', (['X_trn_linear.shape'], {}), '(X_trn_linear.shape)\n', (2876, 2896), True, 'import numpy as np\n'), ((2916, 2942), 'numpy.zeros', 'np.zeros', (['spread_trn.shape'], {}), '(spread_trn.shape)\n', (2924, 2942), True, 'import numpy as np\n'), ((2962, 2988), 'numpy.zeros', 'np.zeros', (['spread_trn.shape'], {}), '(spread_trn.shape)\n', (2970, 2988), True, 
'import numpy as np\n'), ((3007, 3032), 'numpy.zeros', 'np.zeros', (['cover_trn.shape'], {}), '(cover_trn.shape)\n', (3015, 3032), True, 'import numpy as np\n'), ((3051, 3076), 'numpy.zeros', 'np.zeros', (['cover_trn.shape'], {}), '(cover_trn.shape)\n', (3059, 3076), True, 'import numpy as np\n'), ((3095, 3127), 'numpy.ndarray', 'np.ndarray', (['names_trn.shape', 'str'], {}), '(names_trn.shape, str)\n', (3105, 3127), True, 'import numpy as np\n'), ((3146, 3178), 'numpy.ndarray', 'np.ndarray', (['names_trn.shape', 'str'], {}), '(names_trn.shape, str)\n', (3156, 3178), True, 'import numpy as np\n'), ((6239, 6304), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.01)', 'decay': '(1e-06)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, decay=1e-06, momentum=0.9, nesterov=True)\n', (6253, 6304), False, 'from keras import optimizers\n'), ((6576, 6672), 'keras.callbacks.EarlyStopping', 'callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': '(10)', 'verbose': '(0)', 'mode': '"""min"""'}), "(monitor='val_loss', min_delta=0, patience=10,\n verbose=0, mode='min')\n", (6599, 6672), False, 'from keras import callbacks\n'), ((6726, 6835), 'keras.callbacks.ModelCheckpoint', 'callbacks.ModelCheckpoint', (['filepath_large'], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(filepath_large, monitor='val_loss', verbose=1,\n save_best_only=True, mode='min')\n", (6751, 6835), False, 'from keras import callbacks\n'), ((6918, 7014), 'keras.callbacks.EarlyStopping', 'callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': '(10)', 'verbose': '(0)', 'mode': '"""min"""'}), "(monitor='val_loss', min_delta=0, patience=10,\n verbose=0, mode='min')\n", (6941, 7014), False, 'from keras import callbacks\n'), ((7068, 7177), 'keras.callbacks.ModelCheckpoint', 'callbacks.ModelCheckpoint', (['filepath_small'], {'monitor': '"""val_loss"""', 'verbose': '(1)', 
'save_best_only': '(True)', 'mode': '"""min"""'}), "(filepath_small, monitor='val_loss', verbose=1,\n save_best_only=True, mode='min')\n", (7093, 7177), False, 'from keras import callbacks\n'), ((7258, 7354), 'keras.callbacks.EarlyStopping', 'callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': '(10)', 'verbose': '(0)', 'mode': '"""min"""'}), "(monitor='val_loss', min_delta=0, patience=10,\n verbose=0, mode='min')\n", (7281, 7354), False, 'from keras import callbacks\n'), ((7402, 7509), 'keras.callbacks.ModelCheckpoint', 'callbacks.ModelCheckpoint', (['filepath_gen'], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(filepath_gen, monitor='val_loss', verbose=1,\n save_best_only=True, mode='min')\n", (7427, 7509), False, 'from keras import callbacks\n'), ((5786, 5798), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (5796, 5798), False, 'from keras.models import Sequential\n'), ((1816, 1843), 'pandas.DataFrame', 'pandas.DataFrame', (['X_trn_raw'], {}), '(X_trn_raw)\n', (1832, 1843), False, 'import pandas\n'), ((5552, 5582), 'pandas.DataFrame', 'pandas.DataFrame', (['X_trn_linear'], {}), '(X_trn_linear)\n', (5568, 5582), False, 'import pandas\n'), ((5618, 5640), 'numpy.isnan', 'np.isnan', (['X_trn_linear'], {}), '(X_trn_linear)\n', (5626, 5640), True, 'import numpy as np\n'), ((5656, 5681), 'numpy.isfinite', 'np.isfinite', (['X_trn_linear'], {}), '(X_trn_linear)\n', (5667, 5681), True, 'import numpy as np\n'), ((5697, 5716), 'numpy.isnan', 'np.isnan', (['cover_trn'], {}), '(cover_trn)\n', (5705, 5716), True, 'import numpy as np\n'), ((5732, 5754), 'numpy.isfinite', 'np.isfinite', (['cover_trn'], {}), '(cover_trn)\n', (5743, 5754), True, 'import numpy as np\n'), ((5812, 5841), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (5817, 5841), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5856, 5868), 
'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (5863, 5868), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5883, 5913), 'keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (5888, 5913), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5928, 5940), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (5935, 5940), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5955, 5984), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (5960, 5984), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5998, 6010), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (6005, 6010), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((6025, 6054), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (6030, 6054), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((6068, 6098), 'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (6073, 6098), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1137, 1170), 'numpy.vstack', 'np.vstack', (['(train, getter2[i, :])'], {}), '((train, getter2[i, :]))\n', (1146, 1170), True, 'import numpy as np\n'), ((2299, 2321), 'numpy.array', 'np.array', (['[spread_tst]'], {}), '([spread_tst])\n', (2307, 2321), True, 'import numpy as np\n'), ((2379, 2401), 'numpy.array', 'np.array', (['[spread_trn]'], {}), '([spread_trn])\n', (2387, 2401), True, 'import numpy as np\n')] |
import os
import torch
import numpy as np
from torch.utils.data import DataLoader
from pytorch_med_imaging.med_img_dataset import ImageDataSet
import tqdm.auto as auto
import pandas as pd
import SimpleITK as sitk
__all__ = ['label_statistics']
def label_statistics(label_dir: str,
                     id_globber: str = "^[0-9]+_(L|R)",
                     num_workers: int = 8,
                     verbose: bool = True,
                     normalized: bool = False) -> pd.DataFrame:
    r"""Compute per-label shape statistics for every label image in a directory.

    For each image the volume (voxel count), roundness and perimeter of every
    label are gathered into one row; 'sum' and 'avg' summary rows are appended.

    Args:
        label_dir: Directory containing the label images.
        id_globber: Regex forwarded to :class:`ImageDataSet` to extract IDs.
        num_workers: Unused here; kept for interface compatibility.
        verbose: Forwarded to :class:`ImageDataSet`.
        normalized: If True, volumes are divided by the total labelled volume
            (label 0, the background, is excluded from the denominator).

    Returns:
        pd.DataFrame: One row per image (index 'Patient ID') plus the
        'sum' and 'avg' rows.
    """
    labelimages = ImageDataSet(label_dir, verbose=verbose, dtype='uint8', idGlobber=id_globber)
    out_df = pd.DataFrame()
    for i, s in enumerate(auto.tqdm(labelimages.data_source_path)):
        s = sitk.ReadImage(s)
        shape_stat = sitk.LabelShapeStatisticsImageFilter()
        shape_stat.Execute(s)
        val = shape_stat.GetLabels()
        # --- Volume: voxel counts per label; label 0 (background) is derived
        # as the remainder of the image size.
        names = [f'Volume_{a}' for a in val]
        data = np.asarray([shape_stat.GetNumberOfPixels(v) for v in val])
        total_counts = np.prod(s.GetSize())
        null_count = total_counts - data.sum()
        names = np.concatenate([['Volume_0'], names])
        data = np.concatenate([[null_count], data])
        if normalized:
            # Normalize against labelled voxels only (exclude background).
            data = data / data[1:].sum()
        # --- Roundness per label.
        names = np.concatenate([names, [f'Roundness_{a}' for a in val]])
        roundness = [shape_stat.GetRoundness(int(a)) for a in val]
        data = np.concatenate([data, np.asarray(roundness)])
        # --- Perimeter per label.
        names = np.concatenate([names, [f'Perimeter_{a}' for a in val]])
        perim = np.asarray([shape_stat.GetPerimeter(a) for a in val])
        data = np.concatenate([data, perim])
        row = pd.Series(data=data.tolist(), index=names, name=labelimages.get_unique_IDs()[i])
        out_df = out_df.join(row, how='outer')
    out_df.fillna(0, inplace=True)
    out_df = out_df.T
    # Append summary rows. DataFrame.append was removed in pandas 2.0, so the
    # rows are built explicitly and concatenated instead.
    dsum = out_df.sum()
    davg = out_df.mean()
    dsum.name = 'sum'
    davg.name = 'avg'
    out_df = pd.concat([out_df, pd.DataFrame([dsum, davg])])
    out_df.index.name = 'Patient ID'
    labelimages._logger.info(f"\n{out_df.to_string()}")
    return out_df
"pandas.DataFrame",
"SimpleITK.ReadImage",
"numpy.asarray",
"tqdm.auto.tqdm",
"pytorch_med_imaging.med_img_dataset.ImageDataSet",
"SimpleITK.LabelShapeStatisticsImageFilter",
"numpy.concatenate"
] | [((590, 667), 'pytorch_med_imaging.med_img_dataset.ImageDataSet', 'ImageDataSet', (['label_dir'], {'verbose': 'verbose', 'dtype': '"""uint8"""', 'idGlobber': 'id_globber'}), "(label_dir, verbose=verbose, dtype='uint8', idGlobber=id_globber)\n", (602, 667), False, 'from pytorch_med_imaging.med_img_dataset import ImageDataSet\n'), ((682, 696), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (694, 696), True, 'import pandas as pd\n'), ((723, 762), 'tqdm.auto.tqdm', 'auto.tqdm', (['labelimages.data_source_path'], {}), '(labelimages.data_source_path)\n', (732, 762), True, 'import tqdm.auto as auto\n'), ((777, 794), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['s'], {}), '(s)\n', (791, 794), True, 'import SimpleITK as sitk\n'), ((816, 854), 'SimpleITK.LabelShapeStatisticsImageFilter', 'sitk.LabelShapeStatisticsImageFilter', ([], {}), '()\n', (852, 854), True, 'import SimpleITK as sitk\n'), ((1246, 1283), 'numpy.concatenate', 'np.concatenate', (["[['Volume_0'], names]"], {}), "([['Volume_0'], names])\n", (1260, 1283), True, 'import numpy as np\n'), ((1299, 1335), 'numpy.concatenate', 'np.concatenate', (['[[null_count], data]'], {}), '([[null_count], data])\n', (1313, 1335), True, 'import numpy as np\n'), ((1527, 1583), 'numpy.concatenate', 'np.concatenate', (["[names, [f'Roundness_{a}' for a in val]]"], {}), "([names, [f'Roundness_{a}' for a in val]])\n", (1541, 1583), True, 'import numpy as np\n'), ((1797, 1853), 'numpy.concatenate', 'np.concatenate', (["[names, [f'Perimeter_{a}' for a in val]]"], {}), "([names, [f'Perimeter_{a}' for a in val]])\n", (1811, 1853), True, 'import numpy as np\n'), ((1939, 1968), 'numpy.concatenate', 'np.concatenate', (['[data, perim]'], {}), '([data, perim])\n', (1953, 1968), True, 'import numpy as np\n'), ((1688, 1709), 'numpy.asarray', 'np.asarray', (['roundness'], {}), '(roundness)\n', (1698, 1709), True, 'import numpy as np\n')] |
import numpy as np
from skimage import io
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
from math import floor
from scipy.signal import convolve2d
from scipy.optimize import linear_sum_assignment
from scipy.stats import linregress
from scipy.spatial import Delaunay
from copy import deepcopy
xmin, xmax, ymin, ymax = 400, 850, 200, 650 # Window containing the droplet
x0, y0 = 600, 400 # A point close to the droplet centre
# Convolution filter used to detect the micro-beads
filtre = np.array([[0., 1, 0, 1, 0],
                   [1.,-1,-1,-1, 1],
                   [0.,-1, 0,-1, 0],
                   [1.,-1,-1,-1, 1],
                   [0., 1, 0, 1, 0]])
# Convolution kernel used to smooth the droplet shape: a centred
# (2*kernelSize+1)^2 box of ones, pre-transformed to Fourier space so each
# image only needs one FFT / multiply / inverse FFT.
kernelSize = 10
noyau = np.zeros((ymax-ymin, xmax-xmin))
dx, dy = (xmax-xmin)//2, (ymax-ymin)//2
noyau[dy-kernelSize:dy+kernelSize+1, dx-kernelSize:dx+kernelSize+1] = np.ones((2*kernelSize+1, 2*kernelSize+1))
noyau = np.fft.ifftshift(noyau)  # recentre the kernel on the origin
noyau = np.fft.fft2(noyau)       # precomputed FFT, reused for every image
class Debit:
    """Processing configuration of one movie, selected by its flow-rate index.

    Per-movie settings live in the parallel class-level lists below and are
    copied onto the instance at construction time.
    """
    _debits = []
    _lastPics = []
    _nbilles = []
    _steps = []
    _droites = []

    def __init__(self, d):
        self.d = d
        # Copy every configured setting for movie d onto the instance.
        cfg = (Debit._debits, Debit._lastPics, Debit._nbilles,
               Debit._steps, Debit._droites)
        (self.debit, self.lastPic, self.nbilles,
         self.step, self.droites) = (lst[d] for lst in cfg)
        # Number of frames processed (one every `step` pictures).
        self.n = self.lastPic // self.step

    def kToID(self, k):
        """Frame index k -> picture number on disk."""
        return self.step * (k + 1)

    def IDToK(self, imageID):
        """Picture number on disk -> frame index."""
        return imageID // self.step - 1

    def setLastPic(self, lP):
        """Change the last usable picture and refresh the frame count."""
        self.lastPic = lP
        self.n = lP // self.step
class Image(Debit):
    """One droplet picture of a movie: raw pixels plus the droplet mask,
    the detected beads, and display helpers."""
    def quatreChiffres(n):
        """Return a 4-character zero-padded string for the integer n (assumed < 10^4)."""
        # NOTE(review): called as Image.quatreChiffres(n) — effectively a
        # static method, but not decorated as such.
        return (4 - len(str(n))) * '0' + str(n)
    def fname(self, imageID):
        """Return the relative path of picture number imageID of the movie being processed."""
        movie = 'compression' + self.debit
        movie = movie + '/' + movie
        return movie + Image.quatreChiffres(imageID) + '.tif'
    def __init__(self, d, imageID):
        super().__init__(d)
        assert imageID <= self.lastPic, "L'image numéro {} n'existe pas pour le débit {}".format(imageID, self.debit)
        self.numpy = io.imread(self.fname(imageID))
        self.imageID = imageID
        # Fields initialised by other methods:
        # goutte  (droplet mask, initGoutte)
        # convol  (bead-filter response, reperageBilles)
        # used    (bead exclusion mask, reperageBilles)
        # billes  (detected bead coordinates, reperageBilles)
    def traceDroite(self, droite):
        """Draw a 2-pixel-thick black line between (x1, y1) and (x2, y2) on the raw image."""
        x1, y1, x2, y2 = droite
        assert x1 != x2, "Les abscisses des deux points doivent être distinces"
        if x2 < x1:
            # Ensure x1 < x2 so the x loop below runs left to right.
            x1, y1, x2, y2 = x2, y2, x1, y1
        m = (y2 - y1) / (x2 - x1)
        for x in range(x1, x2 + 1):
            y = floor(y1 + m * (x - x1))
            self.numpy[y, x] = 0
            self.numpy[y+1, x] = 0
    def initGoutte(self):
        """Determine the droplet shape (boolean mask stored in self.goutte)."""
        # The configured lines close off the droplet outline before filling.
        for droite in self.droites:
            self.traceDroite(droite)
        # Depth-first flood fill from (x0, y0) over bright pixels (> 140)
        # to find the droplet region.
        self.goutte = np.zeros((1024,1024), dtype = bool)
        L = [(x0, y0, 255)] # x, y, min(colours); min(colours) is unused in this version
        A = 0
        while len(L) > 0:
            x, y, b = L.pop()
            if not self.goutte[y,x] and self.numpy[y, x] > 140:
                b1 = min(b, self.numpy[y, x])
                #goutte[y,x] = 0
                self.goutte[y,x] = True
                A += 1
                L.append((x+1,y,b1))
                L.append((x-1,y,b1))
                L.append((x,y-1,b1))
                L.append((x,y+1,b1))
        # FFT-based convolution with the precomputed box kernel to smooth the
        # mask: fills holes and "files down" the droplet edge.
        self.goutte = self.goutte.astype(np.float64)
        gouttefft = np.fft.fft2(self.goutte[ymin:ymax, xmin:xmax])
        #self.goutte = convolve2d(self.goutte, np.ones((0,10)), mode='same')
        gouttefft = gouttefft * noyau
        self.goutte[ymin:ymax, xmin:xmax] = np.fft.ifft2(gouttefft).real
        self._goutte = self.goutte[ymin:ymax, xmin:xmax]
        # Re-threshold the smoothed mask to booleans (310 appears empirically
        # chosen against the 21x21 box kernel — TODO confirm).
        self.goutte = np.where(self.goutte > 310, np.ones((1024,1024), dtype = bool), np.zeros((1024,1024), dtype = bool))
    def reperageBilles(self):
        """Detect the beads inside the droplet using the 'filtre' kernel."""
        dmin = 2 # Minimum distance between two beads
        self.convol = convolve2d(self.numpy[ymin:ymax, xmin:xmax], filtre, mode='same')
        argsorted = np.argsort(self.convol.flatten())
        self.used = np.zeros((1024,1024), dtype = bool) # Prevents detecting beads too close to one another
        self.billes = []
        i = argsorted.size - 1
        # Scan responses from strongest to weakest, keeping maxima that fall
        # inside the droplet and away from already-kept beads.
        while len(self.billes) < self.nbilles:
            j = argsorted[i]
            x, y = xmin + j % (xmax-xmin), ymin + j // (xmax-xmin)
            if self.goutte[y,x] and not self.used[y,x]:
                self.billes.append([x, y])
                for dx in range(-dmin, dmin+1):
                    for dy in range(-dmin, dmin+1):
                        self.used[y+dy,x+dx] = True
            i -= 1
    def initBW(self):
        """Prepare a black-and-white copy of the image that can be 'coloured'."""
        self.numpyBW = deepcopy(self.numpy)
    def initRGB(self):
        """Prepare an RGB copy (grey level replicated on the 3 channels)."""
        self.numpyRGB = np.zeros((1024, 1024, 3), dtype=np.uint8)
        for c in range (3):
            self.numpyRGB[:,:,c] = deepcopy(self.numpy)
    def show(self, im='VO', x = (xmin + xmax)//2, y = (ymin + ymax) // 2, a = (xmax-xmin)//2, save=False):
        """Display a 2a x 2a crop centred on (x, y).

        im selects the variant: 'VO' = original, 'BW' = black-and-white copy,
        'RGB' = colour copy. save=True also writes the figure to out.jpg.
        """
        xmin0, xmax0, ymin0, ymax0 = x - a, x + a, y - a, y + a
        plt.rcParams['figure.figsize'] = [50/2.54, 40/2.54]
        if im == 'VO':
            io.imshow(self.numpy[ymin0:ymax0, xmin0:xmax0])
        elif im == 'BW':
            io.imshow(self.numpyBW[ymin0:ymax0, xmin0:xmax0])
        elif im == 'RGB':
            io.imshow(self.numpyRGB[ymin0:ymax0, xmin0:xmax0, :])
        else:
            print("Invalid argument : im")
            # NOTE(review): bare raise with no active exception -> RuntimeError.
            raise
        if save:
            plt.savefig('out.jpg')
        plt.show()
    def afficheBille(self, x, y, c = [], a = 2, f = 'o'):
        """c = colour, a = box size, f = shape ('.' = filled square, 'o' = hollow square, 'x' = cross)"""
        if len(c) == 0:
            # Default: a random RGB colour per call.
            c = np.random.randint(0, 255, size = (3,))
        for dx in range(-a, a+1):
            for dy in range(-a, a+1):
                if f == '.' or (f=='o' and (abs(dx) == 2 or abs(dy)==2)) or f=='x' and abs(dx) == abs(dy):
                    self.numpyRGB[y+dy, x+dx,:] = c
d = 2       # NOTE(review): not obviously used below — possibly leftover
seuil = 10  # max displacement (px) accepted between frames in the first pass
seuil2 = 4  # max squared deviation from the estimated position (second pass)
def cisaillement(A):
    """Scalar shear measure of the deformation matrix (or batch of matrices) A.

    Computes sqrt(tr(A^T A) / det(A) - 2) over the last two axes; this
    vanishes for a pure rotation (A^T A = I, det A = 1).
    """
    # Transpose only the last two axes, leaving any batch axes in place.
    axes = (*range(A.ndim - 2), A.ndim - 1, A.ndim - 2)
    gram = np.transpose(A, axes=axes) @ A
    ratio = np.trace(gram, axis1=-2, axis2=-1) / np.linalg.det(A)
    return np.sqrt(ratio - 2)
def couleur(niveau, a=1, b=0):  # FIXME: to be removed
    """Map a normalized level to an 8-bit channel value, clamped to [0, 255]."""
    valeur = b + 255 * a * niveau
    if valeur < 0:
        valeur = 0
    return 255 if valeur > 255 else valeur
class Movie(Debit):
    """One movie, associated with a particular flow rate (debit)."""
    def __init__(self, d):
        super().__init__(d)
        # Trajectories and validity flags are stored per version in these
        # dicts: '1' = first tracking pass, '2' = refined pass.
        self.trajets_v = dict()
        self.billesValides_v = dict()
        # Colour scale shared by the deformation displays.
        self.cnorm = colors.Normalize(vmin=0.5, vmax=1.5, clip=True)
        self.cmap = cm.get_cmap(name='Reds')
        # Fields of this class (set by the Movie_* functions below):
        # k
        # trajets, trajets2, trajets_v
        # billesValides, _billesValides, billesValides2, billesValides_v
        # plusProchesVoisins
        # J, detJ
        # cnorm, cmap
def Movie_billes(self, imageID):
    """Return an (nbilles, 2) numpy array of bead [x, y] positions found on image imageID."""
    image = Image(self.d, imageID)
    image.initGoutte()
    image.reperageBilles()
    return np.array(image.billes)
Movie.billes = Movie_billes
def Movie_calculTrajectoires(self):
    """First tracking pass: follow every bead frame by frame (version '1')."""
    # One (x, y) trajectory per bead and per frame:
    self.trajets = np.zeros((self.nbilles, self.n, 2), dtype = np.int32)
    self.trajets_v['1'] = self.trajets
    # Beads without trajectory anomalies:
    self.billesValides = np.ones(self.nbilles, dtype = bool)
    self.billesValides_v['1'] = self.billesValides
    # Compute the trajectories:
    self.trajets[:,0,:] = self.billes(self.step)
    self.k = 1
    while (self.k < self.n):
        self._nextPosition()
        self.k += 1
    self._billesValides = deepcopy(self.billesValides) # Keep a copy of the state before the stain-removal step
Movie.calculTrajectoires = Movie_calculTrajectoires
def Movie_nextPosition(self):
    """Compute the next position of every bead with the Hungarian algorithm (first pass)."""
    print(self.k, end=' ')
    imageID = self.kToID(self.k)
    currentBilles = self.billes(imageID)
    # Cost matrix: real distance when it is plausible (< seuil); a flat
    # penalty of 100 otherwise, and 1 for already-invalid beads so they do
    # not influence the assignment.
    distances = np.zeros((self.nbilles, self.nbilles))
    for i in range(self.nbilles):
        for j in range(self.nbilles):
            if self.billesValides[i]:
                dij = np.linalg.norm(currentBilles[j] - self.trajets[i, self.k - 1,:])
                if dij < seuil:
                    distances[i,j] = dij
                else:
                    distances[i,j] = 100
            else:
                distances[i,j] = 1
    _, assignment = linear_sum_assignment(distances)
    for i in range(self.nbilles):
        self.trajets[i, self.k, :] = currentBilles[assignment[i]]
        # A match farther than the threshold means the bead was lost.
        if distances[i, assignment[i]] >= seuil:
            self.billesValides[i] = False
Movie._nextPosition = Movie_nextPosition
def Movie_suppressionTaches(self):
    """Invalidate beads that did not move between first and last frame -> stains."""
    eps = 10  # minimal total displacement (px) for a real bead
    for i in range(self.nbilles):
        if self.billesValides[i]:
            if self.dist(i, -1, i, 0) < eps:
                self.billesValides[i] = False
Movie.suppressionTaches = Movie_suppressionTaches
def Movie_deplacementMedian(self, k, bille, a=50):
    """Median displacement (frame k -> k+1) of the valid beads inside a square of half-size a around `bille`."""
    x0, y0 = self.coordonnees(bille, self.kToID(k))
    DU = []
    for i in range(self.nbilles):
        x, y = self.coordonnees(i, self.kToID(k))
        if self.billesValides[i] and x >= x0 - a and x <= x0 + a and y >= y0 - a and y <= y0 + a:
            DU.append(self.vecteur(i, k+1, i, k))
    # Component-wise median over the collected displacement vectors.
    return np.median(DU, axis=0)
Movie.deplacementMedian = Movie_deplacementMedian
def Movie_affinerTrajectoires(self):
    """Second tracking pass (version '2'), refined using the first-pass results."""
    self.trajets2 = np.zeros((self.nbilles, self.n, 2), dtype = np.int32)
    self.trajets2[:,0,:] = self.trajets[:,0,:]
    self.trajets_v['2'] = self.trajets2
    self.billesValides2 = deepcopy(self.billesValides)
    self.billesValides_v['2'] = self.billesValides2
    self.k = 1
    while (self.k < self.n):
        self._nextPosition2()
        self.k += 1
Movie.affinerTrajectoires = Movie_affinerTrajectoires
def Movie_nextPosition2(self):
    """Compute the next position of every bead with the Hungarian algorithm (refined pass).

    Unlike the first pass, the cost uses the squared distance to a position
    estimated from the local median displacement of neighbouring beads.
    """
    print(self.k, end=' ')
    imageID = self.kToID(self.k)
    currentBilles = self.billes(imageID)
    distances = np.zeros((self.nbilles, self.nbilles))
    for i in range(self.nbilles):
        if self.billesValides2[i]:
            positionEstimee = self.trajets2[i, self.k - 1,:] + self.deplacementMedian(self.k-1, i)
        for j in range(self.nbilles):
            if self.billesValides2[i]:
                dij = np.linalg.norm(currentBilles[j] - positionEstimee) ** 2
                if dij < seuil2:
                    distances[i,j] = dij
                else:
                    distances[i,j] = 50
            else:
                distances[i,j] = 1
    _, assignment = linear_sum_assignment(distances)
    for i in range(self.nbilles):
        self.trajets2[i, self.k, :] = currentBilles[assignment[i]]
        # A match beyond the threshold marks the bead as lost.
        if distances[i, assignment[i]] >= seuil2:
            self.billesValides2[i] = False
Movie._nextPosition2 = Movie_nextPosition2
def Movie_getPosition(self, i, k, v='1'):
    """(x, y) of bead i at frame k, from trajectory version v."""
    return self.trajets_v[v][i, k, :]
Movie.getPosition = Movie_getPosition
def Movie_coordonnees(self, bille, imageID, v='1'):
    """(x, y) of a bead on picture number imageID (converted to a frame index)."""
    k = self.IDToK(imageID)
    return self.getPosition(bille, k, v=v)
Movie.coordonnees = Movie_coordonnees
def Movie_vecteur(self, i1, k1, i2, k2, v='1'):
    """Vector AB where A is the position of i2 at frame k2 and B the position of i1 at frame k1."""
    return self.trajets_v[v][i1, k1, :] - self.trajets_v[v][i2, k2, :]
Movie.vecteur = Movie_vecteur
def Movie_dist(self, i1, k1, i2, k2, v='1'):
    """Distance between the position of i2 at frame k2 and the position of i1 at frame k1."""
    return np.linalg.norm(self.vecteur(i1, k1, i2, k2, v=v))
Movie.dist = Movie_dist
def Movie_isBilleValide(self, bille, v='1'):
    """Validity flag of a bead in trajectory version v."""
    return self.billesValides_v[v][bille]
Movie.isBilleValide = Movie_isBilleValide
def Movie_calculPlusProchesVoisins(self, v='1'):
    """For each valid bead, store the other valid beads sorted by distance at frame 0."""
    bv = np.where(self.billesValides_v[v])[0]
    self.plusProchesVoisins = np.zeros((self.nbilles, len(bv) - 1), dtype = np.int32)
    for i in bv:
        distances = []
        for j in bv:
            if j!=i:
                distances.append((j, self.dist(i,0,j,0)))
        # Keep only the bead indices, ordered by increasing distance.
        distances = [x[0] for x in sorted(distances, key=lambda x: x[1])]
        self.plusProchesVoisins[i, :] = np.array(distances)
Movie.calculPlusProchesVoisins = Movie_calculPlusProchesVoisins
def Movie_calculMatriceDeformation(self, nvoisins = 4, v='1'):
    """Least-squares 2x2 deformation matrix per bead, from its nvoisins nearest neighbours.

    Solves B.A = C where B (resp. C) holds the neighbour offsets at the first
    (resp. last) frame; stores the matrices in self.J and their determinants
    in self.detJ. Requires calculPlusProchesVoisins to have run.
    """
    bv = np.where(self.billesValides_v[v])[0]
    assert nvoisins <= len(bv) - 1, "Il n'y a pas suffisamment de billes valides"
    self.J = np.zeros((self.nbilles, 2, 2))
    for i in bv:
        ppv = self.plusProchesVoisins[i, :nvoisins]
        ref = i * np.ones(nvoisins, dtype=np.int32)
        BT = self.vecteur(ppv, 0, ref, 0, v=v)
        CT = self.vecteur(ppv, -1, ref, -1, v=v)
        AT, _, _, _ = np.linalg.lstsq(BT, CT, rcond=None)
        self.J[i,:,:] = AT.T
    self.detJ = np.linalg.det(self.J)
Movie.calculMatriceDeformation = Movie_calculMatriceDeformation
def Movie_showCompression(self, imageID, v='1', indic = 'det'):
    """Colour-map the local deformation over image ``imageID``.

    The valid beads at frame ``k`` are Delaunay-triangulated; inside each
    triangle the per-bead matrices ``self.J`` are interpolated with
    barycentric coordinates, and each covered pixel is coloured by
    det(J) (``indic='det'``) or by the shear indicator (``indic='cis'``).

    FIXME: does not yet use the result of the trajectory refinement.
    """
    k = self.IDToK(imageID)
    bv = np.where(self.billesValides_v[v])[0]
    points = self.trajets_v[v][bv,k,:]
    delaunay = Delaunay(points) # Triangulation
    print("Triangulation terminée")
    image = Image(self.d, imageID)
    image.initRGB()
    for simplex in delaunay.simplices:
        coords = points[simplex]
        # T maps barycentric coordinates to homogeneous pixel coordinates
        T = np.ones((3,3))
        T[:2,:] = coords.T
        # only pixels inside the triangle's bounding box are examined
        xmin, ymin = coords.min(axis=0)
        xmax, ymax = coords.max(axis=0)
        for x in range(xmin, xmax+1):
            for y in range(ymin, ymax+1):
                # lmbda: barycentric coordinates of pixel (x, y) in this triangle
                lmbda = np.linalg.solve(T, np.array([x, y, 1]))
                # small tolerance keeps pixels on the triangle edges
                if np.all(lmbda >= -1e-6) and np.all(lmbda <= 1+1e-6):
                    # barycentric interpolation of the three vertex matrices
                    J = sum([lmbda[i] * self.J[bv,:,:][simplex[i]] for i in range(3)])
                    if indic == 'det':
                        image.numpyRGB[y, x, :] = self.cmap(self.cnorm(np.linalg.det(J)), bytes=True)[:3]
                    elif indic == 'cis':
                        image.numpyRGB[y, x, :] = self.cmap(self.cnorm(cisaillement(J)), bytes=True)[:3]
    image.show(im='RGB')
    self._image = image
Movie.showCompression = Movie_showCompression
def Movie_showTrajectoires(self, enVert=(), v='1', random=False, save=False):
    """Draw every valid bead trajectory on the first image.

    Each trajectory is coloured from the compression map ``self.detJ``
    (or with a random colour when ``random`` is True); beads listed in
    ``enVert`` are drawn in green.  The image is shown (and saved when
    ``save`` is True) and kept in ``self._image``.

    Fix: the default of ``enVert`` was a mutable ``[]``; an empty tuple
    is equivalent here since the argument is only tested with ``in``.
    """
    bv = np.where(self.billesValides_v[v])[0]
    image = Image(self.d, self.step)
    image.initRGB()
    detJnorm = self.cnorm(2 - self.detJ)
    c = self.cmap(detJnorm, bytes=True)
    for point in bv:
        # colour priority: green override > random > compression colour map
        c1 = c[point, :3]
        if random:
            c1 = np.random.randint(0, 255, size=3)
        if point in enVert:
            c1 = [0, 255, 0]
        for imageID in range(self.step, self.lastPic, self.step):
            x, y = self.coordonnees(point, imageID, v=v)
            image.afficheBille(x, y, c=c1, a=1, f='.')
    image.show(im='RGB', save=save)
    self._image = image
Movie.showTrajectoires = Movie_showTrajectoires
def Movie_showTrajectoire(self, bille, start = 'min', stop = 'max', s=1, nvoisins='all', static=False, v='1'):
    """Step through the movie frames, marking bead ``bille`` and its neighbours.

    Requires ``self.plusProchesVoisins`` to be initialised, unless
    ``nvoisins`` is None (then the raw images are shown without markers).
    ``start``/``stop`` are image IDs ('min'/'max' for the full range),
    ``s`` is the frame stride, and ``static`` keeps the viewing window
    fixed instead of following the bead.
    """
    bV = self.billesValides_v[v]
    # Interpretation of the range arguments (image IDs -> frame indices)
    if start == 'min':
        start = 0
    else:
        start = self.IDToK(start)
    if stop == 'max':
        stop = self.n
    else:
        stop = self.IDToK(stop)
    if nvoisins == 'all':
        # Bug fix: the previous code counted the *non-zero indices* of the
        # valid beads (np.count_nonzero(np.where(bV)[0])), which wrongly
        # excluded bead 0 from the count; count the valid beads directly.
        nvoisins = np.count_nonzero(bV) - 1
    if nvoisins is not None:
        voisinnage = [bille] + list(self.plusProchesVoisins[bille, :nvoisins])
    else:
        voisinnage = []  # raw images, no bead markers
    x0, y0 = self.coordonnees(bille, self.step, v=v)
    c = np.random.randint(0, 255, size=(self.nbilles, 3))
    # Walk through the images
    for k in range(start, stop, s):
        imageID = self.kToID(k)
        image = Image(self.d, imageID)
        image.initRGB()
        for point in range(self.nbilles):
            # ('nvoisins == "all"' was dead code here: nvoisins is rebound
            # to an int above, and in that case voisinnage already contains
            # every valid bead, so the membership test alone is equivalent.
            if point in voisinnage and bV[point]:
                x, y = self.coordonnees(point, imageID, v=v)
                image.afficheBille(x, y, c=c[point])
        if not static:  # when static is True the window stays still, otherwise it follows the bead
            x0, y0 = self.coordonnees(bille, imageID, v=v)
        print(imageID)
        image.show(im='RGB', x=x0, y=y0, a=50)
Movie.showTrajectoire = Movie_showTrajectoire
def Movie_showSumOfDistances(self):
    """Plot, for each frame after the first, the summed displacement of
    the valid beads relative to frame 0."""
    total = np.zeros(self.n - 1)
    for frame in range(1, self.n):
        displacement = np.linalg.norm(self.trajets[:, frame, :] - self.trajets[:, 0, :], axis=1)
        total[frame - 1] = np.sum(displacement, where=self.billesValides)
    plt.plot(range(2 * self.step, self.lastPic + 1, self.step), total)
    plt.show()
Movie.showSumOfDistances = Movie_showSumOfDistances | [
"numpy.trace",
"matplotlib.cm.get_cmap",
"numpy.ones",
"numpy.random.randint",
"numpy.linalg.norm",
"numpy.fft.ifft2",
"scipy.spatial.Delaunay",
"numpy.fft.ifftshift",
"scipy.signal.convolve2d",
"matplotlib.colors.Normalize",
"numpy.transpose",
"skimage.io.imshow",
"numpy.linalg.det",
"cop... | [((544, 662), 'numpy.array', 'np.array', (['[[0.0, 1, 0, 1, 0], [1.0, -1, -1, -1, 1], [0.0, -1, 0, -1, 0], [1.0, -1, -1,\n -1, 1], [0.0, 1, 0, 1, 0]]'], {}), '([[0.0, 1, 0, 1, 0], [1.0, -1, -1, -1, 1], [0.0, -1, 0, -1, 0], [\n 1.0, -1, -1, -1, 1], [0.0, 1, 0, 1, 0]])\n', (552, 662), True, 'import numpy as np\n'), ((810, 846), 'numpy.zeros', 'np.zeros', (['(ymax - ymin, xmax - xmin)'], {}), '((ymax - ymin, xmax - xmin))\n', (818, 846), True, 'import numpy as np\n'), ((953, 1002), 'numpy.ones', 'np.ones', (['(2 * kernelSize + 1, 2 * kernelSize + 1)'], {}), '((2 * kernelSize + 1, 2 * kernelSize + 1))\n', (960, 1002), True, 'import numpy as np\n'), ((1003, 1026), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['noyau'], {}), '(noyau)\n', (1019, 1026), True, 'import numpy as np\n'), ((1035, 1053), 'numpy.fft.fft2', 'np.fft.fft2', (['noyau'], {}), '(noyau)\n', (1046, 1053), True, 'import numpy as np\n'), ((7221, 7233), 'numpy.sqrt', 'np.sqrt', (['res'], {}), '(res)\n', (7228, 7233), True, 'import numpy as np\n'), ((8146, 8168), 'numpy.array', 'np.array', (['image.billes'], {}), '(image.billes)\n', (8154, 8168), True, 'import numpy as np\n'), ((8306, 8357), 'numpy.zeros', 'np.zeros', (['(self.nbilles, self.n, 2)'], {'dtype': 'np.int32'}), '((self.nbilles, self.n, 2), dtype=np.int32)\n', (8314, 8357), True, 'import numpy as np\n'), ((8469, 8502), 'numpy.ones', 'np.ones', (['self.nbilles'], {'dtype': 'bool'}), '(self.nbilles, dtype=bool)\n', (8476, 8502), True, 'import numpy as np\n'), ((8756, 8784), 'copy.deepcopy', 'deepcopy', (['self.billesValides'], {}), '(self.billesValides)\n', (8764, 8784), False, 'from copy import deepcopy\n'), ((9139, 9177), 'numpy.zeros', 'np.zeros', (['(self.nbilles, self.nbilles)'], {}), '((self.nbilles, self.nbilles))\n', (9147, 9177), True, 'import numpy as np\n'), ((9586, 9618), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['distances'], {}), '(distances)\n', (9607, 9618), False, 'from scipy.optimize import 
linear_sum_assignment\n'), ((10609, 10630), 'numpy.median', 'np.median', (['DU'], {'axis': '(0)'}), '(DU, axis=0)\n', (10618, 10630), True, 'import numpy as np\n'), ((10828, 10879), 'numpy.zeros', 'np.zeros', (['(self.nbilles, self.n, 2)'], {'dtype': 'np.int32'}), '((self.nbilles, self.n, 2), dtype=np.int32)\n', (10836, 10879), True, 'import numpy as np\n'), ((10995, 11023), 'copy.deepcopy', 'deepcopy', (['self.billesValides'], {}), '(self.billesValides)\n', (11003, 11023), False, 'from copy import deepcopy\n'), ((11470, 11508), 'numpy.zeros', 'np.zeros', (['(self.nbilles, self.nbilles)'], {}), '((self.nbilles, self.nbilles))\n', (11478, 11508), True, 'import numpy as np\n'), ((12043, 12075), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['distances'], {}), '(distances)\n', (12064, 12075), False, 'from scipy.optimize import linear_sum_assignment\n'), ((14021, 14051), 'numpy.zeros', 'np.zeros', (['(self.nbilles, 2, 2)'], {}), '((self.nbilles, 2, 2))\n', (14029, 14051), True, 'import numpy as np\n'), ((14374, 14395), 'numpy.linalg.det', 'np.linalg.det', (['self.J'], {}), '(self.J)\n', (14387, 14395), True, 'import numpy as np\n'), ((14737, 14753), 'scipy.spatial.Delaunay', 'Delaunay', (['points'], {}), '(points)\n', (14745, 14753), False, 'from scipy.spatial import Delaunay\n'), ((17287, 17336), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': '(self.nbilles, 3)'}), '(0, 255, size=(self.nbilles, 3))\n', (17304, 17336), True, 'import numpy as np\n'), ((18054, 18074), 'numpy.zeros', 'np.zeros', (['(self.n - 1)'], {}), '(self.n - 1)\n', (18062, 18074), True, 'import numpy as np\n'), ((18326, 18336), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18334, 18336), True, 'import matplotlib.pyplot as plt\n'), ((3469, 3503), 'numpy.zeros', 'np.zeros', (['(1024, 1024)'], {'dtype': 'bool'}), '((1024, 1024), dtype=bool)\n', (3477, 3503), True, 'import numpy as np\n'), ((4202, 4248), 'numpy.fft.fft2', 'np.fft.fft2', 
(['self.goutte[ymin:ymax, xmin:xmax]'], {}), '(self.goutte[ymin:ymax, xmin:xmax])\n', (4213, 4248), True, 'import numpy as np\n'), ((4811, 4876), 'scipy.signal.convolve2d', 'convolve2d', (['self.numpy[ymin:ymax, xmin:xmax]', 'filtre'], {'mode': '"""same"""'}), "(self.numpy[ymin:ymax, xmin:xmax], filtre, mode='same')\n", (4821, 4876), False, 'from scipy.signal import convolve2d\n'), ((4952, 4986), 'numpy.zeros', 'np.zeros', (['(1024, 1024)'], {'dtype': 'bool'}), '((1024, 1024), dtype=bool)\n', (4960, 4986), True, 'import numpy as np\n'), ((5660, 5680), 'copy.deepcopy', 'deepcopy', (['self.numpy'], {}), '(self.numpy)\n', (5668, 5680), False, 'from copy import deepcopy\n'), ((5733, 5774), 'numpy.zeros', 'np.zeros', (['(1024, 1024, 3)'], {'dtype': 'np.uint8'}), '((1024, 1024, 3), dtype=np.uint8)\n', (5741, 5774), True, 'import numpy as np\n'), ((6492, 6502), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6500, 6502), True, 'import matplotlib.pyplot as plt\n'), ((7113, 7142), 'numpy.transpose', 'np.transpose', (['A'], {'axes': 'trOrder'}), '(A, axes=trOrder)\n', (7125, 7142), True, 'import numpy as np\n'), ((7613, 7660), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': '(0.5)', 'vmax': '(1.5)', 'clip': '(True)'}), '(vmin=0.5, vmax=1.5, clip=True)\n', (7629, 7660), True, 'import matplotlib.colors as colors\n'), ((7681, 7705), 'matplotlib.cm.get_cmap', 'cm.get_cmap', ([], {'name': '"""Reds"""'}), "(name='Reds')\n", (7692, 7705), True, 'import matplotlib.cm as cm\n'), ((13335, 13368), 'numpy.where', 'np.where', (['self.billesValides_v[v]'], {}), '(self.billesValides_v[v])\n', (13343, 13368), True, 'import numpy as np\n'), ((13713, 13732), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (13721, 13732), True, 'import numpy as np\n'), ((13888, 13921), 'numpy.where', 'np.where', (['self.billesValides_v[v]'], {}), '(self.billesValides_v[v])\n', (13896, 13921), True, 'import numpy as np\n'), ((14292, 14327), 'numpy.linalg.lstsq', 
'np.linalg.lstsq', (['BT', 'CT'], {'rcond': 'None'}), '(BT, CT, rcond=None)\n', (14307, 14327), True, 'import numpy as np\n'), ((14646, 14679), 'numpy.where', 'np.where', (['self.billesValides_v[v]'], {}), '(self.billesValides_v[v])\n', (14654, 14679), True, 'import numpy as np\n'), ((14945, 14960), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (14952, 14960), True, 'import numpy as np\n'), ((15846, 15879), 'numpy.where', 'np.where', (['self.billesValides_v[v]'], {}), '(self.billesValides_v[v])\n', (15854, 15879), True, 'import numpy as np\n'), ((3106, 3130), 'math.floor', 'floor', (['(y1 + m * (x - x1))'], {}), '(y1 + m * (x - x1))\n', (3111, 3130), False, 'from math import floor\n'), ((4408, 4431), 'numpy.fft.ifft2', 'np.fft.ifft2', (['gouttefft'], {}), '(gouttefft)\n', (4420, 4431), True, 'import numpy as np\n'), ((4544, 4577), 'numpy.ones', 'np.ones', (['(1024, 1024)'], {'dtype': 'bool'}), '((1024, 1024), dtype=bool)\n', (4551, 4577), True, 'import numpy as np\n'), ((4580, 4614), 'numpy.zeros', 'np.zeros', (['(1024, 1024)'], {'dtype': 'bool'}), '((1024, 1024), dtype=bool)\n', (4588, 4614), True, 'import numpy as np\n'), ((5838, 5858), 'copy.deepcopy', 'deepcopy', (['self.numpy'], {}), '(self.numpy)\n', (5846, 5858), False, 'from copy import deepcopy\n'), ((6130, 6177), 'skimage.io.imshow', 'io.imshow', (['self.numpy[ymin0:ymax0, xmin0:xmax0]'], {}), '(self.numpy[ymin0:ymax0, xmin0:xmax0])\n', (6139, 6177), False, 'from skimage import io\n'), ((6461, 6483), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""out.jpg"""'], {}), "('out.jpg')\n", (6472, 6483), True, 'import matplotlib.pyplot as plt\n'), ((6712, 6748), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': '(3,)'}), '(0, 255, size=(3,))\n', (6729, 6748), True, 'import numpy as np\n'), ((7157, 7188), 'numpy.trace', 'np.trace', (['B'], {'axis1': '(-2)', 'axis2': '(-1)'}), '(B, axis1=-2, axis2=-1)\n', (7165, 7188), True, 'import numpy as np\n'), ((7189, 7205), 
'numpy.linalg.det', 'np.linalg.det', (['A'], {}), '(A)\n', (7202, 7205), True, 'import numpy as np\n'), ((14140, 14173), 'numpy.ones', 'np.ones', (['nvoisins'], {'dtype': 'np.int32'}), '(nvoisins, dtype=np.int32)\n', (14147, 14173), True, 'import numpy as np\n'), ((16103, 16136), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': '(3)'}), '(0, 255, size=3)\n', (16120, 16136), True, 'import numpy as np\n'), ((18143, 18212), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.trajets[:, k, :] - self.trajets[:, 0, :])'], {'axis': '(1)'}), '(self.trajets[:, k, :] - self.trajets[:, 0, :], axis=1)\n', (18157, 18212), True, 'import numpy as np\n'), ((6215, 6264), 'skimage.io.imshow', 'io.imshow', (['self.numpyBW[ymin0:ymax0, xmin0:xmax0]'], {}), '(self.numpyBW[ymin0:ymax0, xmin0:xmax0])\n', (6224, 6264), False, 'from skimage import io\n'), ((9311, 9376), 'numpy.linalg.norm', 'np.linalg.norm', (['(currentBilles[j] - self.trajets[i, self.k - 1, :])'], {}), '(currentBilles[j] - self.trajets[i, self.k - 1, :])\n', (9325, 9376), True, 'import numpy as np\n'), ((6303, 6356), 'skimage.io.imshow', 'io.imshow', (['self.numpyRGB[ymin0:ymax0, xmin0:xmax0, :]'], {}), '(self.numpyRGB[ymin0:ymax0, xmin0:xmax0, :])\n', (6312, 6356), False, 'from skimage import io\n'), ((11777, 11827), 'numpy.linalg.norm', 'np.linalg.norm', (['(currentBilles[j] - positionEstimee)'], {}), '(currentBilles[j] - positionEstimee)\n', (11791, 11827), True, 'import numpy as np\n'), ((15190, 15209), 'numpy.array', 'np.array', (['[x, y, 1]'], {}), '([x, y, 1])\n', (15198, 15209), True, 'import numpy as np\n'), ((15230, 15253), 'numpy.all', 'np.all', (['(lmbda >= -1e-06)'], {}), '(lmbda >= -1e-06)\n', (15236, 15253), True, 'import numpy as np\n'), ((15257, 15283), 'numpy.all', 'np.all', (['(lmbda <= 1 + 1e-06)'], {}), '(lmbda <= 1 + 1e-06)\n', (15263, 15283), True, 'import numpy as np\n'), ((17002, 17014), 'numpy.where', 'np.where', (['bV'], {}), '(bV)\n', (17010, 17014), True, 'import numpy as 
np\n'), ((15479, 15495), 'numpy.linalg.det', 'np.linalg.det', (['J'], {}), '(J)\n', (15492, 15495), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import utils
from numpy import array, shape, arange
def plot_best_fit(weights, data_matrix, label_matrix):
    """Scatter the two labelled classes and draw the decision boundary.

    ``data_matrix`` rows are [1, x1, x2] samples, ``label_matrix`` holds
    their 0/1 labels, and ``weights`` are the boundary coefficients
    (w0 + w1*x1 + w2*x2 = 0 is the line that is drawn).
    """
    data_array = array(data_matrix)
    sample_count = shape(data_array)[0]
    # collect (x1, x2) coordinates per class: key 1 -> positives, 0 -> negatives
    class_points = {1: ([], []), 0: ([], [])}
    for i in range(sample_count):
        key = 1 if int(label_matrix[i]) == 1 else 0
        xs, ys = class_points[key]
        xs.append(data_array[i, 1])
        ys.append(data_array[i, 2])
    ax = plt.figure().add_subplot(111)
    ax.scatter(class_points[1][0], class_points[1][1], s=30, c='red', marker='s')
    ax.scatter(class_points[0][0], class_points[0][1], s=30, c='green')
    boundary_x = arange(-3.0, 3.0, 0.1)
    boundary_y = (-weights[0] - weights[1] * boundary_x) / weights[2]
    ax.plot(boundary_x, boundary_y)
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.show()
| [
"matplotlib.pyplot.show",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((188, 206), 'numpy.array', 'array', (['data_matrix'], {}), '(data_matrix)\n', (193, 206), False, 'from numpy import array, shape, arange\n'), ((588, 600), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (598, 600), True, 'import matplotlib.pyplot as plt\n'), ((760, 782), 'numpy.arange', 'arange', (['(-3.0)', '(3.0)', '(0.1)'], {}), '(-3.0, 3.0, 0.1)\n', (766, 782), False, 'from numpy import array, shape, arange\n'), ((855, 871), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X1"""'], {}), "('X1')\n", (865, 871), True, 'import matplotlib.pyplot as plt\n'), ((876, 892), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""X2"""'], {}), "('X2')\n", (886, 892), True, 'import matplotlib.pyplot as plt\n'), ((897, 907), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (905, 907), True, 'import matplotlib.pyplot as plt\n'), ((215, 232), 'numpy.shape', 'shape', (['data_array'], {}), '(data_array)\n', (220, 232), False, 'from numpy import array, shape, arange\n')] |
import unittest
import numpy as np
from mmag.unit_cell.fields import field_dipole
from mmag.unit_cell.cell import Cuboid
_acc = 0.0001  # absolute tolerance for comparing cuboid and dipole field components
# Following tests compare the magnetic field created by a dipole to the field generated by the uniformly
# charged sheets. If far enough, the resulting fields must be similar
class TestCubicCell(unittest.TestCase):
    """Far from the cell, the field of a uniformly magnetised unit cuboid
    must match the field of an equivalent point dipole.

    The repeated assertion boilerplate of the original tests is factored
    into ``_assert_matches_dipole``; the probed (m, r) pairs are unchanged.
    """

    def setUp(self):
        position = np.array([0.0, 0.0, 0.0], dtype=np.float64)
        delta = np.array([1.0, 1.0, 1.0], dtype=np.float64)
        self.init_obj = Cuboid(position, delta)

    def _assert_matches_dipole(self, m, r):
        """Assert cuboid and dipole fields agree in magnitude and per component."""
        dipole_field = field_dipole(self.init_obj.position, m, r)
        box_field = self.init_obj.unit_field(r, m)
        magnitude_dipole = np.sqrt(dipole_field.dot(dipole_field))
        magnitude_box = np.sqrt(box_field.dot(box_field))
        self.assertTrue(np.fabs(magnitude_box - magnitude_dipole) < _acc)
        for axis in range(3):
            self.assertTrue(np.fabs(dipole_field[axis] - box_field[axis]) < _acc)

    def test_magnetic_field_1(self):
        # Magnetisation along Z, probed on the Z axis
        m = np.array([0.0, 0.0, 1.0], dtype=np.float64)
        r = np.array([0.0, 0.0, 3.0], dtype=np.float64)
        self._assert_matches_dipole(m, r)

    def test_magnetic_field_2(self):
        # By symmetry, magnetisation along X probed on the X axis must give
        # the same component as magnetisation along Z probed on the Z axis
        m = np.array([0.0, 0.0, 1.0], dtype=np.float64)
        r = np.array([0.0, 0.0, 3.0], dtype=np.float64)
        box_field_1 = self.init_obj.unit_field(r, m)
        m = np.array([1.0, 0.0, 0.0], dtype=np.float64)
        r = np.array([3.0, 0.0, 0.0], dtype=np.float64)
        box_field_2 = self.init_obj.unit_field(r, m)
        self.assertTrue(np.fabs(box_field_1[2] - box_field_2[0]) < _acc)

    def test_magnetic_field_3(self):
        # Off-axis probe position
        m = np.array([0.0, 0.0, 1.0], dtype=np.float64)
        r = np.array([4.0, 1.0, 2.0], dtype=np.float64)
        self._assert_matches_dipole(m, r)

    def test_magnetic_field_4(self):
        # Another off-axis probe position
        m = np.array([0.0, 0.0, 1.0], dtype=np.float64)
        r = np.array([1.0, 3.0, 2.0], dtype=np.float64)
        self._assert_matches_dipole(m, r)
class TestCubicCellDifferentDirections(unittest.TestCase):
    """Cuboid-vs-dipole comparison with the magnetisation along the cube
    diagonal, at several probe positions.

    The three tests shared the same diagonal magnetisation literal and the
    same assertion boilerplate; both are factored into helpers.
    """

    def setUp(self):
        position = np.array([0.0, 0.0, 0.0], dtype=np.float64)
        delta = np.array([1.0, 1.0, 1.0], dtype=np.float64)
        self.init_obj = Cuboid(position, delta)

    @staticmethod
    def _diagonal_m():
        """Unit magnetisation along the (1, 1, 1) diagonal."""
        return np.array(
            [1.0 / np.sqrt(3.0), 1.0 / np.sqrt(3.0), 1.0 / np.sqrt(3.0)],
            dtype=np.float64,
        )

    def _assert_matches_dipole(self, m, r):
        """Assert cuboid and dipole fields agree in magnitude and per component."""
        dipole_field = field_dipole(self.init_obj.position, m, r)
        box_field = self.init_obj.unit_field(r, m)
        magnitude_dipole = np.sqrt(dipole_field.dot(dipole_field))
        magnitude_box = np.sqrt(box_field.dot(box_field))
        self.assertTrue(np.fabs(magnitude_box - magnitude_dipole) < _acc)
        for axis in range(3):
            self.assertTrue(np.fabs(dipole_field[axis] - box_field[axis]) < _acc)

    def test_magnetic_field_1(self):
        # Probe on the Z axis
        r = np.array([0.0, 0.0, 3.0], dtype=np.float64)
        self._assert_matches_dipole(self._diagonal_m(), r)

    def test_magnetic_field_2(self):
        # Probe off axis, close to the cell
        r = np.array([1.0, 0.0, 2.0], dtype=np.float64)
        self._assert_matches_dipole(self._diagonal_m(), r)

    def test_magnetic_field_3(self):
        # Probe off axis, farther away
        r = np.array([1.0, 5.0, 2.0], dtype=np.float64)
        self._assert_matches_dipole(self._diagonal_m(), r)
class TestCubicCellNotOrigin(unittest.TestCase):
    """Cuboid-vs-dipole comparison for a cell centred away from the origin.

    Fix: the original test left two debug ``print()`` calls in the body;
    they are removed.  Probed (m, r) values are unchanged.
    """

    def setUp(self):
        position = np.array([1.0, 2.0, 3.0], dtype=np.float64)
        delta = np.array([1.0, 1.0, 1.0], dtype=np.float64)
        self.init_obj = Cuboid(position, delta)

    def test_magnetic_field_1(self):
        # Magnetisation along the cube diagonal, probed away from the cell
        m = np.array(
            [1.0 / np.sqrt(3.0), 1.0 / np.sqrt(3.0), 1.0 / np.sqrt(3.0)],
            dtype=np.float64,
        )
        r = np.array([7.0, 8.0, 7.0], dtype=np.float64)
        dipole_field = field_dipole(self.init_obj.position, m, r)
        box_field = self.init_obj.unit_field(r, m)
        magnitude_dipole = np.sqrt(dipole_field.dot(dipole_field))
        magnitude_box = np.sqrt(box_field.dot(box_field))
        self.assertTrue(np.fabs(magnitude_box - magnitude_dipole) < _acc)
        for axis in range(3):
            self.assertTrue(np.fabs(dipole_field[axis] - box_field[axis]) < _acc)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"mmag.unit_cell.cell.Cuboid",
"mmag.unit_cell.fields.field_dipole",
"numpy.array",
"numpy.fabs",
"numpy.sqrt"
] | [((7094, 7109), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7107, 7109), False, 'import unittest\n'), ((392, 435), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {'dtype': 'np.float64'}), '([0.0, 0.0, 0.0], dtype=np.float64)\n', (400, 435), True, 'import numpy as np\n'), ((452, 495), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {'dtype': 'np.float64'}), '([1.0, 1.0, 1.0], dtype=np.float64)\n', (460, 495), True, 'import numpy as np\n'), ((520, 543), 'mmag.unit_cell.cell.Cuboid', 'Cuboid', (['position', 'delta'], {}), '(position, delta)\n', (526, 543), False, 'from mmag.unit_cell.cell import Cuboid\n'), ((645, 688), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {'dtype': 'np.float64'}), '([0.0, 0.0, 1.0], dtype=np.float64)\n', (653, 688), True, 'import numpy as np\n'), ((701, 744), 'numpy.array', 'np.array', (['[0.0, 0.0, 3.0]'], {'dtype': 'np.float64'}), '([0.0, 0.0, 3.0], dtype=np.float64)\n', (709, 744), True, 'import numpy as np\n'), ((769, 811), 'mmag.unit_cell.fields.field_dipole', 'field_dipole', (['self.init_obj.position', 'm', 'r'], {}), '(self.init_obj.position, m, r)\n', (781, 811), False, 'from mmag.unit_cell.fields import field_dipole\n'), ((1409, 1452), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {'dtype': 'np.float64'}), '([0.0, 0.0, 1.0], dtype=np.float64)\n', (1417, 1452), True, 'import numpy as np\n'), ((1465, 1508), 'numpy.array', 'np.array', (['[0.0, 0.0, 3.0]'], {'dtype': 'np.float64'}), '([0.0, 0.0, 3.0], dtype=np.float64)\n', (1473, 1508), True, 'import numpy as np\n'), ((1576, 1619), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {'dtype': 'np.float64'}), '([1.0, 0.0, 0.0], dtype=np.float64)\n', (1584, 1619), True, 'import numpy as np\n'), ((1632, 1675), 'numpy.array', 'np.array', (['[3.0, 0.0, 0.0]'], {'dtype': 'np.float64'}), '([3.0, 0.0, 0.0], dtype=np.float64)\n', (1640, 1675), True, 'import numpy as np\n'), ((1894, 1937), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {'dtype': 'np.float64'}), '([0.0, 0.0, 
1.0], dtype=np.float64)\n', (1902, 1937), True, 'import numpy as np\n'), ((1950, 1993), 'numpy.array', 'np.array', (['[4.0, 1.0, 2.0]'], {'dtype': 'np.float64'}), '([4.0, 1.0, 2.0], dtype=np.float64)\n', (1958, 1993), True, 'import numpy as np\n'), ((2018, 2060), 'mmag.unit_cell.fields.field_dipole', 'field_dipole', (['self.init_obj.position', 'm', 'r'], {}), '(self.init_obj.position, m, r)\n', (2030, 2060), False, 'from mmag.unit_cell.fields import field_dipole\n'), ((2579, 2622), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {'dtype': 'np.float64'}), '([0.0, 0.0, 1.0], dtype=np.float64)\n', (2587, 2622), True, 'import numpy as np\n'), ((2635, 2678), 'numpy.array', 'np.array', (['[1.0, 3.0, 2.0]'], {'dtype': 'np.float64'}), '([1.0, 3.0, 2.0], dtype=np.float64)\n', (2643, 2678), True, 'import numpy as np\n'), ((2703, 2745), 'mmag.unit_cell.fields.field_dipole', 'field_dipole', (['self.init_obj.position', 'm', 'r'], {}), '(self.init_obj.position, m, r)\n', (2715, 2745), False, 'from mmag.unit_cell.fields import field_dipole\n'), ((3315, 3358), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {'dtype': 'np.float64'}), '([0.0, 0.0, 0.0], dtype=np.float64)\n', (3323, 3358), True, 'import numpy as np\n'), ((3375, 3418), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {'dtype': 'np.float64'}), '([1.0, 1.0, 1.0], dtype=np.float64)\n', (3383, 3418), True, 'import numpy as np\n'), ((3443, 3466), 'mmag.unit_cell.cell.Cuboid', 'Cuboid', (['position', 'delta'], {}), '(position, delta)\n', (3449, 3466), False, 'from mmag.unit_cell.cell import Cuboid\n'), ((3704, 3747), 'numpy.array', 'np.array', (['[0.0, 0.0, 3.0]'], {'dtype': 'np.float64'}), '([0.0, 0.0, 3.0], dtype=np.float64)\n', (3712, 3747), True, 'import numpy as np\n'), ((3772, 3814), 'mmag.unit_cell.fields.field_dipole', 'field_dipole', (['self.init_obj.position', 'm', 'r'], {}), '(self.init_obj.position, m, r)\n', (3784, 3814), False, 'from mmag.unit_cell.fields import field_dipole\n'), ((4520, 4563), 
'numpy.array', 'np.array', (['[1.0, 0.0, 2.0]'], {'dtype': 'np.float64'}), '([1.0, 0.0, 2.0], dtype=np.float64)\n', (4528, 4563), True, 'import numpy as np\n'), ((4588, 4630), 'mmag.unit_cell.fields.field_dipole', 'field_dipole', (['self.init_obj.position', 'm', 'r'], {}), '(self.init_obj.position, m, r)\n', (4600, 4630), False, 'from mmag.unit_cell.fields import field_dipole\n'), ((5336, 5379), 'numpy.array', 'np.array', (['[1.0, 5.0, 2.0]'], {'dtype': 'np.float64'}), '([1.0, 5.0, 2.0], dtype=np.float64)\n', (5344, 5379), True, 'import numpy as np\n'), ((5404, 5446), 'mmag.unit_cell.fields.field_dipole', 'field_dipole', (['self.init_obj.position', 'm', 'r'], {}), '(self.init_obj.position, m, r)\n', (5416, 5446), False, 'from mmag.unit_cell.fields import field_dipole\n'), ((6006, 6049), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]'], {'dtype': 'np.float64'}), '([1.0, 2.0, 3.0], dtype=np.float64)\n', (6014, 6049), True, 'import numpy as np\n'), ((6066, 6109), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {'dtype': 'np.float64'}), '([1.0, 1.0, 1.0], dtype=np.float64)\n', (6074, 6109), True, 'import numpy as np\n'), ((6134, 6157), 'mmag.unit_cell.cell.Cuboid', 'Cuboid', (['position', 'delta'], {}), '(position, delta)\n', (6140, 6157), False, 'from mmag.unit_cell.cell import Cuboid\n'), ((6395, 6438), 'numpy.array', 'np.array', (['[7.0, 8.0, 7.0]'], {'dtype': 'np.float64'}), '([7.0, 8.0, 7.0], dtype=np.float64)\n', (6403, 6438), True, 'import numpy as np\n'), ((6463, 6505), 'mmag.unit_cell.fields.field_dipole', 'field_dipole', (['self.init_obj.position', 'm', 'r'], {}), '(self.init_obj.position, m, r)\n', (6475, 6505), False, 'from mmag.unit_cell.fields import field_dipole\n'), ((1014, 1055), 'numpy.fabs', 'np.fabs', (['(magnitude_box - magnitude_dipole)'], {}), '(magnitude_box - magnitude_dipole)\n', (1021, 1055), True, 'import numpy as np\n'), ((1088, 1127), 'numpy.fabs', 'np.fabs', (['(dipole_field[0] - box_field[0])'], {}), '(dipole_field[0] - box_field[0])\n', 
(1095, 1127), True, 'import numpy as np\n'), ((1160, 1199), 'numpy.fabs', 'np.fabs', (['(dipole_field[1] - box_field[1])'], {}), '(dipole_field[1] - box_field[1])\n', (1167, 1199), True, 'import numpy as np\n'), ((1232, 1271), 'numpy.fabs', 'np.fabs', (['(dipole_field[2] - box_field[2])'], {}), '(dipole_field[2] - box_field[2])\n', (1239, 1271), True, 'import numpy as np\n'), ((1754, 1794), 'numpy.fabs', 'np.fabs', (['(box_field_1[2] - box_field_2[0])'], {}), '(box_field_1[2] - box_field_2[0])\n', (1761, 1794), True, 'import numpy as np\n'), ((2263, 2304), 'numpy.fabs', 'np.fabs', (['(magnitude_box - magnitude_dipole)'], {}), '(magnitude_box - magnitude_dipole)\n', (2270, 2304), True, 'import numpy as np\n'), ((2337, 2376), 'numpy.fabs', 'np.fabs', (['(dipole_field[0] - box_field[0])'], {}), '(dipole_field[0] - box_field[0])\n', (2344, 2376), True, 'import numpy as np\n'), ((2409, 2448), 'numpy.fabs', 'np.fabs', (['(dipole_field[1] - box_field[1])'], {}), '(dipole_field[1] - box_field[1])\n', (2416, 2448), True, 'import numpy as np\n'), ((2481, 2520), 'numpy.fabs', 'np.fabs', (['(dipole_field[2] - box_field[2])'], {}), '(dipole_field[2] - box_field[2])\n', (2488, 2520), True, 'import numpy as np\n'), ((2948, 2989), 'numpy.fabs', 'np.fabs', (['(magnitude_box - magnitude_dipole)'], {}), '(magnitude_box - magnitude_dipole)\n', (2955, 2989), True, 'import numpy as np\n'), ((3022, 3061), 'numpy.fabs', 'np.fabs', (['(dipole_field[0] - box_field[0])'], {}), '(dipole_field[0] - box_field[0])\n', (3029, 3061), True, 'import numpy as np\n'), ((3094, 3133), 'numpy.fabs', 'np.fabs', (['(dipole_field[1] - box_field[1])'], {}), '(dipole_field[1] - box_field[1])\n', (3101, 3133), True, 'import numpy as np\n'), ((3166, 3205), 'numpy.fabs', 'np.fabs', (['(dipole_field[2] - box_field[2])'], {}), '(dipole_field[2] - box_field[2])\n', (3173, 3205), True, 'import numpy as np\n'), ((4017, 4058), 'numpy.fabs', 'np.fabs', (['(magnitude_box - magnitude_dipole)'], {}), '(magnitude_box - 
magnitude_dipole)\n', (4024, 4058), True, 'import numpy as np\n'), ((4091, 4130), 'numpy.fabs', 'np.fabs', (['(dipole_field[0] - box_field[0])'], {}), '(dipole_field[0] - box_field[0])\n', (4098, 4130), True, 'import numpy as np\n'), ((4163, 4202), 'numpy.fabs', 'np.fabs', (['(dipole_field[1] - box_field[1])'], {}), '(dipole_field[1] - box_field[1])\n', (4170, 4202), True, 'import numpy as np\n'), ((4235, 4274), 'numpy.fabs', 'np.fabs', (['(dipole_field[2] - box_field[2])'], {}), '(dipole_field[2] - box_field[2])\n', (4242, 4274), True, 'import numpy as np\n'), ((4833, 4874), 'numpy.fabs', 'np.fabs', (['(magnitude_box - magnitude_dipole)'], {}), '(magnitude_box - magnitude_dipole)\n', (4840, 4874), True, 'import numpy as np\n'), ((4907, 4946), 'numpy.fabs', 'np.fabs', (['(dipole_field[0] - box_field[0])'], {}), '(dipole_field[0] - box_field[0])\n', (4914, 4946), True, 'import numpy as np\n'), ((4979, 5018), 'numpy.fabs', 'np.fabs', (['(dipole_field[1] - box_field[1])'], {}), '(dipole_field[1] - box_field[1])\n', (4986, 5018), True, 'import numpy as np\n'), ((5051, 5090), 'numpy.fabs', 'np.fabs', (['(dipole_field[2] - box_field[2])'], {}), '(dipole_field[2] - box_field[2])\n', (5058, 5090), True, 'import numpy as np\n'), ((5649, 5690), 'numpy.fabs', 'np.fabs', (['(magnitude_box - magnitude_dipole)'], {}), '(magnitude_box - magnitude_dipole)\n', (5656, 5690), True, 'import numpy as np\n'), ((5723, 5762), 'numpy.fabs', 'np.fabs', (['(dipole_field[0] - box_field[0])'], {}), '(dipole_field[0] - box_field[0])\n', (5730, 5762), True, 'import numpy as np\n'), ((5795, 5834), 'numpy.fabs', 'np.fabs', (['(dipole_field[1] - box_field[1])'], {}), '(dipole_field[1] - box_field[1])\n', (5802, 5834), True, 'import numpy as np\n'), ((5867, 5906), 'numpy.fabs', 'np.fabs', (['(dipole_field[2] - box_field[2])'], {}), '(dipole_field[2] - box_field[2])\n', (5874, 5906), True, 'import numpy as np\n'), ((6795, 6836), 'numpy.fabs', 'np.fabs', (['(magnitude_box - magnitude_dipole)'], {}), 
'(magnitude_box - magnitude_dipole)\n', (6802, 6836), True, 'import numpy as np\n'), ((6869, 6908), 'numpy.fabs', 'np.fabs', (['(dipole_field[0] - box_field[0])'], {}), '(dipole_field[0] - box_field[0])\n', (6876, 6908), True, 'import numpy as np\n'), ((6941, 6980), 'numpy.fabs', 'np.fabs', (['(dipole_field[1] - box_field[1])'], {}), '(dipole_field[1] - box_field[1])\n', (6948, 6980), True, 'import numpy as np\n'), ((7013, 7052), 'numpy.fabs', 'np.fabs', (['(dipole_field[2] - box_field[2])'], {}), '(dipole_field[2] - box_field[2])\n', (7020, 7052), True, 'import numpy as np\n'), ((3597, 3609), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (3604, 3609), True, 'import numpy as np\n'), ((3617, 3629), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (3624, 3629), True, 'import numpy as np\n'), ((3637, 3649), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (3644, 3649), True, 'import numpy as np\n'), ((4413, 4425), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (4420, 4425), True, 'import numpy as np\n'), ((4433, 4445), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (4440, 4445), True, 'import numpy as np\n'), ((4453, 4465), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (4460, 4465), True, 'import numpy as np\n'), ((5229, 5241), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (5236, 5241), True, 'import numpy as np\n'), ((5249, 5261), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (5256, 5261), True, 'import numpy as np\n'), ((5269, 5281), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (5276, 5281), True, 'import numpy as np\n'), ((6288, 6300), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (6295, 6300), True, 'import numpy as np\n'), ((6308, 6320), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (6315, 6320), True, 'import numpy as np\n'), ((6328, 6340), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (6335, 6340), True, 'import numpy as np\n')] |
# Copyright
# 2019 Department of Dermatology, School of Medicine, Tohoku University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import itertools
from skimage.measure import approximate_polygon, subdivide_polygon
from sympy.geometry import Segment, Point
import numpy as np
def has_intersection_slow(p_a0, p_a1, p_b0, p_b1):
    """Exact segment-segment intersection test using sympy (slow reference).

    :param p_a0 tuple[int, int]|numpy.ndarray: first endpoint of segment A
    :param p_a1 tuple[int, int]|numpy.ndarray: second endpoint of segment A
    :param p_b0 tuple[int, int]|numpy.ndarray: first endpoint of segment B
    :param p_b1 tuple[int, int]|numpy.ndarray: second endpoint of segment B
    :return bool, int, int: (found, x, y); (False, 0, 0) when disjoint
    """
    seg_a = Segment(Point(p_a0), Point(p_a1))
    seg_b = Segment(Point(p_b0), Point(p_b1))
    hits = seg_a.intersection(seg_b)
    if not hits:
        return False, 0, 0
    hit = hits[0]
    return True, int(hit.x), int(hit.y)
def has_intersection(p_a0, p_a1, p_b0, p_b1):
    """Returns tuple[is_parallel: bool, has_intersection: bool, x: int, y]"""
    ax, ay = p_a0
    bx, by = p_b0
    da_x = p_a1[0] - ax
    da_y = p_a1[1] - ay
    db_x = p_b1[0] - bx
    db_y = p_b1[1] - by
    # Parallel (cross product of directions is zero): never report a hit.
    if da_x * db_y == db_x * da_y:
        return True, False, 0, 0
    den = da_y * db_x - da_x * db_y
    # k is the parameter along segment A, u along segment B.
    k = (db_y * (ax - bx) - db_x * (ay - by)) / den
    u = (da_y * (ax - bx) - da_x * (ay - by)) / den
    if not (0 <= k <= 1 and 0 <= u <= 1):
        return False, False, 0, 0
    return False, True, int(ax + k * da_x), int(ay + k * da_y)
def has_intersection_with_k(p_a0, p_a1, p_b0, p_b1):
    """Returns tuple[is_parallel: bool, has_intersection: bool, x: int, y]"""
    ax, ay = p_a0
    bx, by = p_b0
    da_x = p_a1[0] - ax
    da_y = p_a1[1] - ay
    db_x = p_b1[0] - bx
    db_y = p_b1[1] - by
    # Parallel segments: k is reported as 0.
    if da_x * db_y == db_x * da_y:
        return True, False, 0, 0, 0
    den = da_y * db_x - da_x * db_y
    # k: parameter along A; u: parameter along B. k is returned either way.
    k = (db_y * (ax - bx) - db_x * (ay - by)) / den
    u = (da_y * (ax - bx) - da_x * (ay - by)) / den
    if not (0 <= k <= 1 and 0 <= u <= 1):
        return False, False, 0, 0, k
    return False, True, int(ax + k * da_x), int(ay + k * da_y), k
def has_intersection2(p_a0, p_a1, p_b0, p_b1):
    """Alternative segment intersection with strict (endpoint-excluding) bounds.

    Returns the same 4-tuple shape as has_intersection, but the first element
    is always False (parallelism is folded into the zero-denominator check).
    """
    x_a, y_a = p_a0
    x_b, y_b = p_a1
    x_c, y_c = p_b0
    x_d, y_d = p_b1
    l_den = (x_d - x_c) * (y_b - y_a) - (x_b - x_a) * (y_d - y_c)
    k_den = (x_b - x_a) * (y_d - y_c) - (y_b - y_a) * (x_d - x_c)
    if l_den == 0 or k_den == 0:
        return False, False, 0, 0
    l = ((y_c - y_a) * (x_b - x_a) - (x_c - x_a) * (y_b - y_c)) / l_den
    k = ((y_a - y_c) * (x_d - x_c) - (x_a - x_c) * (y_d - y_c)) / k_den
    if not (0 < l < 1 and 0 < k < 1):
        return False, False, 0, 0
    return False, True, int(x_a + k * (x_b - x_a)), int(y_a + k * (y_b - y_a))
# has_intersection = has_intersection2
# Winding-direction markers returned by get_orientation(); kept as 1-element
# numpy arrays so they can be np.dot()-ed directly with a 2D cross product.
Clockwise = np.array([-1])
CounterClockwise = np.array([1])
Undefined = np.array([0])
def get_circled_pairs(vertices):
    """Return consecutive vertex pairs of a closed polygon.

    The final pair wraps from the last vertex back to the first.
    """
    n = len(vertices)
    pairs = [(vertices[k], vertices[k + 1]) for k in range(n - 1)]
    pairs.append((vertices[n - 1], vertices[0]))
    return pairs
def get_orientation(vertices):
    """Classify polygon winding using the signed-area (shoelace) sum.

    Returns the module-level Clockwise / CounterClockwise / Undefined arrays.
    """
    signed = sum(
        (x2 - x1) * (y2 + y1)
        for (x1, y1), (x2, y2) in get_circled_pairs(vertices)
    )
    if signed > 0:
        return CounterClockwise
    if signed < 0:
        return Clockwise
    return Undefined
def has_point_in_triangle(triangle, points):
    """True if any of `points` lies inside (or on) triangle (a, b, c).

    A point is expressed in the edge basis (b - a, c - a); it is inside when
    both coordinates are non-negative and their sum does not exceed 1.
    """
    a, b, c = triangle
    basis_inv = np.linalg.inv(np.vstack([b - a, c - a]).transpose())
    for point in points:
        u, v = np.dot(basis_inv, point - a)
        if u >= 0 and v >= 0 and u + v <= 1:
            return True
    return False
def convert_dim1to2(l):
    """Convert a flat [x0, y0, x1, y1, ...] list to [(x0, y0), (x1, y1), ...]."""
    return [(l[k], l[k + 1]) for k in range(0, len(l) // 2 * 2, 2)]
def triangulate(vertices):
    """Return List[int] pyglet.gl.OpenGl like vertices list
    :param vertices List[int]: v2i style
    :return List[int]:

    Ear-clipping triangulation: repeatedly cuts off a convex "ear" vertex
    whose triangle contains no other remaining vertex, emitting index
    triples. Returns an empty list when no ear can be found (degenerate or
    self-intersecting input).
    """
    vertices = np.array(convert_dim1to2(vertices))
    orientation = get_orientation(vertices)
    num_vertex = len(vertices)
    vertex_id_pointer = 0
    remained_point_indices = list(range(num_vertex))
    result = list()
    flag = True
    while len(remained_point_indices) > 2 and flag:
        # Skip over indices that have already been clipped away.
        if vertex_id_pointer not in remained_point_indices:
            vertex_id_pointer += 1
            if vertex_id_pointer > remained_point_indices[-1]:
                # Can't make triangulate
                print("Cant", len(remained_point_indices))
                # return result
                flag = False
                return list()
            else:
                continue
        i = remained_point_indices.index(vertex_id_pointer)
        # Candidate ear: this vertex and its two cyclic successors.
        triangle_ids = \
            (remained_point_indices + remained_point_indices[:2])[i:i+3]
        triangle = (
            vertices[triangle_ids[0]], vertices[triangle_ids[1]],
            vertices[triangle_ids[2]]
        )
        a, b, c = triangle
        # Sign of the 2D cross product against the polygon orientation tells
        # whether the corner at b is convex.
        c_prod = np.cross(c-b, b-a)
        d_prod = np.dot(orientation, c_prod)
        if d_prod > 0:
            other_points = [
                vertices[i]
                for i in remained_point_indices if i not in triangle_ids
            ]
            # A valid ear must not contain any other remaining vertex.
            if not has_point_in_triangle(triangle, points=other_points):
                result.extend(triangle_ids)
                remained_point_indices.remove(triangle_ids[1])
                vertex_id_pointer = remained_point_indices[0]
                continue
        vertex_id_pointer += 1
    return result
def get_close_curve(x, y, dx, dy, vertices):
    """Close an open stroke against itself.

    Walks the segment list and tests each segment against the closing probe
    segment (x, y) -> (x+dx, y+dy). If an intersection is found, the vertices
    visited so far (flattened) are returned with the intersection point
    appended, forming a closed curve.

    :param int x:
    :param int y:
    :param int dx:
    :param int dy:
    :param List[int] vertices: list of ((x1, y1), (x2, y2)) segment pairs
    :return bool, List[int]: (closed?, flattened vertex list)
    """
    check = partial(has_intersection, (x, y), (x+dx, y+dy))
    result = list()
    for p1, p2 in vertices:
        is_para, has_ins, i_x, i_y = check(p1, p2)
        result.extend(p1)
        if has_ins:
            # return True, [x, y] + result  # + [i_x, i_y]
            # return True, result[:-2] + [i_x, i_y]
            # NOTE(review): [2:-2] drops the first visited point and replaces
            # the last with the intersection — presumably to trim the open
            # ends of the stroke; confirm against the caller's expectations.
            return True, result[2:-2] + [i_x, i_y]
    return False, list()
def get_closed_curve_points(x, y, dx, dy, points, dim_points=2):
    """Build reversed segment pairs from `points` and delegate to get_close_curve.

    :param dim_points: 1 for a flat [x0, y0, ...] list, 2 for (x, y) tuples.
    :raises ValueError: for any other dim_points value.
    """
    if dim_points == 1:
        segments = [
            (points[2 * idx - 2:2 * idx], points[2 * idx:2 * idx + 2])
            for idx in reversed(range(1, len(points) // 2 - 1))
        ]
    elif dim_points == 2:
        segments = [
            (points[idx - 1], points[idx])
            for idx in reversed(range(1, len(points) - 1))
        ]
    else:
        raise ValueError
    return get_close_curve(x, y, dx, dy, segments)
def reduce_coordinates(points, dim_points=2):
    """Smooth and simplify a polygon outline.

    Runs five rounds of skimage subdivide (smoothing) + approximate
    (Douglas-Peucker style simplification, tolerance 1.8 px). Returns an
    empty list when fewer than 4 vertices remain at any round.

    :param points: flat [x0, y0, ...] list (dim_points=1) or (x, y) pairs
        / Nx2 array (dim_points=2)
    :return: flattened int list for dim_points=1, array of points otherwise
    """
    if dim_points == 1:
        # Note: reversed() flips the vertex order relative to the input.
        vs = np.array([
            [points[2*i], points[2*i+1]]
            for i in reversed(range(0, len(points)//2))
        ])
    elif dim_points == 2:
        vs = points
    else:
        raise ValueError
    for _ in range(5):
        if len(vs) < 4:
            return list()
        vs = subdivide_polygon(vs, preserve_ends=True)
        vs = approximate_polygon(vs, tolerance=1.8)
    if dim_points == 1:
        return [int(i) for i in itertools.chain.from_iterable(vs)]
    return vs
def get_circumscribed_rectangle(points, dim_points=2):
    """Axis-aligned bounding box of `points` as (x, y, width, height).

    :param points: (x, y) pairs (dim_points=2) or flat [x0, y0, ...] list
        (dim_points=1)
    :raises ValueError: for any other dim_points value.
    """
    if dim_points == 2:
        xs = [pt[0] for pt in points]
        ys = [pt[1] for pt in points]
    elif dim_points == 1:
        xs = points[0::2]
        ys = points[1::2]
    else:
        raise ValueError
    x_min, y_min = min(xs), min(ys)
    return x_min, y_min, max(xs) - x_min, max(ys) - y_min
def safe_append(result, i_x, i_y):
    # Unimplemented stub — currently a no-op; no caller in this file uses it.
    pass
def get_inscribed_polygon_vertices(vertices):
    """Find a self-touching loop inside an open stroke.

    Scans the flat [x0, y0, ...] vertex list for a pair of far-apart indices
    whose points are within ~sqrt(10) px of each other, and returns the
    sub-list between them as the closed loop. Returns an empty list when no
    such pair exists or the stroke is too short.
    """
    threshold = 5 * 2
    num_min_points = 10
    num_points = len(vertices) // 2
    if num_points <= num_min_points:
        return list()
    pairs = list()
    for i in reversed(range(num_min_points, num_points)):
        # NOTE(review): this break fires as soon as one pair is found, so the
        # two-pair branches below (len(pairs) > 1 and the elif) appear to be
        # unreachable — confirm whether multi-pair handling was intended.
        if len(pairs) > 0:
            break
        if len(pairs) > 1:
            break
        elif pairs[0][-1] == i:
            break
        x, y = vertices[2*i: 2*i+2]
        for j in reversed(range(0, i-num_min_points)):
            h, v = vertices[2*j: 2*j+2]
            # Squared-distance proximity test; indices i and j must be at
            # least num_min_points apart to avoid trivial neighbors.
            if (x-h) ** 2 + (y-v) ** 2 < threshold:
                pairs.append((i, j))
                break
    num_pairs = len(pairs)
    if num_pairs == 0:
        return list()
    elif num_pairs == 1:
        end, start = pairs[0]
        return vertices[2*start:2*end+2]
    elif len(pairs) == 2:
        print("Two pair")
        end, p0 = pairs[0]
        start, p1 = pairs[1]
        result = vertices[2*p0:2*p0+2] + vertices[2*start:2*end+2] \
            + vertices[2*p1:2*p1+2]
        return result
    else:
        return list()
| [
"functools.partial",
"sympy.geometry.Point",
"skimage.measure.subdivide_polygon",
"numpy.cross",
"numpy.array",
"numpy.dot",
"itertools.chain.from_iterable",
"skimage.measure.approximate_polygon",
"numpy.vstack"
] | [((3684, 3698), 'numpy.array', 'np.array', (['[-1]'], {}), '([-1])\n', (3692, 3698), True, 'import numpy as np\n'), ((3718, 3731), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (3726, 3731), True, 'import numpy as np\n'), ((3744, 3757), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (3752, 3757), True, 'import numpy as np\n'), ((6585, 6636), 'functools.partial', 'partial', (['has_intersection', '(x, y)', '(x + dx, y + dy)'], {}), '(has_intersection, (x, y), (x + dx, y + dy))\n', (6592, 6636), False, 'from functools import partial\n'), ((7834, 7872), 'skimage.measure.approximate_polygon', 'approximate_polygon', (['vs'], {'tolerance': '(1.8)'}), '(vs, tolerance=1.8)\n', (7853, 7872), False, 'from skimage.measure import approximate_polygon, subdivide_polygon\n'), ((1180, 1191), 'sympy.geometry.Point', 'Point', (['p_a0'], {}), '(p_a0)\n', (1185, 1191), False, 'from sympy.geometry import Segment, Point\n'), ((1193, 1204), 'sympy.geometry.Point', 'Point', (['p_a1'], {}), '(p_a1)\n', (1198, 1204), False, 'from sympy.geometry import Segment, Point\n'), ((1230, 1241), 'sympy.geometry.Point', 'Point', (['p_b0'], {}), '(p_b0)\n', (1235, 1241), False, 'from sympy.geometry import Segment, Point\n'), ((1243, 1254), 'sympy.geometry.Point', 'Point', (['p_b1'], {}), '(p_b1)\n', (1248, 1254), False, 'from sympy.geometry import Segment, Point\n'), ((4447, 4469), 'numpy.dot', 'np.dot', (['inv_mat', '(p - a)'], {}), '(inv_mat, p - a)\n', (4453, 4469), True, 'import numpy as np\n'), ((5828, 5850), 'numpy.cross', 'np.cross', (['(c - b)', '(b - a)'], {}), '(c - b, b - a)\n', (5836, 5850), True, 'import numpy as np\n'), ((5864, 5891), 'numpy.dot', 'np.dot', (['orientation', 'c_prod'], {}), '(orientation, c_prod)\n', (5870, 5891), True, 'import numpy as np\n'), ((7783, 7824), 'skimage.measure.subdivide_polygon', 'subdivide_polygon', (['vs'], {'preserve_ends': '(True)'}), '(vs, preserve_ends=True)\n', (7800, 7824), False, 'from skimage.measure import approximate_polygon, 
subdivide_polygon\n'), ((4378, 4395), 'numpy.vstack', 'np.vstack', (['[s, t]'], {}), '([s, t])\n', (4387, 4395), True, 'import numpy as np\n'), ((7929, 7962), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['vs'], {}), '(vs)\n', (7958, 7962), False, 'import itertools\n')] |
from functions import *
from global_variables import init_global
import simpy
import matplotlib.pyplot as plt
import random as rd
import numpy as np
import os
from scipy.optimize import curve_fit
from scipy.special import factorial
# M/M/1 queue simulation parameters: one server, service rate mu,
# arrival rate l (rho = l/mu = 0.8).
n_server = 1
mu = 0.80
l = 0.64
end_n_actions = 60000
repetitions = 30
initialisation_period = 10000  # warm-up actions discarded from statistics
n_simulations = 1
LT_value = 5
sjf = False # use shortest job first
list_average_queuelength = []
list_average_queuingtimes = []
list_stddev = []
# Batch sizes to sweep for the batch-means standard-deviation study.
diff_batchsizes = np.arange(1, 30000, 6000)
# run the simulation multiple times
for i in diff_batchsizes:
    total_standard_deviation = 0
    for j in range(repetitions):
        print("current batch size: ", i)
        print("current repetition: ", j)
        n_batches = (end_n_actions - initialisation_period) / i / 2.
        # initialize the global lists
        init_global()
        # create a simpy environment
        env = simpy.Environment()
        # set up the system
        env.process(setup(env, n_server, mu, l, sjf, end_n_actions, "M", LT_value))
        # run the program
        env.run()
        average_queuelength = np.average(global_variables.queue_length_list)
        list_average_queuelength.append(average_queuelength)
        list_batch_averages = batch_averages(i, initialisation_period)
        average_queuingtimes = np.average(global_variables.time_spend_in_queue_list)
        list_average_queuingtimes.append(average_queuingtimes)
        # Sample variance of the batch means (batch-means method).
        variance = 1 / (n_batches - 1) * np.sum((list_batch_averages - np.average(list_batch_averages))**2)
        total_standard_deviation += np.sqrt(variance)
    list_stddev.append(total_standard_deviation/repetitions)
# Plot mean batch-means stddev vs batch size.
plt.figure()
ax = plt.gca()
plt.plot(diff_batchsizes, list_stddev, linewidth=3)
plt.xlabel("batch size (#)", fontsize=16, fontweight='bold')
plt.ylabel("standard deviation (a.u.)", fontsize=16, fontweight='bold')
ax.xaxis.set_tick_params(labelsize=14)
ax.yaxis.set_tick_params(labelsize=14)
plt.show()
########################################################################################################
| [
"matplotlib.pyplot.show",
"numpy.average",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"global_variables.init_global",
"numpy.arange",
"simpy.Environment",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.sqrt"
] | [((502, 527), 'numpy.arange', 'np.arange', (['(1)', '(30000)', '(6000)'], {}), '(1, 30000, 6000)\n', (511, 527), True, 'import numpy as np\n'), ((1683, 1695), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1693, 1695), True, 'import matplotlib.pyplot as plt\n'), ((1701, 1710), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1708, 1710), True, 'import matplotlib.pyplot as plt\n'), ((1712, 1763), 'matplotlib.pyplot.plot', 'plt.plot', (['diff_batchsizes', 'list_stddev'], {'linewidth': '(3)'}), '(diff_batchsizes, list_stddev, linewidth=3)\n', (1720, 1763), True, 'import matplotlib.pyplot as plt\n'), ((1764, 1824), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""batch size (#)"""'], {'fontsize': '(16)', 'fontweight': '"""bold"""'}), "('batch size (#)', fontsize=16, fontweight='bold')\n", (1774, 1824), True, 'import matplotlib.pyplot as plt\n'), ((1825, 1896), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""standard deviation (a.u.)"""'], {'fontsize': '(16)', 'fontweight': '"""bold"""'}), "('standard deviation (a.u.)', fontsize=16, fontweight='bold')\n", (1835, 1896), True, 'import matplotlib.pyplot as plt\n'), ((1975, 1985), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1983, 1985), True, 'import matplotlib.pyplot as plt\n'), ((855, 868), 'global_variables.init_global', 'init_global', ([], {}), '()\n', (866, 868), False, 'from global_variables import init_global\n'), ((921, 940), 'simpy.Environment', 'simpy.Environment', ([], {}), '()\n', (938, 940), False, 'import simpy\n'), ((1130, 1176), 'numpy.average', 'np.average', (['global_variables.queue_length_list'], {}), '(global_variables.queue_length_list)\n', (1140, 1176), True, 'import numpy as np\n'), ((1341, 1394), 'numpy.average', 'np.average', (['global_variables.time_spend_in_queue_list'], {}), '(global_variables.time_spend_in_queue_list)\n', (1351, 1394), True, 'import numpy as np\n'), ((1603, 1620), 'numpy.sqrt', 'np.sqrt', (['variance'], {}), '(variance)\n', (1610, 1620), True, 
'import numpy as np\n'), ((1530, 1561), 'numpy.average', 'np.average', (['list_batch_averages'], {}), '(list_batch_averages)\n', (1540, 1561), True, 'import numpy as np\n')] |
import numpy as np
# Quick demo: 2x2 array element access by row/column index.
temp = np.array([[1, 2], [3, 4]])
print (temp[0][0])
print (temp[0][1])
def iround(x):
    """iround(number) -> integer
    Round a number to the nearest integer."""
    shifted = round(x) - 0.5
    if shifted > 0:
        return int(shifted) + 1
    return int(shifted)
import decimal
# Demo: exact half-up rounding with Decimal (float round() uses banker's
# rounding; Decimal keeps the written precision).
x = decimal.Decimal("2.4999999999999999999999999")
whole, remain = divmod(x, 1)
if remain >= decimal.Decimal("0.5"):
    whole += 1
print(whole)
print ("lalalala = ", np.around([0.37, 1.64], decimals=1))
| [
"numpy.around",
"numpy.array",
"decimal.Decimal"
] | [((26, 52), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (34, 52), True, 'import numpy as np\n'), ((256, 302), 'decimal.Decimal', 'decimal.Decimal', (['"""2.4999999999999999999999999"""'], {}), "('2.4999999999999999999999999')\n", (271, 302), False, 'import decimal\n'), ((345, 367), 'decimal.Decimal', 'decimal.Decimal', (['"""0.5"""'], {}), "('0.5')\n", (360, 367), False, 'import decimal\n'), ((421, 456), 'numpy.around', 'np.around', (['[0.37, 1.64]'], {'decimals': '(1)'}), '([0.37, 1.64], decimals=1)\n', (430, 456), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*-
"""
file name: sif_sent2vec.py
Created on 2019/1/14
@author: kyy_b
@desc:
"""
import numpy as np
from sklearn.decomposition import PCA
from typing import List
from collections import Counter
import pickle
class SIFSent2Vec:
    """Smooth Inverse Frequency (SIF) sentence embeddings.

    Each sentence vector is the frequency-weighted average of its word
    vectors (weight a / (a + p(w))), after which the projection onto the
    corpus's first principal component is removed (Arora et al., 2017).
    """

    def __init__(self, corpus, word_embeddings, embedding_size, a=1.0e-3, stop_words_set=None):
        """
        :param corpus: iterable of sentences, each a token list or a
            space-separated string
        :param word_embeddings: word-vector model exposing a ``wv`` mapping
            (e.g. gensim Word2Vec)
        :param embedding_size: dimensionality of the word vectors
        :param a: SIF smoothing constant (paper default 1e-3)
        :param stop_words_set: unused; kept for interface compatibility
        """
        self.embedding_size = embedding_size
        self.a = a
        self.corpus = corpus
        self.we = word_embeddings
        # Accumulate counts across the whole corpus. The original code did
        # word_freq.update(dict(Counter(sentence))), which OVERWROTE a word's
        # count with its count in the last sentence instead of summing.
        counts = Counter()
        for sentence in self.corpus:
            if isinstance(sentence, str):
                sentence = sentence.split(" ")
            counts.update(sentence)
        total = sum(counts.values())
        # Relative frequency p(w) for every word in the corpus.
        self.word_freq = {k: v / total for (k, v) in counts.items()}
        print("sif vocab size = ", len(self.word_freq))
        self.sentence_vecs = self.sentence_to_vec()

    def get_word_frequency(self, word):
        """Return p(word), with a small fallback for out-of-vocabulary words."""
        if word in self.word_freq:
            return self.word_freq[word]
        return 0.0001

    def sentence_to_vec(self):
        """Compute a SIF embedding for every sentence in the corpus."""
        sentence_set = []
        for sentence in self.corpus:
            if isinstance(sentence, str):
                sentence = sentence.split(" ")
            vs = np.zeros(self.embedding_size)  # add all word2vec values into one vector for the sentence
            # Guard against empty sentences (avoids ZeroDivisionError).
            sentence_length = max(len(sentence), 1)
            for word in sentence:
                if word in self.we.wv:
                    # SIF weight a / (a + p(word)). The original passed the
                    # frequency back into get_word_frequency, so every word
                    # got the 0.0001 fallback weight.
                    a_value = self.a / (self.a + self.get_word_frequency(word))
                    vs = np.add(vs, np.multiply(a_value, self.we.wv[word]))  # vs += sif * word_vector
            vs = np.divide(vs, sentence_length)  # weighted average
            sentence_set.append(vs)  # add to our existing re-calculated set of sentences
        assert len(self.corpus) == len(sentence_set)
        # calculate PCA of this sentence set
        pca = PCA()
        pca.fit(np.array(sentence_set))
        u = pca.components_[0]  # the PCA vector
        u = np.multiply(u, np.transpose(u))  # u x uT
        # pad the vector (occurs if we have fewer sentences than embedding_size)
        if len(u) < self.embedding_size:
            for i in range(self.embedding_size - len(u)):
                u = np.append(u, 0)  # add needed extension for multiplication below
        # resulting sentence vectors, vs = vs - u x uT x vs
        sentence_vecs = []
        for vs in sentence_set:
            sub = np.multiply(u, vs)
            sentence_vecs.append(np.subtract(vs, sub))
        return sentence_vecs

    def save(self, path):
        """Pickle the computed sentence vectors to `path`."""
        with open(path, "wb") as f:
            pickle.dump(self.sentence_vecs, f)

    @staticmethod
    def load(path):
        """Load sentence vectors previously saved with :meth:`save`."""
        with open(path, "rb") as f:
            return pickle.load(f)
| [
"numpy.divide",
"numpy.multiply",
"numpy.subtract",
"numpy.zeros",
"numpy.transpose",
"numpy.append",
"sklearn.decomposition.PCA",
"numpy.array",
"collections.Counter"
] | [((2011, 2016), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (2014, 2016), False, 'from sklearn.decomposition import PCA\n'), ((1323, 1352), 'numpy.zeros', 'np.zeros', (['self.embedding_size'], {}), '(self.embedding_size)\n', (1331, 1352), True, 'import numpy as np\n'), ((1756, 1786), 'numpy.divide', 'np.divide', (['vs', 'sentence_length'], {}), '(vs, sentence_length)\n', (1765, 1786), True, 'import numpy as np\n'), ((2033, 2055), 'numpy.array', 'np.array', (['sentence_set'], {}), '(sentence_set)\n', (2041, 2055), True, 'import numpy as np\n'), ((2133, 2148), 'numpy.transpose', 'np.transpose', (['u'], {}), '(u)\n', (2145, 2148), True, 'import numpy as np\n'), ((2566, 2584), 'numpy.multiply', 'np.multiply', (['u', 'vs'], {}), '(u, vs)\n', (2577, 2584), True, 'import numpy as np\n'), ((2363, 2378), 'numpy.append', 'np.append', (['u', '(0)'], {}), '(u, 0)\n', (2372, 2378), True, 'import numpy as np\n'), ((2618, 2638), 'numpy.subtract', 'np.subtract', (['vs', 'sub'], {}), '(vs, sub)\n', (2629, 2638), True, 'import numpy as np\n'), ((731, 748), 'collections.Counter', 'Counter', (['sentence'], {}), '(sentence)\n', (738, 748), False, 'from collections import Counter\n'), ((1671, 1709), 'numpy.multiply', 'np.multiply', (['a_value', 'self.we.wv[word]'], {}), '(a_value, self.we.wv[word])\n', (1682, 1709), True, 'import numpy as np\n')] |
from .inmoov_shadow_hand_v2 import InmoovShadowNew
from . import utils
import pybullet as p
import time
import gym, gym.utils.seeding, gym.spaces
import numpy as np
import math
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
class InmoovShadowHandGraspEnvV6(gym.Env):
metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 50}
def __init__(self,
renders=True,
init_noise=True,
up=True,
random_top_shape=True,
det_top_shape_ind=1, # if not random shape, 1 box, 0 cyl, -1 sphere,
cotrain_onstack_grasp=False,
grasp_floor=True, # if not cotrain, is grasp from stack or grasp on table
control_skip=6,
obs_noise=True,
n_best_cand=2,
has_test_phase=True,
warm_start_phase=False,
use_obj_heights=False,
overwrite_size=False # overwrite obj size from outside
):
self.renders = renders
self.init_noise = init_noise
self.up = up
self.warm_start = warm_start_phase
self.random_top_shape = random_top_shape
self.det_top_shape_ind = det_top_shape_ind
self.cotrain_onstack_grasp = cotrain_onstack_grasp
self.grasp_floor = grasp_floor
self.obs_noise = obs_noise
self.has_test_phase = has_test_phase
self.test_start = 50
self.overwrite_size = overwrite_size
self.overwrite_height = 0.1
self.overwrite_radius = 0.05
self.n_best_cand = int(n_best_cand)
self.use_obj_heights = use_obj_heights
# dummy, to be overwritten
self.top_obj = {'id': None,
'mass': -1,
'mu': -1,
'shape': utils.SHAPE_IND_MAP[0],
'half_width': -1,
'height': -1}
self.btm_obj = {'id': None,
'mass': -1,
'mu': -1,
'shape': utils.SHAPE_IND_MAP[0],
'half_width': -1,
'height': -1}
self.table_id = None
self._timeStep = 1. / 240.
if self.renders:
p.connect(p.GUI)
else:
p.connect(p.DIRECT) # this session seems always 0
self.np_random = None
self.robot = None
self.seed(0) # used once temporarily, will be overwritten outside by env
self.viewer = None
self.timer = 0
self.final_states = [] # wont be cleared unless call clear function
self.control_skip = int(control_skip)
# shadow hand is 22-5=17dof
self.action_scale = np.array([0.009 / self.control_skip] * 7 + [0.024 / self.control_skip] * 17)
self.p_pos_of_init = utils.PALM_POS_OF_INIT
self.p_quat_of_init = p.getQuaternionFromEuler(utils.PALM_EULER_OF_INIT)
self.tx = -1 # dummy
self.ty = -1 # dummy
self.tz = -1 # dummy
self.tx_act = -1 # dummy
self.ty_act = -1 # dummy
self.tz_act = -1 # dummy
self.reset() # and update init obs
action_dim = len(self.action_scale)
self.act = self.action_scale * 0.0
self.action_space = gym.spaces.Box(low=np.array([-1.]*action_dim), high=np.array([+1.]*action_dim))
obs_dim = len(self.observation)
obs_dummy = np.array([1.12234567]*obs_dim)
self.observation_space = gym.spaces.Box(low=-np.inf*obs_dummy, high=np.inf*obs_dummy)
def sample_valid_arm_q(self):
while True:
if self.init_noise:
self.tx, self.ty, self.tz, self.tx_act, self.ty_act, self.tz_act = \
utils.sample_tx_ty_tz(self.np_random, self.up, self.grasp_floor, 0.02, 0.02)
else:
self.tx, self.ty, self.tz, self.tx_act, self.ty_act, self.tz_act = \
utils.sample_tx_ty_tz(self.np_random, self.up, self.grasp_floor, 0.0, 0.0)
# desired_obj_pos = [self.tx, self.ty, self.tz] # used for planning
if self.grasp_floor:
desired_obj_pos = [self.tx, self.ty, self.np_random.uniform(-0.01, 0.01)]
else:
desired_obj_pos = [self.tx, self.ty, self.np_random.uniform(0.15, 0.17)] # TODO: hardcoded
arm_qs = utils.get_n_optimal_init_arm_qs(self.robot, self.p_pos_of_init, self.p_quat_of_init,
desired_obj_pos, self.table_id, n=self.n_best_cand,
wrist_gain=3.0) # TODO
if len(arm_qs) == 0:
continue
else:
arm_q = arm_qs[self.np_random.randint(len(arm_qs))]
return arm_q
def reset(self):
p.resetSimulation()
p.setPhysicsEngineParameter(numSolverIterations=utils.BULLET_CONTACT_ITER)
p.setTimeStep(self._timeStep)
p.setGravity(0, 0, -10)
self.timer = 0
if self.cotrain_onstack_grasp:
self.grasp_floor = self.np_random.randint(10) >= 6 # 40%
mu_f = self.np_random.uniform(utils.MU_MIN, utils.MU_MAX)
self.table_id = utils.create_table(mu_f)
self.robot = InmoovShadowNew(init_noise=self.init_noise, timestep=self._timeStep, np_random=self.np_random)
arm_q = self.sample_valid_arm_q()
self.robot.reset_with_certain_arm_q(arm_q)
if not self.grasp_floor:
bo = self.btm_obj # reference for safety
bo['shape'] = utils.SHAPE_IND_MAP[self.np_random.randint(2)] # btm cyl or box
bo['half_width'] = self.np_random.uniform(utils.HALF_W_MIN_BTM, utils.HALF_W_MAX)
if bo['shape'] == p.GEOM_BOX:
bo['half_width'] *= 0.8
bo['height'] = self.tz_act
bo['mass'] = self.np_random.uniform(utils.MASS_MIN, utils.MASS_MAX)
bo['mu'] = self.np_random.uniform(utils.MU_MIN, utils.MU_MAX)
btm_xy = utils.perturb(self.np_random, [self.tx_act, self.ty_act], 0.015)
btm_xyz = list(np.array(list(btm_xy) + [self.tz_act / 2.0]))
btm_quat = p.getQuaternionFromEuler([0., 0., self.np_random.uniform(low=0, high=2.0 * math.pi)])
bo['id'] = utils.create_sym_prim_shape_helper(bo, btm_xyz, btm_quat)
if not self.warm_start:
# https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/examples/constraint.py#L11
_ = p.createConstraint(bo['id'], -1, -1, -1, p.JOINT_FIXED, [0, 0, 0], [0, 0, 0],
childFramePosition=btm_xyz,
childFrameOrientation=btm_quat)
to = self.top_obj
shape_ind = self.np_random.randint(2) if self.random_top_shape else self.det_top_shape_ind
to['shape'] = utils.SHAPE_IND_MAP[shape_ind]
to['mass'] = self.np_random.uniform(utils.MASS_MIN, utils.MASS_MAX)
to['mu'] = self.np_random.uniform(utils.MU_MIN, utils.MU_MAX)
to['half_width'] = self.np_random.uniform(utils.HALF_W_MIN, utils.HALF_W_MAX) if not self.overwrite_size else self.overwrite_radius
to['height'] = self.np_random.uniform(utils.H_MIN, utils.H_MAX) if not self.overwrite_size else self.overwrite_height
if to['shape'] == p.GEOM_BOX:
to['half_width'] *= 0.8
elif to['shape'] == p.GEOM_SPHERE:
to['height'] *= 0.75
to['half_width'] = None
top_xyz = np.array([self.tx_act, self.ty_act, self.tz_act + to['height'] / 2.0])
top_quat = p.getQuaternionFromEuler([0., 0., self.np_random.uniform(low=0, high=2.0 * math.pi)])
to['id'] = utils.create_sym_prim_shape_helper(to, top_xyz, top_quat)
# note, one-time (same for all frames) noise from init vision module
if self.obs_noise:
self.half_height_est = utils.perturb_scalar(self.np_random, self.top_obj['height']/2.0, 0.01)
else:
self.half_height_est = self.top_obj['height']/2.0
p.stepSimulation() # TODO
self.observation = self.getExtendedObservation()
self.success = True
return np.array(self.observation)
def step(self, action):
bottom_id = self.table_id if self.grasp_floor else self.btm_obj['id']
if self.has_test_phase:
if self.timer == self.test_start * self.control_skip:
self.force_global = [self.np_random.uniform(-100, 100),
self.np_random.uniform(-100, 100),
-200.]
if self.timer > self.test_start * self.control_skip:
p.setCollisionFilterPair(self.top_obj['id'], bottom_id, -1, -1, enableCollision=0)
_, quat = p.getBasePositionAndOrientation(self.top_obj['id'])
_, quat_inv = p.invertTransform([0, 0, 0], quat)
force_local, _ = p.multiplyTransforms([0, 0, 0], quat_inv, self.force_global, [0, 0, 0, 1])
p.applyExternalForce(self.top_obj['id'], -1, force_local, [0, 0, 0], flags=p.LINK_FRAME)
for _ in range(self.control_skip):
# action is in not -1,1
if action is not None:
# action = np.clip(np.array(action), -1, 1) # TODO
self.act = action
act_array = self.act * self.action_scale
self.robot.apply_action(act_array)
p.stepSimulation()
if self.renders:
time.sleep(self._timeStep * 0.5)
self.timer += 1
reward = 0.0
# diff_norm = self.robot.get_norm_diff_tar_arm() * 10
# reward += np.maximum(2. - diff_norm, 0)
# # print(reward)
# rewards is height of target object
top_pos, _ = p.getBasePositionAndOrientation(self.top_obj['id'])
top_xy_ideal = np.array([self.tx_act, self.ty_act])
xy_dist = np.linalg.norm(top_xy_ideal - np.array(top_pos[:2]))
reward += -np.minimum(xy_dist, 0.4) * 6.0
vel_palm = np.linalg.norm(self.robot.get_link_v_w(self.robot.ee_id)[0])
reward += -vel_palm * 1.0
for i in self.robot.fin_tips[:4]:
tip_pos = p.getLinkState(self.robot.arm_id, i)[0]
reward += -np.minimum(np.linalg.norm(np.array(tip_pos) - np.array(top_pos)), 0.5) # 4 finger tips
tip_pos = p.getLinkState(self.robot.arm_id, self.robot.fin_tips[4])[0] # thumb tip
reward += -np.minimum(np.linalg.norm(np.array(tip_pos) - np.array(top_pos)), 0.5) * 5.0
palm_com_pos = p.getLinkState(self.robot.arm_id, self.robot.ee_id)[0]
dist = np.minimum(np.linalg.norm(np.array(palm_com_pos) - np.array(top_pos)), 0.5)
reward += -dist * 2.0
# rot_metric = None
# if self.warm_start:
# # not used when grasp from floor
# _, btm_quat = p.getBasePositionAndOrientation(bottom_id)
#
# btm_vels = p.getBaseVelocity(bottom_id)
# btm_linv = np.array(btm_vels[0])
# btm_angv = np.array(btm_vels[1])
# reward += np.maximum(-np.linalg.norm(btm_linv) * 4.0 - np.linalg.norm(btm_angv), -5.0)
#
# z_axis, _ = p.multiplyTransforms(
# [0, 0, 0], btm_quat, [0, 0, 1], [0, 0, 0, 1]
# ) # R_cl * unitz[0,0,1]
# rot_metric = np.array(z_axis).dot(np.array([0, 0, 1]))
# reward += np.maximum(rot_metric * 20 - 15, 0.0) * 2
cps = p.getContactPoints(self.top_obj['id'], self.robot.arm_id, -1, self.robot.ee_id) # palm
if len(cps) > 0:
reward += 5.0
f_bp = [0, 3, 6, 9, 12, 17] # 3*4+5
for ind_f in range(5):
con = False
# for dof in self.robot.fin_actdofs[f_bp[ind_f]:f_bp[ind_f+1]]:
# for dof in self.robot.fin_actdofs[(f_bp[ind_f + 1] - 2):f_bp[ind_f + 1]]:
for dof in self.robot.fin_actdofs[(f_bp[ind_f + 1] - 3):f_bp[ind_f + 1]]:
cps = p.getContactPoints(self.top_obj['id'], self.robot.arm_id, -1, dof)
if len(cps) > 0:
con = True
if con:
reward += 5.0
if con and ind_f == 4:
reward += 20.0 # reward thumb even more
reward -= self.robot.get_4_finger_deviation() * 1.5
# object dropped during testing
if top_pos[2] < (self.tz_act + 0.06) and self.timer > self.test_start * self.control_skip:
reward += -15.
self.success = False
# print(self.robot.get_q_dq(range(29, 34))[0])
return self.getExtendedObservation(), reward, False, {"height": self.top_obj['height'],
"radius": self.top_obj['half_width'],
"success": self.success}
def getExtendedObservation(self):
self.observation = self.robot.get_robot_observation(diff_tar=True)
curContact = []
for i in range(self.robot.ee_id, p.getNumJoints(self.robot.arm_id)):
cps = p.getContactPoints(bodyA=self.robot.arm_id, linkIndexA=i)
con_this_link = False
for cp in cps:
if cp[1] != cp[2]: # not self-collision of the robot
con_this_link = True
break
if con_this_link:
curContact.extend([1.0])
else:
curContact.extend([-1.0])
self.observation.extend(curContact)
if self.use_obj_heights:
xyz = np.array([self.tx, self.ty, self.tz])
self.observation.extend(list(xyz))
if self.obs_noise:
self.observation.extend(list(xyz))
else:
self.observation.extend([self.tx_act, self.ty_act, self.tz_act])
# top height info, btm height info included in tz
self.observation.extend([self.half_height_est])
else:
xy = np.array([self.tx, self.ty])
self.observation.extend(list(xy))
if self.obs_noise:
self.observation.extend(list(xy))
else:
self.observation.extend([self.tx_act, self.ty_act])
# btm obj shape is not important.
if self.top_obj['shape'] == p.GEOM_BOX:
shape_info = [1, -1, -1]
elif self.top_obj['shape'] == p.GEOM_CYLINDER:
shape_info = [-1, 1, -1]
elif self.top_obj['shape'] == p.GEOM_SPHERE:
shape_info = [-1, -1, 1]
else:
assert False
self.observation.extend(shape_info)
return self.observation
def append_final_state(self):
# output obj in palm frame (no need to output palm frame in world)
# output finger q's, finger tar q's.
# velocity will be assumed to be zero at the end of transporting phase
# return a dict.
assert not self.has_test_phase
obj_pos, obj_quat = p.getBasePositionAndOrientation(self.top_obj['id']) # w2o
hand_pos, hand_quat = self.robot.get_link_pos_quat(self.robot.ee_id) # w2p
inv_h_p, inv_h_q = p.invertTransform(hand_pos, hand_quat) # p2w
o_p_hf, o_q_hf = p.multiplyTransforms(inv_h_p, inv_h_q, obj_pos, obj_quat) # p2w*w2o
unitz_hf = p.multiplyTransforms([0, 0, 0], o_q_hf, [0, 0, 1], [0, 0, 0, 1])[0]
# TODO: a heuritics that if obj up_vec points outside palm, then probably holding bottom & bad
# ball does not care up vector pointing
if self.top_obj['shape'] != p.GEOM_SPHERE and unitz_hf[1] < -0.3:
return
else:
fin_q, _ = self.robot.get_q_dq(self.robot.all_findofs)
shape = self.top_obj['shape']
dim = utils.to_bullet_dimension(shape, self.top_obj['half_width'], self.top_obj['height'])
state = {'obj_pos_in_palm': o_p_hf, 'obj_quat_in_palm': o_q_hf,
'all_fin_q': fin_q, 'fin_tar_q': self.robot.tar_fin_q,
'obj_dim': dim, 'obj_shape': shape}
# print(state)
# print(self.robot.get_joints_last_tau(self.robot.all_findofs))
# self.robot.get_wrist_wrench()
self.final_states.append(state)
def clear_final_states(self):
self.final_states = []
def calc_average_obj_in_palm(self):
assert not self.has_test_phase
count = len(self.final_states)
o_pos_hf_sum = np.array([0., 0, 0])
o_quat_hf_sum = np.array([0., 0, 0, 0])
for state_dict in self.final_states:
o_pos_hf_sum += np.array(state_dict['obj_pos_in_palm'])
o_quat_hf_sum += np.array(state_dict['obj_quat_in_palm'])
o_pos_hf_sum /= count
o_quat_hf_sum /= count # rough estimate of quat average
o_quat_hf_sum /= np.linalg.norm(o_quat_hf_sum) # normalize quat
return list(o_pos_hf_sum), list(o_quat_hf_sum)
    def calc_average_obj_in_palm_rot_invariant(self):
        """Average object pose in the palm frame, invariant to spin about object z.

        Positions are averaged directly; orientations are averaged through the
        object's unit-z direction only, then converted back to a quaternion with
        zero rotation about that axis (that rotation is unobservable here).
        Returns (position_list, quaternion_list).
        """
        assert not self.has_test_phase
        count = len(self.final_states)
        o_pos_hf_sum = np.array([0., 0, 0])
        o_unitz_hf_sum = np.array([0., 0, 0])
        for state_dict in self.final_states:
            o_pos_hf_sum += np.array(state_dict['obj_pos_in_palm'])
            # object z-axis expressed in the palm frame
            unitz_hf = p.multiplyTransforms([0, 0, 0], state_dict['obj_quat_in_palm'], [0, 0, 1], [0, 0, 0, 1])[0]
            o_unitz_hf_sum += np.array(unitz_hf)
        o_pos_hf_sum /= count
        o_unitz_hf_sum /= count  # rough estimate of the mean unit-z direction
        o_unitz_hf_sum /= np.linalg.norm(o_unitz_hf_sum)  # re-normalize to unit length
        x, y, z = o_unitz_hf_sum
        # Euler angles (a1, a2) that rotate [0, 0, 1] onto the averaged z-axis
        a1_solved = np.arcsin(-y)
        a2_solved = np.arctan2(x, z)
        # a3 is set to zero since the system is under-determined
        quat_solved = p.getQuaternionFromEuler([a1_solved, a2_solved, 0])
        # sanity check: the reconstructed quaternion must reproduce the averaged z
        uz_check = p.multiplyTransforms([0, 0, 0], quat_solved, [0, 0, 1], [0, 0, 0, 1])[0]
        assert np.linalg.norm(np.array(o_unitz_hf_sum) - np.array(uz_check)) < 1e-3
        return list(o_pos_hf_sum), list(quat_solved)
    def seed(self, seed=None):
        """Seed the environment RNG (gym convention) and return [seed]."""
        self.np_random, seed = gym.utils.seeding.np_random(seed)
        if self.robot is not None:
            self.robot.np_random = self.np_random  # use the same np_randomizer for robot as for env
        return [seed]
def getSourceCode(self):
s = inspect.getsource(type(self))
s = s + inspect.getsource(type(self.robot))
return s
| [
"pybullet.resetSimulation",
"numpy.arctan2",
"pybullet.setCollisionFilterPair",
"pybullet.applyExternalForce",
"numpy.linalg.norm",
"pybullet.connect",
"gym.utils.seeding.np_random",
"pybullet.getQuaternionFromEuler",
"pybullet.getContactPoints",
"pybullet.getLinkState",
"pybullet.setGravity",
... | [((2875, 2951), 'numpy.array', 'np.array', (['([0.009 / self.control_skip] * 7 + [0.024 / self.control_skip] * 17)'], {}), '([0.009 / self.control_skip] * 7 + [0.024 / self.control_skip] * 17)\n', (2883, 2951), True, 'import numpy as np\n'), ((3035, 3085), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['utils.PALM_EULER_OF_INIT'], {}), '(utils.PALM_EULER_OF_INIT)\n', (3059, 3085), True, 'import pybullet as p\n'), ((3594, 3626), 'numpy.array', 'np.array', (['([1.12234567] * obs_dim)'], {}), '([1.12234567] * obs_dim)\n', (3602, 3626), True, 'import numpy as np\n'), ((3658, 3722), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(-np.inf * obs_dummy)', 'high': '(np.inf * obs_dummy)'}), '(low=-np.inf * obs_dummy, high=np.inf * obs_dummy)\n', (3672, 3722), False, 'import gym, gym.utils.seeding, gym.spaces\n'), ((5015, 5034), 'pybullet.resetSimulation', 'p.resetSimulation', ([], {}), '()\n', (5032, 5034), True, 'import pybullet as p\n'), ((5043, 5117), 'pybullet.setPhysicsEngineParameter', 'p.setPhysicsEngineParameter', ([], {'numSolverIterations': 'utils.BULLET_CONTACT_ITER'}), '(numSolverIterations=utils.BULLET_CONTACT_ITER)\n', (5070, 5117), True, 'import pybullet as p\n'), ((5126, 5155), 'pybullet.setTimeStep', 'p.setTimeStep', (['self._timeStep'], {}), '(self._timeStep)\n', (5139, 5155), True, 'import pybullet as p\n'), ((5164, 5187), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-10)'], {}), '(0, 0, -10)\n', (5176, 5187), True, 'import pybullet as p\n'), ((7741, 7811), 'numpy.array', 'np.array', (["[self.tx_act, self.ty_act, self.tz_act + to['height'] / 2.0]"], {}), "([self.tx_act, self.ty_act, self.tz_act + to['height'] / 2.0])\n", (7749, 7811), True, 'import numpy as np\n'), ((8290, 8308), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (8306, 8308), True, 'import pybullet as p\n'), ((8420, 8446), 'numpy.array', 'np.array', (['self.observation'], {}), '(self.observation)\n', (8428, 8446), True, 'import numpy as 
np\n'), ((10055, 10106), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (["self.top_obj['id']"], {}), "(self.top_obj['id'])\n", (10086, 10106), True, 'import pybullet as p\n'), ((10131, 10167), 'numpy.array', 'np.array', (['[self.tx_act, self.ty_act]'], {}), '([self.tx_act, self.ty_act])\n', (10139, 10167), True, 'import numpy as np\n'), ((11757, 11836), 'pybullet.getContactPoints', 'p.getContactPoints', (["self.top_obj['id']", 'self.robot.arm_id', '(-1)', 'self.robot.ee_id'], {}), "(self.top_obj['id'], self.robot.arm_id, -1, self.robot.ee_id)\n", (11775, 11836), True, 'import pybullet as p\n'), ((15292, 15343), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (["self.top_obj['id']"], {}), "(self.top_obj['id'])\n", (15323, 15343), True, 'import pybullet as p\n'), ((15468, 15506), 'pybullet.invertTransform', 'p.invertTransform', (['hand_pos', 'hand_quat'], {}), '(hand_pos, hand_quat)\n', (15485, 15506), True, 'import pybullet as p\n'), ((15544, 15601), 'pybullet.multiplyTransforms', 'p.multiplyTransforms', (['inv_h_p', 'inv_h_q', 'obj_pos', 'obj_quat'], {}), '(inv_h_p, inv_h_q, obj_pos, obj_quat)\n', (15564, 15601), True, 'import pybullet as p\n'), ((16780, 16801), 'numpy.array', 'np.array', (['[0.0, 0, 0]'], {}), '([0.0, 0, 0])\n', (16788, 16801), True, 'import numpy as np\n'), ((16825, 16849), 'numpy.array', 'np.array', (['[0.0, 0, 0, 0]'], {}), '([0.0, 0, 0, 0])\n', (16833, 16849), True, 'import numpy as np\n'), ((17156, 17185), 'numpy.linalg.norm', 'np.linalg.norm', (['o_quat_hf_sum'], {}), '(o_quat_hf_sum)\n', (17170, 17185), True, 'import numpy as np\n'), ((17419, 17440), 'numpy.array', 'np.array', (['[0.0, 0, 0]'], {}), '([0.0, 0, 0])\n', (17427, 17440), True, 'import numpy as np\n'), ((17465, 17486), 'numpy.array', 'np.array', (['[0.0, 0, 0]'], {}), '([0.0, 0, 0])\n', (17473, 17486), True, 'import numpy as np\n'), ((17891, 17921), 'numpy.linalg.norm', 'np.linalg.norm', (['o_unitz_hf_sum'], {}), 
'(o_unitz_hf_sum)\n', (17905, 17921), True, 'import numpy as np\n'), ((18007, 18020), 'numpy.arcsin', 'np.arcsin', (['(-y)'], {}), '(-y)\n', (18016, 18020), True, 'import numpy as np\n'), ((18041, 18057), 'numpy.arctan2', 'np.arctan2', (['x', 'z'], {}), '(x, z)\n', (18051, 18057), True, 'import numpy as np\n'), ((18144, 18195), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[a1_solved, a2_solved, 0]'], {}), '([a1_solved, a2_solved, 0])\n', (18168, 18195), True, 'import pybullet as p\n'), ((18490, 18523), 'gym.utils.seeding.np_random', 'gym.utils.seeding.np_random', (['seed'], {}), '(seed)\n', (18517, 18523), False, 'import gym, gym.utils.seeding, gym.spaces\n'), ((265, 287), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (285, 287), False, 'import inspect\n'), ((2401, 2417), 'pybullet.connect', 'p.connect', (['p.GUI'], {}), '(p.GUI)\n', (2410, 2417), True, 'import pybullet as p\n'), ((2444, 2463), 'pybullet.connect', 'p.connect', (['p.DIRECT'], {}), '(p.DIRECT)\n', (2453, 2463), True, 'import pybullet as p\n'), ((9702, 9720), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (9718, 9720), True, 'import pybullet as p\n'), ((10638, 10695), 'pybullet.getLinkState', 'p.getLinkState', (['self.robot.arm_id', 'self.robot.fin_tips[4]'], {}), '(self.robot.arm_id, self.robot.fin_tips[4])\n', (10652, 10695), True, 'import pybullet as p\n'), ((10835, 10886), 'pybullet.getLinkState', 'p.getLinkState', (['self.robot.arm_id', 'self.robot.ee_id'], {}), '(self.robot.arm_id, self.robot.ee_id)\n', (10849, 10886), True, 'import pybullet as p\n'), ((13336, 13369), 'pybullet.getNumJoints', 'p.getNumJoints', (['self.robot.arm_id'], {}), '(self.robot.arm_id)\n', (13350, 13369), True, 'import pybullet as p\n'), ((13390, 13447), 'pybullet.getContactPoints', 'p.getContactPoints', ([], {'bodyA': 'self.robot.arm_id', 'linkIndexA': 'i'}), '(bodyA=self.robot.arm_id, linkIndexA=i)\n', (13408, 13447), True, 'import pybullet as p\n'), ((13877, 
13914), 'numpy.array', 'np.array', (['[self.tx, self.ty, self.tz]'], {}), '([self.tx, self.ty, self.tz])\n', (13885, 13914), True, 'import numpy as np\n'), ((14296, 14324), 'numpy.array', 'np.array', (['[self.tx, self.ty]'], {}), '([self.tx, self.ty])\n', (14304, 14324), True, 'import numpy as np\n'), ((15633, 15697), 'pybullet.multiplyTransforms', 'p.multiplyTransforms', (['[0, 0, 0]', 'o_q_hf', '[0, 0, 1]', '[0, 0, 0, 1]'], {}), '([0, 0, 0], o_q_hf, [0, 0, 1], [0, 0, 0, 1])\n', (15653, 15697), True, 'import pybullet as p\n'), ((16922, 16961), 'numpy.array', 'np.array', (["state_dict['obj_pos_in_palm']"], {}), "(state_dict['obj_pos_in_palm'])\n", (16930, 16961), True, 'import numpy as np\n'), ((16991, 17031), 'numpy.array', 'np.array', (["state_dict['obj_quat_in_palm']"], {}), "(state_dict['obj_quat_in_palm'])\n", (16999, 17031), True, 'import numpy as np\n'), ((17559, 17598), 'numpy.array', 'np.array', (["state_dict['obj_pos_in_palm']"], {}), "(state_dict['obj_pos_in_palm'])\n", (17567, 17598), True, 'import numpy as np\n'), ((17744, 17762), 'numpy.array', 'np.array', (['unitz_hf'], {}), '(unitz_hf)\n', (17752, 17762), True, 'import numpy as np\n'), ((18216, 18285), 'pybullet.multiplyTransforms', 'p.multiplyTransforms', (['[0, 0, 0]', 'quat_solved', '[0, 0, 1]', '[0, 0, 0, 1]'], {}), '([0, 0, 0], quat_solved, [0, 0, 1], [0, 0, 0, 1])\n', (18236, 18285), True, 'import pybullet as p\n'), ((3473, 3502), 'numpy.array', 'np.array', (['([-1.0] * action_dim)'], {}), '([-1.0] * action_dim)\n', (3481, 3502), True, 'import numpy as np\n'), ((3506, 3535), 'numpy.array', 'np.array', (['([+1.0] * action_dim)'], {}), '([+1.0] * action_dim)\n', (3514, 3535), True, 'import numpy as np\n'), ((6729, 6871), 'pybullet.createConstraint', 'p.createConstraint', (["bo['id']", '(-1)', '(-1)', '(-1)', 'p.JOINT_FIXED', '[0, 0, 0]', '[0, 0, 0]'], {'childFramePosition': 'btm_xyz', 'childFrameOrientation': 'btm_quat'}), "(bo['id'], -1, -1, -1, p.JOINT_FIXED, [0, 0, 0], [0, 0, 0\n ], 
childFramePosition=btm_xyz, childFrameOrientation=btm_quat)\n", (6747, 6871), True, 'import pybullet as p\n'), ((8924, 9010), 'pybullet.setCollisionFilterPair', 'p.setCollisionFilterPair', (["self.top_obj['id']", 'bottom_id', '(-1)', '(-1)'], {'enableCollision': '(0)'}), "(self.top_obj['id'], bottom_id, -1, -1,\n enableCollision=0)\n", (8948, 9010), True, 'import pybullet as p\n'), ((9033, 9084), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (["self.top_obj['id']"], {}), "(self.top_obj['id'])\n", (9064, 9084), True, 'import pybullet as p\n'), ((9115, 9149), 'pybullet.invertTransform', 'p.invertTransform', (['[0, 0, 0]', 'quat'], {}), '([0, 0, 0], quat)\n', (9132, 9149), True, 'import pybullet as p\n'), ((9183, 9257), 'pybullet.multiplyTransforms', 'p.multiplyTransforms', (['[0, 0, 0]', 'quat_inv', 'self.force_global', '[0, 0, 0, 1]'], {}), '([0, 0, 0], quat_inv, self.force_global, [0, 0, 0, 1])\n', (9203, 9257), True, 'import pybullet as p\n'), ((9274, 9367), 'pybullet.applyExternalForce', 'p.applyExternalForce', (["self.top_obj['id']", '(-1)', 'force_local', '[0, 0, 0]'], {'flags': 'p.LINK_FRAME'}), "(self.top_obj['id'], -1, force_local, [0, 0, 0], flags=\n p.LINK_FRAME)\n", (9294, 9367), True, 'import pybullet as p\n'), ((9766, 9798), 'time.sleep', 'time.sleep', (['(self._timeStep * 0.5)'], {}), '(self._timeStep * 0.5)\n', (9776, 9798), False, 'import time\n'), ((10216, 10237), 'numpy.array', 'np.array', (['top_pos[:2]'], {}), '(top_pos[:2])\n', (10224, 10237), True, 'import numpy as np\n'), ((10258, 10282), 'numpy.minimum', 'np.minimum', (['xy_dist', '(0.4)'], {}), '(xy_dist, 0.4)\n', (10268, 10282), True, 'import numpy as np\n'), ((10469, 10505), 'pybullet.getLinkState', 'p.getLinkState', (['self.robot.arm_id', 'i'], {}), '(self.robot.arm_id, i)\n', (10483, 10505), True, 'import pybullet as p\n'), ((12273, 12339), 'pybullet.getContactPoints', 'p.getContactPoints', (["self.top_obj['id']", 'self.robot.arm_id', '(-1)', 'dof'], {}), 
"(self.top_obj['id'], self.robot.arm_id, -1, dof)\n", (12291, 12339), True, 'import pybullet as p\n'), ((17622, 17714), 'pybullet.multiplyTransforms', 'p.multiplyTransforms', (['[0, 0, 0]', "state_dict['obj_quat_in_palm']", '[0, 0, 1]', '[0, 0, 0, 1]'], {}), "([0, 0, 0], state_dict['obj_quat_in_palm'], [0, 0, 1],\n [0, 0, 0, 1])\n", (17642, 17714), True, 'import pybullet as p\n'), ((10931, 10953), 'numpy.array', 'np.array', (['palm_com_pos'], {}), '(palm_com_pos)\n', (10939, 10953), True, 'import numpy as np\n'), ((10956, 10973), 'numpy.array', 'np.array', (['top_pos'], {}), '(top_pos)\n', (10964, 10973), True, 'import numpy as np\n'), ((18319, 18343), 'numpy.array', 'np.array', (['o_unitz_hf_sum'], {}), '(o_unitz_hf_sum)\n', (18327, 18343), True, 'import numpy as np\n'), ((18346, 18364), 'numpy.array', 'np.array', (['uz_check'], {}), '(uz_check)\n', (18354, 18364), True, 'import numpy as np\n'), ((10558, 10575), 'numpy.array', 'np.array', (['tip_pos'], {}), '(tip_pos)\n', (10566, 10575), True, 'import numpy as np\n'), ((10578, 10595), 'numpy.array', 'np.array', (['top_pos'], {}), '(top_pos)\n', (10586, 10595), True, 'import numpy as np\n'), ((10761, 10778), 'numpy.array', 'np.array', (['tip_pos'], {}), '(tip_pos)\n', (10769, 10778), True, 'import numpy as np\n'), ((10781, 10798), 'numpy.array', 'np.array', (['top_pos'], {}), '(top_pos)\n', (10789, 10798), True, 'import numpy as np\n')] |
# Copyright (C) 2016 <NAME>
#
# This file is part of GM81.
#
# GM81 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GM81 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GM81. If not, see <http://www.gnu.org/licenses/>.
# This module implements the empirical spectrum of internal waves developed by
# Garrett and Munk, in the incarnation presented in Munk's chapter in Evolution
# of Physical Oceanography, which can be downloaded here:
# http://ocw.mit.edu/resources/res-12-000-evolution-of-physical-oceanography-spring-2007/part-2/wunsch_chapter9.pdf
# The variable names follow Munk's notation.
import numpy as np
# GM energy parameter (dimensionless, Munk 1981)
E = 6.3e-5
# reference mode number j_* in the mode distribution H(j)
js = 3
# normalization sum_{j=1}^infty (j^2+j_*^2)^-1, via the closed form
# (pi*a*coth(pi*a) - 1)/(2*a^2)
jsum = (np.pi*js/np.tanh(np.pi*js)-1)/(2*js**2)
# gravitational acceleration [m s^-2]
g = 9.81
def omg_k_j(k, j, f, N0, b):
    """Dispersion relation: frequency omega for horizontal wavenumber k, mode j.

    f is the Coriolis frequency, N0 the surface buoyancy frequency and b the
    stratification e-folding scale (Munk 1981 notation).
    """
    m_j = np.pi*j/b  # equivalent vertical wavenumber of mode j
    numerator = N0**2*k**2 + f**2*m_j**2
    denominator = k**2 + m_j**2
    return np.sqrt(numerator/denominator)
def k_omg_j(omg, j, f, N0, b):
    """Invert the dispersion relation: horizontal wavenumber for frequency omg, mode j."""
    ratio = (omg**2 - f**2)/(N0**2 - omg**2)
    return np.sqrt(ratio)*np.pi*j/b
def B(omg, f):
# Munk's B(omg) describing the frequency distribution
return 2/np.pi*f/omg/np.sqrt(omg**2-f**2)
def H(j):
    # Munk's H(j): mode-number distribution ~ (j^2 + j_*^2)^-1, normalized by
    # the module-level constant `jsum` so that it sums to one over all modes
    return 1./(j**2+js**2)/jsum
def E_omg_j(omg, j, f):
    """Munk's E(omg, j): separable GM energy density in frequency/mode space."""
    density = B(omg, f)*H(j)*E
    return density
def E_k_j(k, j, f, N, N0, b):
    """Munk's E(omg, j) transformed to horizontal-wavenumber space.

    E(k, j) = E(omg, j) * domg/dk, with the Jacobian taken from the
    dispersion relation (9.23a) in Munk (1981).
    """
    omg = omg_k_j(k, j, f, N0, b)
    jacobian = (N0**2 - omg**2)/omg*k/(k**2 + (np.pi*j/b)**2)  # domg/dk
    return E_omg_j(omg, j, f)*jacobian
def P_k_j(k, j, f, N, N0, b):
    """Potential-energy spectrum (N^2 x displacement) vs horizontal wavenumber k, mode j."""
    omg = omg_k_j(k, j, f, N0, b)
    prefactor = b**2*N0*N*(omg**2 - f**2)/omg**2
    return prefactor*E_k_j(k, j, f, N, N0, b)
def K_k_j(k, j, f, N, N0, b):
    """Kinetic-energy spectrum vs horizontal wavenumber k and mode j."""
    omg = omg_k_j(k, j, f, N0, b)
    prefactor = b**2*N0*N*(omg**2 + f**2)/omg**2
    return prefactor*E_k_j(k, j, f, N, N0, b)
def eta_k_j(k, j, f, N, N0, b):
    """SSH spectrum vs horizontal wavenumber k and mode j (conversion via g)."""
    omg = omg_k_j(k, j, f, N0, b)
    transfer = (omg**2 - f**2)**2/(f**2*(omg**2 + f**2))
    return transfer*K_k_j(k, j, f, N, N0, b)/k**2*f**2/g**2
def P_omg_j(omg, j, f, N, N0, b):
    """Potential-energy spectrum (N^2 x displacement) vs frequency omg and mode j."""
    prefactor = b**2*N0*N*(omg**2 - f**2)/omg**2
    return prefactor*E_omg_j(omg, j, f)
def K_omg_j(omg, j, f, N, N0, b):
    """Kinetic-energy spectrum vs frequency omg and mode j."""
    prefactor = b**2*N0*N*(omg**2 + f**2)/omg**2
    return prefactor*E_omg_j(omg, j, f)
def eta_omg_j(omg, j, f, N, N0, b):
    """SSH spectrum vs frequency omg and mode j (conversion via g)."""
    k = k_omg_j(omg, j, f, N0, b)
    transfer = (omg**2 - f**2)**2/(f**2*(omg**2 + f**2))
    return transfer*K_omg_j(omg, j, f, N, N0, b)/k**2*f**2/g**2
def sqrt_trapz(kh, S):
    """Integrate S/sqrt(kh^2 - kh[0]^2) over all kh for piecewise-linear S.

    Each interval [x0, x1] with endpoint values (s0, s1) is integrated in
    closed form, which handles the inverse-square-root singularity at kh[0].
    """
    x0 = kh[:-1]
    x1 = kh[1:]
    s0 = S[:-1]
    s1 = S[1:]
    kmin = kh[0]
    r0 = np.sqrt(x0**2 - kmin**2)
    r1 = np.sqrt(x1**2 - kmin**2)
    linear_part = (s0 - s1)*(r0 - r1)
    log_part = (x0*s1 - x1*s0)*np.log((x0 + r0)/(x1 + r1))
    return np.sum((linear_part + log_part)/(x1 - x0))
def calc_1d(k, S):
    """1D wavenumber spectrum from a 2D isotropic wavenumber spectrum.

    S1d(k) = 2/pi * int_k^infty S2d(kh)/sqrt(kh^2 - k^2) dkh, normalized so
    that int_0^infty S1d dk = int_0^infty S2d dkh.
    """
    return np.array([2/np.pi*sqrt_trapz(k[i:], S[i:]) for i in range(k.size)])
| [
"numpy.empty",
"numpy.tanh",
"numpy.sqrt"
] | [((1345, 1443), 'numpy.sqrt', 'np.sqrt', (['((N0 ** 2 * k ** 2 + f ** 2 * (np.pi * j / b) ** 2) / (k ** 2 + (np.pi * j /\n b) ** 2))'], {}), '((N0 ** 2 * k ** 2 + f ** 2 * (np.pi * j / b) ** 2) / (k ** 2 + (np.\n pi * j / b) ** 2))\n', (1352, 1443), True, 'import numpy as np\n'), ((4142, 4158), 'numpy.empty', 'np.empty', (['k.size'], {}), '(k.size)\n', (4150, 4158), True, 'import numpy as np\n'), ((1670, 1696), 'numpy.sqrt', 'np.sqrt', (['(omg ** 2 - f ** 2)'], {}), '(omg ** 2 - f ** 2)\n', (1677, 1696), True, 'import numpy as np\n'), ((1159, 1178), 'numpy.tanh', 'np.tanh', (['(np.pi * js)'], {}), '(np.pi * js)\n', (1166, 1178), True, 'import numpy as np\n'), ((1523, 1574), 'numpy.sqrt', 'np.sqrt', (['((omg ** 2 - f ** 2) / (N0 ** 2 - omg ** 2))'], {}), '((omg ** 2 - f ** 2) / (N0 ** 2 - omg ** 2))\n', (1530, 1574), True, 'import numpy as np\n'), ((3790, 3814), 'numpy.sqrt', 'np.sqrt', (['(a ** 2 - k ** 2)'], {}), '(a ** 2 - k ** 2)\n', (3797, 3814), True, 'import numpy as np\n'), ((3809, 3833), 'numpy.sqrt', 'np.sqrt', (['(b ** 2 - k ** 2)'], {}), '(b ** 2 - k ** 2)\n', (3816, 3833), True, 'import numpy as np\n'), ((3849, 3873), 'numpy.sqrt', 'np.sqrt', (['(a ** 2 - k ** 2)'], {}), '(a ** 2 - k ** 2)\n', (3856, 3873), True, 'import numpy as np\n'), ((3872, 3896), 'numpy.sqrt', 'np.sqrt', (['(b ** 2 - k ** 2)'], {}), '(b ** 2 - k ** 2)\n', (3879, 3896), True, 'import numpy as np\n')] |
import numpy as np
from transforms.gaf import GAF
from models.cnn import CNN
class CNNTimeSeries(CNN):
    """CNN regressor fed with GAF-encoded windows of a univariate time series.

    Keeps a rolling batch of GAF images (``dataX``) and their next-step scalar
    targets (``dataY``); once ``batch_size + 1`` windows have been recorded,
    ``train`` can fit the model batch-by-batch.
    """
    def __init__(self, image_shape, prediction_shape, batch_size, extremes=None):
        """
        Args:
            image_shape: input image shape, forwarded to the CNN base class.
            prediction_shape: output shape, forwarded to the CNN base class.
            batch_size: number of GAF images kept in the rolling window.
            extremes: optional [min, max] normalization bounds. A fresh
                [None, None] is used when omitted.
        """
        super().__init__(image_shape, prediction_shape)
        self.batch_size = batch_size
        # Fix for the mutable-default-argument pitfall: the old default
        # `extremes=[None, None]` was one list shared by every instance.
        self.extremes = [None, None] if extremes is None else extremes
        self.dataX = []  # rolling window of GAF images (model inputs)
        self.dataY = []  # next-step targets, aligned one step ahead of dataX
        self.isReadyToTrain = False
    def _preprocess(self, sequence):
        """GAF-encode `sequence`; return [encoded_image, first_series_value]."""
        gaf = GAF(sequence, self.extremes)
        self.extremes = gaf.extremes  # keep normalization bounds up to date
        return [gaf.encoded, gaf.series[0]]
    def record(self, sequence):
        """Append one observation window, maintaining a fixed-size rolling batch."""
        preprocessed = self._preprocess(sequence)
        if len(self.dataX) > 0:
            # if any input (GAF image) exists in `dataX`, we assume that its
            # corresponding output (float) is the first value in `sequence`
            self.dataY.append(preprocessed[1])
        # append a new GAF image to `dataX`
        self.dataX.append(preprocessed[0])
        if len(self.dataX) > self.batch_size:
            # more than one batch's worth of images stored: drop the oldest
            self.dataX.pop(0)
            # if `isReadyToTrain` is already true, this must be at least the
            # second time through this branch, so `dataY` must be shifted too
            if self.isReadyToTrain:
                self.dataY.pop(0)
            else:
                self.isReadyToTrain = True
    def train(self):
        """Run one train_on_batch over the rolling window; no-op until ready."""
        if self.isReadyToTrain:
            x = np.concatenate(self.dataX)
            x = np.expand_dims(x, -1)  # add trailing channel axis
            y = np.array(self.dataY)
            return self.model.train_on_batch(x, y)
    def predict_next_value_in(self, sequence):
        """Predict the next (de-normalized) value following `sequence`."""
        # NOTE(review): _preprocess returns [image, value]; feeding that pair
        # into expand_dims looks suspect — confirm intended input shape.
        sequence = np.expand_dims(self._preprocess(sequence), [0, -1])
        prediction = self.model.predict(sequence)
        return prediction * self.extremes[1] + self.extremes[0]
    def predict_from_record(self):
        """Predict from the most recently recorded window."""
        # NOTE(review): dataX[-1] is already GAF-encoded, yet it is passed
        # through _preprocess again — verify against callers.
        sequence = np.expand_dims(self._preprocess(self.dataX[-1]), [0, -1])
        prediction = self.model.predict(sequence)
        return prediction * self.extremes[1] + self.extremes[0]
| [
"transforms.gaf.GAF",
"numpy.array",
"numpy.expand_dims",
"numpy.concatenate"
] | [((458, 486), 'transforms.gaf.GAF', 'GAF', (['sequence', 'self.extremes'], {}), '(sequence, self.extremes)\n', (461, 486), False, 'from transforms.gaf import GAF\n'), ((1566, 1592), 'numpy.concatenate', 'np.concatenate', (['self.dataX'], {}), '(self.dataX)\n', (1580, 1592), True, 'import numpy as np\n'), ((1609, 1630), 'numpy.expand_dims', 'np.expand_dims', (['x', '(-1)'], {}), '(x, -1)\n', (1623, 1630), True, 'import numpy as np\n'), ((1647, 1667), 'numpy.array', 'np.array', (['self.dataY'], {}), '(self.dataY)\n', (1655, 1667), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import PolynomialFeatures
import matplotlib.pyplot as plt
import matplotlib.colors as mc
import random
import colorsys
from utils import *
import time
import os
# fixed seed so the split, the local partitions and the models are reproducible
randomState=42
# SUSY dataset: column 0 is the binary label, the rest are features
data = np.genfromtxt("data/SUSY.csv", delimiter=',')
X = data[:,1:]
y = data[:,0]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=randomState)
# experiment hyper-parameters
rounds = 500      # number of aggregation rounds
b = 1
bavg = 50         # passed to runRadonPoint as `b` — aggregation period, presumably; confirm
m = 441           # number of local learners
n_local = 2
random.seed(randomState)
rng = np.random.RandomState(randomState)
name = "RadonOnly"
# set up a timestamped folder for logging this run
exp_path = name + "_" + time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime(time.time()))
os.mkdir(exp_path)
# log basic experiment properties
# NOTE(review): `b` is logged under the label "d" and `bavg` under "b" —
# looks like a labeling mix-up; confirm before comparing logs across runs.
f = open(exp_path+"/setup.txt",'w')
out = "aggregator = RadonPoint \n"
out += "m = "+str(m)+"\n n_local = "+str(n_local)+"\n"
out += "d = "+str(b)+"\n b = "+str(bavg)+"\n"
out += "rounds = "+str(rounds)+"\n"
out += "model = SGDClassifier(alpha=0.0001, random_state = rng, learning_rate='adaptive', eta0=0.01, early_stopping=False)\n"
out += "randomState = "+str(randomState)+"\n"
f.write(out)
f.close()
# partition the training data into m local datasets (helper from utils)
local_Xtrains, local_ytrains = splitIntoLocalData(X_train, y_train, m, n_local, rng)
print("Starting...")
# one independent SGD classifier per local dataset
localModels = np.array([SGDClassifier(alpha=0.0001, random_state = rng, learning_rate='adaptive', eta0=0.01, early_stopping=False) for _ in range(m)])
m_rad, trainACCs_rad, testACCs_rad = runRadonPoint(local_Xtrains, local_ytrains, localModels, X_test, y_test, rounds, rng, b=bavg, exp_path = exp_path)
print("Radon Only done.")
# persist final model and accuracy curves for later analysis
pickle.dump(m_rad, open(os.path.join(exp_path, "finalModel.pck"),'wb'))
pickle.dump(trainACCs_rad, open(os.path.join(exp_path, "trainACC.pck"),'wb'))
pickle.dump(testACCs_rad, open(os.path.join(exp_path, "testACC.pck"),'wb'))
plotResults(trainACCs_rad, testACCs_rad, 'Radon point')
| [
"os.mkdir",
"sklearn.linear_model.SGDClassifier",
"sklearn.model_selection.train_test_split",
"numpy.genfromtxt",
"numpy.random.RandomState",
"time.time",
"random.seed",
"os.path.join"
] | [((428, 473), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data/SUSY.csv"""'], {'delimiter': '""","""'}), "('data/SUSY.csv', delimiter=',')\n", (441, 473), True, 'import numpy as np\n'), ((539, 602), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': 'randomState'}), '(X, y, test_size=0.2, random_state=randomState)\n', (555, 602), False, 'from sklearn.model_selection import train_test_split\n'), ((654, 678), 'random.seed', 'random.seed', (['randomState'], {}), '(randomState)\n', (665, 678), False, 'import random\n'), ((685, 719), 'numpy.random.RandomState', 'np.random.RandomState', (['randomState'], {}), '(randomState)\n', (706, 719), True, 'import numpy as np\n'), ((857, 875), 'os.mkdir', 'os.mkdir', (['exp_path'], {}), '(exp_path)\n', (865, 875), False, 'import os\n'), ((1446, 1554), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'alpha': '(0.0001)', 'random_state': 'rng', 'learning_rate': '"""adaptive"""', 'eta0': '(0.01)', 'early_stopping': '(False)'}), "(alpha=0.0001, random_state=rng, learning_rate='adaptive',\n eta0=0.01, early_stopping=False)\n", (1459, 1554), False, 'from sklearn.linear_model import SGDClassifier\n'), ((1776, 1816), 'os.path.join', 'os.path.join', (['exp_path', '"""finalModel.pck"""'], {}), "(exp_path, 'finalModel.pck')\n", (1788, 1816), False, 'import os\n'), ((1856, 1894), 'os.path.join', 'os.path.join', (['exp_path', '"""trainACC.pck"""'], {}), "(exp_path, 'trainACC.pck')\n", (1868, 1894), False, 'import os\n'), ((1933, 1970), 'os.path.join', 'os.path.join', (['exp_path', '"""testACC.pck"""'], {}), "(exp_path, 'testACC.pck')\n", (1945, 1970), False, 'import os\n'), ((843, 854), 'time.time', 'time.time', ([], {}), '()\n', (852, 854), False, 'import time\n')] |
# https://towardsdatascience.com/6-steps-to-write-any-machine-learning-algorithm-from-scratch-perceptron-case-study-335f638a70f3
# Importing libraries
# NAND Gate
# Note: x0 is a dummy variable for the bias term
# x0 x1 x2
import numpy as np
# NAND truth table inputs; column x0 is a constant 1 (bias term)
x = [[1., 0., 0.],
     [1., 0., 1.],
     [1., 1., 0.],
     [1., 1., 1.]]
# NAND targets: only input (1, 1) maps to 0
y =[1.,
    1.,
    1.,
    0.]
# weights start at zero, one per input column (bias, x1, x2)
w = np.zeros(len(x[0]))
# Activation threshold
z = 0.0
# Dot product of the weights with the FIRST sample only
f = np.dot(w, x[0])
# step activation: fire iff the weighted sum exceeds the threshold
if f > z:
    yhat = 1.
else:
    yhat = 0.
# learning rate
eta = 0.1
# single perceptron update using sample 0 only (a full loop over samples and
# epochs is sketched in the commented-out code below)
w[0] = w[0] + eta*(y[0] - yhat)*x[0][0]
w[1] = w[1] + eta*(y[0] - yhat)*x[0][1]
w[2] = w[2] + eta*(y[0] - yhat)*x[0][2]
print(w[0])
# print(x[0])
# print(y[0])
# print(w[0])
# print(y[0])
# print(x[0][0])
# print(x[0][1])
# print(x[0][2])
# print(len(y))
# print(len(x))
# import numpy as np
# # Perceptron function
# def perceptron(x, y, z, eta, t):
# '''
# Input Parameters:
# x: data set of input features
# y: actual outputs
# z: activation function threshold
# eta: learning rate
# t: number of iterations
# '''
# # initializing the weights
# w = np.zeros(len(x[0]))
# n = 0
# # initializing additional parameters to compute sum-of-squared errors
# yhat_vec = np.ones(len(y)) # vector for predictions
# errors = np.ones(len(y)) # vector for errors (actual - predictions)
# J = [] # vector for the SSE cost function
# while n < t: for i in xrange(0, len(x)): # dot product f = np.dot(x[i], w) # activation function if f >= z:
# yhat = 1.
# else:
# yhat = 0.
# yhat_vec[i] = yhat
# # updating the weights
# for j in xrange(0, len(w)):
# w[j] = w[j] + eta*(y[i]-yhat)*x[i][j]
# n += 1
# # computing the sum-of-squared errors
# for i in xrange(0,len(y)):
# errors[i] = (y[i]-yhat_vec[i])**2
# J.append(0.5*np.sum(errors))
# return w, J | [
"numpy.dot"
] | [((428, 443), 'numpy.dot', 'np.dot', (['w', 'x[0]'], {}), '(w, x[0])\n', (434, 443), True, 'import numpy as np\n')] |
#! /usr/bin/env python
import numpy as np
# target outputs for inputs x = 0, 1: this is the NOT function (0 -> 1, 1 -> 0)
simples = [1,0]
def sigmd(x):
    """Logistic sigmoid 1/(1 + exp(-x))."""
    return 1 / (1 + np.exp(-x))
def dsigmd(x):
    """Derivative of the logistic sigmoid: s(x) * (1 - s(x))."""
    s = sigmd(x)
    return s * (1 - s)
#
#Cross-entropy loss
#
def loss(y_lab,f):
    """Binary cross-entropy between label y_lab and prediction f."""
    ce = y_lab*np.log(f) + (1 - y_lab)*np.log(1 - f)
    return -ce
def nn(w,b,x):
    """Single logistic neuron: sigmoid of the affine map w*x + b."""
    pre_activation = w*x + b
    return sigmd(pre_activation)
def dw(x_lab,f,y_lab):
    """Cross-entropy gradient w.r.t. the weight: x * (prediction - label)."""
    residual = f - y_lab
    return x_lab*residual
def db(f,y_lab):
    """Cross-entropy gradient w.r.t. the bias: prediction - label."""
    residual = f - y_lab
    return residual
# initial weight and bias of the single logistic neuron
w = 1.0
b = 2.0
# previous-step momentum (velocity) terms for w and b
V_w_1 = 0.0
V_b_1 = 0.0
for i in range(300):
    # gradients accumulated over the two training samples of this epoch
    d_w = 0.0
    d_b = 0.0
    for j in range(2):
        x_label = j
        y_label = simples[j]
        # Nesterov-style lookahead: evaluate the gradient at the point the
        # momentum step would reach (w - 0.9 * V)
        tmp_w = w - 0.9 * V_w_1
        tmp_b = b - 0.9 * V_b_1
        forward_nn = nn(tmp_w,tmp_b,x_label)
        l = loss(y_label,forward_nn)
        d_w += dw(x_label,forward_nn,y_label)
        d_b += db(forward_nn,y_label)
    # momentum update: decay 0.9, learning rate 0.15;
    # the /20 divisor looks like a leftover batch-size constant — TODO confirm
    V_w = 0.9 * V_w_1 + 0.15 * (d_w / 20)
    w = w - V_w
    V_b = 0.9 * V_b_1 + 0.15 * (d_b / 20)
    b = b - V_b
    V_w_1 = V_w
    V_b_1 = V_b
    if((i%10) == 0):
        # `l` holds the loss of the last sample processed in this epoch
        print('Now w: %f, b: %f,loss: %f' % (w,b,l))
# verify the learned NOT behavior on both inputs
print('verify 0: %f,1: %f' % (nn(w,b,0),nn(w,b,1)))
| [
"numpy.log",
"numpy.exp"
] | [((98, 108), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (104, 108), True, 'import numpy as np\n'), ((229, 238), 'numpy.log', 'np.log', (['f'], {}), '(f)\n', (235, 238), True, 'import numpy as np\n'), ((251, 264), 'numpy.log', 'np.log', (['(1 - f)'], {}), '(1 - f)\n', (257, 264), True, 'import numpy as np\n')] |
"""
Code to sample from the latent space and generate the corresponding audio. The input is the conditional parameter pitch.
"""
import torch
from torchvision import transforms
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as pyp
from vae_krishna import *
import sys
from dataset import *
import glob
import ast
sys.path.append('../extra_dependencies/models/')
from hprModel import hprModelAnal,hprModelSynth
from sineModel import sineModelAnal,sineModelSynth
from essentia.standard import MonoLoader
from scipy.io.wavfile import write
from scipy.signal import windows
from scipy import interpolate
import pickle
import os
# Importing our model
from simple_ae import *
import librosa
import sampling_synth as ss
# -----------------------------------------------CVAE----------------------------------------------------------
# Directory where trained cVAE checkpoints (.pth) are stored; the network
# architecture is recovered by parsing the parameter list embedded in the filename.
dir_files = './CVAEparams/'
list_pth_files = glob.glob(dir_files + '*.pth')
# Display the available parameter files
print('Available state files to load are(for cVAE) : ')
for it,i in enumerate(list_pth_files):
	print(it,i.split('/')[-1][:-4])
# Choose the .pth file interactively
idx = (int)(input('Choose the input index'))
# print(list_pth_files[idx])
# Parse the architecture parameters from the filename; the second '_'-separated
# token is a Python literal list of network hyper-parameters
list_params = ast.literal_eval(list_pth_files[idx].split('_')[1])
file_load_VAE = list_pth_files[idx]
# Build the model and load the weights from the .pth file
# Fix device here (currently cpu)
device = 'cpu'
# device = 'cuda'
# Defining the model architecture
dim_cc = list_params[0]          # presumably cepstral-coefficient dimensionality — confirm
flag_cond = list_params[1]       # whether the VAE is conditional
layer_dims_enc = list_params[2]  # encoder layer sizes
latent_dims = list_params[3]     # latent-space dimensionality
layer_dims_dec = list_params[4]  # decoder layer sizes
num_cond = list_params[5]        # number of conditional parameters
cVAE = cVAE_synth(flag_cond = flag_cond, layer_dims_enc = layer_dims_enc, layer_dims_dec = layer_dims_dec, latent_dims = latent_dims, num_cond = num_cond, device = device)
cVAE.load_state_dict(torch.load(file_load_VAE,map_location = 'cpu'))
# Parameters for the Generation/Synthesis (harmonic model analysis settings)
params = {}
params['fs'] = 48000          # sample rate (Hz)
params['W'] = 1024            # analysis window length — presumably samples; confirm
params['N'] = 2048            # FFT size
params['H'] = 256             # hop size in samples (frame rate below is fs/H)
params['t'] = -120            # peak-detection threshold in dB — TODO confirm
params['maxnSines'] = 100     # maximum simultaneous sinusoid tracks
params['nH'] = 100            # maximum number of harmonics
# settings for the iterative cepstral envelope estimation — confirm downstream usage
params_ceps = {}
params_ceps['thresh'] = 0.1
params_ceps['num_iters'] = 10000
octave = 4
# Dictionary mapping note names to frequencies (Hz) in the chosen octave.
# The base frequency is octave*55 Hz (an A); 'C' sits three equal-tempered
# semitones above it, and every following note is one semitone (2^(1/12))
# higher. The loop replaces twelve identical copy-pasted assignments.
dict_fmap = {}
start_frequency = octave*55
step = 2**(1.0/12)
_note_names = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
dict_fmap['C'] = (step**(3))*start_frequency
for _prev_note, _note in zip(_note_names, _note_names[1:]):
    dict_fmap[_note] = step*dict_fmap[_prev_note]
# Directory where the network-generated audio will be written
dir_gen_audio = './dir_netgen_audio/'
try:
	os.makedirs(dir_gen_audio, exist_ok = True)
	print("Directory '%s' created successfully" %dir_gen_audio)
except OSError as error:
	# with exist_ok=True an existing directory no longer raises, so this
	# branch only fires on real failures (e.g. permission errors).
	# Bug fix: the message previously printed a literal '%s' because the
	# format argument was missing.
	print("Directory '%s' exists" % dir_gen_audio)
# Dimensionality of latent space
ld = latent_dims
# Specify the duration of the vibrato note in seconds
dur_n = 3
nf = (int)(params['fs']/(params['H'])*dur_n)
"""
You can take the pitch inputs in two ways (setting the choice variable to '0' or '1')
choice = 0:
Here, you can manually input the pitch contour. Just specify the start and end frequencies. A matplotlib plot will pop, asking you to click at points.
Each point you click is a (pitch,time) pair, and the more points you add, the finer the sampling. The pitch contour will be formed by interpolating appropriately
Once you have specified the contour, close the matplotlib popup window.
choice = 1:
Here, a single note with vibrato is generated. You can specify the vibrato parameters as needed.
An audio file will be saved in the specified directory
"""
choice = 0
if(choice == 0):
    # ______________________________________________________________________________________________________________________________________
    # Choice = 0;
    # Obtaining the Pitch Contour by drawing on matplotlib
    # Obtaining the Contour by passing (pitch,time) coordinates and linearly interpolating the frequencies in between
    # Starting Frequency (Specify)
    f_start = 250
    # Ending frequency
    f_end = 500
    # Clicked points accumulate here: lp_x holds times, lp_y holds pitches (Hz).
    lp_x = []
    lp_y = []
    # Event handler bound to the matplotlib canvas: every left-click inside the
    # axes appends one (time, pitch) point and redraws the growing polyline.
    class LineBuilder:
        def __init__(self, line):
            """Attach this builder to *line* and listen for button presses."""
            self.line = line
            self.xs = list(line.get_xdata())
            self.ys = list(line.get_ydata())
            self.cid = line.figure.canvas.mpl_connect('button_press_event', self)
        def __call__(self, event):
            """Record a clicked (time, pitch) point and update the plot."""
            print('click', event)
            if event.inaxes!=self.line.axes: return
            lp_x.append(event.xdata)
            lp_y.append(event.ydata)
            # print(list_points_clicked)
            self.xs.append(event.xdata)
            self.ys.append(event.ydata)
            self.line.set_data(self.xs, self.ys)
            self.line.figure.canvas.draw()
    fig = pyp.figure()
    ax = fig.add_subplot(111)
    # NOTE(review): the title string is missing its closing parenthesis.
    ax.set_title('click to select the pitch points (they will be linearly interpolated')
    pyp.ylim(f_start,f_end)
    pyp.xlim(0,dur_n)
    line, = ax.plot([0], [f_start]) # empty line
    linebuilder = LineBuilder(line)
    # Blocks until the user closes the window; lp_x/lp_y are then populated.
    pyp.show()
    # Specify array containing the time instants and pitches
    # The pitch contour will be formed by linearly interpolating
    # array_time_instants = np.array([0.5,1.1,2.3,2.5,2.8])
    # array_frequencies = np.array([260,290,250,350,400])
    array_time_instants = np.array(lp_x)
    array_frequencies = np.array(lp_y)
    num_points = array_frequencies.shape[0]
    # Append the start and end frequencies to the main frequency array. Do same with time(start -> 0 and stop-> duration specified)
    array_frequencies = np.insert(array_frequencies,[0,num_points],[f_start,f_end])
    array_time_instants = np.insert(array_time_instants,[0,num_points],[0,dur_n])
    # print(array_frequencies)
    # print(array_time_instants)
    #Assuming that spacing between all frequencies is uniform (i.e. more the frequencies specified, more dense the sampling)
    # nbf = (int)(nf/num_points)
    # Piecewise-linear contour, one value per analysis frame.
    fcontour_Hz = np.zeros(nf)
    for i in range(0,len(array_frequencies) - 1):
        s = array_time_instants[i]
        e = array_time_instants[i+1]
        # print(s,e)
        # Convert segment endpoints from seconds to frame indices.
        s = (int)((s/dur_n)*nf)
        e = (int)((e/dur_n)*nf)
        nbf = (e - s)
        # print(s,e)
        fr = np.linspace(array_frequencies[i],array_frequencies[i+1],nbf)
        fcontour_Hz[s:e] = fr
    # print(fcontour_Hz)
else:
    # ____________________________________________________________________________________________________________________________________
    # Choice = 1;
    # Generating a note with Vibrato (Frequency Modulation)
    # Vibrato pitch contour in Hz
    # Center Frequency in MIDI
    p = 69
    # Obtain f_c by converting the pitch from MIDI to Hz
    f_Hz = 440*2**((p-69)/12)
    # Vibrato depth -- NOTE(review): the comment said 1-2% of f_c but 0.04 is 4%; confirm intent.
    Av = 0.04*f_Hz
    # Vibrato frequency(generally 5-10 Hz)
    fV_act = 6
    # Sub/sampling the frequency according to the Hop Size
    # (angular vibrato increment per analysis frame).
    f_v = 2*np.pi*((fV_act*params['H'])/(params['fs']))
    # Forming the contour
    # The note will begin with a sustain pitch, and then transition into a vibrato
    # Specify the fraction of time the note will remain in sustain
    frac_sus = 0.25
    # First part: constant f_Hz for frac_sus of the frames; second part: sinusoidal FM.
    fcontour_Hz =np.concatenate((f_Hz*np.ones((int)(nf*frac_sus) + 1),f_Hz + Av*np.sin(np.arange((int)((1-frac_sus)*nf))*f_v)))
# Once the pitch contour is obtained, generate the sound by sampling and providing the contour as a conditional parameter.
# Convert from Hz to MIDI frequency
pch = (69 + 12*np.log2(fcontour_Hz/440))
# Obtain a trajectory in the latent space using a random walk
# (small step size keeps the timbre evolving slowly across frames).
z_ss = 0.001*ss.rand_walk(np.zeros(ld), 0.00001, nf)
z_ss1 = torch.FloatTensor(z_ss.T)
cond_inp = torch.FloatTensor(pch)
# Normalize MIDI pitch to [0, 1] as the conditioning input (127 = max MIDI value).
cond_inp = cond_inp.float()/127
# print(z_ss1.shape,cond_inp.shape)
# Sample from the CVAE latent space
s_z_X = cVAE.sample_latent_space(z_ss1,cond_inp.view(-1,1))
cc_network = s_z_X.data.numpy().squeeze()
# Obtain the audio by sampling the spectral envelope at the specified pitch values
a_gen_cVAE = ss.recon_samples_ls(matrix_ceps_coeffs = cc_network.T, midi_pitch = fcontour_Hz, params = params,choice_f = 1)
write(filename = dir_gen_audio + 'gensynth_cVAE.wav', rate = params['fs'], data = a_gen_cVAE.astype('float32')) | [
"sys.path.append",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"os.makedirs",
"matplotlib.pyplot.ylim",
"numpy.log2",
"torch.load",
"torch.FloatTensor",
"numpy.zeros",
"numpy.insert",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.linspace",
"glob.glob",
"sampling_synth.recon_... | [((390, 438), 'sys.path.append', 'sys.path.append', (['"""../extra_dependencies/models/"""'], {}), "('../extra_dependencies/models/')\n", (405, 438), False, 'import sys\n'), ((1060, 1090), 'glob.glob', 'glob.glob', (["(dir_files + '*.pth')"], {}), "(dir_files + '*.pth')\n", (1069, 1090), False, 'import glob\n'), ((7794, 7819), 'torch.FloatTensor', 'torch.FloatTensor', (['z_ss.T'], {}), '(z_ss.T)\n', (7811, 7819), False, 'import torch\n'), ((7831, 7853), 'torch.FloatTensor', 'torch.FloatTensor', (['pch'], {}), '(pch)\n', (7848, 7853), False, 'import torch\n'), ((8157, 8264), 'sampling_synth.recon_samples_ls', 'ss.recon_samples_ls', ([], {'matrix_ceps_coeffs': 'cc_network.T', 'midi_pitch': 'fcontour_Hz', 'params': 'params', 'choice_f': '(1)'}), '(matrix_ceps_coeffs=cc_network.T, midi_pitch=fcontour_Hz,\n params=params, choice_f=1)\n', (8176, 8264), True, 'import sampling_synth as ss\n'), ((2029, 2074), 'torch.load', 'torch.load', (['file_load_VAE'], {'map_location': '"""cpu"""'}), "(file_load_VAE, map_location='cpu')\n", (2039, 2074), False, 'import torch\n'), ((3047, 3088), 'os.makedirs', 'os.makedirs', (['dir_gen_audio'], {'exist_ok': '(True)'}), '(dir_gen_audio, exist_ok=True)\n', (3058, 3088), False, 'import os\n'), ((5124, 5136), 'matplotlib.pyplot.figure', 'pyp.figure', ([], {}), '()\n', (5134, 5136), True, 'import matplotlib.pyplot as pyp\n'), ((5251, 5275), 'matplotlib.pyplot.ylim', 'pyp.ylim', (['f_start', 'f_end'], {}), '(f_start, f_end)\n', (5259, 5275), True, 'import matplotlib.pyplot as pyp\n'), ((5276, 5294), 'matplotlib.pyplot.xlim', 'pyp.xlim', (['(0)', 'dur_n'], {}), '(0, dur_n)\n', (5284, 5294), True, 'import matplotlib.pyplot as pyp\n'), ((5375, 5385), 'matplotlib.pyplot.show', 'pyp.show', ([], {}), '()\n', (5383, 5385), True, 'import matplotlib.pyplot as pyp\n'), ((5644, 5658), 'numpy.array', 'np.array', (['lp_x'], {}), '(lp_x)\n', (5652, 5658), True, 'import numpy as np\n'), ((5680, 5694), 'numpy.array', 'np.array', 
(['lp_y'], {}), '(lp_y)\n', (5688, 5694), True, 'import numpy as np\n'), ((5888, 5951), 'numpy.insert', 'np.insert', (['array_frequencies', '[0, num_points]', '[f_start, f_end]'], {}), '(array_frequencies, [0, num_points], [f_start, f_end])\n', (5897, 5951), True, 'import numpy as np\n'), ((5971, 6030), 'numpy.insert', 'np.insert', (['array_time_instants', '[0, num_points]', '[0, dur_n]'], {}), '(array_time_instants, [0, num_points], [0, dur_n])\n', (5980, 6030), True, 'import numpy as np\n'), ((6252, 6264), 'numpy.zeros', 'np.zeros', (['nf'], {}), '(nf)\n', (6260, 6264), True, 'import numpy as np\n'), ((6478, 6542), 'numpy.linspace', 'np.linspace', (['array_frequencies[i]', 'array_frequencies[i + 1]', 'nbf'], {}), '(array_frequencies[i], array_frequencies[i + 1], nbf)\n', (6489, 6542), True, 'import numpy as np\n'), ((7644, 7670), 'numpy.log2', 'np.log2', (['(fcontour_Hz / 440)'], {}), '(fcontour_Hz / 440)\n', (7651, 7670), True, 'import numpy as np\n'), ((7759, 7771), 'numpy.zeros', 'np.zeros', (['ld'], {}), '(ld)\n', (7767, 7771), True, 'import numpy as np\n')] |
import ipyleaflet as ll
import numpy as np
import vaex.image
from .plot import BackendBase
import copy
from .utils import debounced
class IpyleafletBackend(BackendBase):
    """vaex plot backend that renders image layers on an ipyleaflet map."""
    # NOTE(review): the mutable default `center=[...]` is shared across calls;
    # it is only stored here, but consider a tuple default.
    def __init__(self, map=None, center=[53.3082834, 6.388399], zoom=12):
        self.map = map
        self._center = center
        self._zoom = zoom
        self.last_image_layer = None
    def create_widget(self, output, plot, dataset, limits):
        """Create (or adopt) the ipyleaflet Map widget and wire up pan/zoom observers."""
        self.plot = plot
        self.dataset = dataset
        self.output = output
        # Keep only the x/y limit pairs; ipyleaflet works in (lat, lon).
        self.limits = np.array(limits)[:2].tolist()
        if self.map is None:
            (xmin, xmax), (ymin, ymax) = limits[:2]
            center = xmin + (xmax - xmin) / 2, ymin + (ymax - ymin) / 2
            # Swap to (lat, lon) order expected by ipyleaflet.
            center = center[1], center[0]
            self.map = ll.Map(center=center, zoom=self._zoom)
        # Any change of the visible map edges re-syncs our plot limits.
        self.map.observe(self._update_limits, "north")
        self.map.observe(self._update_limits, "east")
        self.map.observe(self._update_limits, "south")
        self.map.observe(self._update_limits, "west")
        # self.map.bounds = self.limits
        # self.limits = self.map.bounds[1], self.map.bounds[0] # np.array(limits).tolist()
        # print(self.map.bounds, self.map.west)
        # print(self.limits)
        self.widget = self.map
    def _update_limits(self, *args):
        """Observer callback: copy the map's current edges into self.limits."""
        with self.output:
            # self._progressbar.cancel()
            limits = copy.deepcopy(self.limits)
            limits[0] = (self.map.west, self.map.east)
            limits[1] = (self.map.north, self.map.south)
            self.limits = limits
    @debounced(0.1, method=True)
    def update_image(self, rgb_image):
        """Replace the previous image overlay with a new rendering of rgb_image."""
        with self.output:
            if self.last_image_layer:
                self.map.remove_layer(self.last_image_layer)
            # Flip vertically: image rows run top-down, map latitude runs bottom-up.
            url = vaex.image.rgba_to_url(rgb_image[::-1, ::].copy())
            image = ll.ImageOverlay(url=url, bounds=list(self.map.bounds))
            # print("add ", self.limits, self.map.bounds)
            self.map.add_layer(image)
            self.last_image_layer = image
| [
"numpy.array",
"copy.deepcopy",
"ipyleaflet.Map"
] | [((778, 816), 'ipyleaflet.Map', 'll.Map', ([], {'center': 'center', 'zoom': 'self._zoom'}), '(center=center, zoom=self._zoom)\n', (784, 816), True, 'import ipyleaflet as ll\n'), ((1401, 1427), 'copy.deepcopy', 'copy.deepcopy', (['self.limits'], {}), '(self.limits)\n', (1414, 1427), False, 'import copy\n'), ((530, 546), 'numpy.array', 'np.array', (['limits'], {}), '(limits)\n', (538, 546), True, 'import numpy as np\n')] |
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import dash
from app import app
import numpy
import plotly.graph_objects as go # or plotly.express as px
from elasticsearch import Elasticsearch
# Elasticsearch connection settings for the EVM test results index.
port = 9200
index_name = "evm_tests"
server = "alpine"
# NOTE(review): credentials are hard-coded; move to environment/config for anything non-local.
es = Elasticsearch([{"host": server, "port": port}], http_auth=("elastic", "changeme"))
fig = go.Figure()  # or any Plotly Express function e.g. px.bar(...)
def get_test_dates():
    """Fetch all documents from the index (to enumerate available test dates).

    Bug fix: the original searched with the undefined name ``body6`` (a
    guaranteed NameError); it now uses the match-all query it builds, and
    returns the raw Elasticsearch response for callers to inspect.
    """
    body = {"query": {"match_all": {}}}
    res = es.search(index=index_name, size=1000, body=body)
    return res
def get_data(iteration):
    """Query Elasticsearch for EVM results of one test iteration and build the plot.

    Parameters
    ----------
    iteration : the iteration number selected in the UI (str or int).

    Returns
    -------
    plotly.graph_objects.Figure with EVM (dB) vs carrier frequency (MHz).
    """
    # Bug fix: the original dict literal repeated the "match" key, so the
    # test_name clause was silently discarded.  Use one clause per must-entry.
    body = {
        "query": {
            "bool": {
                "must": [
                    {"match": {"test_name": "evm_1"}},
                    {"match": {"iteration": str(iteration)}},
                ]
            }
        }
    }
    res = es.search(index=index_name, size=1000, body=body)
    x = numpy.array([val["_source"]["carrier_frequency"] for val in res["hits"]["hits"]])
    y = numpy.array([val["_source"]["evm_db"] for val in res["hits"]["hits"]])
    # Sort both series by frequency so the line plot is monotonic in x.
    order = x.argsort()
    x = x[order] / 1e6  # Hz -> MHz (axis is labelled "LO (MHz)")
    y = y[order]
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=x, y=y, mode="lines+markers", name="EVM"))
    fig.update_xaxes(title_text="LO (MHz)", title_font={"size": 20}, title_standoff=25)
    fig.update_yaxes(title_text="EVM (dB)", title_font={"size": 20}, title_standoff=25)
    return fig
# Dash page layout: heading, two dropdown selectors with status text, and the EVM graph.
layout = html.Div(
    [
        html.H4("EVM Tests"),
        # Iteration selector row.
        html.Div(
            [
                html.H6("""Select Iteration""", style={"margin-right": "2em"}),
                dcc.Dropdown(
                    id="iteration-dropdown",
                    options=[
                        {"label": "1", "value": 1},
                        {"label": "2", "value": 2},
                        {"label": "3", "value": 3},
                    ],
                    value="1",
                    style=dict(width="40%", verticalAlign="middle"),
                ),
            ],
            style=dict(display="flex"),
        ),
        html.Div(id="dd-output-container"),
        # Test-date selector row (options are placeholders; see get_test_dates).
        html.Div(
            [
                html.H6("""Select Test Date""", style={"margin-right": "2em"}),
                dcc.Dropdown(
                    id="testdate-dropdown",
                    options=[
                        {"label": "1", "value": 1},
                        {"label": "2", "value": 2},
                        {"label": "3", "value": 3},
                    ],
                    value="1",
                    style=dict(width="40%", verticalAlign="middle"),
                ),
            ],
            style=dict(display="flex"),
        ),
        html.Div(id="testdate-output-container"),
        dcc.Graph(id="evm_plot", figure=fig),
    ]
)
@app.callback(
    [
        dash.dependencies.Output("dd-output-container", "children"),
        dash.dependencies.Output("evm_plot", "figure"),
    ],
    [dash.dependencies.Input("iteration-dropdown", "value")],
)
def update_output(value):
    """Refresh the status text and the EVM plot when the iteration dropdown changes."""
    return ['You have selected "{}"'.format(value), get_data(value)]
| [
"elasticsearch.Elasticsearch",
"plotly.graph_objects.Scatter",
"dash_html_components.H6",
"dash_html_components.Div",
"plotly.graph_objects.Figure",
"dash.dependencies.Input",
"dash_html_components.H4",
"numpy.array",
"dash_core_components.Graph",
"dash.dependencies.Output"
] | [((323, 409), 'elasticsearch.Elasticsearch', 'Elasticsearch', (["[{'host': server, 'port': port}]"], {'http_auth': "('elastic', 'changeme')"}), "([{'host': server, 'port': port}], http_auth=('elastic',\n 'changeme'))\n", (336, 409), False, 'from elasticsearch import Elasticsearch\n'), ((412, 423), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (421, 423), True, 'import plotly.graph_objects as go\n'), ((1439, 1453), 'numpy.array', 'numpy.array', (['x'], {}), '(x)\n', (1450, 1453), False, 'import numpy\n'), ((1462, 1476), 'numpy.array', 'numpy.array', (['y'], {}), '(y)\n', (1473, 1476), False, 'import numpy\n'), ((1563, 1574), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (1572, 1574), True, 'import plotly.graph_objects as go\n'), ((1593, 1647), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': 'y', 'mode': '"""lines+markers"""', 'name': '"""EVM"""'}), "(x=x, y=y, mode='lines+markers', name='EVM')\n", (1603, 1647), True, 'import plotly.graph_objects as go\n'), ((1875, 1895), 'dash_html_components.H4', 'html.H4', (['"""EVM Tests"""'], {}), "('EVM Tests')\n", (1882, 1895), True, 'import dash_html_components as html\n'), ((2486, 2520), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""dd-output-container"""'}), "(id='dd-output-container')\n", (2494, 2520), True, 'import dash_html_components as html\n'), ((3110, 3150), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""testdate-output-container"""'}), "(id='testdate-output-container')\n", (3118, 3150), True, 'import dash_html_components as html\n'), ((3160, 3196), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""evm_plot"""', 'figure': 'fig'}), "(id='evm_plot', figure=fig)\n", (3169, 3196), True, 'import dash_core_components as dcc\n'), ((3237, 3296), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""dd-output-container"""', '"""children"""'], {}), "('dd-output-container', 'children')\n", (3261, 3296), False, 'import dash\n'), ((3306, 
3352), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""evm_plot"""', '"""figure"""'], {}), "('evm_plot', 'figure')\n", (3330, 3352), False, 'import dash\n'), ((3366, 3420), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""iteration-dropdown"""', '"""value"""'], {}), "('iteration-dropdown', 'value')\n", (3389, 3420), False, 'import dash\n'), ((1945, 2003), 'dash_html_components.H6', 'html.H6', (['"""Select Iteration"""'], {'style': "{'margin-right': '2em'}"}), "('Select Iteration', style={'margin-right': '2em'})\n", (1952, 2003), True, 'import dash_html_components as html\n'), ((2570, 2628), 'dash_html_components.H6', 'html.H6', (['"""Select Test Date"""'], {'style': "{'margin-right': '2em'}"}), "('Select Test Date', style={'margin-right': '2em'})\n", (2577, 2628), True, 'import dash_html_components as html\n')] |
import numpy as np
from ..search_setting._base import get_params, to_func
def eval_fitness(scores, sign):
    """Normalized fitness proportional to each score's distance from the worst.

    `sign` orients the scores (so that "worst" is the largest oriented value);
    NaN entries receive zero fitness.  The result sums to 1 when at least two
    distinct finite scores exist.
    """
    oriented = sign * scores
    fitness = np.nanmax(oriented) - oriented
    fitness = fitness / np.nansum(fitness)
    fitness[np.isnan(fitness)] = 0
    return fitness
def crossover(fitness, param_crossover_proba, cv_results, start_index):
    """Build a child parameter dict by mixing two fitness-weighted parents.

    Two distinct parents are drawn from ``cv_results["params"]`` (offset by
    ``start_index``) with probabilities given by ``fitness``; each key is taken
    from the second parent with probability ``param_crossover_proba``.
    """
    idx = np.random.choice(len(fitness), size=2, replace=False, p=fitness) + start_index
    base = cv_results["params"][idx[0]]
    target = cv_results["params"][idx[1]]
    child = {}
    for key in base.keys():
        take_target = np.random.rand() < param_crossover_proba
        child[key] = target[key] if take_target else base[key]
    return child
def mutate(params, param_mutation_proba, param_distributions):
    """Return a copy of ``params`` where each key is independently resampled.

    Each hyper-parameter is redrawn from ``param_distributions`` (via
    ``get_params``) with probability ``param_mutation_proba``; otherwise the
    original value is kept.
    """
    mutated = {}
    for key, value in params.items():
        if np.random.rand() < param_mutation_proba:
            mutated.update(get_params(param_distributions, tgt_key=key))
        else:
            mutated[key] = value
    return mutated
def gamin(obj, param_distributions, max_iter, iter_pergeneration, param_crossover_proba, param_mutation_proba, random_sampling_proba, cvsummarizer):
    """Genetic-algorithm search driver: evaluate `obj` on up to `max_iter` parameter sets.

    Each generation of `iter_pergeneration` candidates mixes random sampling
    (probability `random_sampling_proba`) with crossover + mutation of the
    previous generation's fittest members.  The *_proba arguments may be
    constants or callables of the generation index (normalized by `to_func`).
    NOTE(review): the local name `generaion` is a typo for "generation" kept
    for byte-compatibility.
    """
    it = 0
    # Allow scalar probabilities by wrapping them into generation-indexed callables.
    param_crossover_proba = to_func(param_crossover_proba)
    param_mutation_proba = to_func(param_mutation_proba)
    random_sampling_proba = to_func(random_sampling_proba)
    while True:
        # Set GA params.
        population = []
        if it == 0:
            # First generation: no history, so sample everything randomly (rsp=1).
            generaion = 0
            rsp = 1
            start_index = None
            fitness = None
        else:
            generaion = int((it+1) / iter_pergeneration)
            rsp = random_sampling_proba(generaion)
            # Fitness is computed over the previous generation's scores only.
            start_index = it - iter_pergeneration
            fitness = eval_fitness(scores=np.array(cvsummarizer.cv_results_[cvsummarizer.score_summarizer_name+"_test_score"])[start_index:],
                                   sign=cvsummarizer.sign)
            if (fitness > 0).sum() < 2:
                # If there are not enough parents in this generation, use all past scores.
                fitness = eval_fitness(scores=np.array(cvsummarizer.cv_results_[cvsummarizer.score_summarizer_name+"_test_score"]),
                                       sign=cvsummarizer.sign)
                start_index = 0
                if (fitness > 0).sum() < 2:
                    # If there are not enough parents in any generation, the whole next generation is random samples.
                    rsp = 1
        # Create population.
        for i in range(iter_pergeneration):
            if np.random.rand() < rsp:
                population.append(get_params(param_distributions, tgt_key=None))
            else:
                child = crossover(fitness=fitness, param_crossover_proba=param_crossover_proba(generaion),
                                  cv_results=cvsummarizer.cv_results_, start_index=start_index)
                child = mutate(params=child, param_mutation_proba=param_mutation_proba(generaion),
                               param_distributions=param_distributions)
                population.append(child)
        # Do search.
        for params in population:
            obj(params)
            it += 1
            if it >= max_iter:
                return
| [
"numpy.nansum",
"numpy.isnan",
"numpy.array",
"numpy.random.rand",
"numpy.nanmax"
] | [((264, 281), 'numpy.nansum', 'np.nansum', (['scores'], {}), '(scores)\n', (273, 281), True, 'import numpy as np\n'), ((223, 240), 'numpy.nanmax', 'np.nanmax', (['scores'], {}), '(scores)\n', (232, 240), True, 'import numpy as np\n'), ((293, 309), 'numpy.isnan', 'np.isnan', (['scores'], {}), '(scores)\n', (301, 309), True, 'import numpy as np\n'), ((720, 736), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (734, 736), True, 'import numpy as np\n'), ((1032, 1048), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1046, 1048), True, 'import numpy as np\n'), ((2778, 2794), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2792, 2794), True, 'import numpy as np\n'), ((1960, 2050), 'numpy.array', 'np.array', (["cvsummarizer.cv_results_[cvsummarizer.score_summarizer_name + '_test_score']"], {}), "(cvsummarizer.cv_results_[cvsummarizer.score_summarizer_name +\n '_test_score'])\n", (1968, 2050), True, 'import numpy as np\n'), ((2316, 2406), 'numpy.array', 'np.array', (["cvsummarizer.cv_results_[cvsummarizer.score_summarizer_name + '_test_score']"], {}), "(cvsummarizer.cv_results_[cvsummarizer.score_summarizer_name +\n '_test_score'])\n", (2324, 2406), True, 'import numpy as np\n')] |
from __future__ import print_function, absolute_import
from six import iteritems, itervalues
from six.moves import range
from pyNastran.bdf.bdf import BDF
import tables
import numpy as np
from .input import Input
from .result import Result
from .pynastran_interface import get_bdf_cards
from .punch import PunchReader
from .f06 import F06Reader
class H5Nastran(object):
    """HDF5-backed store for Nastran input (BDF) and result (f06/punch) data.

    Wraps a PyTables file; card and result tables register themselves via
    register_card_table / register_result_table and are dispatched to when
    input/result files are loaded.
    """
    version = '0.1.0'
    def __init__(self, h5filename, mode='r'):
        """Open (or create, mode='w') the HDF5 file with zlib compression."""
        filters = tables.Filters(complib='zlib', complevel=5)
        self.h5f = tables.open_file(h5filename, mode=mode, filters=filters)
        self._card_tables = {}
        self._result_tables = {}
        self.input = Input(self)
        self.result = Result(self)
        self.bdf = None  # pyNastran bdf file
        self._tables = set()
        self._unsupported_tables = set()
        self._bdf = None
        self._punch = None
        self._f06 = None
        self._bdf_domain = 1
        if mode == 'w':
            self._write_info()
    def close(self):
        """Close the underlying HDF5 file."""
        self.h5f.close()
    def load_bdf(self, filename=None):
        """Load a BDF: from `filename`, or from the stored copy when None.

        Writes every supported card into its registered table, records the
        names of unsupported cards, and archives the BDF text in the file.
        """
        if self._bdf is not None:
            raise Exception('BDF already loaded!')
        if filename is None:
            self._load_bdf()
            return self.bdf
        self._bdf = filename
        self.bdf = BDF(debug=False)
        self.bdf.read_bdf(filename)
        bdf = self.bdf
        assert bdf is not None
        cards = get_bdf_cards(bdf)
        # NOTE(review): this local set shadows the imported `tables` module
        # inside this method.
        tables = set()
        unsupported = []
        card_names = sorted(cards.keys())
        for card_name in card_names:
            table = self._card_tables.get(card_name, None)
            if table is None:
                print(card_name)
                unsupported.append(card_name)
                continue
            try:
                table.write_data(cards[card_name], self._bdf_domain)
            except NotImplementedError:
                print(card_name)
                unsupported.append(card_name)
            tables.add(table)
        for table in tables:
            table.finalize()
        self._unsupported_cards(unsupported)
        self._bdf_domain += 1
        self._save_bdf()
        return self.bdf
    def load_f06(self, f06file):
        """Stream result tables from an f06 file into their registered tables."""
        if self._bdf is None:
            raise Exception('BDF must be loaded first!')
        if self._punch is not None:
            raise Exception('Punch file has already been loaded. Cannot load f06 file after punch.')
        self._f06 = f06file
        reader = F06Reader(f06file)
        reader.register_callback(self._load_result_table)
        reader.read()
        for table in self._tables:
            table.finalize()
        self._tables.clear()
    def load_punch(self, filename):
        """Stream result tables from a punch file into their registered tables."""
        if self._bdf is None:
            raise Exception('BDF must be loaded first!')
        if self._f06 is not None:
            raise Exception('F06 has already been loaded. Cannot load punch file after f06.')
        self._punch = filename
        reader = PunchReader(filename)
        reader.register_callback(self._load_result_table)
        reader.read()
        for table in self._tables:
            table.finalize()
        self._tables.clear()
        self._write_unsupported_tables()
    def path(self):
        """Root path components for NASTRAN data inside the HDF5 file."""
        return ['', 'NASTRAN']
    def register_card_table(self, card_table):
        """Register the handler table for one BDF card type (must be unique)."""
        assert card_table.card_id not in self._card_tables
        self._card_tables[card_table.card_id] = card_table
    def register_result_table(self, result_table):
        """Register a result table under each result type it handles."""
        result_type = result_table.result_type
        if isinstance(result_type, str):
            result_type = [result_type]
        for _result_type in result_type:
            assert _result_type not in self._result_tables
            self._result_tables[_result_type] = result_table
    def _load_bdf(self):
        """Rebuild self.bdf from the compressed BDF text archived in the file."""
        from zlib import decompress
        bdf_lines = decompress(self.h5f.get_node('/PRIVATE/NASTRAN/INPUT/BDF_LINES').read()).decode()
        from six import StringIO
        class DummyIO(StringIO):
            # pyNastran expects StringIO to have a readlines method
            def readlines(self):
                return self.getvalue().split('\n')
        data = DummyIO()
        data.write(bdf_lines)
        bdf = BDF(debug=False)
        bdf.read_bdf(data)
        data.close()
        self.bdf = bdf
    def _load_result_table(self, table_data):
        """Reader callback: route one result table to its registered handler."""
        print(table_data.header)
        results_type = table_data.header.results_type
        table = self._result_tables.get(results_type, None)
        if table is None:
            return self._unsupported_table(table_data)
        table.results_type = results_type
        table.write_data(table_data)
        self._tables.add(table)
    def _save_bdf(self):
        """Archive the current BDF as compressed text inside the HDF5 file."""
        from six import StringIO
        out = StringIO()
        self.bdf.write_bdf(out, close=False)
        from zlib import compress
        self.h5f.create_array('/PRIVATE/NASTRAN/INPUT', 'BDF_LINES', obj=compress(out.getvalue().encode()), title='BDF LINES',
                              createparents=True)
    def _unsupported_cards(self, cards):
        """Record the names of BDF cards that had no registered handler."""
        cards = np.array(cards, dtype='S8')
        self.h5f.create_array('/PRIVATE/NASTRAN/INPUT', 'UNSUPPORTED_CARDS', obj=cards, title='UNSUPPORTED BDF CARDS',
                              createparents=True)
    def _unsupported_table(self, table_data):
        """Remember a result type that had no registered handler."""
        print('Unsupported table %s' % table_data.header.results_type)
        self._unsupported_tables.add(table_data.header.results_type)
    def _write_info(self):
        """Write h5Nastran/pyNastran version info into a new file."""
        import pyNastran
        info = 'h5Nastran version %s\nPowered by pyNastran version %s' % (self.version, pyNastran.__version__)
        self.h5f.create_array('/PRIVATE/h5Nastran', 'h5Nastran', obj=info.encode(), title='h5Nastran Info',
                              createparents=True)
    def _write_unsupported_tables(self):
        """Record the result types that had no registered handler."""
        headers = list(sorted(self._unsupported_tables))
        data = np.array(headers, dtype='S256')
        self.h5f.create_array('/PRIVATE/NASTRAN/RESULT', 'UNSUPPORTED_RESULT_TABLES', obj=data, title='UNSUPPORTED RESULT TABLES',
                              createparents=True)
| [
"tables.add",
"pyNastran.bdf.bdf.BDF",
"six.StringIO",
"tables.Filters",
"numpy.array",
"tables.open_file"
] | [((490, 533), 'tables.Filters', 'tables.Filters', ([], {'complib': '"""zlib"""', 'complevel': '(5)'}), "(complib='zlib', complevel=5)\n", (504, 533), False, 'import tables\n'), ((554, 610), 'tables.open_file', 'tables.open_file', (['h5filename'], {'mode': 'mode', 'filters': 'filters'}), '(h5filename, mode=mode, filters=filters)\n', (570, 610), False, 'import tables\n'), ((1371, 1387), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'debug': '(False)'}), '(debug=False)\n', (1374, 1387), False, 'from pyNastran.bdf.bdf import BDF\n'), ((4434, 4450), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'debug': '(False)'}), '(debug=False)\n', (4437, 4450), False, 'from pyNastran.bdf.bdf import BDF\n'), ((5016, 5026), 'six.StringIO', 'StringIO', ([], {}), '()\n', (5024, 5026), False, 'from six import StringIO\n'), ((5354, 5381), 'numpy.array', 'np.array', (['cards'], {'dtype': '"""S8"""'}), "(cards, dtype='S8')\n", (5362, 5381), True, 'import numpy as np\n'), ((6189, 6220), 'numpy.array', 'np.array', (['headers'], {'dtype': '"""S256"""'}), "(headers, dtype='S256')\n", (6197, 6220), True, 'import numpy as np\n'), ((2087, 2104), 'tables.add', 'tables.add', (['table'], {}), '(table)\n', (2097, 2104), False, 'import tables\n')] |
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import numpy as np
import sys
sys.path.append('../')
from src.utils.plotting import latexconfig
# Configure matplotlib for LaTeX-style rendering (project helper).
latexconfig()
# Physical constants (SI units).
h = 6.626e-34 # Planck constant [J⋅s]
c = 3.0e+8 # speed of light [m/s]
k = 1.38e-23 # Boltzmann constant [J⋅K^−1]
def B_rj(labda, T):
    """Rayleigh-Jeans approximation of black-body spectral radiance.

    labda -- wavelength in metres (scalar or numpy array)
    T     -- temperature in kelvin
    """
    wavelength_pow4 = labda**4
    return 2.0 * c * k * T / wavelength_pow4
def B_p(labda, T):
    """Planck's law for black-body spectral radiance.

    labda -- wavelength in metres (scalar or numpy array)
    T     -- temperature in kelvin
    """
    boltzmann_factor = np.exp(h * c / (labda * k * T))
    return 2.0 * h * c**2 / (labda**5 * (boltzmann_factor - 1.0))
# Wavelength grid: 1 nm to 3 um in 1 nm steps (float128 to avoid overflow in exp).
labda = np.arange(1e-9, 3e-6, 1e-9, dtype=np.float128)
Ts = [4000., 5000., 6000., 7000.]
plt.figure('Black-body Radiation', figsize=(8, 4))
# One Planck curve per temperature.
for T in Ts:
    plt.plot(labda*1e9, B_p(labda, T), label=f'$T={int(T)}$K')
# Rayleigh-Jeans comparison curve (diverges at short wavelengths).
plt.plot(labda*1e9, B_rj(labda, T=5000), label=f'Rayleigh-Jeans (T={5000}K)', color='black')
# Dotted verticals mark the visible-light band edges (380-740 nm).
plt.vlines(x=380, ymin=0, ymax=7e13, color='black', linestyle=':')
plt.vlines(x=740, ymin=0, ymax=7e13, color='black', linestyle=':',
           label='visible light borders')
#visible spectrum coloring
plt.axvspan(380, 450, alpha=0.3, color='violet')
plt.axvspan(451, 485, alpha=0.3, color='blue')
plt.axvspan(486, 500, alpha=0.3, color='cyan')
plt.axvspan(501, 565, alpha=0.3, color='green')
plt.axvspan(566, 590, alpha=0.3, color='yellow')
plt.axvspan(591, 625, alpha=0.3, color='orange')
plt.axvspan(626, 740, alpha=0.3, color='red')
plt.ylim([0, 7e13])
plt.legend(loc='best')
plt.xlabel('$\lambda$ [nm]')
plt.ylabel('$B(\lambda, T)$ ')
plt.show() | [
"sys.path.append",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.vlines",
"src.utils.plotting.latexconfig",
"matplotlib.pyplot.axvspan",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.exp",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.... | [((54, 63), 'seaborn.set', 'sns.set', ([], {}), '()\n', (61, 63), True, 'import seaborn as sns\n'), ((95, 117), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (110, 117), False, 'import sys\n'), ((161, 174), 'src.utils.plotting.latexconfig', 'latexconfig', ([], {}), '()\n', (172, 174), False, 'from src.utils.plotting import latexconfig\n'), ((578, 627), 'numpy.arange', 'np.arange', (['(1e-09)', '(3e-06)', '(1e-09)'], {'dtype': 'np.float128'}), '(1e-09, 3e-06, 1e-09, dtype=np.float128)\n', (587, 627), True, 'import numpy as np\n'), ((661, 711), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Black-body Radiation"""'], {'figsize': '(8, 4)'}), "('Black-body Radiation', figsize=(8, 4))\n", (671, 711), True, 'import matplotlib.pyplot as plt\n'), ((883, 961), 'matplotlib.pyplot.vlines', 'plt.vlines', ([], {'x': '(380)', 'ymin': '(0)', 'ymax': '(70000000000000.0)', 'color': '"""black"""', 'linestyle': '""":"""'}), "(x=380, ymin=0, ymax=70000000000000.0, color='black', linestyle=':')\n", (893, 961), True, 'import matplotlib.pyplot as plt\n'), ((950, 1064), 'matplotlib.pyplot.vlines', 'plt.vlines', ([], {'x': '(740)', 'ymin': '(0)', 'ymax': '(70000000000000.0)', 'color': '"""black"""', 'linestyle': '""":"""', 'label': '"""visible light borders"""'}), "(x=740, ymin=0, ymax=70000000000000.0, color='black', linestyle=\n ':', label='visible light borders')\n", (960, 1064), True, 'import matplotlib.pyplot as plt\n'), ((1088, 1136), 'matplotlib.pyplot.axvspan', 'plt.axvspan', (['(380)', '(450)'], {'alpha': '(0.3)', 'color': '"""violet"""'}), "(380, 450, alpha=0.3, color='violet')\n", (1099, 1136), True, 'import matplotlib.pyplot as plt\n'), ((1137, 1183), 'matplotlib.pyplot.axvspan', 'plt.axvspan', (['(451)', '(485)'], {'alpha': '(0.3)', 'color': '"""blue"""'}), "(451, 485, alpha=0.3, color='blue')\n", (1148, 1183), True, 'import matplotlib.pyplot as plt\n'), ((1184, 1230), 'matplotlib.pyplot.axvspan', 'plt.axvspan', (['(486)', 
'(500)'], {'alpha': '(0.3)', 'color': '"""cyan"""'}), "(486, 500, alpha=0.3, color='cyan')\n", (1195, 1230), True, 'import matplotlib.pyplot as plt\n'), ((1231, 1278), 'matplotlib.pyplot.axvspan', 'plt.axvspan', (['(501)', '(565)'], {'alpha': '(0.3)', 'color': '"""green"""'}), "(501, 565, alpha=0.3, color='green')\n", (1242, 1278), True, 'import matplotlib.pyplot as plt\n'), ((1279, 1327), 'matplotlib.pyplot.axvspan', 'plt.axvspan', (['(566)', '(590)'], {'alpha': '(0.3)', 'color': '"""yellow"""'}), "(566, 590, alpha=0.3, color='yellow')\n", (1290, 1327), True, 'import matplotlib.pyplot as plt\n'), ((1328, 1376), 'matplotlib.pyplot.axvspan', 'plt.axvspan', (['(591)', '(625)'], {'alpha': '(0.3)', 'color': '"""orange"""'}), "(591, 625, alpha=0.3, color='orange')\n", (1339, 1376), True, 'import matplotlib.pyplot as plt\n'), ((1377, 1422), 'matplotlib.pyplot.axvspan', 'plt.axvspan', (['(626)', '(740)'], {'alpha': '(0.3)', 'color': '"""red"""'}), "(626, 740, alpha=0.3, color='red')\n", (1388, 1422), True, 'import matplotlib.pyplot as plt\n'), ((1424, 1455), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 70000000000000.0]'], {}), '([0, 70000000000000.0])\n', (1432, 1455), True, 'import matplotlib.pyplot as plt\n'), ((1444, 1466), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1454, 1466), True, 'import matplotlib.pyplot as plt\n'), ((1467, 1496), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\lambda$ [nm]"""'], {}), "('$\\\\lambda$ [nm]')\n", (1477, 1496), True, 'import matplotlib.pyplot as plt\n'), ((1496, 1527), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$B(\\\\lambda, T)$ """'], {}), "('$B(\\\\lambda, T)$ ')\n", (1506, 1527), True, 'import matplotlib.pyplot as plt\n'), ((1527, 1537), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1535, 1537), True, 'import matplotlib.pyplot as plt\n'), ((537, 568), 'numpy.exp', 'np.exp', (['(h * c / (labda * k * T))'], {}), '(h * c / (labda * k * T))\n', (543, 568), True, 
'import numpy as np\n')] |
import numpy as np
from agents.common import PlayerAction, BoardPiece, SavedState
def test_evaluate_window():
    """evaluate_window should return an integer score for a window of pieces."""
    from agents.agent_minimax import evaluate_window
    window = np.array([0, 1.0, 1])
    player = BoardPiece(2)
    ret = evaluate_window(window, player)
    # Bug fix: np.int was deprecated and removed in NumPy 1.24; accept both
    # native and NumPy integer types instead.
    assert isinstance(ret, (int, np.integer))
def test_alpha_beta():
    """alpha_beta on an empty board should return a (move, value)-style tuple."""
    import math  # Bug fix: math was used below but never imported (NameError).
    from agents.agent_minimax import alpha_beta
    board = np.zeros((6, 7), dtype=BoardPiece)
    player = BoardPiece(2)
    depth = 5
    alpha = -math.inf
    beta = math.inf
    maximizingPlayer = True
    ret = alpha_beta(board, player, depth, alpha, beta, maximizingPlayer)
    # Bug fix: isinstance(ret, tuple()) compares against an *empty tuple of
    # types* and is always False; the intent is to check for a tuple.
    assert isinstance(ret, tuple)
def test_score_position():
    """score_position should return an integer evaluation of the board."""
    from agents.agent_minimax import score_position
    board = np.zeros((6, 7), dtype=BoardPiece)
    player = BoardPiece(2)
    ret = score_position(board, player)
    # Bug fix: np.int was deprecated and removed in NumPy 1.24; accept both
    # native and NumPy integer types instead.
    assert isinstance(ret, (int, np.integer))
def test_generate_move_minimax():
    """generate_move_minimax should return a (move, saved_state)-style tuple."""
    from agents.agent_minimax import generate_move_minimax
    board = np.zeros((6, 7), dtype=BoardPiece)
    player = BoardPiece(2)
    ret = generate_move_minimax(board, player)
    # Bug fix: isinstance(ret, tuple()) compares against an *empty tuple of
    # types* and is always False; the intent is to check for a tuple.
    assert isinstance(ret, tuple)
| [
"agents.agent_minimax.generate_move_minimax",
"agents.agent_minimax.evaluate_window",
"numpy.zeros",
"agents.common.BoardPiece",
"agents.agent_minimax.score_position",
"numpy.array",
"agents.agent_minimax.alpha_beta"
] | [((177, 198), 'numpy.array', 'np.array', (['[0, 1.0, 1]'], {}), '([0, 1.0, 1])\n', (185, 198), True, 'import numpy as np\n'), ((210, 223), 'agents.common.BoardPiece', 'BoardPiece', (['(2)'], {}), '(2)\n', (220, 223), False, 'from agents.common import PlayerAction, BoardPiece, SavedState\n'), ((234, 265), 'agents.agent_minimax.evaluate_window', 'evaluate_window', (['window', 'player'], {}), '(window, player)\n', (249, 265), False, 'from agents.agent_minimax import evaluate_window\n'), ((385, 419), 'numpy.zeros', 'np.zeros', (['(6, 7)'], {'dtype': 'BoardPiece'}), '((6, 7), dtype=BoardPiece)\n', (393, 419), True, 'import numpy as np\n'), ((433, 446), 'agents.common.BoardPiece', 'BoardPiece', (['(2)'], {}), '(2)\n', (443, 446), False, 'from agents.common import PlayerAction, BoardPiece, SavedState\n'), ((541, 604), 'agents.agent_minimax.alpha_beta', 'alpha_beta', (['board', 'player', 'depth', 'alpha', 'beta', 'maximizingPlayer'], {}), '(board, player, depth, alpha, beta, maximizingPlayer)\n', (551, 604), False, 'from agents.agent_minimax import alpha_beta\n'), ((733, 767), 'numpy.zeros', 'np.zeros', (['(6, 7)'], {'dtype': 'BoardPiece'}), '((6, 7), dtype=BoardPiece)\n', (741, 767), True, 'import numpy as np\n'), ((781, 794), 'agents.common.BoardPiece', 'BoardPiece', (['(2)'], {}), '(2)\n', (791, 794), False, 'from agents.common import PlayerAction, BoardPiece, SavedState\n'), ((805, 834), 'agents.agent_minimax.score_position', 'score_position', (['board', 'player'], {}), '(board, player)\n', (819, 834), False, 'from agents.agent_minimax import score_position\n'), ((976, 1010), 'numpy.zeros', 'np.zeros', (['(6, 7)'], {'dtype': 'BoardPiece'}), '((6, 7), dtype=BoardPiece)\n', (984, 1010), True, 'import numpy as np\n'), ((1024, 1037), 'agents.common.BoardPiece', 'BoardPiece', (['(2)'], {}), '(2)\n', (1034, 1037), False, 'from agents.common import PlayerAction, BoardPiece, SavedState\n'), ((1048, 1084), 'agents.agent_minimax.generate_move_minimax', 
'generate_move_minimax', (['board', 'player'], {}), '(board, player)\n', (1069, 1084), False, 'from agents.agent_minimax import generate_move_minimax\n')] |
import gettext
import unittest
import numpy
# local libraries
from nion.swift import Facade
from nion.data import DataAndMetadata
from nion.swift.test import TestContext
from nion.ui import TestUI
from nion.swift import Application
from nion.swift.model import DocumentModel
from nionswift_plugin.nion_experimental_tools import AffineTransformImage
_ = gettext.gettext
Facade.initialize()
def create_memory_profile_context() -> TestContext.MemoryProfileContext:
return TestContext.MemoryProfileContext()
class TestAffineTransformImage(unittest.TestCase):
def setUp(self):
self.app = Application.Application(TestUI.UserInterface(), set_global=True)
self.app.workspace_dir = str()
def tearDown(self):
pass
def test_affine_transform_image_for_2d_data(self):
with create_memory_profile_context() as profile_context:
document_controller = profile_context.create_document_controller_with_application()
document_model = document_controller.document_model
data = numpy.zeros((5, 5))
data[2:-2, 1:-1] = 1
xdata = DataAndMetadata.new_data_and_metadata(data)
api = Facade.get_api("~1.0", "~1.0")
data_item = api.library.create_data_item_from_data_and_metadata(xdata)
document_controller.selection.set(0)
document_controller.selected_display_panel = None # use the document controller selection
affine_transform = AffineTransformImage.AffineTransformMenuItem(api)
affine_transform.menu_item_execute(api.application.document_controllers[0])
document_controller.periodic()
# Can't convince the computation to update when changing the graphics, so just check that it got executed
vector_a = data_item.graphics[0]
vector_b = data_item.graphics[1]
# # Rotate by 90 degrees
vector_a.end = (0.75, 0.5)
vector_b.end = (0.5, 0.75)
# # Update computation
document_controller.periodic()
DocumentModel.evaluate_data(document_model.computations[0])
self.assertEqual(len(data_item.graphics), 2)
self.assertEqual(api.library.data_item_count, 2)
self.assertTrue(numpy.allclose(document_model.data_items[1].data, numpy.rot90(data)))
def test_affine_transform_image_for_3d_data(self):
data_descriptors = [DataAndMetadata.DataDescriptor(True, 0, 2), DataAndMetadata.DataDescriptor(False, 1, 2),
DataAndMetadata.DataDescriptor(False, 2, 1)]
for data_descriptor in data_descriptors:
with self.subTest(data_descriptor=data_descriptor):
with create_memory_profile_context() as profile_context:
document_controller = profile_context.create_document_controller_with_application()
document_model = document_controller.document_model
data = numpy.zeros((5, 5, 5))
if data_descriptor.collection_dimension_count == 2:
data[2:-2, 1:-1] = 1
else:
data[..., 2:-2, 1:-1] = 1
xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=data_descriptor)
api = Facade.get_api("~1.0", "~1.0")
data_item = api.library.create_data_item_from_data_and_metadata(xdata)
document_controller.selection.set(0)
document_controller.selected_display_panel = None # use the document controller selection
affine_transform = AffineTransformImage.AffineTransformMenuItem(api)
affine_transform.menu_item_execute(api.application.document_controllers[0])
document_controller.periodic()
# Can't convince the computation to update when changing the graphics, so just check that it got executed
vector_a = data_item.graphics[0]
vector_b = data_item.graphics[1]
# # Rotate by 90 degrees
vector_a.end = (0.75, 0.5)
vector_b.end = (0.5, 0.75)
# # Update computation
document_controller.periodic()
DocumentModel.evaluate_data(document_model.computations[0])
self.assertEqual(len(data_item.graphics), 2)
self.assertEqual(api.library.data_item_count, 2)
if data_descriptor.collection_dimension_count == 2:
self.assertTrue(numpy.allclose(document_model.data_items[1].data, numpy.rot90(data)))
else:
self.assertTrue(numpy.allclose(document_model.data_items[1].data, numpy.rot90(data, axes=(1, 2))))
def test_affine_transform_image_for_4d_data(self):
data_descriptors = [DataAndMetadata.DataDescriptor(True, 1, 2), DataAndMetadata.DataDescriptor(False, 2, 2),
DataAndMetadata.DataDescriptor(True, 2, 1)]
for data_descriptor in data_descriptors:
with self.subTest(data_descriptor=data_descriptor):
with create_memory_profile_context() as profile_context:
document_controller = profile_context.create_document_controller_with_application()
document_model = document_controller.document_model
data = numpy.zeros((5, 5, 5, 5))
if data_descriptor.collection_dimension_count == 2 and not data_descriptor.is_sequence:
data[2:-2, 1:-1] = 1
elif data_descriptor.collection_dimension_count == 2 and data_descriptor.is_sequence:
data[:, 2:-2, 1:-1] = 1
else:
data[..., 2:-2, 1:-1] = 1
xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=data_descriptor)
api = Facade.get_api("~1.0", "~1.0")
data_item = api.library.create_data_item_from_data_and_metadata(xdata)
document_controller.selection.set(0)
document_controller.selected_display_panel = None # use the document controller selection
affine_transform = AffineTransformImage.AffineTransformMenuItem(api)
affine_transform.menu_item_execute(api.application.document_controllers[0])
document_controller.periodic()
# Can't convince the computation to update when changing the graphics, so just check that it got executed
vector_a = data_item.graphics[0]
vector_b = data_item.graphics[1]
# # Rotate by 90 degrees
vector_a.end = (0.75, 0.5)
vector_b.end = (0.5, 0.75)
# # Update computation
document_controller.periodic()
DocumentModel.evaluate_data(document_model.computations[0])
self.assertEqual(len(data_item.graphics), 2)
self.assertEqual(api.library.data_item_count, 2)
if data_descriptor.collection_dimension_count == 2 and not data_descriptor.is_sequence:
self.assertTrue(numpy.allclose(document_model.data_items[1].data, numpy.rot90(data)))
elif data_descriptor.collection_dimension_count == 2 and data_descriptor.is_sequence:
self.assertTrue(numpy.allclose(document_model.data_items[1].data, numpy.rot90(data, axes=(1, 2))))
else:
self.assertTrue(numpy.allclose(document_model.data_items[1].data, numpy.rot90(data, axes=(2, 3))))
def test_affine_transform_image_for_5d_data(self):
data_descriptor = DataAndMetadata.DataDescriptor(True, 2, 2)
with create_memory_profile_context() as profile_context:
document_controller = profile_context.create_document_controller_with_application()
document_model = document_controller.document_model
data = numpy.zeros((2, 5, 5, 5, 5))
data[:, 2:-2, 1:-1] = 1
xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=data_descriptor)
api = Facade.get_api("~1.0", "~1.0")
data_item = api.library.create_data_item_from_data_and_metadata(xdata)
document_controller.selection.set(0)
document_controller.selected_display_panel = None # use the document controller selection
affine_transform = AffineTransformImage.AffineTransformMenuItem(api)
affine_transform.menu_item_execute(api.application.document_controllers[0])
document_controller.periodic()
# Can't convince the computation to update when changing the graphics, so just check that it got executed
vector_a = data_item.graphics[0]
vector_b = data_item.graphics[1]
# # Rotate by 90 degrees
vector_a.end = (0.75, 0.5)
vector_b.end = (0.5, 0.75)
# # Update computation
document_controller.periodic()
DocumentModel.evaluate_data(document_model.computations[0])
self.assertEqual(len(data_item.graphics), 2)
self.assertEqual(api.library.data_item_count, 2)
self.assertTrue(numpy.allclose(document_model.data_items[1].data, numpy.rot90(data, axes=(1, 2))))
| [
"nionswift_plugin.nion_experimental_tools.AffineTransformImage.AffineTransformMenuItem",
"nion.swift.Facade.initialize",
"nion.swift.Facade.get_api",
"numpy.zeros",
"nion.data.DataAndMetadata.new_data_and_metadata",
"nion.swift.model.DocumentModel.evaluate_data",
"numpy.rot90",
"nion.data.DataAndMetad... | [((375, 394), 'nion.swift.Facade.initialize', 'Facade.initialize', ([], {}), '()\n', (392, 394), False, 'from nion.swift import Facade\n'), ((481, 515), 'nion.swift.test.TestContext.MemoryProfileContext', 'TestContext.MemoryProfileContext', ([], {}), '()\n', (513, 515), False, 'from nion.swift.test import TestContext\n'), ((7929, 7971), 'nion.data.DataAndMetadata.DataDescriptor', 'DataAndMetadata.DataDescriptor', (['(True)', '(2)', '(2)'], {}), '(True, 2, 2)\n', (7959, 7971), False, 'from nion.data import DataAndMetadata\n'), ((634, 656), 'nion.ui.TestUI.UserInterface', 'TestUI.UserInterface', ([], {}), '()\n', (654, 656), False, 'from nion.ui import TestUI\n'), ((1052, 1071), 'numpy.zeros', 'numpy.zeros', (['(5, 5)'], {}), '((5, 5))\n', (1063, 1071), False, 'import numpy\n'), ((1125, 1168), 'nion.data.DataAndMetadata.new_data_and_metadata', 'DataAndMetadata.new_data_and_metadata', (['data'], {}), '(data)\n', (1162, 1168), False, 'from nion.data import DataAndMetadata\n'), ((1187, 1217), 'nion.swift.Facade.get_api', 'Facade.get_api', (['"""~1.0"""', '"""~1.0"""'], {}), "('~1.0', '~1.0')\n", (1201, 1217), False, 'from nion.swift import Facade\n'), ((1484, 1533), 'nionswift_plugin.nion_experimental_tools.AffineTransformImage.AffineTransformMenuItem', 'AffineTransformImage.AffineTransformMenuItem', (['api'], {}), '(api)\n', (1528, 1533), False, 'from nionswift_plugin.nion_experimental_tools import AffineTransformImage\n'), ((2078, 2137), 'nion.swift.model.DocumentModel.evaluate_data', 'DocumentModel.evaluate_data', (['document_model.computations[0]'], {}), '(document_model.computations[0])\n', (2105, 2137), False, 'from nion.swift.model import DocumentModel\n'), ((2438, 2480), 'nion.data.DataAndMetadata.DataDescriptor', 'DataAndMetadata.DataDescriptor', (['(True)', '(0)', '(2)'], {}), '(True, 0, 2)\n', (2468, 2480), False, 'from nion.data import DataAndMetadata\n'), ((2482, 2525), 'nion.data.DataAndMetadata.DataDescriptor', 
'DataAndMetadata.DataDescriptor', (['(False)', '(1)', '(2)'], {}), '(False, 1, 2)\n', (2512, 2525), False, 'from nion.data import DataAndMetadata\n'), ((2555, 2598), 'nion.data.DataAndMetadata.DataDescriptor', 'DataAndMetadata.DataDescriptor', (['(False)', '(2)', '(1)'], {}), '(False, 2, 1)\n', (2585, 2598), False, 'from nion.data import DataAndMetadata\n'), ((4956, 4998), 'nion.data.DataAndMetadata.DataDescriptor', 'DataAndMetadata.DataDescriptor', (['(True)', '(1)', '(2)'], {}), '(True, 1, 2)\n', (4986, 4998), False, 'from nion.data import DataAndMetadata\n'), ((5000, 5043), 'nion.data.DataAndMetadata.DataDescriptor', 'DataAndMetadata.DataDescriptor', (['(False)', '(2)', '(2)'], {}), '(False, 2, 2)\n', (5030, 5043), False, 'from nion.data import DataAndMetadata\n'), ((5073, 5115), 'nion.data.DataAndMetadata.DataDescriptor', 'DataAndMetadata.DataDescriptor', (['(True)', '(2)', '(1)'], {}), '(True, 2, 1)\n', (5103, 5115), False, 'from nion.data import DataAndMetadata\n'), ((8216, 8244), 'numpy.zeros', 'numpy.zeros', (['(2, 5, 5, 5, 5)'], {}), '((2, 5, 5, 5, 5))\n', (8227, 8244), False, 'import numpy\n'), ((8301, 8377), 'nion.data.DataAndMetadata.new_data_and_metadata', 'DataAndMetadata.new_data_and_metadata', (['data'], {'data_descriptor': 'data_descriptor'}), '(data, data_descriptor=data_descriptor)\n', (8338, 8377), False, 'from nion.data import DataAndMetadata\n'), ((8396, 8426), 'nion.swift.Facade.get_api', 'Facade.get_api', (['"""~1.0"""', '"""~1.0"""'], {}), "('~1.0', '~1.0')\n", (8410, 8426), False, 'from nion.swift import Facade\n'), ((8693, 8742), 'nionswift_plugin.nion_experimental_tools.AffineTransformImage.AffineTransformMenuItem', 'AffineTransformImage.AffineTransformMenuItem', (['api'], {}), '(api)\n', (8737, 8742), False, 'from nionswift_plugin.nion_experimental_tools import AffineTransformImage\n'), ((9287, 9346), 'nion.swift.model.DocumentModel.evaluate_data', 'DocumentModel.evaluate_data', (['document_model.computations[0]'], {}), 
'(document_model.computations[0])\n', (9314, 9346), False, 'from nion.swift.model import DocumentModel\n'), ((2334, 2351), 'numpy.rot90', 'numpy.rot90', (['data'], {}), '(data)\n', (2345, 2351), False, 'import numpy\n'), ((2989, 3011), 'numpy.zeros', 'numpy.zeros', (['(5, 5, 5)'], {}), '((5, 5, 5))\n', (3000, 3011), False, 'import numpy\n'), ((3233, 3309), 'nion.data.DataAndMetadata.new_data_and_metadata', 'DataAndMetadata.new_data_and_metadata', (['data'], {'data_descriptor': 'data_descriptor'}), '(data, data_descriptor=data_descriptor)\n', (3270, 3309), False, 'from nion.data import DataAndMetadata\n'), ((3336, 3366), 'nion.swift.Facade.get_api', 'Facade.get_api', (['"""~1.0"""', '"""~1.0"""'], {}), "('~1.0', '~1.0')\n", (3350, 3366), False, 'from nion.swift import Facade\n'), ((3665, 3714), 'nionswift_plugin.nion_experimental_tools.AffineTransformImage.AffineTransformMenuItem', 'AffineTransformImage.AffineTransformMenuItem', (['api'], {}), '(api)\n', (3709, 3714), False, 'from nionswift_plugin.nion_experimental_tools import AffineTransformImage\n'), ((4347, 4406), 'nion.swift.model.DocumentModel.evaluate_data', 'DocumentModel.evaluate_data', (['document_model.computations[0]'], {}), '(document_model.computations[0])\n', (4374, 4406), False, 'from nion.swift.model import DocumentModel\n'), ((5506, 5531), 'numpy.zeros', 'numpy.zeros', (['(5, 5, 5, 5)'], {}), '((5, 5, 5, 5))\n', (5517, 5531), False, 'import numpy\n'), ((5943, 6019), 'nion.data.DataAndMetadata.new_data_and_metadata', 'DataAndMetadata.new_data_and_metadata', (['data'], {'data_descriptor': 'data_descriptor'}), '(data, data_descriptor=data_descriptor)\n', (5980, 6019), False, 'from nion.data import DataAndMetadata\n'), ((6046, 6076), 'nion.swift.Facade.get_api', 'Facade.get_api', (['"""~1.0"""', '"""~1.0"""'], {}), "('~1.0', '~1.0')\n", (6060, 6076), False, 'from nion.swift import Facade\n'), ((6375, 6424), 'nionswift_plugin.nion_experimental_tools.AffineTransformImage.AffineTransformMenuItem', 
'AffineTransformImage.AffineTransformMenuItem', (['api'], {}), '(api)\n', (6419, 6424), False, 'from nionswift_plugin.nion_experimental_tools import AffineTransformImage\n'), ((7057, 7116), 'nion.swift.model.DocumentModel.evaluate_data', 'DocumentModel.evaluate_data', (['document_model.computations[0]'], {}), '(document_model.computations[0])\n', (7084, 7116), False, 'from nion.swift.model import DocumentModel\n'), ((9543, 9573), 'numpy.rot90', 'numpy.rot90', (['data'], {'axes': '(1, 2)'}), '(data, axes=(1, 2))\n', (9554, 9573), False, 'import numpy\n'), ((4703, 4720), 'numpy.rot90', 'numpy.rot90', (['data'], {}), '(data)\n', (4714, 4720), False, 'import numpy\n'), ((4839, 4869), 'numpy.rot90', 'numpy.rot90', (['data'], {'axes': '(1, 2)'}), '(data, axes=(1, 2))\n', (4850, 4869), False, 'import numpy\n'), ((7449, 7466), 'numpy.rot90', 'numpy.rot90', (['data'], {}), '(data)\n', (7460, 7466), False, 'import numpy\n'), ((7665, 7695), 'numpy.rot90', 'numpy.rot90', (['data'], {'axes': '(1, 2)'}), '(data, axes=(1, 2))\n', (7676, 7695), False, 'import numpy\n'), ((7814, 7844), 'numpy.rot90', 'numpy.rot90', (['data'], {'axes': '(2, 3)'}), '(data, axes=(2, 3))\n', (7825, 7844), False, 'import numpy\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import cmath
import seaborn
import scipy
import functools
import time
#parameter settings
v=1 #intracell hopping in time period 1
w=1 #intercell hopping in time period 2
aalpha=-0.25 #phase index 1
bbeta=0.75 #phase index 2
t1=(np.pi*(aalpha+bbeta))/(2*v) #time period 1
t2=(np.pi*(bbeta-aalpha))/(2*w) #time period 2
print(t1,t2)
T=t1+t2 #total time period
Theta=np.pi #gap
#logarithm function with gap branch cut
def llog(z, theta):
modulus = np.abs(z)
argument = np.angle(z)
if theta-2*np.pi <= argument < theta:
argument = argument
else:
argument = theta-2*np.pi+np.mod(argument-theta, 2*np.pi)
return np.log(modulus) + 1j * argument
#time evolution operator at half period
def U(k,t):
matrix1 = np.zeros((2, 2), dtype=complex)
matrix2 = np.zeros((2, 2), dtype=complex)
if t < t1:
matrix1[0,1] = v*(t)
matrix1[1,0] = v*(t)
matrix2[0,1] = 0
matrix2[1,0] = 0
if t >= t1:
matrix1[0,1] = v*t1
matrix1[1,0] = v*t1
matrix2[0,1] = w*(t-t1)*cmath.exp(-1j*k)
matrix2[1,0] = w*(t-t1)*cmath.exp(1j*k)
return np.dot(scipy.linalg.expm(-1j*matrix2),scipy.linalg.expm(-1j*matrix1))
#Floquet operator cut at half period
def UF(k,t):
E=np.sqrt(math.pow(v*t1,2)+math.pow(w*t2,2)+2*(v*t1)*(w*t2)*np.cos(k))
eiphi=((v*t1)+(w*t2)*np.exp(1j*k))/(E)
eiphic=np.conjugate(eiphi)
TT=(1/np.sqrt(2))*np.matrix([[eiphic,eiphic],[1,-1]])
Tc=(1/np.sqrt(2))*np.matrix([[eiphi,1],[eiphi,-1]])
EIG=np.exp(-1j*E)
EIGc=np.exp(1j*E)
hamiltonian=np.matrix([[(1j/T)*llog(EIG,Theta),0],[0,(1j/T)*llog(EIGc,Theta)]])
Hamiltonian = TT*hamiltonian*Tc #effective Hamiltonian
return scipy.linalg.expm(1j*(t/T)*Hamiltonian)
#calculate the winding number
def main():
delta_1 = 1e-5 #differentiation step
delta_2 = 1e-3 #integration step
for t in np.arange(0,T,delta_2):
for k in np.arange(-np.pi, np.pi, delta_2):
H0 = U(k,t)*UF(k,t)
H1 = U(k+delta_1,t)*UF(k+delta_1,t)
C = np.trace(scipy.linalg.inv(H0)*((H1-H0)/delta_1))*delta_2
W = C*delta_2
print('Winding number = ', (1j*C)/(2*np.pi))
if __name__ == '__main__':
main() | [
"scipy.linalg.expm",
"numpy.matrix",
"numpy.abs",
"numpy.log",
"math.pow",
"numpy.angle",
"numpy.zeros",
"numpy.mod",
"scipy.linalg.inv",
"numpy.arange",
"numpy.exp",
"numpy.cos",
"cmath.exp",
"numpy.conjugate",
"numpy.sqrt"
] | [((561, 570), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (567, 570), True, 'import numpy as np\n'), ((587, 598), 'numpy.angle', 'np.angle', (['z'], {}), '(z)\n', (595, 598), True, 'import numpy as np\n'), ((865, 896), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {'dtype': 'complex'}), '((2, 2), dtype=complex)\n', (873, 896), True, 'import numpy as np\n'), ((912, 943), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {'dtype': 'complex'}), '((2, 2), dtype=complex)\n', (920, 943), True, 'import numpy as np\n'), ((1514, 1533), 'numpy.conjugate', 'np.conjugate', (['eiphi'], {}), '(eiphi)\n', (1526, 1533), True, 'import numpy as np\n'), ((1659, 1676), 'numpy.exp', 'np.exp', (['(-1.0j * E)'], {}), '(-1.0j * E)\n', (1665, 1676), True, 'import numpy as np\n'), ((1683, 1699), 'numpy.exp', 'np.exp', (['(1.0j * E)'], {}), '(1.0j * E)\n', (1689, 1699), True, 'import numpy as np\n'), ((1853, 1900), 'scipy.linalg.expm', 'scipy.linalg.expm', (['(1.0j * (t / T) * Hamiltonian)'], {}), '(1.0j * (t / T) * Hamiltonian)\n', (1870, 1900), False, 'import scipy\n'), ((2033, 2057), 'numpy.arange', 'np.arange', (['(0)', 'T', 'delta_2'], {}), '(0, T, delta_2)\n', (2042, 2057), True, 'import numpy as np\n'), ((760, 775), 'numpy.log', 'np.log', (['modulus'], {}), '(modulus)\n', (766, 775), True, 'import numpy as np\n'), ((1265, 1299), 'scipy.linalg.expm', 'scipy.linalg.expm', (['(-1.0j * matrix2)'], {}), '(-1.0j * matrix2)\n', (1282, 1299), False, 'import scipy\n'), ((1296, 1330), 'scipy.linalg.expm', 'scipy.linalg.expm', (['(-1.0j * matrix1)'], {}), '(-1.0j * matrix1)\n', (1313, 1330), False, 'import scipy\n'), ((1557, 1595), 'numpy.matrix', 'np.matrix', (['[[eiphic, eiphic], [1, -1]]'], {}), '([[eiphic, eiphic], [1, -1]])\n', (1566, 1595), True, 'import numpy as np\n'), ((1616, 1652), 'numpy.matrix', 'np.matrix', (['[[eiphi, 1], [eiphi, -1]]'], {}), '([[eiphi, 1], [eiphi, -1]])\n', (1625, 1652), True, 'import numpy as np\n'), ((2075, 2108), 'numpy.arange', 'np.arange', (['(-np.pi)', 'np.pi', 
'delta_2'], {}), '(-np.pi, np.pi, delta_2)\n', (2084, 2108), True, 'import numpy as np\n'), ((716, 751), 'numpy.mod', 'np.mod', (['(argument - theta)', '(2 * np.pi)'], {}), '(argument - theta, 2 * np.pi)\n', (722, 751), True, 'import numpy as np\n'), ((1180, 1200), 'cmath.exp', 'cmath.exp', (['(-1.0j * k)'], {}), '(-1.0j * k)\n', (1189, 1200), False, 'import cmath\n'), ((1230, 1249), 'cmath.exp', 'cmath.exp', (['(1.0j * k)'], {}), '(1.0j * k)\n', (1239, 1249), False, 'import cmath\n'), ((1545, 1555), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1552, 1555), True, 'import numpy as np\n'), ((1604, 1614), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1611, 1614), True, 'import numpy as np\n'), ((1397, 1416), 'math.pow', 'math.pow', (['(v * t1)', '(2)'], {}), '(v * t1, 2)\n', (1405, 1416), False, 'import math\n'), ((1414, 1433), 'math.pow', 'math.pow', (['(w * t2)', '(2)'], {}), '(w * t2, 2)\n', (1422, 1433), False, 'import math\n'), ((1447, 1456), 'numpy.cos', 'np.cos', (['k'], {}), '(k)\n', (1453, 1456), True, 'import numpy as np\n'), ((1484, 1500), 'numpy.exp', 'np.exp', (['(1.0j * k)'], {}), '(1.0j * k)\n', (1490, 1500), True, 'import numpy as np\n'), ((2218, 2238), 'scipy.linalg.inv', 'scipy.linalg.inv', (['H0'], {}), '(H0)\n', (2234, 2238), False, 'import scipy\n')] |
"""
# A tutorial about Label Images in ANTsPy
In ANTsPy, we have a special class for dealing with what I call
"Label Images" - a brain image where each pixel/voxel is associated with
a specific label. For instance, an atlas or parcellation is the prime example
of a label image. But `LabelImage` types dont <i>just</i> have labels... they
also can have real values associated with those labels. For instance, suppose
you have a set of Cortical Thickness values derived from an atlas, and you want
to assign those regional values *back* onto an actual brain image for plotting
or to perform analysis tasks which require some notion of spatial location.
`LabelImage` types let you do this.
Basically, to create a label image in *ANTsPy*, you need two things (one is
optional but highly recommended):
- a discrete atlas image (a normal `ANTsImage` type)
- (optionally) a pandas dataframe or python dictionary with a mapping
from discrete values in the atlas image to string atlas labels
This tutorial will show you all the beautiful things you can do with `LabelImage` types.
"""
"""
## A simple example
We will start with a simple example to demonstrate label images - a 2D square
with four regions
"""
import ants
import os
import numpy as np
import pandas as pd
# create discrete image
square = np.zeros((20,20))
square[:10,:10] = 0
square[:10,10:] = 1
square[10:,:10] = 2
square[10:,10:] = 3
# create regular ANTsImage from numpy array
img = ants.from_numpy(square).astype('uint8')
# plot image
#img.plot(cmap=None)
"""
Above, we created our discrete "atlas" image. Next, we will
create a dictionary containing the names for each value in
the atlas. We will make simple names.
"""
label_df = np.asarray([['TopRight', 'Right', 'Top'],
['BottomRight', 'Right', 'Bottom'],
['TopLeft', 'Left', 'Top'],
['BottomLeft', 'Left', 'Bottom']])
label_df = pd.DataFrame(label_df, index=[1,2,3,4],
columns=['Quadrant', 'Right/Left', 'Top/Bottom'])
atlas = ants.LabelImage(label_image=img, label_info=label_df)
"""
You can index a label image like a dictionary, and it will return
the unique image values corresponding to that label, or more than
one if appropriate.
"""
up_right_idx = atlas['UpperRight']
print(up_right_idx) # should be 1
right_idxs = atlas['Right']
print(right_idxs) # should be [1, 2]
"""
## A real example
Now that we have the basics of the `ants.LabelImage` class down, we
can move on to a real example to show how this would work in practice.
In this example, we have a Freesurfer atlas (the Desikan-killany atlas,
aka "aparc+aseg.mgz") and a data frame of aggregated cortical thickness values
for a subset of those regions for a collection of subjects.
Our first task is to create a LabelImage for this atlas.
"""
"""
We start by loading in the label info as a pandas dataframe
"""
proc_dir = '/users/ncullen/desktop/projects/tadpole/data/processed/'
raw_dir = '/users/ncullen/desktop/projects/tadpole/data/raw/'
label_df = pd.read_csv(os.path.join(proc_dir, 'UCSF_FS_Map.csv'), index_col=0)
print(label_df.head())
"""
As you can see, the label dataframe has the the atlas values as the dataframe
index and a set of columns with different labels for each index.
Next, we load in the discrete atlas image.
"""
atlas_img = ants.image_read(os.path.join(raw_dir, 'freesurfer/aparc+aseg.mgz')).astype('uint32')
atlas_img.plot()
label_img = ants.LabelImage(image=atlas_img, info=label_df)
"""
Let's see this in action on a template
"""
t1_img = ants.image_read(os.path.join(raw_dir,'freesurfer/T1.mgz'))
t1_img.plot()
# set the label image
t1_img.set_label_image(atlas_img)
"""
Our second task is create an image for each subject that fills in the brain
region locations with the associated region's cortical thickness
"""
data = pd.read_csv(os.path.join())
| [
"pandas.DataFrame",
"numpy.asarray",
"numpy.zeros",
"ants.from_numpy",
"ants.LabelImage",
"os.path.join"
] | [((1316, 1334), 'numpy.zeros', 'np.zeros', (['(20, 20)'], {}), '((20, 20))\n', (1324, 1334), True, 'import numpy as np\n'), ((1719, 1864), 'numpy.asarray', 'np.asarray', (["[['TopRight', 'Right', 'Top'], ['BottomRight', 'Right', 'Bottom'], [\n 'TopLeft', 'Left', 'Top'], ['BottomLeft', 'Left', 'Bottom']]"], {}), "([['TopRight', 'Right', 'Top'], ['BottomRight', 'Right', 'Bottom'\n ], ['TopLeft', 'Left', 'Top'], ['BottomLeft', 'Left', 'Bottom']])\n", (1729, 1864), True, 'import numpy as np\n'), ((1951, 2047), 'pandas.DataFrame', 'pd.DataFrame', (['label_df'], {'index': '[1, 2, 3, 4]', 'columns': "['Quadrant', 'Right/Left', 'Top/Bottom']"}), "(label_df, index=[1, 2, 3, 4], columns=['Quadrant',\n 'Right/Left', 'Top/Bottom'])\n", (1963, 2047), True, 'import pandas as pd\n'), ((2074, 2127), 'ants.LabelImage', 'ants.LabelImage', ([], {'label_image': 'img', 'label_info': 'label_df'}), '(label_image=img, label_info=label_df)\n', (2089, 2127), False, 'import ants\n'), ((3495, 3542), 'ants.LabelImage', 'ants.LabelImage', ([], {'image': 'atlas_img', 'info': 'label_df'}), '(image=atlas_img, info=label_df)\n', (3510, 3542), False, 'import ants\n'), ((3091, 3132), 'os.path.join', 'os.path.join', (['proc_dir', '"""UCSF_FS_Map.csv"""'], {}), "(proc_dir, 'UCSF_FS_Map.csv')\n", (3103, 3132), False, 'import os\n'), ((3616, 3658), 'os.path.join', 'os.path.join', (['raw_dir', '"""freesurfer/T1.mgz"""'], {}), "(raw_dir, 'freesurfer/T1.mgz')\n", (3628, 3658), False, 'import os\n'), ((3902, 3916), 'os.path.join', 'os.path.join', ([], {}), '()\n', (3914, 3916), False, 'import os\n'), ((1465, 1488), 'ants.from_numpy', 'ants.from_numpy', (['square'], {}), '(square)\n', (1480, 1488), False, 'import ants\n'), ((3396, 3446), 'os.path.join', 'os.path.join', (['raw_dir', '"""freesurfer/aparc+aseg.mgz"""'], {}), "(raw_dir, 'freesurfer/aparc+aseg.mgz')\n", (3408, 3446), False, 'import os\n')] |
from skimage.segmentation._watershed import watershed
from i3Deep import utils
import os
from evaluate import evaluate
import numpy as np
from tqdm import tqdm
def compute_predictions(image_path, mask_path, gt_path, save_path, nr_modalities, class_labels):
image_filenames = utils.load_filenames(image_path)[::nr_modalities]
mask_filenames = utils.load_filenames(mask_path)
for i in tqdm(range(len(image_filenames))):
image, affine, spacing, header = utils.load_nifty(image_filenames[i])
mask, _, _, _ = utils.load_nifty(mask_filenames[i])
labels = np.unique(mask)
# labels = labels[labels > 0]
for label in np.flip(labels):
mask[mask == label] = label + 1
mask = mask.astype(np.uint8)
mask = watershed(image=image, markers=mask)
for label in labels:
mask[mask == label + 1] = label
utils.save_nifty(save_path + os.path.basename(mask_filenames[i][:-12] + ".nii.gz"), mask, affine, spacing, header, is_mask=True)
results = evaluate(gt_path, save_path, class_labels)
return results | [
"skimage.segmentation._watershed.watershed",
"numpy.flip",
"os.path.basename",
"evaluate.evaluate",
"i3Deep.utils.load_nifty",
"i3Deep.utils.load_filenames",
"numpy.unique"
] | [((360, 391), 'i3Deep.utils.load_filenames', 'utils.load_filenames', (['mask_path'], {}), '(mask_path)\n', (380, 391), False, 'from i3Deep import utils\n'), ((1059, 1101), 'evaluate.evaluate', 'evaluate', (['gt_path', 'save_path', 'class_labels'], {}), '(gt_path, save_path, class_labels)\n', (1067, 1101), False, 'from evaluate import evaluate\n'), ((288, 320), 'i3Deep.utils.load_filenames', 'utils.load_filenames', (['image_path'], {}), '(image_path)\n', (308, 320), False, 'from i3Deep import utils\n'), ((485, 521), 'i3Deep.utils.load_nifty', 'utils.load_nifty', (['image_filenames[i]'], {}), '(image_filenames[i])\n', (501, 521), False, 'from i3Deep import utils\n'), ((547, 582), 'i3Deep.utils.load_nifty', 'utils.load_nifty', (['mask_filenames[i]'], {}), '(mask_filenames[i])\n', (563, 582), False, 'from i3Deep import utils\n'), ((601, 616), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (610, 616), True, 'import numpy as np\n'), ((678, 693), 'numpy.flip', 'np.flip', (['labels'], {}), '(labels)\n', (685, 693), True, 'import numpy as np\n'), ((794, 830), 'skimage.segmentation._watershed.watershed', 'watershed', ([], {'image': 'image', 'markers': 'mask'}), '(image=image, markers=mask)\n', (803, 830), False, 'from skimage.segmentation._watershed import watershed\n'), ((944, 997), 'os.path.basename', 'os.path.basename', (["(mask_filenames[i][:-12] + '.nii.gz')"], {}), "(mask_filenames[i][:-12] + '.nii.gz')\n", (960, 997), False, 'import os\n')] |
# The setup here is similar to Table 8.1 in Watanabe textbook. I don't use his prior for A and B however. He also never specifies how he chose A_0 and B_0
from __future__ import print_function
from torch.distributions.uniform import Uniform
from torch.distributions.normal import Normal
from torch.distributions.multivariate_normal import MultivariateNormal
import sys
import numpy as np
sys.path.append('../')
from main import *
class Args:
dataset = 'reducedrank_synthetic'
sanity_check = True
syntheticsamplesize = 1000
network = 'reducedrank'
posterior_method = 'implicit'
batchsize = 100
epochs = 200
epsilon_mc = 100
pretrainDepochs = 100
trainDepochs = 50
n_hidden_D = 128
num_hidden_layers_D = 2
n_hidden_G = 256
num_hidden_layers_G = 1
lr_primal = 1e-3
lr_dual = 5e-2
beta_auto_liberal = False
beta_auto_conservative = False
beta_auto_oracle = False
betasbegin = 0.1
betasend = 1.5
betalogscale = True
numbetas = 10
elasticnet_alpha = 1.0
R = 200
MCs = 10
log_interval = 50
cuda = False
args=Args()
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
def train(args, train_loader, valid_loader):
# get a grid of inverse temperatures [beta_1/log n, \ldots, beta_k/log n]
set_betas(args)
mc = 1
saveimgpath = None
nll_betas_implicit = np.empty(0)
for beta_index in range(args.betas.shape[0]):
# train implicit variational inference
print('Begin training IVI')
G = train_implicitVI(train_loader, valid_loader, args, mc, beta_index, saveimgpath)
print('Finished training IVI')
nllw_array_implicit = approxinf_nll_implicit(train_loader, G, args)
nllw_mean_implicit = sum(nllw_array_implicit)/len(nllw_array_implicit)
nll_betas_implicit = np.append(nll_betas_implicit, nllw_mean_implicit)
ivi_robust, ivi_ols = lsfit_lambda(nll_betas_implicit, args, saveimgname=None)
return ivi_robust
def compute_predictive_dist(args, G, loader):
R = 1000
eps = torch.randn(R, args.epsilon_dim)
sampled_weights = G(eps)
wholex = loader.dataset[:][0]
wholey = loader.dataset[:][1]
list_of_param_dicts = weights_to_dict(args, sampled_weights)
pred_logprob = 0
for param_dict in list_of_param_dicts:
mean = torch.matmul(torch.matmul(wholex, param_dict['a']), param_dict['b'])
pred_logprob += -(args.output_dim*np.log(2 * np.pi) + torch.norm(wholey-mean,dim=1)**2) / 2
return pred_logprob/R
def compute_Bg(pred_logprob,loader,args):
wholex = loader.dataset[:][0]
wholey = loader.dataset[:][1]
mean = torch.matmul(torch.matmul(wholex, args.a_params), args.b_params)
mean_dev = torch.norm(wholey-mean,dim=1)**2
true_logprob = -(args.output_dim * np.log(2 * np.pi) + mean_dev) / 2
return (true_logprob-pred_logprob).mean()
args.input_dim = 6
args.output_dim = 6
args.a_params = torch.randn(args.input_dim,3)
args.b_params = torch.randn(3,args.output_dim)
H0 = torch.matrix_rank(torch.matmul(args.a_params,args.b_params))
Hrange = range(3, 6)
results = []
for H in Hrange:
args.H = H
args.model, args.w_dim = retrieve_model(args)
args.epsilon_dim = args.w_dim
Bg = np.empty(args.MCs)
rlct = np.empty(args.MCs)
for mc in range(0, args.MCs):
train_loader, valid_loader, test_loader = get_dataset_by_id(args, kwargs)
args.betas = [1.0]
beta_index = 0
G = train_implicitVI(train_loader, valid_loader, args, mc, beta_index, saveimgpath=None)
with torch.no_grad():
pred = compute_predictive_dist(args, G, test_loader)
Bg[mc] = compute_Bg(pred, test_loader, args)
rlct[mc] = train(args, train_loader, valid_loader)
print('reduced rank regression model H {}: mc {}: Bg {} rlct {}'.format(H,mc, Bg[mc], rlct[mc]))
print('H: {}'.format(H))
print('E_n Bg(n): {}'.format(Bg.mean()))
print('hat RLCT/n: {}'.format(rlct.mean() / args.syntheticsamplesize))
results.append({'H':H,'E_n Bg(n)': Bg.mean(), 'hat RLCT/n': rlct.mean()/ args.syntheticsamplesize})
with open('generalization_rr.pkl', 'wb') as f:
pickle.dump(results, f) | [
"sys.path.append",
"numpy.empty",
"numpy.append",
"numpy.log"
] | [((392, 414), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (407, 414), False, 'import sys\n'), ((1411, 1422), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (1419, 1422), True, 'import numpy as np\n'), ((3310, 3328), 'numpy.empty', 'np.empty', (['args.MCs'], {}), '(args.MCs)\n', (3318, 3328), True, 'import numpy as np\n'), ((3344, 3362), 'numpy.empty', 'np.empty', (['args.MCs'], {}), '(args.MCs)\n', (3352, 3362), True, 'import numpy as np\n'), ((1873, 1922), 'numpy.append', 'np.append', (['nll_betas_implicit', 'nllw_mean_implicit'], {}), '(nll_betas_implicit, nllw_mean_implicit)\n', (1882, 1922), True, 'import numpy as np\n'), ((2850, 2867), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (2856, 2867), True, 'import numpy as np\n'), ((2489, 2506), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (2495, 2506), True, 'import numpy as np\n')] |
import numpy as np
from scipy import stats
import seaborn as sns
def r2_pearson(x, y):
    """Squared Pearson correlation coefficient (R^2) between x and y."""
    corr, _ = stats.pearsonr(x, y)
    return corr ** 2
def r_pearson(x, y):
    """Pearson correlation coefficient between x and y."""
    corr, _ = stats.pearsonr(x, y)
    return corr
def r2_spearman(x, y):
    """Squared Spearman rank correlation coefficient between x and y."""
    rho, _ = stats.spearmanr(x, y)
    return rho ** 2
def r_spearman(x, y):
    """Spearman rank correlation coefficient between x and y."""
    rho, _ = stats.spearmanr(x, y)
    return rho
def plot_jointplot(
    x,
    y,
    df,
    kind="hex",
    correlation="spearman",
    logx=False,
    logy=False,
    reg_line_color="#e34a33",
    point_color="#4CB391",
    alpha=0.5,
):
    """
    Draw a seaborn jointplot of df[x] vs df[y] with an overlaid regression line
    and the chosen correlation coefficient annotated on the figure.

    : x, y (str): column names in df
    : df (pd.DataFrame): data; NOTE -- mutated in place when logx/logy add
      'log(col)' columns
    : kind (str): 'hex' for a hexbin joint plot, anything else for scatter
    : correlation (str): 'spearman' or 'pearson'
    : logx, logy (bool): plot log10(col + 1) instead of the raw column
    : reg_line_color, point_color (str): hex color codes
    : alpha (float): point transparency (used only for the non-hex kind)
    : returns: the seaborn JointGrid
    : raises Exception: if correlation is neither 'pearson' nor 'spearman'

    NOTE(review): `stat_func` and `JointGrid.annotate` were removed in newer
    seaborn releases; this presumably targets an older pinned seaborn --
    verify the environment's seaborn version before reuse.
    """
    if logx:
        # log10(col + 1) guards against zeros in the raw column
        df["log({})".format(x)] = np.log10(df[x] + 1)
        x = "log({})".format(x)
    if logy:
        df["log({})".format(y)] = np.log10(df[y] + 1)
        y = "log({})".format(y)
    if correlation not in ["spearman", "pearson"]:
        raise Exception("Valid correlation: pearson, spearman")
    # Pick the statistic displayed by seaborn's annotation
    if correlation == "spearman":
        r = r_spearman
    else:
        r = r_pearson
    if kind == "hex":
        g = sns.jointplot(
            x=x, y=y, data=df, kind="hex", color=point_color, height=8, stat_func=r
        )
    else:
        g = sns.jointplot(
            x=x, y=y, data=df, color=point_color, height=8, stat_func=r, alpha=alpha
        )
    # Annotate as "$R$ <correlation>: <value>"
    g = g.annotate(
        r,
        template="{stat}: {val:.2f}",
        stat="$R$ {}".format(correlation),
        loc="upper right",
        fontsize=18,
        frameon=False,
    )
    # Overlay the regression line only; points were already drawn by jointplot
    sns.regplot(x, y, data=df, ax=g.ax_joint, scatter=False, color=reg_line_color)
    return g
| [
"scipy.stats.spearmanr",
"scipy.stats.pearsonr",
"seaborn.regplot",
"seaborn.jointplot",
"numpy.log10"
] | [((1381, 1459), 'seaborn.regplot', 'sns.regplot', (['x', 'y'], {'data': 'df', 'ax': 'g.ax_joint', 'scatter': '(False)', 'color': 'reg_line_color'}), '(x, y, data=df, ax=g.ax_joint, scatter=False, color=reg_line_color)\n', (1392, 1459), True, 'import seaborn as sns\n'), ((163, 183), 'scipy.stats.pearsonr', 'stats.pearsonr', (['x', 'y'], {}), '(x, y)\n', (177, 183), False, 'from scipy import stats\n'), ((288, 309), 'scipy.stats.spearmanr', 'stats.spearmanr', (['x', 'y'], {}), '(x, y)\n', (303, 309), False, 'from scipy import stats\n'), ((556, 575), 'numpy.log10', 'np.log10', (['(df[x] + 1)'], {}), '(df[x] + 1)\n', (564, 575), True, 'import numpy as np\n'), ((655, 674), 'numpy.log10', 'np.log10', (['(df[y] + 1)'], {}), '(df[y] + 1)\n', (663, 674), True, 'import numpy as np\n'), ((947, 1037), 'seaborn.jointplot', 'sns.jointplot', ([], {'x': 'x', 'y': 'y', 'data': 'df', 'kind': '"""hex"""', 'color': 'point_color', 'height': '(8)', 'stat_func': 'r'}), "(x=x, y=y, data=df, kind='hex', color=point_color, height=8,\n stat_func=r)\n", (960, 1037), True, 'import seaborn as sns\n'), ((1078, 1169), 'seaborn.jointplot', 'sns.jointplot', ([], {'x': 'x', 'y': 'y', 'data': 'df', 'color': 'point_color', 'height': '(8)', 'stat_func': 'r', 'alpha': 'alpha'}), '(x=x, y=y, data=df, color=point_color, height=8, stat_func=r,\n alpha=alpha)\n', (1091, 1169), True, 'import seaborn as sns\n'), ((100, 120), 'scipy.stats.pearsonr', 'stats.pearsonr', (['x', 'y'], {}), '(x, y)\n', (114, 120), False, 'from scipy import stats\n'), ((223, 244), 'scipy.stats.spearmanr', 'stats.spearmanr', (['x', 'y'], {}), '(x, y)\n', (238, 244), False, 'from scipy import stats\n')] |
import numpy as np
import rospy
from std_msgs.msg import Float64MultiArray, MultiArrayDimension
def main():
    """
    ROS node that slowly drives all eight PhoneBot joints to a fixed stance.

    Publishes a Float64MultiArray of 8 joint position commands at 10 Hz; the
    command ramps linearly from 0 to -1.3 (presumably radians) over the first
    10 seconds, then holds that target until shutdown.
    """
    rospy.init_node('phonebot_stand', anonymous=True)
    cmd_topic = '/phonebot/joints_position_controller/command'
    pub = rospy.Publisher(cmd_topic, Float64MultiArray, queue_size=16)
    msg = Float64MultiArray()
    # Describe the payload layout: one flat dimension holding 8 joint values
    msg.layout.dim.append(
        MultiArrayDimension(
            label='',
            size=8,
            stride=8))
    rate = rospy.Rate(10)
    t0 = rospy.Time.now()
    while not rospy.is_shutdown():
        # Under simulated time, Time.now() can be zero until the clock starts;
        # keep re-sampling t0 so the ramp begins at the first valid timestamp.
        if t0.is_zero():
            t0 = rospy.Time.now()
            continue
        t1 = rospy.Time.now()
        dt = (t1-t0).to_sec()
        # Linear ramp: scale the -1.3 target by clip(elapsed/10s, 0, 1)
        cmd = np.clip(dt / 10.0, 0.0, 1.0) * np.full(8, -1.3)
        msg.data = cmd
        pub.publish(msg)
        rate.sleep()
if __name__ == '__main__':
    main()
| [
"numpy.full",
"std_msgs.msg.MultiArrayDimension",
"rospy.Time.now",
"rospy.Publisher",
"rospy.Rate",
"numpy.clip",
"rospy.is_shutdown",
"std_msgs.msg.Float64MultiArray",
"rospy.init_node"
] | [((114, 163), 'rospy.init_node', 'rospy.init_node', (['"""phonebot_stand"""'], {'anonymous': '(True)'}), "('phonebot_stand', anonymous=True)\n", (129, 163), False, 'import rospy\n'), ((237, 297), 'rospy.Publisher', 'rospy.Publisher', (['cmd_topic', 'Float64MultiArray'], {'queue_size': '(16)'}), '(cmd_topic, Float64MultiArray, queue_size=16)\n', (252, 297), False, 'import rospy\n'), ((308, 327), 'std_msgs.msg.Float64MultiArray', 'Float64MultiArray', ([], {}), '()\n', (325, 327), False, 'from std_msgs.msg import Float64MultiArray, MultiArrayDimension\n'), ((461, 475), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (471, 475), False, 'import rospy\n'), ((485, 501), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (499, 501), False, 'import rospy\n'), ((363, 410), 'std_msgs.msg.MultiArrayDimension', 'MultiArrayDimension', ([], {'label': '""""""', 'size': '(8)', 'stride': '(8)'}), "(label='', size=8, stride=8)\n", (382, 410), False, 'from std_msgs.msg import Float64MultiArray, MultiArrayDimension\n'), ((516, 535), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (533, 535), False, 'import rospy\n'), ((630, 646), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (644, 646), False, 'import rospy\n'), ((579, 595), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (593, 595), False, 'import rospy\n'), ((691, 719), 'numpy.clip', 'np.clip', (['(dt / 10.0)', '(0.0)', '(1.0)'], {}), '(dt / 10.0, 0.0, 1.0)\n', (698, 719), True, 'import numpy as np\n'), ((722, 738), 'numpy.full', 'np.full', (['(8)', '(-1.3)'], {}), '(8, -1.3)\n', (729, 738), True, 'import numpy as np\n')] |
from matplotlib import pyplot as plt
import numpy as np
def graph(sac, avg_reward, disc_r):
    """
    Live-plot SAC training diagnostics on a single shared axis.

    Each series is rescaled by its own maximum so everything fits one plot;
    the reward, policy-loss and discounted-reward series are stretched along
    x to span the Q-loss iteration count.

    : sac: object exposing q1_loss, q2_loss and p_loss sequences
    : avg_reward: per-epoch average rewards
    : disc_r: per-epoch discounted rewards
    """
    plt.title("Reward per Epoch")
    plt.xlabel("Epoch")
    plt.ylabel("Reward")
    # x-positions stretching the reward history over the Q-loss iteration count
    ls1 = np.linspace(0, len(sac.q1_loss), num=len(avg_reward)).tolist()
    avg_rp = np.array(avg_reward)
    plt.plot(ls1, avg_rp/np.max(np.abs(avg_rp)), label="Reward")
    # NOTE(review): the Q losses are divided by their signed max (not max |.|),
    # unlike the other series; an all-negative loss history would flip sign --
    # confirm these losses are non-negative.
    q1_loss = np.array(sac.q1_loss)
    q1_loss = q1_loss/q1_loss.max()
    q2_loss = np.array(sac.q2_loss)
    q2_loss = q2_loss/q2_loss.max()
    p_loss = np.array(sac.p_loss)
    p_loss = p_loss/abs(np.max(np.abs(p_loss)))
    plt.plot(q1_loss, label="Q1 loss")
    plt.plot(q2_loss, label="Q2 loss")
    ls2 = np.linspace(0, len(sac.q1_loss), num=len(p_loss)).tolist()
    plt.plot(ls2, p_loss, label="P loss")
    disc_rp = np.array(disc_r)
    disc_rp = disc_rp/np.max(np.abs(disc_rp))
    ls3 = np.linspace(0, len(sac.q1_loss), num=len(disc_rp)).tolist()
    plt.plot(ls3, disc_rp, label="Discount Reward")
    plt.legend()
    plt.draw()
    # Brief pause lets the GUI event loop render before the figure is cleared
    plt.pause(0.0001)
plt.clf() | [
"matplotlib.pyplot.title",
"numpy.abs",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.draw",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.pause"
] | [((97, 126), 'matplotlib.pyplot.title', 'plt.title', (['"""Reward per Epoch"""'], {}), "('Reward per Epoch')\n", (106, 126), True, 'from matplotlib import pyplot as plt\n'), ((128, 147), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (138, 147), True, 'from matplotlib import pyplot as plt\n'), ((149, 169), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Reward"""'], {}), "('Reward')\n", (159, 169), True, 'from matplotlib import pyplot as plt\n'), ((255, 275), 'numpy.array', 'np.array', (['avg_reward'], {}), '(avg_reward)\n', (263, 275), True, 'import numpy as np\n'), ((350, 371), 'numpy.array', 'np.array', (['sac.q1_loss'], {}), '(sac.q1_loss)\n', (358, 371), True, 'import numpy as np\n'), ((417, 438), 'numpy.array', 'np.array', (['sac.q2_loss'], {}), '(sac.q2_loss)\n', (425, 438), True, 'import numpy as np\n'), ((484, 504), 'numpy.array', 'np.array', (['sac.p_loss'], {}), '(sac.p_loss)\n', (492, 504), True, 'import numpy as np\n'), ((552, 586), 'matplotlib.pyplot.plot', 'plt.plot', (['q1_loss'], {'label': '"""Q1 loss"""'}), "(q1_loss, label='Q1 loss')\n", (560, 586), True, 'from matplotlib import pyplot as plt\n'), ((588, 622), 'matplotlib.pyplot.plot', 'plt.plot', (['q2_loss'], {'label': '"""Q2 loss"""'}), "(q2_loss, label='Q2 loss')\n", (596, 622), True, 'from matplotlib import pyplot as plt\n'), ((691, 728), 'matplotlib.pyplot.plot', 'plt.plot', (['ls2', 'p_loss'], {'label': '"""P loss"""'}), "(ls2, p_loss, label='P loss')\n", (699, 728), True, 'from matplotlib import pyplot as plt\n'), ((741, 757), 'numpy.array', 'np.array', (['disc_r'], {}), '(disc_r)\n', (749, 757), True, 'import numpy as np\n'), ((871, 918), 'matplotlib.pyplot.plot', 'plt.plot', (['ls3', 'disc_rp'], {'label': '"""Discount Reward"""'}), "(ls3, disc_rp, label='Discount Reward')\n", (879, 918), True, 'from matplotlib import pyplot as plt\n'), ((921, 933), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (931, 933), True, 'from matplotlib import 
pyplot as plt\n'), ((935, 945), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (943, 945), True, 'from matplotlib import pyplot as plt\n'), ((947, 964), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.0001)'], {}), '(0.0001)\n', (956, 964), True, 'from matplotlib import pyplot as plt\n'), ((966, 975), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (973, 975), True, 'from matplotlib import pyplot as plt\n'), ((784, 799), 'numpy.abs', 'np.abs', (['disc_rp'], {}), '(disc_rp)\n', (790, 799), True, 'import numpy as np\n'), ((305, 319), 'numpy.abs', 'np.abs', (['avg_rp'], {}), '(avg_rp)\n', (311, 319), True, 'import numpy as np\n'), ((533, 547), 'numpy.abs', 'np.abs', (['p_loss'], {}), '(p_loss)\n', (539, 547), True, 'import numpy as np\n')] |
import numpy as np # Linear algebra
import re # Regular expressions
import pandas as pd # Data wrangling
import string
import sys
# Custom dependencies
sys.path.append('/projects/../../PythonNotebooks/model/')
from helper import *
class Translator(object):
    """
    Encodes SMILES strings into padded integer (or one-hot) sequences and back.

    Workflow: multi-character tokens (halogens, bracketed atoms such as [nH])
    are first recoded to single characters, SOS/EOS markers are added, then
    each character is mapped to an integer via `self.dictionary`.
    """
    def __init__(self, DataFrame, maxlen = 150, one_hot_encode = False):
        """
        Constructor.
        : DataFrame (pd.DataFrame or pd.Series): structures to be encoded
        : maxlen (int, default = 150): maximum sequence length used for padding
        : one_hot_encode (bool): toggle to one-hot encode
        """
        self.DataFrame = DataFrame
        # Accept either a bare Series of SMILES or a DataFrame with a 'SMILES' column
        if isinstance(DataFrame, pd.Series):
            self.structures = DataFrame
        else:
            self.structures = self.DataFrame['SMILES']
        self.maxlen = maxlen
        self.additional_chars = set()
        # Matches bracketed multi-character tokens such as [nH] or [O-]
        self.special_char_regex = r'(\[[^\]]*\])'
        # Start-of-sequence / end-of-sequence markers
        self.sos, self.eos = '^', '$'
        self.ohe = one_hot_encode
    def encode(self):
        """
        Encodes SMILES strings: maps every character of each (recoded) SMILES
        to its integer token, optionally one-hot encodes, and stores the result
        as a Series of numpy arrays back into `self.smiles`.
        """
        translated = [[self.dictionary[atom] for atom in molecule] for molecule in self.smiles]
        if self.ohe:
            translated = [self.one_hot_encode(molecule) for molecule in translated]
        self.smiles = pd.Series(translated).map(lambda x: np.asarray(x))
    def pad(self):
        """
        Pads SMILES sequences in `self.smiles` to the maximum length specified
        in the constructor; the result is stored in `self.padded`.
        """
        if self.ohe:
            seq_lengths = list(map(len, self.smiles))
            self.padded = pd.DataFrame(index = np.arange(len(self.smiles)), columns = ['SMILES'])
            # Padding the sequence with all-zero one-hot rows at the bottom
            for idx, (seq, seqlen) in enumerate(list(zip(self.smiles, seq_lengths))):
                seq_smi = np.zeros((self.maxlen, len(self.dictionary)))
                seq_smi[:seqlen] = seq
                self.padded.iloc[idx,0] = seq_smi
        else:
            # 1-D integer sequences: right-pad with zeros up to maxlen
            self.padded = self.smiles.map(lambda x: np.pad(x, (0,self.maxlen - len(x)), 'constant'))
    def one_hot_encode(self, encoded):
        """
        One-hot encodes a tokenised SMILES string.
        : encoded (list): integer tokens (1-based) of a single SMILES string
        """
        # Correcting translation ex professo for OHE (tokens are 1-based;
        # shift to 0-based column indices)
        array = np.array(encoded) - 1
        # Creation of matrix of 0s
        one_hot_array = np.zeros((array.size, len(self.dictionary)), dtype = np.int16)
        # Filling the matrix accordingly
        one_hot_array[np.arange(one_hot_array.shape[0]), array.flatten()] = 1
        one_hot_array= one_hot_array.reshape((*array.shape, len(self.dictionary)))
        return one_hot_array
    def _decode(self, dictionary, special_chars_dictionary):
        """
        Converts arrays of integers back into SMILES strings based on the dictionaries provided.
        : dictionary (dict): standard dictionary containing the mapping between tokens and integers
        : special_chars_dictionary (dict): dictionary containing the mapping between special characters tokens and 'standard tokens'
                                           from the standard dictionary
        WARNING, DECODING IS NOT PROVIDED FOR OHE, YET
        """
        # NOTE(review): this reads `self.structures`, which holds the raw input
        # SMILES rather than encoded/padded arrays -- presumably the instance is
        # expected to have been re-pointed at encoded data; verify with callers.
        unpadded = self.structures.map(lambda x: np.trim_zeros(x, 'b'))
        # Decoding characters
        decoding_dictionary = {v: k for k, v in dictionary.items()}
        # (dfernandez): this is the original list comprehension that had been
        # omitted for exploration/debugging purposes...
        # will recover once the bug is identified
        decoded_chars = [[decoding_dictionary[integer] for integer in mol] for mol in unpadded]
        # Decoding special characters
        decoding_special_characters_dictionary = {v: k for k, v in special_chars_dictionary.items()}
        decoded = [[decoding_special_characters_dictionary[integer] if integer in decoding_special_characters_dictionary.keys() else integer for integer in mol] for mol in decoded_chars]
        decoded = [''.join(i) for i in decoded]
        decoded = pd.Series(decoded)
        # Strip SOS/EOS markers and restore the two-character halogens
        decoded = decoded.str.replace(self.sos,'').str.replace(self.eos,'').str.replace('A','Cl').str.replace('D','Br')
        return decoded
    def recode(self):
        """
        Recodes the SMILES strings, so that each special character corresponds to a single symbol
        """
        self._replace_halogen()
        self._sos_and_eos()
    def _replace_halogen(self):
        """
        Replaces halogen atoms with single-characters ('Cl' -> 'A', 'Br' -> 'D').
        """
        self.smiles = self.structures.str.replace('Cl','A').str.replace('Br','D')
    def recode_special_chars(self):
        """
        Identifies special characters and replaces them with single-characters.
        """
        # Recoding: split each SMILES around bracketed tokens, keeping the tokens
        self.smiles = self.smiles.map(lambda x: list(filter(None,re.split(self.special_char_regex, x)))) # filtering to avoid '' after split
        recoded = [[self.special_chars_dictionary[char] if char in self.special_chars_dictionary.keys() else char for char in chars] for chars in self.smiles]
        recoded = [''.join(i) for i in recoded]
        self.smiles = pd.Series(recoded)
    def _sos_and_eos(self):
        """
        Adds Start Of Sequence and End Of Sequence characters to a given SMILES string.
        """
        # Generate SOS and EOS characters from special_chars list generated in recode_special_chars function
        self.smiles = self.sos + self.smiles + self.eos
    def add_characters(self, chars):
        """
        Add characters to current vocabulary
        : chars (set) : characters to add
        """
        # Collect characters
        for char in chars:
            self.additional_chars.add(char)
        char_list = list(self.additional_chars)
        # Sort list of characters
        char_list.sort()
        self.chars = char_list
        # Generate dictionaries (enumerate starts at 1, so token 0 never
        # collides with real tokens and can be used for padding)
        self.int2token = dict(enumerate(self.chars, 1))
        self.dictionary = {token:integer for integer,token in self.int2token.items()}
    def generate_special_chars_dictionary(self):
        """
        Generates a dictionary mapping special characters to single characters (standard tokens).
        """
        current_chars = set(self.chars)
        # Generation of new characters for special characters
        new_chars = set(string.ascii_letters)
        self.new_chars = sorted(new_chars.difference(current_chars))
        keys = self.new_chars[:len(self.special_chars)]
        # Generation of the dictionary with single-characters and special characters
        self.special_chars_dictionary = dict(zip(self.special_chars, keys))
    def identify_special_chars(self):
        # Concatenate the whole corpus into one string to scan for special tokens
        self.chars = self.smiles.str.cat(sep = ',').replace(',','')
        # Identification of special characters
        self.special_chars = sorted(set(re.compile(self.special_char_regex).findall(self.chars)))
    def init_dict(self, special_characters = True):
        """
        Takes the series with SMILES to initialise the vocabulary and generate the corresponding dictionaries
        """
        # Generate standard dictionary
        self.recode()
        if special_characters:
            # Generate special characters dictionary
            self.identify_special_chars()
            self.generate_special_chars_dictionary()
            self.recode_special_chars()
        chars = sorted(set(self.smiles.str.cat(sep = ',').replace(',','')))
        self.add_characters(chars)
        # NOTE(review): when special_characters=False, self.special_chars_dictionary
        # was never created and the return below raises AttributeError -- confirm
        # callers always use the default.
        return self.special_chars_dictionary, self.dictionary
    def init_translation(self, standard_dictionary, special_characters_dictionary = None):
        """
        Translates SMILES strings into tokens.
        : standard_dictionary (dict): standard dictionary containing the mapping between tokens and integers
        : special_characters_dictionary (dict, optional): dictionary containing the mapping between special characters tokens and 'standard tokens'
                                                          from the standard dictionary
        """
        self.dictionary = standard_dictionary
        if special_characters_dictionary is not None:
            self.special_chars_dictionary = special_characters_dictionary
        self.recode()
        if special_characters_dictionary is not None:
            self.recode_special_chars()
        self.encode()
        self.pad()
        self.translated_DataFrame = pd.DataFrame(self.padded, columns = ['SMILES'])
        return self.translated_DataFrame
"""
Implements creation and storing of special characters and standard character dictionaries
"""
import glob
import json
class MasterDictionary(object):
def __call__(self, load = False):
"""
Callable.
"""
# Loading or dumping dictionaries as relevant
self.load_master_dictionary()
if load:
return self.master_standard_dictionary, self.master_special_characters_dictionary
else:
# If master dictionaries are present, update them
if self.exist:
self.update_master_dictionary()
def __init__ (self, new_special_characters_dictionary = None, new_standard_dictionary = None, verbose = 1):
"""
Initialiser.
: new_special_characters_dictionary (dict, optional): special characters dictionary from new dataset
: new_standard_dictionary (dict, optional): standard dictionary from new dataset
: verbose (int): regulates verbosity
"""
self.new_special_characters_dictionary = new_special_characters_dictionary
self.new_standard_dictionary = new_standard_dictionary
self.verbose = verbose
# Toggle to update dictionaries if master dictionaries have already been created
self.exist = False
def update_master_dictionary(self):
"""
Updates both master dictionaries
"""
# Update of special characters must come first
# so that standard dictionary is updated accordingly
self.update_special_characters_dictionary(self.new_special_characters_dictionary, self.master_special_characters_dictionary)
self.update_standard_dictionary(self.new_standard_dictionary, self.master_standard_dictionary)
def load_master_dictionary(self, which = 'both'):
"""
Loads the master dictionaries if existing or dumps them if they have not been created beforehand.
"""
try:
dictionary = self.load_dictionary(which = which)
if isinstance(dictionary, list):
self.master_special_characters_dictionary, self.master_standard_dictionary, = dictionary
else:
if which == 'standard':
self.master_standard_dictionary = dictionary
elif which == 'special_characters':
self.master_special_characters_dictionary = dictionary
self.exist = True
except:
print('No existing master dictionaries!')
print('Saving master dictionaries...')
# Dumping dictionaries for the first time
self.dump_dictionary('master_special_characters_dictionary',self.new_special_characters_dictionary)
self.dump_dictionary('master_standard_dictionary',self.new_standard_dictionary)
print('Done!')
print('Special characters dictionary:\n {}'.format(self.new_special_characters_dictionary))
print('Standard dictionary:\n {}'.format(self.new_standard_dictionary))
def load_dictionary(self, which = str):
if which == 'both':
dictionaries = []
path = '/projects/../../PythonNotebooks/dictionary/'
for filename in glob.glob(os.path.join(path,'*.json')):
with open(filename,'r') as jsonFile:
dictionary = json.load(jsonFile)
dictionaries.append(dictionary)
return dictionaries
else:
if which == 'standard':
dictionary = 'master_standard_dictionary.json'
elif which == 'special_characters':
dictionary = 'master_special_characters_dictionary.json'
with open('/projects/../../PythonNotebooks/dictionary/'+ str(dictionary),'r') as jsonFile:
dictionary = json.load(jsonFile)
return dictionary
@staticmethod
def dump_dictionary(name, dictionary):
"""
Stores dictionaries in a directory.
: name (str): name to assign to the dictionary to be stored
: dictionary (dict): dictionary to store
"""
with open('/projects/../../PythonNotebooks/dictionary/' + str(name) +'.json','w') as jsonFile1:
json.dump(dictionary, jsonFile1, ensure_ascii = False)
def update_special_characters_dictionary(self, new_special_characters_dictionary, special_characters_dictionary):
"""
Updates the special characters dictionary with potential new special characters coming from new datasets
: new_special_characters_dictionary (dict): dictionary generated from the new dataset
: special_characters_dictionary (dict): master special characters dictionary, stored special characters dictionary
: standard_dictionary (dict): standard dictionary, used to avoid repetition of vocabulary
"""
# Getting special characters not present in master dictionary
new_special_characters = set(new_special_characters_dictionary.keys()) - set(special_characters_dictionary.keys())
if len(new_special_characters) != 0:
# In the special characters dictionary, values act as keys
keys = set(special_characters_dictionary.values())
# Keys to use in new dictionary are based on ASCII letters
keys_to_use = set(string.punctuation + string.ascii_letters).difference(keys)
keys_to_use = sorted(keys_to_use.difference(self.master_standard_dictionary.keys()))
# Generating new keys and mixing them with current keys
keys_to_use = keys_to_use[:len(new_special_characters)]
new_keys = keys_to_use + list(keys)
new_keys.sort()
# Collecting new and current special characters
special_characters = list(special_characters_dictionary.keys()) + list(new_special_characters)
special_characters.sort()
# Generating new dictionary
self.master_special_characters_dictionary = dict(zip(special_characters, new_keys))
# Dump the new master special characters dictionary
self.dump_dictionary('master_special_characters_dictionary',self.master_special_characters_dictionary)
print('The special characters dictionary has been updated.')
print('There are {} new special characters'.format(len(new_special_characters)))
print('These new special characters have been incorporated: \n {}'.format(sorted(new_special_characters)))
else:
if self.verbose > 0:
print('No new special characters to add to the master special characters dictionary.')
def update_standard_dictionary(self, new_standard_dictionary, standard_dictionary):
"""
Updates the standard dictionary with standard dictionaries from new datasets
: new_standard_dictionary (dict): dictionary generated from the new dataset
: standard_dictionary (dict): master standard dictionary, stored standard dictionary
"""
new_characters = set(new_standard_dictionary.keys()) - set(standard_dictionary.keys())
# Including new special characters that might have been added
# to the master special characters dictionary
new_special_characters = set(self.master_special_characters_dictionary.values()) - set(standard_dictionary.keys())
new_characters.update(list(new_special_characters))
if len(new_characters) != 0:
current_standard_dictionary_keys = set(standard_dictionary.keys())
# Adding the new characters
current_standard_dictionary_keys.update(new_characters)
# Sorting the new characters
standard_keys = sorted(current_standard_dictionary_keys)
# Generating new dictionary
token2int = dict(enumerate(standard_keys, 1))
self.master_standard_dictionary = {token:integer for integer, token in token2int.items()}
# Dump the new master special characters dictionary
self.dump_dictionary('master_standard_dictionary',self.master_standard_dictionary)
print('The standard dictionary has been updated.')
print('There are {} new characters'.format(len(new_characters)))
print('These new characters have been incorporated: \n {}'.format(sorted(new_characters)))
else:
if self.verbose > 0:
print('No new characters to add to the master standard dictionary.') | [
"sys.path.append",
"json.dump",
"pandas.DataFrame",
"json.load",
"re.split",
"numpy.trim_zeros",
"numpy.asarray",
"numpy.array",
"pandas.Series",
"numpy.arange",
"re.compile"
] | [((153, 210), 'sys.path.append', 'sys.path.append', (['"""/projects/../../PythonNotebooks/model/"""'], {}), "('/projects/../../PythonNotebooks/model/')\n", (168, 210), False, 'import sys\n'), ((4450, 4468), 'pandas.Series', 'pd.Series', (['decoded'], {}), '(decoded)\n', (4459, 4468), True, 'import pandas as pd\n'), ((5653, 5671), 'pandas.Series', 'pd.Series', (['recoded'], {}), '(recoded)\n', (5662, 5671), True, 'import pandas as pd\n'), ((9139, 9184), 'pandas.DataFrame', 'pd.DataFrame', (['self.padded'], {'columns': "['SMILES']"}), "(self.padded, columns=['SMILES'])\n", (9151, 9184), True, 'import pandas as pd\n'), ((2633, 2650), 'numpy.array', 'np.array', (['encoded'], {}), '(encoded)\n', (2641, 2650), True, 'import numpy as np\n'), ((13806, 13858), 'json.dump', 'json.dump', (['dictionary', 'jsonFile1'], {'ensure_ascii': '(False)'}), '(dictionary, jsonFile1, ensure_ascii=False)\n', (13815, 13858), False, 'import json\n'), ((1441, 1462), 'pandas.Series', 'pd.Series', (['translated'], {}), '(translated)\n', (1450, 1462), True, 'import pandas as pd\n'), ((1477, 1490), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1487, 1490), True, 'import numpy as np\n'), ((2858, 2891), 'numpy.arange', 'np.arange', (['one_hot_array.shape[0]'], {}), '(one_hot_array.shape[0])\n', (2867, 2891), True, 'import numpy as np\n'), ((3617, 3638), 'numpy.trim_zeros', 'np.trim_zeros', (['x', '"""b"""'], {}), "(x, 'b')\n", (3630, 3638), True, 'import numpy as np\n'), ((13344, 13363), 'json.load', 'json.load', (['jsonFile'], {}), '(jsonFile)\n', (13353, 13363), False, 'import json\n'), ((12823, 12842), 'json.load', 'json.load', (['jsonFile'], {}), '(jsonFile)\n', (12832, 12842), False, 'import json\n'), ((5330, 5366), 're.split', 're.split', (['self.special_char_regex', 'x'], {}), '(self.special_char_regex, x)\n', (5338, 5366), False, 'import re\n'), ((7506, 7541), 're.compile', 're.compile', (['self.special_char_regex'], {}), '(self.special_char_regex)\n', (7516, 7541), False, 
'import re\n')] |
from typing import Callable, List
import tensorflow as tf
import gudhi
import numpy as np
from distances.euclidean import euclidean_distance
def generate_distance_matrix(point_cloud: np.array,
                             distance_function: Callable[[np.array, np.array], float] = euclidean_distance):
    """
    Build the lower-triangular pairwise distance matrix of a point cloud.

    Only entries (i, j) with i > j are filled; the diagonal and the upper
    triangle stay zero, matching the lower-triangular format consumed by
    generate_tril_distance_matrix_for_gudhi.
    """
    n_points = point_cloud.shape[0]
    distances = np.zeros((n_points, n_points))
    for row, col in zip(*np.tril_indices(n_points)):
        if row == col:
            continue
        distances[row, col] = distance_function(point_cloud[row, :], point_cloud[col, :])
    return distances
def generate_tril_distance_matrix_for_gudhi(distance_matrix):
    """
    Convert a square (lower-triangular) distance matrix into gudhi's
    list-of-rows format: row i holds the i distances d(i, 0..i-1), so the
    first row is empty.
    """
    rows_out = []
    buffer = []
    for row, col in zip(*np.tril_indices(distance_matrix.shape[0])):
        if row == col:
            # Diagonal entry closes the current row (it comes last per row
            # in np.tril_indices' row-major order)
            rows_out.append(buffer[:])
            buffer = []
        else:
            buffer.append(distance_matrix[row, col])
    return rows_out
def generate_rips_complex(distance_matrix: List[List[float]], max_dimension: int):
    """
    Build a Vietoris-Rips simplex tree from a (lower-triangular) distance matrix.

    The tree is expanded to max_dimension + 1 so that homology in dimension
    max_dimension can be computed.
    """
    assert max_dimension >= 0
    complex_ = gudhi.RipsComplex(distance_matrix=distance_matrix)
    return complex_.create_simplex_tree(max_dimension=max_dimension + 1)
def _create_dict_of_dims_for_pdgm_points(persistence_diagrams):
points_dict = dict()
for pdgm_point in persistence_diagrams:
if pdgm_point[1] not in points_dict:
points_dict[pdgm_point[1]] = [pdgm_point[0]]
else:
points_dict[pdgm_point[1]].append(pdgm_point[0])
return points_dict
def _associate_dimension_to_persistence_pairs(rips_complex_simplex_tree, persistence_diagrams, persistence_pairs):
    """
    Annotate each persistence pair with the homology dimensions of its
    (birth, death) point, looked up from the persistence diagram.
    """
    dims_by_point = _create_dict_of_dims_for_pdgm_points(persistence_diagrams)
    annotated = []
    for pp in persistence_pairs:
        birth = rips_complex_simplex_tree.filtration(pp[0])
        death = rips_complex_simplex_tree.filtration(pp[1])
        annotated.append((pp, dims_by_point[(birth, death)]))
    return annotated
def compute_persistence(rips_complex_simplex_tree, hom_coeff: int = 2):
    """
    Compute the persistence diagram and dimension-annotated persistence pairs
    of a simplex tree.

    : rips_complex_simplex_tree: gudhi SimplexTree (e.g. from generate_rips_complex)
    : hom_coeff (int): coefficient selector; NOTE(review) -- gudhi's
      homology_coeff_field expects a prime p, and this passes hom_coeff + 1
      (3 for the default hom_coeff=2); confirm the offset is intentional.
    : returns: (persistence_diagrams, persistence_pairs_with_dim)
    """
    persistence_diagrams = rips_complex_simplex_tree.persistence(homology_coeff_field=hom_coeff + 1)
    persistence_pairs = rips_complex_simplex_tree.persistence_pairs()
    persistence_pairs_with_dim = _associate_dimension_to_persistence_pairs(rips_complex_simplex_tree,
                                                                          persistence_diagrams, persistence_pairs)
    return persistence_diagrams, persistence_pairs_with_dim
def extract_persistence_pairs_with_given_dimension(persistence_pairs, hom_dim: int):
    """Keep the simplex pairs whose associated dimension list contains hom_dim."""
    return [pp[0] for pp in persistence_pairs if hom_dim in pp[1]]
# Here, we compute v_bar and w_bar for each simplex of each persistence pair, to give the derivative definition
# seen in "A Framework for Differential Calculus on Persistence Barcodes". The ordering is given by the ordering
# yield by Gudhi.
def compute_indices_persistence_pairs(rips_complex, distance_matrix: np.array, persistence_pairs, number_of_points_sampled: int):
    """
    For each finite persistence pair, record the two vertex indices realising
    the birth distance and the two realising the death distance (4 ints per
    pair). Pairs are ordered by decreasing persistence; the flat index list is
    truncated or zero-padded to exactly 4 * number_of_points_sampled entries.
    """
    indices = []
    pers = []
    for s1, s2 in persistence_pairs:
        if len(s1) != 0 and len(
                s2) != 0:  # We discard points dying at infinity, specially the max. connected component for H_0 group.
            l1, l2 = np.array(s1), np.array(s2)
            # Vertices of the edge attaining the max pairwise distance inside each simplex
            i1 = [s1[v] for v in np.unravel_index(np.argmax(distance_matrix[l1, :][:, l1]), [len(s1), len(s1)])]
            i2 = [s2[v] for v in np.unravel_index(np.argmax(distance_matrix[l2, :][:, l2]), [len(s2), len(s2)])]
            pers.append(rips_complex.filtration(s2) - rips_complex.filtration(s1))
            indices += i1
            indices += i2
    # Sort points with distance-to-diagonal (ascending argsort reversed below)
    perm = np.argsort(pers)
    indices = list(np.reshape(indices, [-1, 4])[perm][::-1, :].flatten())
    # Output indices: truncate, then zero-pad to a fixed length
    indices = indices[:4 * number_of_points_sampled] + [0 for _ in range(0, max(0, 4 * number_of_points_sampled - len(indices)))]
    return list(np.array(indices, dtype=np.int32))
def get_persistence_diagrams_from_indices(distance_matrix: tf.Tensor, indices: List[List[int]], number_of_points_in_dgm: int):
    """
    Gather the (birth, death) distance-matrix entries addressed by a flat list
    of index pairs, shaped as a (number_of_points_in_dgm, 2) diagram tensor.
    """
    index_pairs = tf.reshape(indices, [2 * number_of_points_in_dgm, 2])
    gathered = tf.gather_nd(distance_matrix, index_pairs)
    return tf.reshape(gathered, [number_of_points_in_dgm, 2])
def get_persistence_diagrams_from_indices_correct(distance_matrix: np.array, indices: List[List[int]]):
    """
    Build a persistence diagram from (birth_index_pair, death_index_pair)
    tuples by reading the corresponding distance-matrix entries.
    """
    diagram_points = [
        [distance_matrix[b[0], b[1]], distance_matrix[d[0], d[1]]]
        for b, d in indices
    ]
    return np.array(diagram_points)
def get_indices_of_birth_death_persistence_diagrams(distance_matrix,
                                                    hom_dim, number_of_points_sampled,
                                                    hom_coeff: int = 2):
    """
    Pipeline: build the Rips simplex tree, compute its persistence, keep the
    pairs of homology dimension hom_dim, and return the flat list of
    distance-matrix vertex indices realising their births and deaths.
    """
    simplex_tree = generate_rips_complex(distance_matrix, hom_dim)
    _, pairs_with_dims = compute_persistence(simplex_tree, hom_coeff)
    pairs_in_dim = extract_persistence_pairs_with_given_dimension(pairs_with_dims, hom_dim)
    return compute_indices_persistence_pairs(simplex_tree,
                                             distance_matrix,
                                             pairs_in_dim,
                                             number_of_points_sampled)
| [
"numpy.tril_indices",
"numpy.argmax",
"tensorflow.reshape",
"numpy.zeros",
"numpy.argsort",
"numpy.array",
"numpy.reshape",
"gudhi.RipsComplex"
] | [((334, 388), 'numpy.zeros', 'np.zeros', (['(point_cloud.shape[0], point_cloud.shape[0])'], {}), '((point_cloud.shape[0], point_cloud.shape[0]))\n', (342, 388), True, 'import numpy as np\n'), ((1282, 1332), 'gudhi.RipsComplex', 'gudhi.RipsComplex', ([], {'distance_matrix': 'distance_matrix'}), '(distance_matrix=distance_matrix)\n', (1299, 1332), False, 'import gudhi\n'), ((4078, 4094), 'numpy.argsort', 'np.argsort', (['pers'], {}), '(pers)\n', (4088, 4094), True, 'import numpy as np\n'), ((4998, 5011), 'numpy.array', 'np.array', (['dgm'], {}), '(dgm)\n', (5006, 5011), True, 'import numpy as np\n'), ((4337, 4370), 'numpy.array', 'np.array', (['indices'], {'dtype': 'np.int32'}), '(indices, dtype=np.int32)\n', (4345, 4370), True, 'import numpy as np\n'), ((413, 450), 'numpy.tril_indices', 'np.tril_indices', (['point_cloud.shape[0]'], {}), '(point_cloud.shape[0])\n', (428, 450), True, 'import numpy as np\n'), ((800, 841), 'numpy.tril_indices', 'np.tril_indices', (['distance_matrix.shape[0]'], {}), '(distance_matrix.shape[0])\n', (815, 841), True, 'import numpy as np\n'), ((4552, 4605), 'tensorflow.reshape', 'tf.reshape', (['indices', '[2 * number_of_points_in_dgm, 2]'], {}), '(indices, [2 * number_of_points_in_dgm, 2])\n', (4562, 4605), True, 'import tensorflow as tf\n'), ((3635, 3647), 'numpy.array', 'np.array', (['s1'], {}), '(s1)\n', (3643, 3647), True, 'import numpy as np\n'), ((3649, 3661), 'numpy.array', 'np.array', (['s2'], {}), '(s2)\n', (3657, 3661), True, 'import numpy as np\n'), ((3712, 3752), 'numpy.argmax', 'np.argmax', (['distance_matrix[l1, :][:, l1]'], {}), '(distance_matrix[l1, :][:, l1])\n', (3721, 3752), True, 'import numpy as np\n'), ((3825, 3865), 'numpy.argmax', 'np.argmax', (['distance_matrix[l2, :][:, l2]'], {}), '(distance_matrix[l2, :][:, l2])\n', (3834, 3865), True, 'import numpy as np\n'), ((4114, 4142), 'numpy.reshape', 'np.reshape', (['indices', '[-1, 4]'], {}), '(indices, [-1, 4])\n', (4124, 4142), True, 'import numpy as np\n')] |
# Copyright 2020, Battelle Energy Alliance, LLC
# ALL RIGHTS RESERVED
"""
Created on Feb. 7, 2020
@author: wangc, mandd
"""
#External Modules------------------------------------------------------------------------------------
import numpy as np
import numpy.ma as ma
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from utils import mathUtils as utils
from utils import InputData, InputTypes
from .ReliabilityBase import ReliabilityBase
#Internal Modules End--------------------------------------------------------------------------------
class BathtubModel(ReliabilityBase):
  """
    Bathtub reliability models from:
    <NAME>, "A Hazard Rate Model," IEEE Trans. Rel. 29, 150 (1979)

    The hazard mixes a Weibull-like component (alpha, beta) weighted by c with
    an exponential-power component (rho, theta) weighted by (1 - c).
  """

  @classmethod
  def getInputSpecification(cls):
    """
      Collects input specifications for this class.
      @ In, None
      @ Out, inputSpecs, InputData, specs
    """
    inputSpecs = super(BathtubModel, cls).getInputSpecification()
    inputSpecs.description = r"""
      Bathtub reliability model, see reference "<NAME>. Dhillon, "A Hazard Rate Model," IEEE Trans. Rel. 29, 150 (1979)"
      """
    inputSpecs.addSub(InputData.parameterInputFactory('alpha', contentType=InputTypes.InterpretedListType,
        descr='Shape parameter'))
    inputSpecs.addSub(InputData.parameterInputFactory('beta', contentType=InputTypes.InterpretedListType,
        descr='Scale parameter'))
    inputSpecs.addSub(InputData.parameterInputFactory('c', contentType=InputTypes.InterpretedListType,
        descr='Weight parameter, 0<=c<=1'))
    inputSpecs.addSub(InputData.parameterInputFactory('theta', contentType=InputTypes.InterpretedListType,
        descr='Scale parameter'))
    inputSpecs.addSub(InputData.parameterInputFactory('rho', contentType=InputTypes.InterpretedListType,
        descr='Shape parameter'))
    inputSpecs.addSub(InputData.parameterInputFactory('Tm', contentType=InputTypes.InterpretedListType,
        descr='Mission time'))
    return inputSpecs

  def __init__(self):
    """
      Constructor
      @ In, None
      @ Out, None
    """
    super().__init__()
    # shape parameter
    self._alpha = None
    # scale parameter
    self._beta = 1
    # c \in [0,1], weight parameter
    self._c = 1
    # scale parameter
    self._theta = 1
    # shape parameter
    self._rho = 1.
    # NOTE(review): self._tm (mission time) and self._loc (location shift) are
    # used by the evaluation methods below but not initialized here; they are
    # presumably set through setVariable/_handleInput and the ReliabilityBase
    # parent -- confirm before relying on defaults.

  def _handleInput(self, paramInput):
    """
      Function to read the portion of the parsed xml input that belongs to this specialized class
      and initialize some stuff based on the inputs got
      @ In, paramInput, InputData.ParameterInput, the parsed xml input
      @ Out, None
    """
    super()._handleInput(paramInput)
    # Map each recognized XML child node onto the matching private attribute.
    for child in paramInput.subparts:
      if child.getName().lower() == 'alpha':
        self.setVariable('_alpha', child.value)
      elif child.getName().lower() == 'beta':
        self.setVariable('_beta', child.value)
      elif child.getName().lower() == 'tm':
        self.setVariable('_tm', child.value)
      elif child.getName().lower() == 'theta':
        self.setVariable('_theta', child.value)
      elif child.getName().lower() == 'rho':
        self.setVariable('_rho', child.value)
      elif child.getName().lower() == 'c':
        self.setVariable('_c', child.value)

  def initialize(self, inputDict):
    """
      Method to initialize this plugin
      @ In, inputDict, dict, dictionary of inputs
      @ Out, None
    """
    super().initialize(inputDict)

  def _checkInputParams(self, needDict):
    """
      Method to check input parameters
      @ In, needDict, dict, dictionary of required parameters
      @ Out, None
    """
    super()._checkInputParams(needDict)
    # The mixing weight must be a valid convex weight in [0, 1].
    if '_c' in needDict:
      if np.any(needDict['_c']<0) or np.any(needDict['_c']>1):
        raise IOError('Variable "{}" should be between [0,1], but "{}" is provided!'.format('_c',needDict['_c']))

  def _probabilityFunction(self):
    """
      Function to calculate probability
      @ In, None
      @ Out, pdf, float/numpy.array, the calculated pdf value(s)
    """
    alpha = self._alpha
    beta = self._beta
    c = self._c
    rho = self._rho
    theta = self._theta
    # Mission times before the location shift are masked; they get pdf 0
    # via the fill below.
    mask = self._tm < self._loc
    tm = ma.array(self._tm-self._loc, mask=mask)
    term1 = c * alpha * np.power(tm/beta, alpha - 1.)
    # NOTE(review): the exponent inside np.exp uses alpha, while the matching
    # cumulative-hazard term (term4 below) uses rho; this looks inconsistent
    # -- confirm against Dhillon (1979) before changing.
    term2 = (1.-c) * rho * np.power(tm/theta, rho -1.) * np.exp(np.power(tm/theta, alpha))
    term3 = -c * beta * np.power(tm/beta, alpha)
    term4 = -(1.-c) *(np.exp(np.power(tm/theta, rho)) - 1.)
    # pdf = hazard * reliability, i.e. h(t) * exp(-H(t))
    pdf = (term1+term2) * np.exp(term3+term4)
    pdf = pdf.filled(0.)
    return pdf

  def _cumulativeFunction(self):
    """
      Function to calculate cumulative value
      @ In, None
      @ Out, cdf, float/numpy.array, the calculated cdf value(s)
    """
    alpha = self._alpha
    beta = self._beta
    c = self._c
    rho = self._rho
    theta = self._theta
    # Times before the location shift get cdf 0 via the fill below.
    mask = self._tm < self._loc
    tm = ma.array(self._tm-self._loc, mask=mask)
    term3 = -c * beta * np.power(tm/beta, alpha)
    term4 = -(1.-c) *(np.exp(np.power(tm/theta, rho)) - 1.)
    # cdf = 1 - exp(-H(t)) with H the cumulative hazard.
    cdf = 1. - np.exp(term3+term4)
    cdf = cdf.filled(0.)
    return cdf

  def _reliabilityFunction(self):
    """
      Function to calculate reliability value
      @ In, None
      @ Out, rdf, float/numpy.array, the calculated reliability value(s)
    """
    alpha = self._alpha
    beta = self._beta
    c = self._c
    rho = self._rho
    theta = self._theta
    # Times before the location shift are fully reliable (filled with 1 below).
    mask = self._tm < self._loc
    tm = ma.array(self._tm-self._loc, mask=mask)
    term3 = -c * beta * np.power(tm/beta, alpha)
    term4 = -(1.-c) *(np.exp(np.power(tm/theta, rho)) - 1.)
    rdf = np.exp(term3+term4)
    rdf = rdf.filled(1.0)
    return rdf

  def _failureRateFunction(self):
    """
      Function to calculate hazard rate/failure rate
      @ In, None
      @ Out, frf, float/numpy.array, the calculated failure rate value(s)
    """
    alpha = self._alpha
    beta = self._beta
    c = self._c
    rho = self._rho
    theta = self._theta
    mask = self._tm < self._loc
    tm = ma.array(self._tm-self._loc, mask=mask)
    term1 = c * alpha * np.power(tm/beta, alpha - 1.)
    # NOTE(review): see _probabilityFunction -- the alpha in this exponent is
    # likely meant to be rho; confirm against the reference before changing.
    term2 = (1.-c) * rho * np.power(tm/theta, rho -1.) * np.exp(np.power(tm/theta, alpha))
    frf = term1 + term2
    frf = frf.filled(0.)
    return frf
| [
"utils.InputData.parameterInputFactory",
"numpy.power",
"numpy.any",
"numpy.ma.array",
"numpy.exp"
] | [((4302, 4343), 'numpy.ma.array', 'ma.array', (['(self._tm - self._loc)'], {'mask': 'mask'}), '(self._tm - self._loc, mask=mask)\n', (4310, 4343), True, 'import numpy.ma as ma\n'), ((5006, 5047), 'numpy.ma.array', 'ma.array', (['(self._tm - self._loc)'], {'mask': 'mask'}), '(self._tm - self._loc, mask=mask)\n', (5014, 5047), True, 'import numpy.ma as ma\n'), ((5564, 5605), 'numpy.ma.array', 'ma.array', (['(self._tm - self._loc)'], {'mask': 'mask'}), '(self._tm - self._loc, mask=mask)\n', (5572, 5605), True, 'import numpy.ma as ma\n'), ((5723, 5744), 'numpy.exp', 'np.exp', (['(term3 + term4)'], {}), '(term3 + term4)\n', (5729, 5744), True, 'import numpy as np\n'), ((6126, 6167), 'numpy.ma.array', 'ma.array', (['(self._tm - self._loc)'], {'mask': 'mask'}), '(self._tm - self._loc, mask=mask)\n', (6134, 6167), True, 'import numpy.ma as ma\n'), ((1282, 1396), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""alpha"""'], {'contentType': 'InputTypes.InterpretedListType', 'descr': '"""Shape parameter"""'}), "('alpha', contentType=InputTypes.\n InterpretedListType, descr='Shape parameter')\n", (1313, 1396), False, 'from utils import InputData, InputTypes\n'), ((1421, 1534), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""beta"""'], {'contentType': 'InputTypes.InterpretedListType', 'descr': '"""Scale parameter"""'}), "('beta', contentType=InputTypes.\n InterpretedListType, descr='Scale parameter')\n", (1452, 1534), False, 'from utils import InputData, InputTypes\n'), ((1559, 1679), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""c"""'], {'contentType': 'InputTypes.InterpretedListType', 'descr': '"""Weight parameter, 0<=c<=1"""'}), "('c', contentType=InputTypes.\n InterpretedListType, descr='Weight parameter, 0<=c<=1')\n", (1590, 1679), False, 'from utils import InputData, InputTypes\n'), ((1704, 1818), 'utils.InputData.parameterInputFactory', 
'InputData.parameterInputFactory', (['"""theta"""'], {'contentType': 'InputTypes.InterpretedListType', 'descr': '"""Scale parameter"""'}), "('theta', contentType=InputTypes.\n InterpretedListType, descr='Scale parameter')\n", (1735, 1818), False, 'from utils import InputData, InputTypes\n'), ((1843, 1955), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""rho"""'], {'contentType': 'InputTypes.InterpretedListType', 'descr': '"""Shape parameter"""'}), "('rho', contentType=InputTypes.\n InterpretedListType, descr='Shape parameter')\n", (1874, 1955), False, 'from utils import InputData, InputTypes\n'), ((1980, 2088), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""Tm"""'], {'contentType': 'InputTypes.InterpretedListType', 'descr': '"""Mission time"""'}), "('Tm', contentType=InputTypes.\n InterpretedListType, descr='Mission time')\n", (2011, 2088), False, 'from utils import InputData, InputTypes\n'), ((4366, 4398), 'numpy.power', 'np.power', (['(tm / beta)', '(alpha - 1.0)'], {}), '(tm / beta, alpha - 1.0)\n', (4374, 4398), True, 'import numpy as np\n'), ((4511, 4537), 'numpy.power', 'np.power', (['(tm / beta)', 'alpha'], {}), '(tm / beta, alpha)\n', (4519, 4537), True, 'import numpy as np\n'), ((4622, 4643), 'numpy.exp', 'np.exp', (['(term3 + term4)'], {}), '(term3 + term4)\n', (4628, 4643), True, 'import numpy as np\n'), ((5070, 5096), 'numpy.power', 'np.power', (['(tm / beta)', 'alpha'], {}), '(tm / beta, alpha)\n', (5078, 5096), True, 'import numpy as np\n'), ((5170, 5191), 'numpy.exp', 'np.exp', (['(term3 + term4)'], {}), '(term3 + term4)\n', (5176, 5191), True, 'import numpy as np\n'), ((5628, 5654), 'numpy.power', 'np.power', (['(tm / beta)', 'alpha'], {}), '(tm / beta, alpha)\n', (5636, 5654), True, 'import numpy as np\n'), ((6190, 6222), 'numpy.power', 'np.power', (['(tm / beta)', '(alpha - 1.0)'], {}), '(tm / beta, alpha - 1.0)\n', (6198, 6222), True, 'import numpy as np\n'), ((3814, 3840), 
'numpy.any', 'np.any', (["(needDict['_c'] < 0)"], {}), "(needDict['_c'] < 0)\n", (3820, 3840), True, 'import numpy as np\n'), ((3842, 3868), 'numpy.any', 'np.any', (["(needDict['_c'] > 1)"], {}), "(needDict['_c'] > 1)\n", (3848, 3868), True, 'import numpy as np\n'), ((4423, 4454), 'numpy.power', 'np.power', (['(tm / theta)', '(rho - 1.0)'], {}), '(tm / theta, rho - 1.0)\n', (4431, 4454), True, 'import numpy as np\n'), ((4460, 4487), 'numpy.power', 'np.power', (['(tm / theta)', 'alpha'], {}), '(tm / theta, alpha)\n', (4468, 4487), True, 'import numpy as np\n'), ((6247, 6278), 'numpy.power', 'np.power', (['(tm / theta)', '(rho - 1.0)'], {}), '(tm / theta, rho - 1.0)\n', (6255, 6278), True, 'import numpy as np\n'), ((6284, 6311), 'numpy.power', 'np.power', (['(tm / theta)', 'alpha'], {}), '(tm / theta, alpha)\n', (6292, 6311), True, 'import numpy as np\n'), ((4565, 4590), 'numpy.power', 'np.power', (['(tm / theta)', 'rho'], {}), '(tm / theta, rho)\n', (4573, 4590), True, 'import numpy as np\n'), ((5124, 5149), 'numpy.power', 'np.power', (['(tm / theta)', 'rho'], {}), '(tm / theta, rho)\n', (5132, 5149), True, 'import numpy as np\n'), ((5682, 5707), 'numpy.power', 'np.power', (['(tm / theta)', 'rho'], {}), '(tm / theta, rho)\n', (5690, 5707), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 20 09:34:32 2021
@author: jsalm
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 23 14:56:52 2021
@author: jsalm
"""
import tpp_class_ball as tpp
import numpy as np
import os
import matplotlib.pyplot as plt
dirname = os.path.dirname(__file__)
save_bin = os.path.join(dirname,"save_bin")
if __name__ == '__main__':
    # Parameter-sweep driver: iterate one coefficient over `iterable` and, for
    # each value, run the feed-forward iterator until the arm catches the ball
    # (or maxiter is hit), then plot traces/energies.
    plt.close('all')
    ### PARAMS ###
    #Arm
    plt.close('all')  # NOTE(review): duplicate of the call above; redundant
    n = 2
    pos1 = -np.pi/4 #start mgain: -pi/4, vx: -pi/4
    pos2 = np.pi/4 #start mgain: pi/4, vx: pi/4
    vel = 0
    l1 = (13)*0.0254  # link lengths: inches converted to meters (x 0.0254)
    l2 = (12+9)*0.0254
    m1 = 2*5.715264/3
    m2 = 5.715264/3
    damp = 10
    #mech
    mgain = 0 #start vx: 0; for fb: 150
    maxiter = 40
    ballcatch = False
    #ball
    e = 1 #coefficient of restitution
    mass = 2
    vx = -1.5 #start mgain: -1.5
    vy = 0
    y = 1
    x = 2 #start mgain: 2, vx:2,-2 depending on direction of vel,
    theta = 0
    ### Time ###
    Ttot = 3.5 # total time in second
    f_s = 500 # sample frequency (samples/s)
    t_s = np.arange(0,Ttot,1/f_s)
    t_ms = np.arange(0,Ttot*f_s,1)
    initial_cond_tpp = [[pos1,pos2],[vel,vel]]
    coeff_tpp = [[l1,l2],[m1,m2],damp]
    initial_cond_ball = [vx,vy,x,y]
    ### INIT ###
    Armobj = tpp.TpPendulum(n,initial_cond_tpp,coeff_tpp,mgain,t_s,f_s)
    mech = tpp.Muscle_Mech(mgain,f_s,t_s)
    Ballobj = tpp.Ball(t_s,e,mass,initial_cond_ball)
    #### Feedforward ####
    # f1adj = 0
    # f2adj = 0
    # count = 0
    # obstacle = [(0,0)]
    # while count < maxiter and not ballcatch:
    #     Ballobj = tpp.Ball(t_s,e,mass,initial_cond_ball)
    #     Armobj = tpp.TpPendulum(n,initial_cond_tpp,coeff_tpp,mgain,t_s,f_s)
    #     f1adj,f2adj,ballcatch = mech.ff_iterator(Armobj, Ballobj, maxiter, vx,vy, x, y,f1adj,f2adj, ballcatch)
    #     count += 1
    #### Feedback ####
    # xA,yA = Armobj.get_xy()
    # obstacle = (xA[0,2],yA[0,2])
    # for i in range(0,len(t_s)-1):
    #     t = [t_s[i],t_s[i+1]]
    #     dt = abs(t_s[i+1] - t_s[i])
    #     vx,vy,x,y,ballcatch = Ballobj.simple_ball(vx,vy,x,y,dt,obstacle)
    #     Xp = Armobj.fbsolve(t,x,y,ballcatch)
    #     xA,yA = Armobj.get_xy()
    #     obstacle = (xA[0,2],yA[0,2])
    ### Iteration Params ###
    num = -3
    lab = {}
    count = 0
    # Sweep from num to |num| in 10 equal steps.
    iterable = np.arange(num,abs(num)+(abs(num)-num)/10,(abs(num)-num)/10)
    coeff_name = 'mgain'
    fname = 'mgain vs frequency'
    catchcoord = []
    finalf1 = []
    finalf2 = []
    #Loop
    for j in iterable:
        if j > 0:
            x = -2
        else:
            x = 2
        #ITERATE PARAMS: mgain, vx, pos2
        initial_cond_tpp = [[pos1,pos2],[vel,vel]]
        lab[count] = coeff_name+": "+("%.2f"%j)
        #### PASTE FEEDForward HERE ####
        f1adj = 0
        f2adj = 0
        iteri = 0
        ballcatch = False
        obstacle = [(0,0)]
        while iteri < maxiter and not ballcatch:
            Ballobj = tpp.Ball(t_s,e,mass,initial_cond_ball)
            Armobj = tpp.TpPendulum(n,initial_cond_tpp,coeff_tpp,mgain,t_s,f_s)
            # NOTE(review): j is passed where vx went in the commented template
            # above, but coeff_name says 'mgain' -- verify which coefficient is
            # actually being swept.
            f1adj,f2adj,ballcatch = mech.ff_iterator(Armobj, Ballobj, maxiter, j,vy, x, y,f1adj,f2adj, ballcatch)
            iteri += 1
        if not iteri < maxiter:
            # Mark sweep values for which the ball was never caught.
            print('max iteration hit for: ' + lab[count])
            lab[count] = coeff_name+": "+("%.2f"%j)+" NO"
        catchcoord.append([Ballobj.storeB[-1][2],Ballobj.storeB[-1][3]])
        mech.stack_data(Armobj,Ballobj,ballcatch)
        finalf1.append(j+f1adj)
        finalf2.append(j+f2adj)
        count += 1
        ##### PASTE FEEDback HERE ####
        ### INIT ###
        #ball
        # ballcatch = False
        # e = 1 #coefficient of restitution
        # mass = 2
        # vx = -1.5 #start mgain: -1.5
        # vy = 0
        # y = 1
        # x = 2 #start mgain: 2, vx:2,-2 depending on direction of vel,
        # #objects
        # Armobj = tpp.TpPendulum(n,initial_cond_tpp,coeff_tpp,j,t_s,f_s)
        # Ballobj = tpp.Ball(t_s,e,mass,initial_cond_ball)
        # #actual loop
        # xA,yA = Armobj.get_xy()
        # obstacle = (xA[0,2],yA[0,2])
        # for i in range(0,len(t_s)-1):
        #     t = [t_s[i],t_s[i+1]]
        #     dt = abs(t_s[i+1] - t_s[i])
        #     vx,vy,x,y,ballcatch = Ballobj.simple_ball(vx,vy,x,y,dt,obstacle)
        #     Xp = Armobj.fbsolve(t,x,y,ballcatch)
        #     xA,yA = Armobj.get_xy()
        #     obstacle = (xA[0,2],yA[0,2])
        # if not ballcatch:
        #     print('max iteration hit for: ' + lab[count])
        #     lab[count] = coeff_name+": "+("%.2f"%j)+" NO"
        # catchcoord.append([Ballobj.storeB[-1][2],Ballobj.storeB[-1][3]])
        # mech.stack_data(Armobj,Ballobj,ballcatch)
        # count+=1
        continue  # NOTE(review): dead statement -- last line of the loop body
    # Plot the traces/energies of the last sweep value and print summary stats.
    Armobj.plot_pendulum_trace()
    Ballobj.plot_ball_trace()
    mech.plot_iter_traces(lab,catchcoord,coeff_name)
    mech.plot_work_energy(iterable,coeff_name)
    B = np.stack(Ballobj.storeB)
    anim = Armobj.animate_ball_pendulum(B[:,2:])
    print(coeff_name+str(np.mean(np.stack(mech.storeW),axis=0)))
    print(coeff_name+str(np.std(np.stack(mech.storeW),axis=0)))
    print(coeff_name+str(np.mean(np.stack(mech.storeE))/1000))
    print(coeff_name+str(np.std(np.stack(mech.storeE))/1000))
    # fig = plt.figure('frequency vs {0}'.format(coeff_name))
    # plt.subplot(2,1,1)
    # plt.plot(iterable,np.stack(frqstore)[:,0],label='neuron 1')
    # plt.xlabel('{0}'.format(coeff_name))
    # plt.ylabel('frequency of pendulum (Hz)')
    # plt.grid(linestyle='--',linewidth='1')
    # plt.legend()
    # plt.subplot(2,1,2)
    # plt.plot(iterable,np.stack(workstore)[:,0],label='neuron 1')
    # plt.xlabel('{0}'.format(coeff_name))
    # plt.ylabel('work (Nm)')
    # plt.legend()
    # plt.grid(linestyle='--',linewidth='1')
    # plt.tight_layout()
    # plt.show()
# fig.savefig(os.path.join(save_bin,'{0}.jpg'.format(fname)),dpi=200,bbox_inches='tight') | [
"numpy.stack",
"tpp_class_ball.Muscle_Mech",
"tpp_class_ball.Ball",
"matplotlib.pyplot.close",
"os.path.dirname",
"numpy.arange",
"tpp_class_ball.TpPendulum",
"os.path.join"
] | [((269, 294), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (284, 294), False, 'import os\n'), ((306, 339), 'os.path.join', 'os.path.join', (['dirname', '"""save_bin"""'], {}), "(dirname, 'save_bin')\n", (318, 339), False, 'import os\n'), ((371, 387), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (380, 387), True, 'import matplotlib.pyplot as plt\n'), ((420, 436), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (429, 436), True, 'import matplotlib.pyplot as plt\n'), ((1058, 1085), 'numpy.arange', 'np.arange', (['(0)', 'Ttot', '(1 / f_s)'], {}), '(0, Ttot, 1 / f_s)\n', (1067, 1085), True, 'import numpy as np\n'), ((1093, 1120), 'numpy.arange', 'np.arange', (['(0)', '(Ttot * f_s)', '(1)'], {}), '(0, Ttot * f_s, 1)\n', (1102, 1120), True, 'import numpy as np\n'), ((1279, 1342), 'tpp_class_ball.TpPendulum', 'tpp.TpPendulum', (['n', 'initial_cond_tpp', 'coeff_tpp', 'mgain', 't_s', 'f_s'], {}), '(n, initial_cond_tpp, coeff_tpp, mgain, t_s, f_s)\n', (1293, 1342), True, 'import tpp_class_ball as tpp\n'), ((1349, 1381), 'tpp_class_ball.Muscle_Mech', 'tpp.Muscle_Mech', (['mgain', 'f_s', 't_s'], {}), '(mgain, f_s, t_s)\n', (1364, 1381), True, 'import tpp_class_ball as tpp\n'), ((1394, 1435), 'tpp_class_ball.Ball', 'tpp.Ball', (['t_s', 'e', 'mass', 'initial_cond_ball'], {}), '(t_s, e, mass, initial_cond_ball)\n', (1402, 1435), True, 'import tpp_class_ball as tpp\n'), ((4994, 5018), 'numpy.stack', 'np.stack', (['Ballobj.storeB'], {}), '(Ballobj.storeB)\n', (5002, 5018), True, 'import numpy as np\n'), ((2983, 3024), 'tpp_class_ball.Ball', 'tpp.Ball', (['t_s', 'e', 'mass', 'initial_cond_ball'], {}), '(t_s, e, mass, initial_cond_ball)\n', (2991, 3024), True, 'import tpp_class_ball as tpp\n'), ((3043, 3106), 'tpp_class_ball.TpPendulum', 'tpp.TpPendulum', (['n', 'initial_cond_tpp', 'coeff_tpp', 'mgain', 't_s', 'f_s'], {}), '(n, initial_cond_tpp, coeff_tpp, mgain, t_s, f_s)\n', (3057, 3106), True, 
'import tpp_class_ball as tpp\n'), ((5101, 5122), 'numpy.stack', 'np.stack', (['mech.storeW'], {}), '(mech.storeW)\n', (5109, 5122), True, 'import numpy as np\n'), ((5165, 5186), 'numpy.stack', 'np.stack', (['mech.storeW'], {}), '(mech.storeW)\n', (5173, 5186), True, 'import numpy as np\n'), ((5230, 5251), 'numpy.stack', 'np.stack', (['mech.storeE'], {}), '(mech.storeE)\n', (5238, 5251), True, 'import numpy as np\n'), ((5292, 5313), 'numpy.stack', 'np.stack', (['mech.storeE'], {}), '(mech.storeE)\n', (5300, 5313), True, 'import numpy as np\n')] |
import numpy as np
class LinearReressionGD(object):
    """Ordinary least-squares linear regression fitted with batch gradient descent.

    Requirement: numpy.

    Parameters
    ----------
    lr : float
        Learning rate (step size) for each gradient-descent update.
    n_iters : int
        Number of passes over the training data.

    Attributes
    ----------
    w_ : ndarray of shape (1 + n_features,)
        Weights after fitting; ``w_[0]`` is the bias term.
    cost_ : list of float
        Sum-of-squared-errors cost recorded at every iteration.
    """

    def __init__(self, lr, n_iters):
        self.lr = lr
        self.n_iters = n_iters

    def fit(self, X, y):
        """Fit the model to training data and return ``self``.

        X : ndarray of shape (n_samples, n_features)
        y : ndarray of shape (n_samples,)
        """
        self.w_ = np.zeros(1 + X.shape[1])
        self.cost_ = []
        for _ in range(self.n_iters):
            y_hat = self.activation(X)
            error = y - y_hat
            # Batch gradient-descent update rule.
            self.w_[1:] += self.lr * X.T.dot(error)
            self.w_[0] += self.lr * error.sum()
            # BUG FIX: the original appended to the undefined local name
            # ``cost_`` (NameError at runtime); the history lives on the
            # instance as ``self.cost_``.
            self.cost_.append((error ** 2).sum() / 2)
        return self

    def activation(self, X):
        """Return the linear net input ``X @ w + bias``."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        """Predict continuous target values for ``X``."""
        return self.activation(X)
"numpy.dot",
"numpy.zeros"
] | [((197, 221), 'numpy.zeros', 'np.zeros', (['(1 + X.shape[1])'], {}), '(1 + X.shape[1])\n', (205, 221), True, 'import numpy as np\n'), ((536, 558), 'numpy.dot', 'np.dot', (['X', 'self.w_[1:]'], {}), '(X, self.w_[1:])\n', (542, 558), True, 'import numpy as np\n')] |
#------------------------------------------------------------------------------
# Libraries
#------------------------------------------------------------------------------
# Standard
import numpy as np
import pandas as pd
import more_itertools
# User
from base.base_estimator import BaseCateEstimator
from randomized_experiments import frequentist_inference
from utils.exceptions import CateError
#------------------------------------------------------------------------------
# Treatment Effect Estimator
#------------------------------------------------------------------------------
class TreatmentEffectEstimator(BaseCateEstimator):
    """
    Average-treatment-effect estimator based on permutation (randomization)
    inference: the observed statistic is compared against its distribution
    over all distinct reshufflings of the treatment assignment.
    """
    # --------------------
    # Constructor function
    # --------------------
    def __init__(self,
                 verbose=False
                 ):
        # Initialize inputs
        self.verbose = verbose

    # --------------------
    # Class variables
    # --------------------

    # --------------------
    # Private functions
    # --------------------

    # --------------------
    # Public functions
    # --------------------
    def fit(self,Y,W):
        """
        Estimate tau_hat on the observed assignment and build the permutation
        distribution ``self.tau_w`` over all distinct permutations of W.

        Y: outcomes; W: treatment indicator, one entry per unit.
        Raises for more than 50 units since the number of distinct
        permutations grows combinatorially.
        """
        # Preprocess data
        super().preprocess_data(Y=Y,W=W)
        if len(W)>50:
            raise Exception(
                f"""
                Randomization inference considers all possible treatment assignments.
                If we have more than 50-100 units, this will be computationally heavy.
                Current number of units: {len(W)}
                """)
        # Instantiate
        ate_estimator = frequentist_inference.TreatmentEffectEstimator(use_regression=False)
        # Compute tau hat
        ate_estimator.fit(Y=Y,W=W)
        self.tau_hat = ate_estimator.calculate_average_treatment_effect()
        # Recompute the ATE statistic for every distinct permutation of W.
        self.tau_w = []
        for W_aug in more_itertools.distinct_permutations(np.array(W)):
            # Convert to series
            W_aug = pd.Series(W_aug)
            difference_estimator = frequentist_inference.TreatmentEffectEstimator(use_regression=False)
            difference_estimator.fit(Y=Y,W=W_aug)
            self.tau_w.append(difference_estimator.calculate_average_treatment_effect()['ate'])
        return self

    def calculate_heterogeneous_treatment_effect(self):
        # This estimator only targets the average effect; CATE is unsupported.
        raise CateError

    def calculate_average_treatment_effect(self):
        """
        Return the ATE dictionary with a two-sided permutation p-value: the
        share of permuted statistics at least as extreme (in absolute value)
        as the observed one. ``se`` is None -- no analytic standard error here.
        """
        tau_abs = abs(self.tau_hat['ate'])
        tau_pvalue = np.mean([abs(tau_hat) >= tau_abs for tau_hat in self.tau_w])
        tau_obj = {
            "ate":self.tau_hat['ate'],
            "se":None,
            "p_value":tau_pvalue
        }
        tau_obj = {**self.tau_hat,**tau_obj}
        return tau_obj
| [
"randomized_experiments.frequentist_inference.TreatmentEffectEstimator",
"numpy.array",
"pandas.Series"
] | [((1566, 1634), 'randomized_experiments.frequentist_inference.TreatmentEffectEstimator', 'frequentist_inference.TreatmentEffectEstimator', ([], {'use_regression': '(False)'}), '(use_regression=False)\n', (1612, 1634), False, 'from randomized_experiments import frequentist_inference\n'), ((1870, 1881), 'numpy.array', 'np.array', (['W'], {}), '(W)\n', (1878, 1881), True, 'import numpy as np\n'), ((1936, 1952), 'pandas.Series', 'pd.Series', (['W_aug'], {}), '(W_aug)\n', (1945, 1952), True, 'import pandas as pd\n'), ((2001, 2069), 'randomized_experiments.frequentist_inference.TreatmentEffectEstimator', 'frequentist_inference.TreatmentEffectEstimator', ([], {'use_regression': '(False)'}), '(use_regression=False)\n', (2047, 2069), False, 'from randomized_experiments import frequentist_inference\n')] |
"""
Created on July 2020
Demo for traning a 2D FBSEM net
@author: <NAME>
<EMAIL>
"""
import numpy as np
from matplotlib import pyplot as plt
from geometry.BuildGeometry_v4 import BuildGeometry_v4
from models.deeplib import buildBrainPhantomDataset
# build PET recontruction object
temPath = r'C:\pythonWorkSpace\tmp003'
# Build the PET reconstruction object: 'mmr' scanner, radial crop factor 50%.
PET = BuildGeometry_v4('mmr',0.5) #scanner mmr, with radial crop factor of 50%
PET.loadSystemMatrix(temPath,is3d=False)
# Print some info of the PET object.
print('is3d:',PET.is3d)
print('\nscanner info:', PET.scanner.as_dict())
print('\nimage info:',PET.image.as_dict())
print('\nsinogram info:',PET.sinogram.as_dict())
# This will take hours (5 phantoms, 5 random rotations each, lesion & sinogram
# simulation, 3 different recons, ...). See 'buildBrainPhantomDataset' for the
# default values: count level, psf, no. lesions, lesion size, no. rotations,
# rotation range, ...
# LD/ld stands for low-definition low-dose, HD/hd for high-definition high-dose.
phanPath = r'C:\phantoms\brainWeb'
save_training_dir = r'C:\MoDL\trainingDatasets\brainweb\2D'
phanType ='brainweb'
phanNumber = np.arange(0,5,1) # use first 5 brainweb phantoms out of 20
buildBrainPhantomDataset(PET, save_training_dir, phanPath, phanType =phanType, phanNumber = phanNumber,is3d = False, num_rand_rotations=5)
# Check out the structure of the produced datasets, e.g. data-0.npy.
d = np.load(save_training_dir+ '\\' + 'data-0.npy',allow_pickle=True).item()
d.keys()
# Show the MR image and the different PET reconstructions of the first sample.
fig, ax = plt.subplots(1,4,figsize=(20,10))
ax[0].imshow(d['mrImg'],cmap='gist_gray'),ax[0].set_title('mrImg',fontsize=20)
ax[1].imshow(d['imgHD'],cmap='gist_gray_r'),ax[1].set_title('imgHD',fontsize=20)
ax[2].imshow(d['imgLD'],cmap='gist_gray_r'),ax[2].set_title('imgLD',fontsize=20)
ax[3].imshow(d['imgLD_psf'],cmap='gist_gray_r'),ax[3].set_title('imgLD_psf',fontsize=20)
# Low-dose sinogram and attenuation*normalization factors.
fig, ax = plt.subplots(1,2,figsize=(20,10))
ax[0].imshow(d['sinoLD']),ax[0].set_title('sinoLD',fontsize=20)
ax[1].imshow(d['AN']),ax[1].set_title('Atten. factors * Norm. Factors (AN)',fontsize=20)
| [
"numpy.load",
"geometry.BuildGeometry_v4.BuildGeometry_v4",
"numpy.arange",
"matplotlib.pyplot.subplots",
"models.deeplib.buildBrainPhantomDataset"
] | [((335, 363), 'geometry.BuildGeometry_v4.BuildGeometry_v4', 'BuildGeometry_v4', (['"""mmr"""', '(0.5)'], {}), "('mmr', 0.5)\n", (351, 363), False, 'from geometry.BuildGeometry_v4 import BuildGeometry_v4\n'), ((1114, 1132), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(1)'], {}), '(0, 5, 1)\n', (1123, 1132), True, 'import numpy as np\n'), ((1174, 1313), 'models.deeplib.buildBrainPhantomDataset', 'buildBrainPhantomDataset', (['PET', 'save_training_dir', 'phanPath'], {'phanType': 'phanType', 'phanNumber': 'phanNumber', 'is3d': '(False)', 'num_rand_rotations': '(5)'}), '(PET, save_training_dir, phanPath, phanType=\n phanType, phanNumber=phanNumber, is3d=False, num_rand_rotations=5)\n', (1198, 1313), False, 'from models.deeplib import buildBrainPhantomDataset\n'), ((1482, 1518), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(4)'], {'figsize': '(20, 10)'}), '(1, 4, figsize=(20, 10))\n', (1494, 1518), True, 'from matplotlib import pyplot as plt\n'), ((1858, 1894), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(20, 10)'}), '(1, 2, figsize=(20, 10))\n', (1870, 1894), True, 'from matplotlib import pyplot as plt\n'), ((1388, 1455), 'numpy.load', 'np.load', (["(save_training_dir + '\\\\' + 'data-0.npy')"], {'allow_pickle': '(True)'}), "(save_training_dir + '\\\\' + 'data-0.npy', allow_pickle=True)\n", (1395, 1455), True, 'import numpy as np\n')] |
import numpy as np
from glmsingle.ols.make_poly_matrix import (make_polynomial_matrix,
make_projection_matrix)
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
def construct_projection_matrix(ntimepoints,
                                extra_regressors=None,
                                poly_degs=None):
    """Build a projection matrix that removes nuisance signal from a run.

    A polynomial drift basis with degrees ``poly_degs`` (default 0..4) is
    constructed for ``ntimepoints`` samples; any ``extra_regressors`` columns
    are appended to that basis. The returned matrix projects onto the
    orthogonal complement of the combined nuisance basis.

    Arguments:
        ntimepoints {int} -- number of time points (rows) in the run

    Keyword Arguments:
        extra_regressors {ndarray or None} -- optional extra nuisance
            columns to regress out alongside the polynomials (default: {None})
        poly_degs {array_like or None} -- polynomial degrees to include
            (default: {np.arange(5)})

    Returns:
        ndarray -- the projection matrix
    """
    degs = np.arange(5) if poly_degs is None else poly_degs
    nuisance = make_polynomial_matrix(ntimepoints, degs)
    if extra_regressors is not None and extra_regressors.size > 0:
        nuisance = np.c_[nuisance, extra_regressors]
    return make_projection_matrix(nuisance)
def whiten_data(data, design, extra_regressors=False, poly_degs=None):
    """Project polynomial drift (and optional extra regressors) out of each run.

    Arguments:
        data {sequence of ndarray} -- one data array per run; presumably
            time x voxels -- confirm with the caller
        design {sequence of ndarray} -- one design matrix per run, same
            length as ``data``; presumably time x conditions
    Keyword Arguments:
        extra_regressors {sequence of ndarray or False} -- optional per-run
            extra nuisance regressors; ``extra_regressors[i]`` is appended to
            run i's polynomial basis when non-empty (default: {False})
        poly_degs {array_like or None} -- polynomial degrees for the drift
            basis (default: {np.arange(5)})
    Returns:
        (list, list) -- whitened data arrays and whitened design matrices,
            one entry per run
    """
    if poly_degs is None:
        poly_degs = np.arange(5)

    # whiten data
    whitened_data = []
    whitened_design = []
    for i, (y, X) in enumerate(zip(data, design)):
        # Per-run nuisance basis: polynomials plus extra regressors if given.
        polynomials = make_polynomial_matrix(X.shape[0], poly_degs)
        if extra_regressors:
            if extra_regressors[i].any():
                polynomials = np.c_[polynomials, extra_regressors[i]]

        # Residual-forming matrix; applied to both the design and the data.
        project_matrix = make_projection_matrix(polynomials)

        whitened_design.append(project_matrix @ X)
        whitened_data.append(project_matrix @ y)

    return whitened_data, whitened_design
| [
"numpy.arange",
"glmsingle.ols.make_poly_matrix.make_projection_matrix",
"warnings.simplefilter",
"glmsingle.ols.make_poly_matrix.make_polynomial_matrix"
] | [((172, 234), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (193, 234), False, 'import warnings\n'), ((793, 839), 'glmsingle.ols.make_poly_matrix.make_polynomial_matrix', 'make_polynomial_matrix', (['ntimepoints', 'poly_degs'], {}), '(ntimepoints, poly_degs)\n', (815, 839), False, 'from glmsingle.ols.make_poly_matrix import make_polynomial_matrix, make_projection_matrix\n'), ((979, 1014), 'glmsingle.ols.make_poly_matrix.make_projection_matrix', 'make_projection_matrix', (['polynomials'], {}), '(polynomials)\n', (1001, 1014), False, 'from glmsingle.ols.make_poly_matrix import make_polynomial_matrix, make_projection_matrix\n'), ((761, 773), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (770, 773), True, 'import numpy as np\n'), ((1463, 1475), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (1472, 1475), True, 'import numpy as np\n'), ((1617, 1662), 'glmsingle.ols.make_poly_matrix.make_polynomial_matrix', 'make_polynomial_matrix', (['X.shape[0]', 'poly_degs'], {}), '(X.shape[0], poly_degs)\n', (1639, 1662), False, 'from glmsingle.ols.make_poly_matrix import make_polynomial_matrix, make_projection_matrix\n'), ((1830, 1865), 'glmsingle.ols.make_poly_matrix.make_projection_matrix', 'make_projection_matrix', (['polynomials'], {}), '(polynomials)\n', (1852, 1865), False, 'from glmsingle.ols.make_poly_matrix import make_polynomial_matrix, make_projection_matrix\n')] |
from __future__ import division
import numpy as np
def projectSimplex(v):
    """Project ``v`` onto the probability simplex (minimum L2-distance point).

    Uses the classic sort-based algorithm: sort descending, find the largest
    support size ``row`` whose shifted entries stay positive, shift by the
    corresponding threshold ``theta`` and clip at zero.

    Parameters:
        v: array_like of shape (n,) -- any real values; plain lists are
           accepted as well (generalization via asarray).

    Returns:
        ndarray of shape (n,) -- non-negative entries summing to 1.
    """
    v = np.asarray(v, dtype=float)
    nVars = len(v)
    # Sort in descending order.
    mu = np.sort(v)[::-1]
    sm = 0
    # BUG FIX: the original used Python-2 ``xrange``, which raises NameError
    # on Python 3; ``range`` behaves identically here.
    for j in range(nVars):
        sm = sm + mu[j]
        # The condition always holds at j == 0, so ``row`` is always defined.
        if mu[j] - (1 / (j + 1)) * (sm - 1) > 0:
            row = j + 1
            sm_row = sm

    theta = (1 / row) * (sm_row - 1)
    w = v - theta
    w[w < 0] = 0
    return w
| [
"numpy.sort"
] | [((190, 200), 'numpy.sort', 'np.sort', (['v'], {}), '(v)\n', (197, 200), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
TRABAJO 2
Nombre Estudiante: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
# Fijamos la semilla
np.random.seed(1)
def simula_unif(N, dim, rango):
    """Draw an (N, dim) sample from the uniform distribution on [rango[0], rango[1]]."""
    low, high = rango[0], rango[1]
    return np.random.uniform(low, high, size=(N, dim))
def simula_gaus(N, dim, sigma):
    """Draw an (N, dim) zero-mean Gaussian sample.

    Column k of every row is drawn with standard deviation sqrt(sigma[k]),
    i.e. ``sigma`` supplies one variance per dimension. Rows are generated
    one at a time (the RNG call sequence matches the original implementation).
    """
    out = np.zeros((N, dim), np.float64)
    for row in range(N):
        # One draw per row; scale = per-axis standard deviation.
        out[row, :] = np.random.normal(loc=0, scale=np.sqrt(sigma), size=dim)
    return out
def simula_recta(intervalo):
    """Sample two random points in the square [intervalo[0], intervalo[1]]^2
    and return the slope ``a`` and intercept ``b`` of the line y = a*x + b
    passing through them."""
    (x1, y1), (x2, y2) = np.random.uniform(intervalo[0], intervalo[1], size=(2, 2))
    a = (y2 - y1) / (x2 - x1)  # slope
    b = y1 - a * x1            # intercept
    return a, b
##############################################
# Mis funciones auxiliares
############################################
def stop(apartado_siguiente=None):
    '''
    Block until the user presses a key; if ``apartado_siguiente`` is a
    non-empty section name, print it afterwards as the next header.
    '''
    input("\n--- Pulsar tecla para continuar ---\n")
    if apartado_siguiente:
        print(apartado_siguiente)
def scatter_plot(x, plot_title):
    '''Show the 2-D sample ``x`` (rows are points) as a blue scatter plot
    titled ``plot_title``.
    '''
    plt.clf()
    xs, ys = x[:, 0], x[:, 1]
    plt.scatter(xs, ys, c='b')
    plt.title(plot_title)
    plt.show()
### función visualizar datos
def classified_scatter_plot_simple(x,y, plot_title,etiqueta_x, etiqueta_y, labels, colors):
    '''
    Scatter-plot the 2-D sample x coloured by its labels.

    y: per-point labels (parallel to the rows of x)
    labels: the distinct label values to draw (each becomes a legend entry)
    colors: mapping from label value to matplotlib colour
    etiqueta_x / etiqueta_y: axis captions
    Everything is drawn in a single figure.
    '''
    plt.clf()
    for l in labels:
        # Rows of x whose label equals l, drawn as one coloured series.
        index = [i for i,v in enumerate(y) if v == l]
        plt.scatter(x[index, 0], x[index, 1], c = colors[l], label = str(l))
    # axis captions and title
    plt.xlabel(etiqueta_x)
    plt.ylabel(etiqueta_y)
    plt.title(plot_title)
    plt.legend()
    plt.show()
def classified_scatter_plot(x,y, function, plot_title, labels, colors):
    """
    Scatter-plot the labelled points of ``x`` plus the zero level-set of
    ``function`` (drawn in red) on the same figure.

    y: label of each row of x
    labels: label values to draw (legend entries)
    colors: dict mapping each label to a matplotlib colour
    """
    plt.clf()
    for etiqueta in labels:
        seleccion = [idx for idx, valor in enumerate(y) if valor == etiqueta]
        plt.scatter(x[seleccion, 0], x[seleccion, 1], c=colors[etiqueta], label=str(etiqueta))
    # axis limits taken from the data itself
    xmin, xmax = np.min(x[:, 0]), np.max(x[:, 0])
    ymin, ymax = np.min(x[:, 1]), np.max(x[:, 1])
    # evaluate the function on a 100x100 grid and draw its zero contour
    spacex = np.linspace(xmin, xmax, 100)
    spacey = np.linspace(ymin, ymax, 100)
    z = [[function(px, py) for px in spacex] for py in spacey]
    plt.contour(spacex, spacey, z, 0, colors=['red'], linewidths=2)
    plt.title(plot_title)
    plt.legend()
    plt.show()
############################################
# EJERCICIO 1.1: draw the point cloud produced by each sampler.
stop('Apartado 1.1.a')
N = 50
dimension = 2
rango_uniforme = [-50, 50]
# Uniform cloud
x = simula_unif(N, dimension, rango_uniforme)
scatter_plot(x, plot_title = f'Nube de puntos uniforme (N = {N}, rango = {rango_uniforme})')
stop('Apartado 1.1.b')
# Gaussian cloud with per-axis variances 5 and 7
sigma_gauss = [5,7]
x = simula_gaus(N, dimension,sigma_gauss)
scatter_plot(x, plot_title = f'Nube de puntos Gausiana (N = {N}, sigma = {sigma_gauss}) ')
# EJERCICIO 1.2: draw the point cloud with the labelling induced by a line.
stop('Apartado 1.2.a')
# np.sign(0) returns 0, which would break the +-1 labelling, hence signo().
def signo(x):
    # Sign function that maps 0 to +1 (never returns 0).
    if x >= 0:
        return 1
    return -1
def f(x, y, a, b):
    # Label of point (x, y) with respect to the line y = a*x + b.
    return signo(y - a*x - b)
#1.2 a)
stop('Apartado 1.2.a')
# NOTE(review): signo and f are re-defined below, byte-identical to the
# versions above; the duplicates are harmless but one copy could be removed.
def signo(x):
    if x >= 0:
        return 1
    return -1
def f(x, y, a, b):
    return signo(y - a*x - b)
def f_sin_signo (x,y,a,b):
    # Signed value of (x, y) w.r.t. the line y = a*x + b (no sign step).
    return y - a*x - b
# problem data
rango = [-50, 50]
N = 100
dimension = 2
x = simula_unif(N, dimension,rango)
a,b=simula_recta(rango)
print("Los coeficientes a y b: ", a, b)
# label every sample with the side of the line it falls on
y = [ f(v[0],v[1],a,b) for v in x ]
# plotting data
labels = [-1,1]
colors = {-1: 'royalblue', 1: 'limegreen'}
classified_scatter_plot(x,y,
                        lambda x,y: f_sin_signo(x,y,a,b),
                        f'Apartado 2.a sin ruido',
                        labels, colors)
stop('Apartado 1.2.b')
def plot_datos_cuad(X, y, fz, title='Point cloud plot', xaxis='x axis', yaxis='y axis'):
    # Scatter the labelled points X/y on top of a filled contour of fz
    # (clipped to [-1, 1]) and draw the fz == 0 frontier in black.
    # Prepare data: bounding box of the sample plus a 1% margin.
    min_xy = X.min(axis=0)
    max_xy = X.max(axis=0)
    border_xy = (max_xy-min_xy)*0.01
    # Generate the prediction grid; fz receives bias-extended rows [x, y, 1].
    xx, yy = np.mgrid[min_xy[0]-border_xy[0]:max_xy[0]+border_xy[0]+0.001:border_xy[0],
                      min_xy[1]-border_xy[1]:max_xy[1]+border_xy[1]+0.001:border_xy[1]]
    grid = np.c_[xx.ravel(), yy.ravel(), np.ones_like(xx).ravel()]
    pred_y = fz(grid)
    # pred_y[(pred_y>-1) & (pred_y<1)]
    pred_y = np.clip(pred_y, -1, 1).reshape(xx.shape)
    # Plot
    f, ax = plt.subplots(figsize=(8, 6))
    contour = ax.contourf(xx, yy, pred_y, 50, cmap='RdBu',vmin=-1, vmax=1)
    ax_c = f.colorbar(contour)
    ax_c.set_label('$f(x, y)$')
    ax_c.set_ticks([-1, -0.75, -0.5, -0.25, 0, 0.25, 0.5, 0.75, 1])
    ax.scatter(X[:, 0], X[:, 1], c=y, s=50, linewidth=2,
               cmap="RdYlBu", edgecolor='white')
    # Black curve: decision frontier fz == 0 evaluated on a square mesh.
    XX, YY = np.meshgrid(np.linspace(round(min(min_xy)), round(max(max_xy)),X.shape[0]),np.linspace(round(min(min_xy)), round(max(max_xy)),X.shape[0]))
    positions = np.vstack([XX.ravel(), YY.ravel()])
    ax.contour(XX,YY,fz(positions.T).reshape(X.shape[0],X.shape[0]),[0], colors='black')
    ax.set(
        xlim=(min_xy[0]-border_xy[0], max_xy[0]+border_xy[0]),
        ylim=(min_xy[1]-border_xy[1], max_xy[1]+border_xy[1]),
        xlabel=xaxis, ylabel=yaxis)
    plt.title(title)
    plt.show()
def analisis_clasificado(y_obtenida, y_target):
    '''
    Print a per-class breakdown of the classification errors and return
    the accuracy as a percentage. Labels are assumed to be +1 / -1.
    '''
    # (target - obtained)/2 is +1 for a missed positive, -1 for a missed negative
    diferencias = (y_target - y_obtenida) / 2
    positivos_fallados = sum(d for d in diferencias if d > 0)
    negativos_fallados = abs(sum(d for d in diferencias if d < 0))
    numero_positivos = sum(v for v in y_target if v > 0)
    numero_negativos = abs(sum(v for v in y_target if v < 0))
    porcentaje_positivos_fallados = positivos_fallados / numero_positivos * 100
    porcentaje_negativos_fallados = negativos_fallados / numero_negativos * 100
    total_fallados = positivos_fallados + negativos_fallados
    numero_total = numero_positivos + numero_negativos
    porcentaje_fallado_total = total_fallados / numero_total * 100
    precision = 100 - porcentaje_fallado_total
    print('Resultado clasificación: ')
    print(f'\t Positivos fallados {positivos_fallados}',
          f' de {numero_positivos},',
          f'lo que hace un porcentaje de {porcentaje_positivos_fallados}'
    )
    print(f'\t Negativos fallados {negativos_fallados}',
          f' de {numero_negativos},',
          f'lo que hace un porcentaje de {porcentaje_negativos_fallados}'
    )
    print(f'\t Total fallados {total_fallados}',
          f' de {numero_total},',
          f'lo que hace un porcentaje de {porcentaje_fallado_total}'
    )
    print(f'\t La precisión es ',
          f' de {100 - porcentaje_fallado_total} %'
    )
    return precision
def getPrecision(y_obtenida, y_target):
    '''
    Return the accuracy (%) of ``y_obtenida`` against ``y_target``.

    Labels are assumed to be +1 / -1, hence the division by two: each
    mismatch contributes exactly one unit.
    '''
    desajustes = np.abs((y_target - y_obtenida) / 2)
    total = len(y_target)
    return (1 - np.sum(desajustes) / total) * 100
def noisyVector(y, percent_noisy_data, labels):
    '''
    Return a copy of ``y`` where, for every label in ``labels``,
    ``percent_noisy_data`` percent of its entries (chosen at random)
    have their sign flipped. ``percent_noisy_data`` must be <= 100.
    '''
    ruidosa = np.copy(y)
    for etiqueta in labels:
        candidatos = [pos for pos, valor in enumerate(y) if valor == etiqueta]
        np.random.shuffle(candidatos)
        # how many entries of this label get flipped
        cuantos = round(len(candidatos) * percent_noisy_data / 100)
        for pos in candidatos[:cuantos]:
            ruidosa[pos] *= -1
    return ruidosa
porcentaje_ruido = 10 # percent
# flip 10% of each class and re-plot the (now imperfect) linear frontier
noisy_y = noisyVector(y, porcentaje_ruido, labels)
precision = analisis_clasificado(noisy_y, y)
classified_scatter_plot(x,noisy_y,
                        lambda x,y: f_sin_signo(x,y,a,b),
                        f'Apartado 2.b $f$ ruidosa, con precisión del {precision}%',
                        labels, colors)
stop()
plot_datos_cuad(x,noisy_y,
                lambda x: np.array([ f_sin_signo(v[0],v[1],a,b) for v in x]),
                title=f'Apartado 2.b $f$ ruidosa, con precisión del {precision}%',
                xaxis='x axis', yaxis='y axis')
stop('Apartado 2.c')
# classify the noisy labels with four non-linear frontiers
funciones = [
    lambda x,y: (x -10)** 2 + (y-20)** 2 - 400,
    lambda x,y: 0.5*(x +10)** 2 + (y-20)** 2 - 400,
    lambda x,y: 0.5*(x -10)** 2 - (y+20)** 2 - 400,
    lambda x,y: y - 20* x**2 - 5 * x + 3
]
funciones_en_latex = [
    '$f(x,y) = (x-10)^2 + (y -20) ^2 -400$',
    '$f(x,y) = 0.5(x +10) ^ 2 + (y-20)^ 2 - 400$',
    '$f(x,y) = 0.5(x -10)^ 2 - (y-20)^ 2 - 400$',
    '$f(x,y) = y - 20 x^2 - 5 x + 3$'
]
for i in range(len(funciones)):
    print(f'\nPara {funciones_en_latex[i]}')
    y_ajustado = np.array([ signo( funciones[i](v[0], v[1])) for v in x])
    precision = analisis_clasificado(y_ajustado, noisy_y)
    # plots
    classified_scatter_plot(x,noisy_y,
                            funciones[i],
                            'Clasificación para '+funciones_en_latex[i] + f' Precisión del {precision}%',
                            labels, colors)
    stop()
    plot_datos_cuad(x,y_ajustado,
                    lambda x: np.array([signo( funciones[i](v[0], v[1])) for v in x]),
                    title='Clasificación para '+funciones_en_latex[i] + f' Precisión del {precision}%',
                    xaxis='x axis', yaxis='y axis')
    stop()
###############################################################################
###############################################################################
###############################################################################
stop('EJERCICIO 2.1 ALGORITMO PERCEPTRON')
# EJERCICIO 2.1: PERCEPTRON ALGORITHM
def ajusta_PLA(datos, labels, max_iter, vector_inicial):
    '''
    Perceptron Learning Algorithm.

    INPUT
      datos: matrix with one (bias-extended) sample per row
      labels: vector of +-1 labels
      max_iter: maximum number of passes over the data
      vector_inicial: initial weight (row) vector
    OUTPUT
      w: fitted weight vector
      paso: passes over the data performed before returning
    '''
    w = np.copy(vector_inicial).T
    paso = 0
    cambiado = True  # True while the last pass modified w
    while cambiado and paso < max_iter:
        cambiado = False
        paso += 1
        for muestra, etiqueta in zip(datos, labels):
            if signo(muestra.dot(w)) != etiqueta:
                cambiado = True
                w = w + etiqueta * muestra.T
    return w, paso
## Quick formatting helpers
def to_round(x):
    """Round ``x`` to three decimals, returned as a plain float."""
    return float('%.3f' % round(x, 3))
def to_round_list(l):
    """Return ``l`` with every element rounded to three decimals."""
    return [to_round(v) for v in l]
def H(w):
    """Return the affine map (x, y) -> w[0] + x*w[1] + y*w[2]."""
    return lambda x, y: w[0] + x * w[1] + y * w[2]
print('Ejercicio 2.a.1')
# problem data
rango = [-50, 50]
N = 100
dimension = 2
# prepend the bias column of ones
x_redimensionada = np.c_[np.ones(len(x)),x]
print('Apartado 2.a.1 a) vector inicial nulo')
w_inicial = np.array([0.0, 0.0, 0.0])
w_final, pasos = ajusta_PLA(x_redimensionada, y,
                            max_iter=100, vector_inicial= w_inicial)
print(f'Tras ajustar el vector final es { w_final} tras {pasos} pasos' )
# compare against the generating line
print(f'Los coeficientes recta originaria y = ax +b son: a = {a}, b = {b}')
print(f'Mientras que los nuestros son a ={-w_final[1]/w_final[2]}, b = {-w_final[0]/w_final[2]}')
stop()
# check visually that the fit is good
h = H( w_final)
classified_scatter_plot(x,y,
                        h,
                        '2.a.1 Ajuste perceptrón, vector nulo',
                        labels,
                        colors)
stop()
plot_datos_cuad(x,y,
                lambda x: np.array([signo( h(v[0], v[1])) for v in x]),
                title='2.a.1 Ajuste perceptrón, vector nulo',
                xaxis='x axis', yaxis='y axis')
stop()
veces_experimento = 10
stop(f'Apartado 2.a.1 b) Con vectores aleatorios en [0,1] {veces_experimento}')
sucesion_pasos = []
sucesion_wf = []
sucesion_w0 = []
print('numero_pasos | \t w_0 \t |\t w_f')
# repeat the fit from random initial vectors in [0,1]^3
for i in range(veces_experimento):
    w_0 = simula_unif(3, 1, [0, 1]).reshape(1, -1)[0]
    w_f, numero_pasos = ajusta_PLA(x_redimensionada, y,
                                   max_iter=500, vector_inicial= w_0)
    sucesion_pasos.append(numero_pasos)
    sucesion_wf.append(w_f)
    sucesion_w0.append(w_0)
    print(numero_pasos, ' | ',
          to_round_list(w_0), ' |',
          to_round_list(w_f))
stop()
## print summary of the results
print('El número de pasos necesario en cada iteración es de:\n', sucesion_pasos)
media = np.mean(sucesion_pasos)
desviacion_tipica = np.std(sucesion_pasos)
print(f'Tiene una media de {media} y desviación típica {desviacion_tipica}')
stop(f'Apartado 2.a.2 ) Repetición experimento con valores de 2b')
# same experiment on the noisy labels: PLA no longer converges,
# so we cap the number of passes
iteraciones_maximas = [100, 200, 300]
for j in iteraciones_maximas:
    print(f'\nPara max_iter = {j}: ')
    sucesion_pasos = []
    sucesion_wf = []
    print('numero_pasos | \t w_0 \t |\t w_f | \t Precisión (%) ')
    print(' --- | --- |--- | --- ')
    for i in range(veces_experimento):
        w_0 = sucesion_w0[i]
        w_f, numero_pasos = ajusta_PLA(x_redimensionada, noisy_y,
                                       max_iter= j,
                                       vector_inicial= w_0)
        y_obtenida = [signo((H(w_f))(v[0],v[1])) for v in x]
        sucesion_pasos.append(numero_pasos)
        sucesion_wf.append(w_f)
        print(numero_pasos, ' | ',
              to_round_list(w_0), ' |',
              to_round_list(w_f), '|',
              to_round(getPrecision(y_obtenida, noisy_y)),
              ' '
              )
stop()
stop('\nEjercicio 1.2.b _____ Regresión logística ______\n')
def funcionLogistica(x, w):
    """
    Numerically stable logistic (sigmoid) function evaluated at s = w·xᵀ.

    Parameters
    ----------
    x : sample vector (or matrix of samples, one per row)
    w : weight vector

    Returns
    -------
    sigmoid(w.dot(x.T)). Mathematically exp(s)/(1+exp(s)); computed here so
    that large |s| cannot overflow np.exp — the original exp(s)/(1+exp(s))
    form returned nan (inf/inf) for s >~ 710.
    """
    s = w.dot(x.T)
    z = np.exp(-np.abs(s))  # always in (0, 1], never overflows
    # s >= 0: 1/(1+exp(-s));  s < 0: exp(s)/(1+exp(s)) — both equal sigmoid(s)
    return np.where(s >= 0, 1.0 / (1.0 + z), z / (1.0 + z))
def gradienteLogistica(x,y,w):
    '''
    Gradient of the logistic loss for a single sample (the N = 1 case):
    -(y * x) / (1 + exp(y * w·xᵀ)).
    '''
    denominador = 1 + np.exp(y * w.dot(x.T))
    return -(y * x) / denominador
def errorRegresionLogistica(x,y,w):
    """
    Mean logistic loss: (1/N) * sum_i log(1 + exp(-y_i * w·x_i)).

    Vectorized with np.logaddexp(0, -m) = log(1 + exp(-m)) for numerical
    stability: the original per-element np.log(1 + np.exp(-m)) overflowed
    to inf for margins m <~ -710. Same value as the original loop for
    ordinary inputs.
    """
    margenes = y * x.dot(w.T)  # y_i * <w, x_i> for every sample
    return np.mean(np.logaddexp(0.0, -margenes))
def regresionLogistica(x, y, tasa_aprendizaje, w_inicial, max_iteraciones, tolerancia):
    '''
    Logistic Regression (LR) fitted with Stochastic Gradient Descent (SGD).

    Parameters
    ----------
    x : sample matrix, one (bias-extended) sample per row
    y : vector of +-1 labels
    tasa_aprendizaje : learning rate for every update
    w_inicial : initial weight vector (copied, not modified)
    max_iteraciones : maximum number of epochs (np.inf allowed)
    tolerancia : stop when the distance between the weights of two
                 consecutive epochs falls to this value or below

    Returns
    -------
    (w_final, iteraciones): fitted weights and epochs consumed.
    '''
    w_final = np.copy(w_inicial)
    w_nueva = np.copy(w_inicial)
    iteraciones = 0
    tolerancia_actual = np.inf  # forces at least one epoch
    n = len(y)
    indices = np.arange(n)
    while (iteraciones < max_iteraciones and
           tolerancia_actual > tolerancia):
        # new random visiting order each epoch
        indices = np.random.permutation(indices)
        for i in indices:
            # batch size one: update after every single sample
            w_nueva = w_nueva - tasa_aprendizaje * gradienteLogistica(x[i],y[i],w_nueva)
        # distance between consecutive epoch weights decides convergence
        tolerancia_actual = np.linalg.norm(w_nueva - w_final)
        w_final = np.copy(w_nueva)
        iteraciones += 1
    return w_final , iteraciones
### Experiment 2b
# initial data
tamano_muestra_2b = 100
intervalo_2b = [0.0, 2.0]
x_2b = simula_unif( tamano_muestra_2b, 2, intervalo_2b )
x_2b_redimensionado = np.c_[np.ones(tamano_muestra_2b), x_2b]
### pick the indices of two random samples to define the frontier line
puntos_recta = np.random.choice(tamano_muestra_2b, 2, replace = False)
# coordinates of those random points
x_1 = x_2b [puntos_recta[0]][0]
y_1 = x_2b [puntos_recta[0]][1]
x_2 = x_2b [puntos_recta[1]][0]
y_2 = x_2b [puntos_recta[1]][1]
# line through the two points
pendiente = (y_2 - y_1) / (x_2 - x_1) # m
ordenada_origen = y_2 - pendiente * x_2 # a
# point-slope equation # y = mx + a
recta = lambda x,y : y - (x*pendiente + ordenada_origen)
print(f'La recta que hace de frontera es de la forma y = {pendiente}x + {ordenada_origen}')
# with the line fixed, label the sample according to the side it falls on
y_2b = [np.sign( recta(v[0], v[1])) for v in x_2b]
## show the result
classified_scatter_plot(x_2b,
                        y_2b,
                        recta,
                        'Nube puntos experimento 2b',
                        labels, colors)
stop ('Vamos a determinar la w para este caso')
w_2b, epocas = regresionLogistica(x_2b_redimensionado,
                                  y_2b,
                                  tasa_aprendizaje = 0.01,
                                  w_inicial =np.array([0.0 for i in range(3)]),
                                  max_iteraciones = np.inf, # separable data, so it converges
                                  tolerancia = 0.01)
print(f'El vecto al que converge es {w_2b}, tras {epocas} epocas error del 0.01')
print(f'Recta encontrada de pendiente {-w_2b[1]/w_2b[2]}, ordenada en el origen {-w_2b[0]/w_2b[2]}')
# in-sample error of the fit
E_in = errorRegresionLogistica(x_2b_redimensionado,y_2b,w_2b)
print(f'E_in = {E_in}')
# out-of-sample estimate on a fresh test sample of 1000 points
n_test = 1000
x_test = simula_unif( n_test, 2, intervalo_2b )
y_test = np.array([np.sign(recta(v[0], v[1])) for v in x_test])
x_test_redimensionado = np.c_[np.ones(n_test), x_test]
E_out = errorRegresionLogistica(x_test_redimensionado,y_test,w_2b)
print(f'E_out = {E_out}')
h = lambda x,y :np.sign(np.array([1,x,y]).dot(w_2b))
y_test_obtenida = [h(v[0], v[1]) for v in x_test]
analisis_clasificado(y_test_obtenida, y_test)
stop()
classified_scatter_plot(x_test,y_test,
                        lambda x,y :np.array([1,x,y]).dot(w_2b),
                        f'Ajuste test regresión logística test, tras {epocas} épocas, $Eout$ = {to_round(E_out)} ',
                        labels, colors)
repeticiones_experimento = 100
stop(f'Repetimos el experimentos {repeticiones_experimento} veces')
precisiones = np.empty(repeticiones_experimento)
pasos = np.empty(repeticiones_experimento)
errores = np.empty(repeticiones_experimento)
h_experimento = lambda x,y, w:np.sign(np.array([1,x,y]).dot(w))
w_0 = np.zeros(3)
# repeat the whole 2b experiment and accumulate epochs / E_out / accuracy
for i in range(repeticiones_experimento):
    # generate the data
    # training set
    x = simula_unif( tamano_muestra_2b, 2, intervalo_2b )
    x = np.c_[np.ones(tamano_muestra_2b), x]
    y = np.array([np.sign( recta(v[1], v[2])) for v in x])
    # test set
    x_test = simula_unif( n_test, 2, intervalo_2b )
    y_test = np.array([np.sign(recta(v[0], v[1])) for v in x_test])
    # train
    w, pasos[i] = regresionLogistica(x,
                                     y,
                                     tasa_aprendizaje = 0.01,
                                     w_inicial = w_0,
                                     max_iteraciones = np.inf, # separable data, so it converges
                                     tolerancia = 0.01)
    y_obtenida = [h_experimento(v[0], v[1], w) for v in x_test]
    precisiones[i] = getPrecision(y_obtenida, y_test)
    errores[i] = errorRegresionLogistica(np.c_[np.ones(n_test), x_test],
                                         y_test,
                                         w)
    if i % 10 == 0:
        print(f'{i+1} experimentos de {repeticiones_experimento}')
# averages over all the repetitions
media_epocas = np.mean(pasos)
desviacion_tipica_epocas = np.std(pasos)
media_errores = np.mean(errores)
desviacion_tipica_errores = np.std(errores)
media_precisiones = np.mean(precisiones)
desviacion_tipica_precisiones = np.std(precisiones)
print('\nResultados sin redondear')
print(f'El número medio de épocas es { media_epocas}, con desviación típica { desviacion_tipica_epocas}')
print(f'El E_out medio es { media_errores}, con desviación típica { desviacion_tipica_errores}')
print(f'La precisión media es {media_precisiones}, con desviación típica {desviacion_tipica_precisiones}')
print('\nResultados redondeados a tres decimales')
print(f'El número medio de épocas es {to_round(media_epocas)}, con desviación típica {to_round(desviacion_tipica_epocas)}')
print(f'El E_out medio es {to_round(media_errores)}, con desviación típica {to_round(desviacion_tipica_errores)}')
print(f'La precisión media es {to_round(media_precisiones)}, con desviación típica {to_round(desviacion_tipica_precisiones)}')
stop('BONUS')
####################################################################################################
# BONUS
####################################################################################################
label8 = 1
label4 = -1
# Function to read the data files
def readData(file_x, file_y):
    """
    Load the feature/label ``.npy`` files and keep only digits 4 and 8.

    Returns (x, y) where each kept row of x is [1, feature0, feature1]
    (bias prepended) and y maps digit 8 -> label8 (+1), 4 -> label4 (-1);
    both arrays are float64.
    """
    datax = np.load(file_x)
    datay = np.load(file_y)
    caracteristicas = []
    etiquetas = []
    # keep only the samples whose class is 8 or 4
    for rasgos, digito in zip(datax, datay):
        if digito == 8:
            etiquetas.append(label8)
            caracteristicas.append(np.array([1, rasgos[0], rasgos[1]]))
        elif digito == 4:
            etiquetas.append(label4)
            caracteristicas.append(np.array([1, rasgos[0], rasgos[1]]))
    return np.array(caracteristicas, np.float64), np.array(etiquetas, np.float64)
# LINEAR REGRESSION
def Error(x,y,w):
    '''Mean squared error of the linear fit.
    INPUT
        x: input data matrix (one sample per row)
        y: target vector
        w: weight (column) vector to evaluate
    OUTPUT
        mean quadratic error >= 0
    '''
    residuo = x.dot(w) - y.reshape(-1, 1)
    suma_cuadrados = np.float64(np.linalg.norm(residuo) ** 2)
    return np.float64(suma_cuadrados / len(x))
def dError(x,y,w):
    '''Gradient of the mean squared error with respect to w.
    OUTPUT
        column vector (2/N) * Xᵀ(Xw - y)
    '''
    residuo = x.dot(w) - y.reshape(-1, 1)
    return (2 / len(x)) * x.T.dot(residuo)
def sgd(x,y, eta = 0.01, max_iter = 1000, batch_size = 32, error=10**(-10)):
    '''
    Stochastic gradient descent for the linear-regression error.
    INPUT
        x: data set (one bias-extended sample per row)
        y: target vector
        eta: learning rate
        max_iter: maximum number of epochs
        batch_size: samples per mini-batch
        error: stop early once Error(x, y, w) falls to this value or below
    OUTPUT
        w: fitted weight column vector
    '''
    # initialize data
    w = np.zeros((x.shape[1], 1), np.float64)
    n_iterations = 0
    len_x = len(x)
    x_index = np.arange( len_x )
    batch_start = 0
    w_error = Error(x,y,w)
    while n_iterations < max_iter and w_error > error :
        # shuffle and split the sample into a sequence of mini-batches
        np.random.shuffle(x_index)
        for batch_start in range(0, len_x, batch_size):
            iter_index = x_index[ batch_start : batch_start + batch_size]
            # one gradient step per mini-batch
            w = w - eta* dError(x[iter_index, :], y[iter_index], w)
        n_iterations += 1
        w_error = Error(x,y,w)
    return w
## Plot comparing two series on the same axes
def comparaValoresGrafica (x, y_1, y_2, etiqueta_1, etiqueta_2, etiqueta_x, etiqueta_y, titulo):
    """Draw ``y_1`` (blue) and ``y_2`` (orchid) against ``x`` on one labelled figure."""
    plt.clf()
    plt.title(titulo)
    series = ((y_1, etiqueta_1, 'blue'), (y_2, etiqueta_2, 'mediumorchid'))
    for valores, etiqueta, color in series:
        plt.plot(x, valores, label=etiqueta, linestyle='solid', color=color)
    plt.xlabel(etiqueta_x)
    plt.ylabel(etiqueta_y)
    plt.legend()
    plt.show()
def muestraTabla (titulos, columnas):
    """
    Print a markdown-style table: ``titulos`` as the header row and one
    row per entry of the (transposed) ``columnas``, values rounded to
    three decimals via ``to_round``.
    """
    print(' | '.join(titulos) + '\t ')
    n = len(titulos)
    print((n - 1) * (3 * '-' + '|') + 3 * '-')
    for fila in np.array(columnas).T:
        celdas = [str(to_round(valor)) for valor in fila]
        print(' | '.join(celdas) + '\t ')
# Reading training data set
x, y = readData('datos/X_train.npy', 'datos/y_train.npy')
# Reading test data set
x_test, y_test = readData('datos/X_test.npy', 'datos/y_test.npy')
############# Show the data we just read
print('Mostramos datos a clasificar:')
classified_scatter_plot_simple(x[:, 1:],y,
                               'Datos manuscritos a clasificar (para entrenamiento)',
                               'Intensidad promedio',
                               'Simetría',
                               labels, colors)
stop()
classified_scatter_plot_simple(x_test[:, 1:],y_test,
                               'Datos manuscritos a clasificar (para test)',
                               'Intensidad promedio',
                               'Simetría',
                               labels, colors)
stop('Experimento bonus')
########
batch_sizes =[32] #batch sizes compared in the experiment
n_iterations = [10,20,50,100,200,500,750,1000]
titulos = ['Iteraciones', 'Ein', 'Eout', 'Precision In', 'Precision out']
len_iter = len(n_iterations)
Ein_SGD = np.empty(len_iter)
Eout_SGD = np.empty(len_iter)
accuracy_in_SGD = np.empty(len_iter)
accuracy_out_SGD = np.empty(len_iter)
w_SGD = []
# fit SGD for every iteration budget and record errors / accuracies
for _batch_size in batch_sizes:
    print(f'Para SGD de tamaño de batch {_batch_size}')
    for i, iteration in enumerate(n_iterations):
        w_SGD = sgd(x,y, eta = 0.01, max_iter = iteration, batch_size = _batch_size)
        Ein_SGD[i] = Error(x,y,w_SGD)
        Eout_SGD[i] = Error(x_test, y_test, w_SGD)
        y_obtenida_entrenamiento =np.sign(x.dot(w_SGD).T)[0]
        accuracy_in_SGD[i] = getPrecision(y_obtenida_entrenamiento, y)
        y_obtenida = np.sign(x_test.dot(w_SGD).T)[0]
        accuracy_out_SGD[i] = getPrecision(y_obtenida, y_test)
### Show the data
# comparison Ein_SGD vs Eout_SGD
comparaValoresGrafica (x = n_iterations,
                       y_1 = Ein_SGD, y_2 = Eout_SGD,
                       etiqueta_1 = '$E_{in}$', etiqueta_2 ='$E_{out}$',
                       etiqueta_x = 'Iteraciones',
                       etiqueta_y = 'Error',
                       titulo = f'Errors para SGD, tamaño de batch {_batch_size}'
)
comparaValoresGrafica (x = n_iterations,
                       y_1 = accuracy_in_SGD, y_2 = accuracy_out_SGD,
                       etiqueta_1 = 'Precisión entrenamiento',
                       etiqueta_2 ='Precisión test',
                       etiqueta_x = 'Iteraciones',
                       etiqueta_y = 'Precisión',
                       titulo = f'Precisión para SGD, tamaño de batch {_batch_size}'
)
muestraTabla (titulos, [n_iterations, Ein_SGD, Eout_SGD, accuracy_in_SGD, accuracy_out_SGD])
stop()
print('Grafica para clasificación test SGD, con zona de clasificación')
plot_datos_cuad(x_test[:, 1:],y_test,
                #lambda x: np.sign( np.c_[np.ones(len(x), x)].dot(w_SGD)),
                lambda x: np.array([signo( np.array([1,v[0], v[1]]).dot(w_SGD)) for v in x]),
                title=f'Clasificación para SGD {n_iterations[-1]} iteraciones, Eout = {to_round(Eout_SGD[-1])}, precisión {to_round(accuracy_out_SGD[-1])}',
                xaxis='Intensidad media', yaxis='Simetría media')
print('\nGrafica para clasificación test SGD, sin zona de clasificación')
stop()
classified_scatter_plot(x_test[:, 1:],y_test,
                        lambda x,y : np.array([1,x,y]).dot(w_SGD)[0],
                        f'Clasificación para SGD {n_iterations[-1]} iteraciones, Eout = {to_round(Eout_SGD[-1])}, precisión {to_round(accuracy_out_SGD[-1])}',
                        labels, colors)
#-------------------------------------------------------
stop('Clasificación para PLA')
# Now we run the same experiment with the PLA algorithm
Ein_PLA = np.empty(len_iter)
Eout_PLA = np.empty(len_iter)
accuracy_in_PLA = np.empty(len_iter)
accuracy_out_PLA = np.empty(len_iter)
w_PLA = []
for i, iteration in enumerate(n_iterations):
    w_PLA , _ = ajusta_PLA(x, y,
                           max_iter=iteration, vector_inicial= np.zeros(3))
    Ein_PLA[i] = Error(x,y,w_PLA)
    Eout_PLA[i] = Error(x_test, y_test, w_PLA)
    y_obtenida_entrenamiento = np.sign(x.dot(w_PLA))
    accuracy_in_PLA[i] = getPrecision(y_obtenida_entrenamiento, y)
    y_obtenida = np.sign(x_test.dot(w_PLA))
    accuracy_out_PLA[i] = getPrecision(y_obtenida, y_test)
### Show the data
# comparison Ein_PLA vs Eout_PLA
comparaValoresGrafica (x = n_iterations,
                       y_1 = Ein_PLA, y_2 = Eout_PLA,
                       etiqueta_1 = '$E_{in}$', etiqueta_2 ='$E_{out}$',
                       etiqueta_x = 'Iteraciones',
                       etiqueta_y = 'Error',
                       titulo = f'Errors para PLA'
)
stop()
comparaValoresGrafica (x = n_iterations,
                       y_1 = accuracy_in_PLA, y_2 = accuracy_out_PLA,
                       etiqueta_1 = 'Precisión entrenamiento',
                       etiqueta_2 ='Precisión test',
                       etiqueta_x = 'Iteraciones',
                       etiqueta_y = 'Precisión',
                       titulo = 'Precisión para PLA'
)
stop()
muestraTabla (titulos, [n_iterations, Ein_PLA, Eout_PLA, accuracy_in_PLA, accuracy_out_PLA])
stop()
## Draw the classification plots
print('Grafica para clasificación test PLA, con zona de clasificación')
plot_datos_cuad(x_test[:, 1:],y_test,
                lambda x: np.array([signo( np.array([1,v[0], v[1]]).dot(w_PLA.T)) for v in x]),
                title=f'Clasificación para PLA {n_iterations[-1]} iteraciones, Eout = {to_round(Eout_PLA[-1])}, precisión {to_round(accuracy_out_PLA[-1])}',
                xaxis='Intensidad media', yaxis='Simetría media')
stop()
print('\nGrafica para clasificación test PLA, sin zona de clasificación')
classified_scatter_plot(x_test[:, 1:],y_test,
                        lambda x,y : np.array([1,x,y]).dot(w_PLA.T),
                        f'Clasificación para PLA {n_iterations[-1]} iteraciones, Eout = {to_round(Eout_PLA[-1])}, precisión {to_round(accuracy_out_PLA[-1])}',
                        labels, colors)
stop('Comparamos errores del test')
# COMPARE THE TEST ERRORS OF BOTH METHODS
comparaValoresGrafica (x = n_iterations,
                       y_1 = Eout_SGD, y_2 = Eout_PLA,
                       etiqueta_1 = '$E_{out} SGD$', etiqueta_2 ='$E_{out} PLA$',
                       etiqueta_x = 'Iteraciones',
                       etiqueta_y = '$E_{out}$',
                       titulo = 'Comparativa error $E_{out}$ PLA y SGD'
)
### PLA-Pocket implementation
stop('Clasificación para PLA-pocket')
def ajusta_PLA_pocket(x, y, max_iter, vector_inicial):
    '''
    PLA-pocket: perceptron that keeps the best weights seen so far.

    INPUT
      x: matrix with one (bias-extended) sample per row
      y: vector of +-1 labels
      max_iter: maximum number of passes over the data
      vector_inicial: initial weight (row) vector
    OUTPUT
      w_mejor: best weights found (highest training accuracy)
      paso: passes over the data performed
      precision_mejor: training accuracy (%) of w_mejor
    '''
    w = np.copy(vector_inicial).T
    w_mejor = np.copy(vector_inicial)
    precision_mejor = getPrecision(np.sign(x.dot(w_mejor)), y)
    paso = 0
    cambiado = True  # True while the last pass modified w
    while cambiado and paso < max_iter:
        cambiado = False
        paso += 1
        for muestra, etiqueta in zip(x, y):
            if signo(muestra.dot(w)) != etiqueta:
                cambiado = True
                w = w + etiqueta * muestra.T
                # pocket step: keep w only if it improves whole-set accuracy
                precision_nueva = getPrecision(np.sign(x.dot(w)), y)
                if precision_nueva > precision_mejor:
                    precision_mejor = precision_nueva
                    w_mejor = w
    return w_mejor, paso, precision_mejor
# Now we run the same experiment with the PLA_POCKET algorithm
Ein_PLA_POCKET = np.empty(len_iter)
Eout_PLA_POCKET = np.empty(len_iter)
accuracy_in_PLA_POCKET = np.empty(len_iter)
accuracy_out_PLA_POCKET = np.empty(len_iter)
w_PLA_POCKET = []
for i, iteration in enumerate(n_iterations):
    w_PLA_POCKET , _, accuracy_in_PLA_POCKET[i] = ajusta_PLA_pocket(x, y,
                                                                    max_iter=iteration, vector_inicial= np.zeros(3))
    Ein_PLA_POCKET[i] = Error(x,y,w_PLA_POCKET)
    Eout_PLA_POCKET[i] = Error(x_test, y_test, w_PLA_POCKET)
    y_obtenida = np.sign(x_test.dot(w_PLA_POCKET))
    accuracy_out_PLA_POCKET[i] = getPrecision(y_obtenida, y_test)
### Show the data
# comparison Ein_PLA_POCKET vs Eout_PLA_POCKET
comparaValoresGrafica (x = n_iterations,
                       y_1 = Ein_PLA_POCKET, y_2 = Eout_PLA_POCKET,
                       etiqueta_1 = '$E_{in}$', etiqueta_2 ='$E_{out}$',
                       etiqueta_x = 'Iteraciones',
                       etiqueta_y = 'Error',
                       titulo = f'Errors para PLA_POCKET'
)
stop()
comparaValoresGrafica (x = n_iterations,
                       y_1 = accuracy_in_PLA_POCKET, y_2 = accuracy_out_PLA_POCKET,
                       etiqueta_1 = 'Precisión entrenamiento',
                       etiqueta_2 ='Precisión test',
                       etiqueta_x = 'Iteraciones',
                       etiqueta_y = 'Precisión',
                       titulo = 'Precisión para PLA_POCKET'
)
stop()
muestraTabla (titulos, [n_iterations, Ein_PLA_POCKET, Eout_PLA_POCKET, accuracy_in_PLA_POCKET, accuracy_out_PLA_POCKET])
stop()
## Draw the classification plots
print('Grafica para clasificación test PLA_POCKET, con zona de clasificación')
plot_datos_cuad(x_test[:, 1:],y_test,
                lambda x: np.array([signo( np.array([1,v[0], v[1]]).dot(w_PLA_POCKET.T)) for v in x]),
                title=f'Clasificación para PLA_POCKET {n_iterations[-1]} iteraciones, Eout = {to_round(Eout_PLA_POCKET[-1])}, precisión {to_round(accuracy_out_PLA_POCKET[-1])}',
                xaxis='Intensidad media', yaxis='Simetría media')
print('\nGráfica para clasificación test PLA_POCKET, sin zona de clasificación')
classified_scatter_plot(x_test[:, 1:],y_test,
                        lambda x,y : np.array([1,x,y]).dot(w_PLA_POCKET.T),
                        f'Clasificación para PLA_POCKET {n_iterations[-1]} iteraciones, Eout = {to_round(Eout_PLA_POCKET[-1])}, precisión {to_round(accuracy_out_PLA_POCKET[-1])}',
                        labels, colors)
stop('Fin experimento previo')
# Final experiment: SGD fit followed by PLA-pocket refinement
_eta = 0.01
_error = 0.01
_iteraciones = 50
print('Ajusto primero usando SGD')
print(f'eta = {_eta}, error {_error} y máximo iteraciones {_iteraciones} ')
w_SGD = sgd(x,y, eta = _eta, max_iter = _iteraciones, batch_size = 32,error = _error)
print(f'w obtenido = {w_SGD}')
# analysis of the results
Ein_SGD = Error(x,y,w_SGD)
Eout_SGD = Error(x_test, y_test, w_SGD)
y_obtenida_entrenamiento =np.sign(x.dot(w_SGD).T)[0]
accuracy_in_SGD = getPrecision(y_obtenida_entrenamiento, y)
y_obtenida = np.sign(x_test.dot(w_SGD).T)[0]
accuracy_out_SGD = getPrecision(y_obtenida, y_test)
stop('__ Análisis de los resultados__')
print(f'Ein_SGD = {Ein_SGD}')
print(f'Eout_SGD = {Eout_SGD}')
print(f'accuracy_in_SGD = {accuracy_in_SGD}')
print(f'accuracy_out_SGD = {accuracy_out_SGD}')
stop('Procedemos a concatenar al w conseguida con SGD con el Pla-pocket')
print(f'con un máximo de {_iteraciones} iteraciones ')
# warm-start PLA-pocket from the SGD weights
w_PLA_POCKET, epocas , accuracy_in_PLA_POCKET = ajusta_PLA_pocket(x, y, _iteraciones, w_SGD.T[0])
print(f'w obtenido = {w_PLA_POCKET} tras {epocas} epocas')
# analysis of the results
Ein_PLA_POCKET = Error(x,y,w_PLA_POCKET)
Eout_PLA_POCKET = Error(x_test, y_test, w_PLA_POCKET)
y_obtenida = np.sign(x_test.dot(w_PLA_POCKET))
accuracy_out_PLA_POCKET = getPrecision(y_obtenida, y_test)
stop('__ Análisis de los resultados__')
print(f'Ein_PLA_POCKET = {Ein_PLA_POCKET}')
print(f'Etest_PLA_POCKET = {Eout_PLA_POCKET}')
print(f'accuracy_in_PLA_POCKET = {accuracy_in_PLA_POCKET}')
print(f'accuracy_test_PLA_POCKET = {accuracy_out_PLA_POCKET}')
stop('Análisis de la cota')
# data for the generalization bounds
N = len(x_test)
H = 3 * 2 ** 64
E_test = Eout_PLA_POCKET
delta = 0.05
dvc = 3 # VC dimension of the perceptron
def hoeffding(N, delta, H, E_test):
    """Hoeffding generalization bound for a finite class of |H| hypotheses."""
    holgura = np.sqrt(np.log(2 * H / delta) / (2 * N))
    return E_test + holgura
def vc(N, delta, dvc, E_test):
    """Vapnik-Chervonenkis generalization bound for VC dimension ``dvc``."""
    crecimiento = 4 * ((2 * N) ** dvc + 1) / delta
    return E_test + np.sqrt(8 / N * np.log(crecimiento))
print(f'N = {N}, H = {H}, dvc = {dvc}, delta = {delta}, E_test = {E_test}')
# evaluate both generalization bounds on the final test error
c_hoeffding = hoeffding(N, delta, H, E_test)
c_vc = vc(N, delta, dvc, E_test)
print(f'Usando desigualdad de Hoeffding E_out <= {c_hoeffding }')
print(f'Usando la generalización VC E_out <= {c_vc }')
print('\n------------------ FIN PRÁCTICA ------------------')
| [
"matplotlib.pyplot.title",
"numpy.load",
"numpy.random.seed",
"matplotlib.pyplot.clf",
"numpy.empty",
"numpy.ones",
"numpy.clip",
"numpy.mean",
"matplotlib.pyplot.contour",
"numpy.arange",
"numpy.exp",
"numpy.linalg.norm",
"numpy.copy",
"numpy.std",
"numpy.max",
"numpy.linspace",
"nu... | [((155, 172), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (169, 172), True, 'import numpy as np\n'), ((13433, 13458), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (13441, 13458), True, 'import numpy as np\n'), ((15143, 15166), 'numpy.mean', 'np.mean', (['sucesion_pasos'], {}), '(sucesion_pasos)\n', (15150, 15166), True, 'import numpy as np\n'), ((15188, 15210), 'numpy.std', 'np.std', (['sucesion_pasos'], {}), '(sucesion_pasos)\n', (15194, 15210), True, 'import numpy as np\n'), ((18277, 18330), 'numpy.random.choice', 'np.random.choice', (['tamano_muestra_2b', '(2)'], {'replace': '(False)'}), '(tamano_muestra_2b, 2, replace=False)\n', (18293, 18330), True, 'import numpy as np\n'), ((20780, 20814), 'numpy.empty', 'np.empty', (['repeticiones_experimento'], {}), '(repeticiones_experimento)\n', (20788, 20814), True, 'import numpy as np\n'), ((20824, 20858), 'numpy.empty', 'np.empty', (['repeticiones_experimento'], {}), '(repeticiones_experimento)\n', (20832, 20858), True, 'import numpy as np\n'), ((20870, 20904), 'numpy.empty', 'np.empty', (['repeticiones_experimento'], {}), '(repeticiones_experimento)\n', (20878, 20904), True, 'import numpy as np\n'), ((20980, 20991), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (20988, 20991), True, 'import numpy as np\n'), ((22250, 22264), 'numpy.mean', 'np.mean', (['pasos'], {}), '(pasos)\n', (22257, 22264), True, 'import numpy as np\n'), ((22293, 22306), 'numpy.std', 'np.std', (['pasos'], {}), '(pasos)\n', (22299, 22306), True, 'import numpy as np\n'), ((22326, 22342), 'numpy.mean', 'np.mean', (['errores'], {}), '(errores)\n', (22333, 22342), True, 'import numpy as np\n'), ((22372, 22387), 'numpy.std', 'np.std', (['errores'], {}), '(errores)\n', (22378, 22387), True, 'import numpy as np\n'), ((22411, 22431), 'numpy.mean', 'np.mean', (['precisiones'], {}), '(precisiones)\n', (22418, 22431), True, 'import numpy as np\n'), ((22465, 22484), 'numpy.std', 'np.std', 
(['precisiones'], {}), '(precisiones)\n', (22471, 22484), True, 'import numpy as np\n'), ((27594, 27612), 'numpy.empty', 'np.empty', (['len_iter'], {}), '(len_iter)\n', (27602, 27612), True, 'import numpy as np\n'), ((27625, 27643), 'numpy.empty', 'np.empty', (['len_iter'], {}), '(len_iter)\n', (27633, 27643), True, 'import numpy as np\n'), ((27663, 27681), 'numpy.empty', 'np.empty', (['len_iter'], {}), '(len_iter)\n', (27671, 27681), True, 'import numpy as np\n'), ((27702, 27720), 'numpy.empty', 'np.empty', (['len_iter'], {}), '(len_iter)\n', (27710, 27720), True, 'import numpy as np\n'), ((30669, 30687), 'numpy.empty', 'np.empty', (['len_iter'], {}), '(len_iter)\n', (30677, 30687), True, 'import numpy as np\n'), ((30700, 30718), 'numpy.empty', 'np.empty', (['len_iter'], {}), '(len_iter)\n', (30708, 30718), True, 'import numpy as np\n'), ((30738, 30756), 'numpy.empty', 'np.empty', (['len_iter'], {}), '(len_iter)\n', (30746, 30756), True, 'import numpy as np\n'), ((30777, 30795), 'numpy.empty', 'np.empty', (['len_iter'], {}), '(len_iter)\n', (30785, 30795), True, 'import numpy as np\n'), ((35088, 35106), 'numpy.empty', 'np.empty', (['len_iter'], {}), '(len_iter)\n', (35096, 35106), True, 'import numpy as np\n'), ((35126, 35144), 'numpy.empty', 'np.empty', (['len_iter'], {}), '(len_iter)\n', (35134, 35144), True, 'import numpy as np\n'), ((35171, 35189), 'numpy.empty', 'np.empty', (['len_iter'], {}), '(len_iter)\n', (35179, 35189), True, 'import numpy as np\n'), ((35217, 35235), 'numpy.empty', 'np.empty', (['len_iter'], {}), '(len_iter)\n', (35225, 35235), True, 'import numpy as np\n'), ((219, 266), 'numpy.random.uniform', 'np.random.uniform', (['rango[0]', 'rango[1]', '(N, dim)'], {}), '(rango[0], rango[1], (N, dim))\n', (236, 266), True, 'import numpy as np\n'), ((329, 359), 'numpy.zeros', 'np.zeros', (['(N, dim)', 'np.float64'], {}), '((N, dim), np.float64)\n', (337, 359), True, 'import numpy as np\n'), ((750, 808), 'numpy.random.uniform', 'np.random.uniform', 
(['intervalo[0]', 'intervalo[1]'], {'size': '(2, 2)'}), '(intervalo[0], intervalo[1], size=(2, 2))\n', (767, 808), True, 'import numpy as np\n'), ((1659, 1668), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1666, 1668), True, 'import matplotlib.pyplot as plt\n'), ((1680, 1716), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[:, 0]', 'x[:, 1]'], {'c': '"""b"""'}), "(x[:, 0], x[:, 1], c='b')\n", (1691, 1716), True, 'import matplotlib.pyplot as plt\n'), ((1728, 1749), 'matplotlib.pyplot.title', 'plt.title', (['plot_title'], {}), '(plot_title)\n', (1737, 1749), True, 'import matplotlib.pyplot as plt\n'), ((1761, 1771), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1769, 1771), True, 'import matplotlib.pyplot as plt\n'), ((2213, 2222), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2220, 2222), True, 'import matplotlib.pyplot as plt\n'), ((2455, 2477), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['etiqueta_x'], {}), '(etiqueta_x)\n', (2465, 2477), True, 'import matplotlib.pyplot as plt\n'), ((2487, 2509), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['etiqueta_y'], {}), '(etiqueta_y)\n', (2497, 2509), True, 'import matplotlib.pyplot as plt\n'), ((2519, 2540), 'matplotlib.pyplot.title', 'plt.title', (['plot_title'], {}), '(plot_title)\n', (2528, 2540), True, 'import matplotlib.pyplot as plt\n'), ((2550, 2562), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2560, 2562), True, 'import matplotlib.pyplot as plt\n'), ((2572, 2582), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2580, 2582), True, 'import matplotlib.pyplot as plt\n'), ((3003, 3012), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3010, 3012), True, 'import matplotlib.pyplot as plt\n'), ((3421, 3449), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(100)'], {}), '(xmin, xmax, 100)\n', (3432, 3449), True, 'import numpy as np\n'), ((3466, 3494), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', '(100)'], {}), '(ymin, ymax, 100)\n', (3477, 3494), True, 
'import numpy as np\n'), ((3567, 3630), 'matplotlib.pyplot.contour', 'plt.contour', (['spacex', 'spacey', 'z', '(0)'], {'colors': "['red']", 'linewidths': '(2)'}), "(spacex, spacey, z, 0, colors=['red'], linewidths=2)\n", (3578, 3630), True, 'import matplotlib.pyplot as plt\n'), ((3660, 3681), 'matplotlib.pyplot.title', 'plt.title', (['plot_title'], {}), '(plot_title)\n', (3669, 3681), True, 'import matplotlib.pyplot as plt\n'), ((3691, 3703), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3701, 3703), True, 'import matplotlib.pyplot as plt\n'), ((3713, 3723), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3721, 3723), True, 'import matplotlib.pyplot as plt\n'), ((5986, 6014), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (5998, 6014), True, 'import matplotlib.pyplot as plt\n'), ((6824, 6840), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6833, 6840), True, 'import matplotlib.pyplot as plt\n'), ((6846, 6856), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6854, 6856), True, 'import matplotlib.pyplot as plt\n'), ((9392, 9402), 'numpy.copy', 'np.copy', (['y'], {}), '(y)\n', (9399, 9402), True, 'import numpy as np\n'), ((12450, 12473), 'numpy.copy', 'np.copy', (['vector_inicial'], {}), '(vector_inicial)\n', (12457, 12473), True, 'import numpy as np\n'), ((17218, 17236), 'numpy.copy', 'np.copy', (['w_inicial'], {}), '(w_inicial)\n', (17225, 17236), True, 'import numpy as np\n'), ((17256, 17274), 'numpy.copy', 'np.copy', (['w_inicial'], {}), '(w_inicial)\n', (17263, 17274), True, 'import numpy as np\n'), ((17396, 17408), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (17405, 17408), True, 'import numpy as np\n'), ((23645, 23660), 'numpy.load', 'np.load', (['file_x'], {}), '(file_x)\n', (23652, 23660), True, 'import numpy as np\n'), ((23671, 23686), 'numpy.load', 'np.load', (['file_y'], {}), '(file_y)\n', (23678, 23686), True, 'import numpy as np\n'), 
((23971, 23994), 'numpy.array', 'np.array', (['x', 'np.float64'], {}), '(x, np.float64)\n', (23979, 23994), True, 'import numpy as np\n'), ((24001, 24024), 'numpy.array', 'np.array', (['y', 'np.float64'], {}), '(y, np.float64)\n', (24009, 24024), True, 'import numpy as np\n'), ((24855, 24892), 'numpy.zeros', 'np.zeros', (['(x.shape[1], 1)', 'np.float64'], {}), '((x.shape[1], 1), np.float64)\n', (24863, 24892), True, 'import numpy as np\n'), ((24964, 24980), 'numpy.arange', 'np.arange', (['len_x'], {}), '(len_x)\n', (24973, 24980), True, 'import numpy as np\n'), ((25742, 25751), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (25749, 25751), True, 'import matplotlib.pyplot as plt\n'), ((25761, 25778), 'matplotlib.pyplot.title', 'plt.title', (['titulo'], {}), '(titulo)\n', (25770, 25778), True, 'import matplotlib.pyplot as plt\n'), ((25798, 25865), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_1'], {'label': 'etiqueta_1', 'linestyle': '"""solid"""', 'color': '"""blue"""'}), "(x, y_1, label=etiqueta_1, linestyle='solid', color='blue')\n", (25806, 25865), True, 'import matplotlib.pyplot as plt\n'), ((25897, 25972), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_2'], {'label': 'etiqueta_2', 'linestyle': '"""solid"""', 'color': '"""mediumorchid"""'}), "(x, y_2, label=etiqueta_2, linestyle='solid', color='mediumorchid')\n", (25905, 25972), True, 'import matplotlib.pyplot as plt\n'), ((26005, 26027), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['etiqueta_x'], {}), '(etiqueta_x)\n', (26015, 26027), True, 'import matplotlib.pyplot as plt\n'), ((26037, 26059), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['etiqueta_y'], {}), '(etiqueta_y)\n', (26047, 26059), True, 'import matplotlib.pyplot as plt\n'), ((26069, 26081), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (26079, 26081), True, 'import matplotlib.pyplot as plt\n'), ((26091, 26101), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26099, 26101), True, 'import matplotlib.pyplot as plt\n'), 
((26291, 26309), 'numpy.array', 'np.array', (['columnas'], {}), '(columnas)\n', (26299, 26309), True, 'import numpy as np\n'), ((34082, 34105), 'numpy.copy', 'np.copy', (['vector_inicial'], {}), '(vector_inicial)\n', (34089, 34105), True, 'import numpy as np\n'), ((34172, 34195), 'numpy.copy', 'np.copy', (['vector_inicial'], {}), '(vector_inicial)\n', (34179, 34195), True, 'import numpy as np\n'), ((3277, 3292), 'numpy.min', 'np.min', (['x[:, 0]'], {}), '(x[:, 0])\n', (3283, 3292), True, 'import numpy as np\n'), ((3294, 3309), 'numpy.max', 'np.max', (['x[:, 0]'], {}), '(x[:, 0])\n', (3300, 3309), True, 'import numpy as np\n'), ((3332, 3347), 'numpy.min', 'np.min', (['x[:, 1]'], {}), '(x[:, 1])\n', (3338, 3347), True, 'import numpy as np\n'), ((3349, 3364), 'numpy.max', 'np.max', (['x[:, 1]'], {}), '(x[:, 1])\n', (3355, 3364), True, 'import numpy as np\n'), ((9519, 9543), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (9536, 9543), True, 'import numpy as np\n'), ((16554, 16563), 'numpy.exp', 'np.exp', (['s'], {}), '(s)\n', (16560, 16563), True, 'import numpy as np\n'), ((17547, 17577), 'numpy.random.permutation', 'np.random.permutation', (['indices'], {}), '(indices)\n', (17568, 17577), True, 'import numpy as np\n'), ((17801, 17834), 'numpy.linalg.norm', 'np.linalg.norm', (['(w_nueva - w_final)'], {}), '(w_nueva - w_final)\n', (17815, 17834), True, 'import numpy as np\n'), ((17863, 17879), 'numpy.copy', 'np.copy', (['w_nueva'], {}), '(w_nueva)\n', (17870, 17879), True, 'import numpy as np\n'), ((18158, 18184), 'numpy.ones', 'np.ones', (['tamano_muestra_2b'], {}), '(tamano_muestra_2b)\n', (18165, 18184), True, 'import numpy as np\n'), ((20076, 20091), 'numpy.ones', 'np.ones', (['n_test'], {}), '(n_test)\n', (20083, 20091), True, 'import numpy as np\n'), ((25201, 25227), 'numpy.random.shuffle', 'np.random.shuffle', (['x_index'], {}), '(x_index)\n', (25218, 25227), True, 'import numpy as np\n'), ((5915, 5937), 'numpy.clip', 'np.clip', 
(['pred_y', '(-1)', '(1)'], {}), '(pred_y, -1, 1)\n', (5922, 5937), True, 'import numpy as np\n'), ((16571, 16580), 'numpy.exp', 'np.exp', (['s'], {}), '(s)\n', (16577, 16580), True, 'import numpy as np\n'), ((21178, 21204), 'numpy.ones', 'np.ones', (['tamano_muestra_2b'], {}), '(tamano_muestra_2b)\n', (21185, 21204), True, 'import numpy as np\n'), ((30958, 30969), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (30966, 30969), True, 'import numpy as np\n'), ((35446, 35457), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (35454, 35457), True, 'import numpy as np\n'), ((654, 668), 'numpy.sqrt', 'np.sqrt', (['sigma'], {}), '(sigma)\n', (661, 668), True, 'import numpy as np\n'), ((16912, 16922), 'numpy.exp', 'np.exp', (['(-i)'], {}), '(-i)\n', (16918, 16922), True, 'import numpy as np\n'), ((20227, 20246), 'numpy.array', 'np.array', (['[1, x, y]'], {}), '([1, x, y])\n', (20235, 20246), True, 'import numpy as np\n'), ((20443, 20462), 'numpy.array', 'np.array', (['[1, x, y]'], {}), '([1, x, y])\n', (20451, 20462), True, 'import numpy as np\n'), ((20947, 20966), 'numpy.array', 'np.array', (['[1, x, y]'], {}), '([1, x, y])\n', (20955, 20966), True, 'import numpy as np\n'), ((21981, 21996), 'numpy.ones', 'np.ones', (['n_test'], {}), '(n_test)\n', (21988, 21996), True, 'import numpy as np\n'), ((23919, 23958), 'numpy.array', 'np.array', (['[1, datax[i][0], datax[i][1]]'], {}), '([1, datax[i][0], datax[i][1]])\n', (23927, 23958), True, 'import numpy as np\n'), ((32940, 32959), 'numpy.array', 'np.array', (['[1, x, y]'], {}), '([1, x, y])\n', (32948, 32959), True, 'import numpy as np\n'), ((37446, 37465), 'numpy.array', 'np.array', (['[1, x, y]'], {}), '([1, x, y])\n', (37454, 37465), True, 'import numpy as np\n'), ((39616, 39637), 'numpy.log', 'np.log', (['(2 * H / delta)'], {}), '(2 * H / delta)\n', (39622, 39637), True, 'import numpy as np\n'), ((39713, 39753), 'numpy.log', 'np.log', (['(4 * ((2 * N) ** dvc + 1) / delta)'], {}), '(4 * ((2 * N) ** dvc + 1) / 
delta)\n', (39719, 39753), True, 'import numpy as np\n'), ((5812, 5828), 'numpy.ones_like', 'np.ones_like', (['xx'], {}), '(xx)\n', (5824, 5828), True, 'import numpy as np\n'), ((30275, 30294), 'numpy.array', 'np.array', (['[1, x, y]'], {}), '([1, x, y])\n', (30283, 30294), True, 'import numpy as np\n'), ((32490, 32515), 'numpy.array', 'np.array', (['[1, v[0], v[1]]'], {}), '([1, v[0], v[1]])\n', (32498, 32515), True, 'import numpy as np\n'), ((36969, 36994), 'numpy.array', 'np.array', (['[1, v[0], v[1]]'], {}), '([1, v[0], v[1]])\n', (36977, 36994), True, 'import numpy as np\n'), ((29795, 29820), 'numpy.array', 'np.array', (['[1, v[0], v[1]]'], {}), '([1, v[0], v[1]])\n', (29803, 29820), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cable task."""
import os
import time
import numpy as np
import pybullet as p
from ravens import utils
from ravens.tasks import Task
class Cable(Task):
    """Task: lay a beaded cable along a goal line.

    The scene contains a square frame, a goal zone along one side of the
    square, and a cable built from point-to-point constrained beads.
    """

    def __init__(self):
        super().__init__()
        self.ee = 'suction'
        self.max_steps = 20
        self.metric = 'zone'
        self.primitive = 'pick_place'

    def reset(self, env):
        """Build the square frame, the goal zone, and the beaded cable."""
        self.total_rewards = 0
        self.goal = {'places': {}, 'steps': [{}]}

        n_beads = 20
        bead_radius = 0.005
        cable_length = 2 * bead_radius * n_beads * np.sqrt(2)

        # Square frame that bounds the workspace for this task.
        square_size = (cable_length, cable_length, 0)
        square_pose = self.random_pose(env, square_size)
        square_template = 'assets/square/square-template.urdf'
        urdf = self.fill_template(
            square_template,
            {'DIM': (cable_length,), 'HALF': (cable_length / 2 - 0.005,)})
        env.add_object(urdf, square_pose, fixed=True)
        os.remove(urdf)

        # Goal zone: a thin strip along one side of the square.
        self.zone_size = (cable_length, 0.03, 0.2)
        zone_range = (self.zone_size[0], self.zone_size[1], 0.001)
        zone_position = utils.apply(square_pose, (0, cable_length / 2, 0.001))
        self.zone_pose = (zone_position, square_pose[1])

        # Add the beaded cable: a chain of small bodies, each constrained
        # to the previous one with a point-to-point joint.
        bead_spacing = cable_length / n_beads
        position, _ = self.random_pose(env, zone_range)
        position = np.float32(position)
        part_shape = p.createCollisionShape(
            p.GEOM_BOX, halfExtents=[bead_radius] * 3)
        part_visual = p.createVisualShape(
            p.GEOM_SPHERE, radius=bead_radius * 1.5)
        self.object_points = {}
        for i in range(n_beads):
            position[2] += bead_spacing
            part_id = p.createMultiBody(
                0.1, part_shape, part_visual, basePosition=position)
            if env.objects:
                # Chain this bead to the previously added one.
                constraint_id = p.createConstraint(
                    parentBodyUniqueId=env.objects[-1],
                    parentLinkIndex=-1,
                    childBodyUniqueId=part_id,
                    childLinkIndex=-1,
                    jointType=p.JOINT_POINT2POINT,
                    jointAxis=(0, 0, 0),
                    parentFramePosition=(0, 0, bead_spacing),
                    childFramePosition=(0, 0, 0))
                p.changeConstraint(constraint_id, maxForce=100)
            if (i > 0) and (i < n_beads - 1):
                # Interior beads are colored red.
                p.changeVisualShape(
                    part_id, -1, rgbaColor=utils.COLORS['red'] + [1])
            env.objects.append(part_id)
            self.object_points[part_id] = np.float32((0, 0, 0)).reshape(3, 1)
            # Target pose for this bead along the goal line.
            true_position = utils.apply(
                self.zone_pose,
                (bead_radius + bead_spacing * i - cable_length / 2, 0, 0))
            self.goal['places'][part_id] = (true_position, (0, 0, 0, 1.))
            # Zone-based evaluation: symmetry does not matter, so use 0.
            self.goal['steps'][0][part_id] = (0, [part_id])

        # Let the physics simulation settle the cable before starting.
        env.start()
        time.sleep(1)
        env.pause()
| [
"os.remove",
"pybullet.createMultiBody",
"pybullet.createVisualShape",
"numpy.float32",
"pybullet.createConstraint",
"time.sleep",
"pybullet.changeConstraint",
"ravens.utils.apply",
"pybullet.changeVisualShape",
"pybullet.createCollisionShape",
"numpy.sqrt"
] | [((1450, 1465), 'os.remove', 'os.remove', (['urdf'], {}), '(urdf)\n', (1459, 1465), False, 'import os\n'), ((1710, 1749), 'ravens.utils.apply', 'utils.apply', (['square_pose', 'zone_position'], {}), '(square_pose, zone_position)\n', (1721, 1749), False, 'from ravens import utils\n'), ((2073, 2093), 'numpy.float32', 'np.float32', (['position'], {}), '(position)\n', (2083, 2093), True, 'import numpy as np\n'), ((2111, 2171), 'pybullet.createCollisionShape', 'p.createCollisionShape', (['p.GEOM_BOX'], {'halfExtents': '([radius] * 3)'}), '(p.GEOM_BOX, halfExtents=[radius] * 3)\n', (2133, 2171), True, 'import pybullet as p\n'), ((2190, 2245), 'pybullet.createVisualShape', 'p.createVisualShape', (['p.GEOM_SPHERE'], {'radius': '(radius * 1.5)'}), '(p.GEOM_SPHERE, radius=radius * 1.5)\n', (2209, 2245), True, 'import pybullet as p\n'), ((3583, 3596), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3593, 3596), False, 'import time\n'), ((1114, 1124), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1121, 1124), True, 'import numpy as np\n'), ((2429, 2499), 'pybullet.createMultiBody', 'p.createMultiBody', (['(0.1)', 'part_shape', 'part_visual'], {'basePosition': 'position'}), '(0.1, part_shape, part_visual, basePosition=position)\n', (2446, 2499), True, 'import pybullet as p\n'), ((3287, 3329), 'ravens.utils.apply', 'utils.apply', (['self.zone_pose', 'true_position'], {}), '(self.zone_pose, true_position)\n', (3298, 3329), False, 'from ravens import utils\n'), ((2557, 2810), 'pybullet.createConstraint', 'p.createConstraint', ([], {'parentBodyUniqueId': 'env.objects[-1]', 'parentLinkIndex': '(-1)', 'childBodyUniqueId': 'part_id', 'childLinkIndex': '(-1)', 'jointType': 'p.JOINT_POINT2POINT', 'jointAxis': '(0, 0, 0)', 'parentFramePosition': '(0, 0, distance)', 'childFramePosition': '(0, 0, 0)'}), '(parentBodyUniqueId=env.objects[-1], parentLinkIndex=-1,\n childBodyUniqueId=part_id, childLinkIndex=-1, jointType=p.\n JOINT_POINT2POINT, jointAxis=(0, 0, 0), 
parentFramePosition=(0, 0,\n distance), childFramePosition=(0, 0, 0))\n', (2575, 2810), True, 'import pybullet as p\n'), ((2903, 2950), 'pybullet.changeConstraint', 'p.changeConstraint', (['constraint_id'], {'maxForce': '(100)'}), '(constraint_id, maxForce=100)\n', (2921, 2950), True, 'import pybullet as p\n'), ((3043, 3092), 'pybullet.changeVisualShape', 'p.changeVisualShape', (['part_id', '(-1)'], {'rgbaColor': 'color'}), '(part_id, -1, rgbaColor=color)\n', (3062, 3092), True, 'import pybullet as p\n'), ((3163, 3184), 'numpy.float32', 'np.float32', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (3173, 3184), True, 'import numpy as np\n')] |
from __future__ import print_function
import argparse
import numpy as np
import numpy.random as npr
import time
import os
import sys
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
def get_hms(seconds):
    """Split a duration in seconds into an (hours, minutes, seconds) tuple."""
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return hours, minutes, secs
class Net(nn.Module):
    """Small two-conv CNN for single-channel 28x28 inputs (LeNet-style).

    NOTE: `forward` reads the module-level `args.no_dropout` flag to decide
    whether dropout is applied.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        """Return per-class log-probabilities of shape (batch, 10)."""
        out = F.relu(F.max_pool2d(self.conv1(x), 2))
        feats = self.conv2(out)
        if not args.no_dropout:
            feats = self.conv2_drop(feats)
        out = F.relu(F.max_pool2d(feats, 2))
        out = out.view(-1, 320)
        out = F.relu(self.fc1(out))
        if not args.no_dropout:
            out = F.dropout(out, training=self.training)
        return F.log_softmax(self.fc2(out), dim=1)
def train(args, model, device, trainset, optimizer, epoch, example_stats):
    """Run one training epoch and record per-example forgetting statistics.

    Relies on the module-level globals `criterion` (per-example loss) and
    `train_indx` (maps positions in the possibly-pruned trainset back to
    indices in the original dataset).

    Args:
        args: parsed command-line arguments (uses `batch_size`, `epochs`).
        model: network being trained; switched to train mode.
        device: torch device that inputs/targets are moved to.
        trainset: dataset exposing `train_labels` and `__getitem__`.
        optimizer: optimizer stepped once per batch.
        epoch: current epoch number (used only for logging).
        example_stats: dict updated in place. Keyed by original-dataset
            index it stores [losses, accuracies, margins] per presentation;
            under the key 'train' it stores the per-epoch training accuracy.
    """
    epoch_loss = 0
    n_correct = 0
    n_seen = 0
    batch_size = args.batch_size

    model.train()

    # Shuffle the order in which training examples are visited this epoch.
    shuffled = npr.permutation(np.arange(len(trainset.train_labels)))

    for batch_idx, start in enumerate(
            range(0, len(trainset.train_labels), batch_size)):

        # Trainset indices belonging to this batch.
        inds = shuffled[start:start + batch_size]

        # Fetch and transform the batch, then move it to the device.
        inputs = torch.stack([trainset.__getitem__(i)[0] for i in inds])
        targets = torch.LongTensor(
            np.array(trainset.train_labels)[inds].tolist())
        inputs, targets = inputs.to(device), targets.to(device)

        # Forward propagation, compute per-example loss, get predictions.
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        _, predicted = torch.max(outputs.data, 1)

        hits = predicted == targets
        for j, idx in enumerate(inds):
            # Index in the original (unsorted, unpruned) dataset.
            original_idx = train_indx[idx]

            # Misclassification margin: correct-class output minus the
            # highest-scoring *incorrect* class (2nd best when the example
            # is classified correctly, best otherwise).
            correct_class_output = outputs.data[j, targets[j].item()]
            ranked, _ = torch.sort(outputs.data[j, :])
            best_wrong = ranked[-2] if hits[j] else ranked[-1]
            margin = correct_class_output.item() - best_wrong.item()

            # Append this presentation's statistics for the example.
            stats = example_stats.get(original_idx, [[], [], []])
            stats[0].append(loss[j].item())
            stats[1].append(hits[j].sum().item())
            stats[2].append(margin)
            example_stats[original_idx] = stats

        # Reduce the loss, backpropagate, and update the optimizer.
        loss = loss.mean()
        epoch_loss += loss.item()
        n_seen += targets.size(0)
        n_correct += predicted.eq(targets.data).cpu().sum()
        loss.backward()
        optimizer.step()

        sys.stdout.write('\r')
        sys.stdout.write(
            '| Epoch [%3d/%3d] Iter[%3d/%3d]\t\tLoss: %.4f Acc@1: %.3f%%' %
            (epoch, args.epochs, batch_idx + 1,
             (len(trainset) // batch_size) + 1, loss.item(),
             100. * n_correct.item() / n_seen))
        sys.stdout.flush()

    # Record the epoch-level training accuracy.
    stats = example_stats.get('train', [[], []])
    stats[1].append(100. * n_correct.item() / float(n_seen))
    example_stats['train'] = stats
def test(args, model, device, testset, example_stats):
    """Evaluate model predictions on heldout test data.

    Relies on the module-level globals `criterion` (per-example loss) and
    `epoch` (current epoch number, used only for logging).

    Args:
        args: parsed command-line arguments (unused here; kept for symmetry
            with `train`).
        model: network to evaluate; switched to eval mode.
        device: torch device that inputs/targets are moved to.
        testset: dataset exposing `test_labels` and `__getitem__`.
        example_stats: dict updated in place; `example_stats['test'][1]`
            accumulates the test accuracy of every evaluation.
    """
    test_loss = 0
    correct = 0
    total = 0
    test_batch_size = 32
    model.eval()

    # FIX: inference only -- disable autograd so evaluation does not build
    # gradient graphs (the original tracked gradients for no reason).
    with torch.no_grad():
        for batch_idx, batch_start_ind in enumerate(
                range(0, len(testset.test_labels), test_batch_size)):

            # Get batch inputs and targets.
            transformed_testset = []
            for ind in range(
                    batch_start_ind,
                    min(
                        len(testset.test_labels),
                        batch_start_ind + test_batch_size)):
                transformed_testset.append(testset.__getitem__(ind)[0])
            inputs = torch.stack(transformed_testset)
            targets = torch.LongTensor(
                np.array(testset.test_labels)[batch_start_ind:batch_start_ind +
                                              test_batch_size].tolist())

            # Map to available device.
            inputs, targets = inputs.to(device), targets.to(device)

            # Forward propagation, compute loss, get predictions.
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss = loss.mean()
            test_loss += loss.item()
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()

    # Add test accuracy to dict.
    acc = 100. * correct.item() / total
    index_stats = example_stats.get('test', [[], []])
    index_stats[1].append(100. * correct.item() / float(total))
    example_stats['test'] = index_stats
    # NOTE: `loss` here is the mean loss of the *last* batch only.
    print("\n| Validation Epoch #%d\t\t\tLoss: %.4f Acc@1: %.2f%%" %
          (epoch, loss.item(), acc))
# ---------------------------------------------------------------------------
# Command-line interface: dataset/training hyperparameters plus options that
# control which sorted examples are removed from the training set.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='training MNIST')
parser.add_argument(
    '--dataset',
    default='mnist',
    help='dataset to use, can be mnist or permuted_mnist')
parser.add_argument(
    '--batch_size',
    type=int,
    default=64,
    metavar='N',
    help='input batch size for training (default: 64)')
parser.add_argument(
    '--epochs',
    type=int,
    default=200,
    metavar='N',
    help='number of epochs to train (default: 200)')
parser.add_argument(
    '--lr',
    type=float,
    default=0.01,
    metavar='LR',
    help='learning rate (default: 0.01)')
parser.add_argument(
    '--momentum',
    type=float,
    default=0.5,
    metavar='M',
    help='SGD momentum (default: 0.5)')
parser.add_argument(
    '--no_cuda',
    action='store_true',
    default=False,
    help='disables CUDA training')
parser.add_argument(
    '--seed',
    type=int,
    default=1,
    metavar='S',
    help='random seed (default: 1)')
parser.add_argument(
    '--sorting_file',
    default="none",
    help=
    'name of a file containing order of examples sorted by a certain metric (default: "none", i.e. not sorted)'
)
parser.add_argument(
    '--remove_n',
    type=int,
    default=0,
    help='number of sorted examples to remove from training')
parser.add_argument(
    '--keep_lowest_n',
    type=int,
    default=0,
    help=
    'number of sorted examples to keep that have the lowest metric score, equivalent to start index of removal; if a negative number given, remove random draw of examples'
)
parser.add_argument(
    '--no_dropout', action='store_true', default=False, help='remove dropout')
parser.add_argument(
    '--input_dir',
    default='mnist_results/',
    help='directory where to read sorting file from')
parser.add_argument(
    '--output_dir', required=True, help='directory where to save results')

# Enter all arguments that you want to be in the filename of the saved output
ordered_args = [
    'dataset', 'no_dropout', 'seed', 'sorting_file', 'remove_n',
    'keep_lowest_n'
]

# Parse arguments and setup name of output file with forgetting stats.
# The output filename encodes the values of `ordered_args`, e.g.
# "dataset_mnist__no_dropout_False__...".
args = parser.parse_args()
args_dict = vars(args)
print(args_dict)
save_fname = '__'.join(
    '{}_{}'.format(arg, args_dict[arg]) for arg in ordered_args)
# Set appropriate devices
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

# Set random seed for initialization (torch, CUDA, and numpy RNGs)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(args.seed)
npr.seed(args.seed)

# Setup transforms (MNIST mean/std normalization)
all_transforms = [
    transforms.ToTensor(),
    transforms.Normalize((0.1307, ), (0.3081, ))
]
if args.dataset == 'permuted_mnist':
    # One fixed random pixel permutation applied to every image.
    pixel_permutation = torch.randperm(28 * 28)
    all_transforms.append(
        transforms.Lambda(
            lambda x: x.view(-1, 1)[pixel_permutation].view(1, 28, 28)))
transform = transforms.Compose(all_transforms)

os.makedirs(args.output_dir, exist_ok=True)

# Load the appropriate train and test datasets
trainset = datasets.MNIST(
    root='/tmp/data', train=True, download=True, transform=transform)
testset = datasets.MNIST(
    root='/tmp/data', train=False, download=True, transform=transform)

# Get indices of examples that should be used for training
if args.sorting_file == 'none':
    # No sorting file: train on the full dataset.
    train_indx = np.array(range(len(trainset.train_labels)))
else:
    # The sorting file may be saved with or without a '.pkl' suffix.
    try:
        with open(
                os.path.join(args.input_dir, args.sorting_file) + '.pkl',
                'rb') as fin:
            ordered_indx = pickle.load(fin)['indices']
    except IOError:
        with open(os.path.join(args.input_dir, args.sorting_file),
                  'rb') as fin:
            ordered_indx = pickle.load(fin)['indices']

    # Get the indices to remove from training: a window of `remove_n`
    # examples starting at position `keep_lowest_n` of the sorted order.
    elements_to_remove = np.array(
        ordered_indx)[args.keep_lowest_n:args.keep_lowest_n + args.remove_n]

    # Remove the corresponding elements
    train_indx = np.setdiff1d(
        range(len(trainset.train_labels)), elements_to_remove)

# Remove remove_n number of examples from the train set at random
if args.keep_lowest_n < 0:
    train_indx = npr.permutation(np.arange(len(
        trainset.train_labels)))[:len(trainset.train_labels) - args.remove_n]

# Reassign train data and labels to the selected subset
trainset.train_data = trainset.train_data[train_indx, :, :]
trainset.train_labels = np.array(trainset.train_labels)[train_indx].tolist()

print('Training on ' + str(len(trainset.train_labels)) + ' examples')

# Setup model and optimizer
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)

# Setup loss: per-example losses are required for the forgetting
# statistics, so reduction is disabled. (FIX: the original created the
# loss and then re-ran `criterion.__init__(reduce=False)`, a hack that
# also relies on the long-deprecated `reduce` flag; `reduction='none'`
# is the supported equivalent.)
criterion = nn.CrossEntropyLoss(reduction='none')
# Initialize dictionary to save statistics for every example presentation
example_stats = {}

# Main loop: train one epoch, evaluate, then checkpoint the statistics
# so partial runs still leave usable output.
elapsed_time = 0
for epoch in range(args.epochs):
    start_time = time.time()
    train(args, model, device, trainset, optimizer, epoch, example_stats)
    test(args, model, device, testset, example_stats)

    epoch_time = time.time() - start_time
    elapsed_time += epoch_time
    print('| Elapsed time : %d:%02d:%02d' % (get_hms(elapsed_time)))

    # Save the stats dictionary
    fname = os.path.join(args.output_dir, save_fname)
    with open(fname + "__stats_dict.pkl", "wb") as f:
        pickle.dump(example_stats, f)

    # Log the best train and test accuracy so far
    with open(fname + "__best_acc.txt", "w") as f:
        f.write('train test \n')
        f.write(str(max(example_stats['train'][1])))
        f.write(' ')
        f.write(str(max(example_stats['test'][1])))
| [
"sys.stdout.write",
"pickle.dump",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.nn.functional.dropout",
"pickle.load",
"sys.stdout.flush",
"torch.device",
"torchvision.transforms.Normalize",
"os.path.join",
"torchvision.transforms.Compose",
"torch.nn.functional.log_softmax",
"torch... | [((6525, 6578), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""training MNIST"""'}), "(description='training MNIST')\n", (6548, 6578), False, 'import argparse\n'), ((8868, 8911), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (8880, 8911), False, 'import torch\n'), ((9018, 9046), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (9035, 9046), False, 'import torch\n'), ((9050, 9075), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9073, 9075), False, 'import torch\n'), ((9115, 9134), 'numpy.random.seed', 'npr.seed', (['args.seed'], {}), '(args.seed)\n', (9123, 9134), True, 'import numpy.random as npr\n'), ((9476, 9510), 'torchvision.transforms.Compose', 'transforms.Compose', (['all_transforms'], {}), '(all_transforms)\n', (9494, 9510), False, 'from torchvision import datasets, transforms\n'), ((9512, 9555), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (9523, 9555), False, 'import os\n'), ((9615, 9700), 'torchvision.datasets.MNIST', 'datasets.MNIST', ([], {'root': '"""/tmp/data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform'}), "(root='/tmp/data', train=True, download=True, transform=transform\n )\n", (9629, 9700), False, 'from torchvision import datasets, transforms\n'), ((9711, 9797), 'torchvision.datasets.MNIST', 'datasets.MNIST', ([], {'root': '"""/tmp/data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform'}), "(root='/tmp/data', train=False, download=True, transform=\n transform)\n", (9725, 9797), False, 'from torchvision import datasets, transforms\n'), ((11232, 11253), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (11251, 11253), True, 'import torch.nn as nn\n'), ((8833, 8858), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8856, 8858), 
False, 'import torch\n'), ((9081, 9114), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (9103, 9114), False, 'import torch\n'), ((9178, 9199), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (9197, 9199), False, 'from torchvision import datasets, transforms\n'), ((9205, 9247), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (9225, 9247), False, 'from torchvision import datasets, transforms\n'), ((9313, 9336), 'torch.randperm', 'torch.randperm', (['(28 * 28)'], {}), '(28 * 28)\n', (9327, 9336), False, 'import torch\n'), ((11449, 11460), 'time.time', 'time.time', ([], {}), '()\n', (11458, 11460), False, 'import time\n'), ((11778, 11819), 'os.path.join', 'os.path.join', (['args.output_dir', 'save_fname'], {}), '(args.output_dir, save_fname)\n', (11790, 11819), False, 'import os\n'), ((554, 585), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(10)'], {'kernel_size': '(5)'}), '(1, 10, kernel_size=5)\n', (563, 585), True, 'import torch.nn as nn\n'), ((607, 639), 'torch.nn.Conv2d', 'nn.Conv2d', (['(10)', '(20)'], {'kernel_size': '(5)'}), '(10, 20, kernel_size=5)\n', (616, 639), True, 'import torch.nn as nn\n'), ((666, 680), 'torch.nn.Dropout2d', 'nn.Dropout2d', ([], {}), '()\n', (678, 680), True, 'import torch.nn as nn\n'), ((700, 718), 'torch.nn.Linear', 'nn.Linear', (['(320)', '(50)'], {}), '(320, 50)\n', (709, 718), True, 'import torch.nn as nn\n'), ((738, 755), 'torch.nn.Linear', 'nn.Linear', (['(50)', '(10)'], {}), '(50, 10)\n', (747, 755), True, 'import torch.nn as nn\n'), ((1191, 1214), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (1204, 1214), True, 'import torch.nn.functional as F\n'), ((2179, 2212), 'torch.stack', 'torch.stack', (['transformed_trainset'], {}), '(transformed_trainset)\n', (2190, 2212), False, 'import torch\n'), ((2604, 2630), 'torch.max', 'torch.max', 
(['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (2613, 2630), False, 'import torch\n'), ((4251, 4273), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (4267, 4273), False, 'import sys\n'), ((4538, 4556), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4554, 4556), False, 'import sys\n'), ((5526, 5558), 'torch.stack', 'torch.stack', (['transformed_testset'], {}), '(transformed_testset)\n', (5537, 5558), False, 'import torch\n'), ((6059, 6085), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (6068, 6085), False, 'import torch\n'), ((10390, 10412), 'numpy.array', 'np.array', (['ordered_indx'], {}), '(ordered_indx)\n', (10398, 10412), True, 'import numpy as np\n'), ((11608, 11619), 'time.time', 'time.time', ([], {}), '()\n', (11617, 11619), False, 'import time\n'), ((11882, 11911), 'pickle.dump', 'pickle.dump', (['example_stats', 'f'], {}), '(example_stats, f)\n', (11893, 11911), False, 'import pickle\n'), ((1114, 1150), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'training': 'self.training'}), '(x, training=self.training)\n', (1123, 1150), True, 'import torch.nn.functional as F\n'), ((3076, 3106), 'torch.sort', 'torch.sort', (['outputs.data[j, :]'], {}), '(outputs.data[j, :])\n', (3086, 3106), False, 'import torch\n'), ((10950, 10981), 'numpy.array', 'np.array', (['trainset.train_labels'], {}), '(trainset.train_labels)\n', (10958, 10981), True, 'import numpy as np\n'), ((10116, 10132), 'pickle.load', 'pickle.load', (['fin'], {}), '(fin)\n', (10127, 10132), False, 'import pickle\n'), ((10001, 10048), 'os.path.join', 'os.path.join', (['args.input_dir', 'args.sorting_file'], {}), '(args.input_dir, args.sorting_file)\n', (10013, 10048), False, 'import os\n'), ((10182, 10229), 'os.path.join', 'os.path.join', (['args.input_dir', 'args.sorting_file'], {}), '(args.input_dir, args.sorting_file)\n', (10194, 10229), False, 'import os\n'), ((10290, 10306), 'pickle.load', 'pickle.load', (['fin'], {}), 
'(fin)\n', (10301, 10306), False, 'import pickle\n'), ((2261, 2292), 'numpy.array', 'np.array', (['trainset.train_labels'], {}), '(trainset.train_labels)\n', (2269, 2292), True, 'import numpy as np\n'), ((5607, 5636), 'numpy.array', 'np.array', (['testset.test_labels'], {}), '(testset.test_labels)\n', (5615, 5636), True, 'import numpy as np\n')] |
# %%
import numpy as np
from bokeh.layouts import gridplot
from bokeh.plotting import figure, output_file, show
from bokeh.io import output_notebook
import pandas as pd
class plotresult:
    """Load a saved epidemic-simulation archive and plot infection growth.

    The save file is expected to be an ``np.savez`` archive whose arrays hold
    per-agent state values, one array per simulated day.
    """

    # Agent state values strictly below this threshold count as infected.
    # NOTE(review): the magic number 30 is inherited from the original code;
    # confirm against the simulation's state encoding.
    INFECTED_STATE_THRESHOLD = 30

    def __init__(self, savefile):
        """Read every array stored in the ``.npz`` archive at *savefile*."""
        container = np.load(savefile)
        self.sim_result = [container[key] for key in container]

    def infectiongrowth(self):
        """Plot the number of infected cases per day with bokeh (inline notebook output)."""
        days = list(range(len(self.sim_result)))
        # len(np.where(x < t)[0]) just counts matching entries; a boolean sum
        # is the equivalent idiom.
        num_infected = [int((self.sim_result[t] < self.INFECTED_STATE_THRESHOLD).sum())
                        for t in days]
        infected_growth_df = pd.DataFrame({'num_infected': num_infected, 'Day': days})
        output_notebook()
        # Bug fix: the original kept the title and datetime x-axis of bokeh's
        # "stock closing prices" example, but the x-axis here is an integer
        # day index, not a datetime.
        p1 = figure(title="Infection growth")
        p1.grid.grid_line_alpha = 0.3
        p1.xaxis.axis_label = 'Day'
        p1.yaxis.axis_label = 'Number of the infected cases'
        p1.line(
            infected_growth_df['Day'], infected_growth_df['num_infected'], color='#A6CEE3', line_width=3)
        p1.circle(
            infected_growth_df['Day'], infected_growth_df['num_infected'], fill_color="white", size=5)
        show(p1)
# %%
if __name__ == "__main__":
    # Visualize the infection growth stored in the simulation output archive.
    plotter = plotresult('outfile.npz')
    plotter.infectiongrowth()
# %%
| [
"numpy.load",
"bokeh.io.output_notebook",
"bokeh.plotting.figure",
"numpy.where",
"bokeh.plotting.show"
] | [((243, 260), 'numpy.load', 'np.load', (['savefile'], {}), '(savefile)\n', (250, 260), True, 'import numpy as np\n'), ((678, 695), 'bokeh.io.output_notebook', 'output_notebook', ([], {}), '()\n', (693, 695), False, 'from bokeh.io import output_notebook\n'), ((709, 769), 'bokeh.plotting.figure', 'figure', ([], {'x_axis_type': '"""datetime"""', 'title': '"""Stock Closing Prices"""'}), "(x_axis_type='datetime', title='Stock Closing Prices')\n", (715, 769), False, 'from bokeh.plotting import figure, output_file, show\n'), ((1160, 1168), 'bokeh.plotting.show', 'show', (['p1'], {}), '(p1)\n', (1164, 1168), False, 'from bokeh.plotting import figure, output_file, show\n'), ((465, 498), 'numpy.where', 'np.where', (['(self.sim_result[t] < 30)'], {}), '(self.sim_result[t] < 30)\n', (473, 498), True, 'import numpy as np\n')] |
# =============================================================================
# Created By: bpatter5
# Updated By: bpatter5
# Created On: 12/3/2018
# Updated On: 12/8/2018
# Purpose: Methods for working with Parquet files and Arrow operations
# =============================================================================
import pyarrow as pa
from pyarrow import parquet as pq
import numpy as np
def write_test(path, test, context):
    """Serialize *test* with *context* and write the resulting buffer to *path*.

    Parameters
    ----------
    path : string
        Destination file path for the serialized object.
    test : ann_inference.testing.xxxx_test
        Test object to serialize.
    context : pa.SerializationContext
        Custom serialization context defining how the test is encoded.

    Returns
    -------
    : void
        Writes a serialized pyarrow buffer to disk.
    """
    serialized = context.serialize(test)
    with open(path, 'wb') as out_file:
        out_file.write(serialized.to_buffer())
def read_test(path, context):
    """Read a serialized pyarrow buffer from disk and rebuild the test object.

    Parameters
    ----------
    path : string
        File path of the serialized test.
    context : pa.SerializationContext
        Custom serialization context used to decode the buffer.

    Returns
    -------
    : ann_inference.testing.xxxx_test
        Deserialized test object produced by *context*.
    """
    mapped = pa.memory_map(path)
    buf = mapped.read_buffer()
    return context.deserialize(buf)
def gen_parquet_batch(test_stat, fill_col, epoch_num, test_num, col_names):
    """Build a pyarrow.RecordBatch from a numpy array of PyTorch model parameters.

    Parameters
    ----------
    test_stat : np.array
        Parameter array; one output column is produced per array column.
    fill_col : string
        Name of the parameter being tested (later a partition key on disk).
    epoch_num : int
        Current epoch, repeated down a constant column.
    test_num : int
        Current test number (for multiple model runs), repeated down a column.
    col_names : list[string]
        Field names for the resulting batch.

    Returns
    -------
    : pyarrow.RecordBatch
        Batch with nrows(test_stat) rows and ncols(test_stat) + 3 columns.
    """
    n_rows = test_stat.shape[0]
    # Three constant metadata columns precede the per-parameter columns.
    columns = [
        pa.array(np.full(n_rows, fill_col)),
        pa.array(np.full(n_rows, test_num)),
        pa.array(np.full(n_rows, epoch_num)),
    ]
    columns.extend(pa.array(test_stat[:, col]) for col in range(test_stat.shape[1]))
    return pa.RecordBatch.from_arrays(columns, col_names)
def array_to_parquet(path, test_stat, fill_col, test_num, col_names):
    """Write a numpy array of PyTorch model parameters as a Parquet dataset.

    Parameters
    ----------
    path : string
        Base path of the Parquet dataset.
    test_stat : np.array
        1-D parameter array stored as a single value column.
    fill_col : string
        Name of the parameter being tested; becomes the 'stat' partition.
    test_num : int
        Current test number (for multiple model runs).
    col_names : list[string]
        Field names for the resulting table.

    Returns
    -------
    : void
        Writes to the parquet store, partitioned by 'stat'.
    """
    n_rows = test_stat.shape[0]
    columns = [
        pa.array(np.full(n_rows, fill_col)),   # stat name, constant
        pa.array(np.full(n_rows, test_num)),   # run number, constant
        pa.array(np.arange(0, n_rows)),        # row index
        pa.array(test_stat),                    # parameter values
    ]
    table = pa.Table.from_arrays(columns, col_names)
    pq.write_to_dataset(table, path, partition_cols=['stat'])
def results_to_parquet(path, batch_list):
    """Materialize a list of record batches as a Parquet dataset on disk.

    Parameters
    ----------
    path : string
        Base path at which the parquet dataset is created.
    batch_list : list[pyarrow.RecordBatch]
        Batches with matching schemas to be combined and written.

    Returns
    -------
    : void
        Writes a parquet dataset partitioned by the 'stat' column.
    """
    combined = pa.Table.from_batches(batch_list)
    pq.write_to_dataset(combined, path, partition_cols=['stat'])
def read_parquet_store(path, nthreads=5):
    """Read the parquet dataset at *path* into a pandas DataFrame.

    Parameters
    ----------
    path : string
        Path of the parquet store to load.
    nthreads : int, 5
        Number of threads used while reading.
        NOTE(review): newer pyarrow releases replaced `nthreads` with
        `use_threads` on `pq.read_table` — verify against the pinned version.

    Returns
    -------
    : pandas.DataFrame
        The dataset contents as a pandas frame.
    """
    table = pq.read_table(path, nthreads=nthreads)
    return table.to_pandas()
"numpy.full",
"pyarrow.RecordBatch.from_arrays",
"pyarrow.Table.from_batches",
"pyarrow.Table.from_arrays",
"numpy.arange",
"pyarrow.parquet.read_table",
"pyarrow.array",
"pyarrow.parquet.write_to_dataset",
"pyarrow.memory_map"
] | [((1587, 1606), 'pyarrow.memory_map', 'pa.memory_map', (['path'], {}), '(path)\n', (1600, 1606), True, 'import pyarrow as pa\n'), ((2754, 2786), 'numpy.arange', 'np.arange', (['(0)', 'test_stat.shape[1]'], {}), '(0, test_stat.shape[1])\n', (2763, 2786), True, 'import numpy as np\n'), ((2854, 2897), 'pyarrow.RecordBatch.from_arrays', 'pa.RecordBatch.from_arrays', (['data', 'col_names'], {}), '(data, col_names)\n', (2880, 2897), True, 'import pyarrow as pa\n'), ((3899, 3936), 'pyarrow.Table.from_arrays', 'pa.Table.from_arrays', (['data', 'col_names'], {}), '(data, col_names)\n', (3919, 3936), True, 'import pyarrow as pa\n'), ((3946, 4007), 'pyarrow.parquet.write_to_dataset', 'pq.write_to_dataset', (['error_tbl', 'path'], {'partition_cols': "['stat']"}), "(error_tbl, path, partition_cols=['stat'])\n", (3965, 4007), True, 'from pyarrow import parquet as pq\n'), ((4512, 4545), 'pyarrow.Table.from_batches', 'pa.Table.from_batches', (['batch_list'], {}), '(batch_list)\n', (4533, 4545), True, 'import pyarrow as pa\n'), ((4555, 4610), 'pyarrow.parquet.write_to_dataset', 'pq.write_to_dataset', (['tbl', 'path'], {'partition_cols': "['stat']"}), "(tbl, path, partition_cols=['stat'])\n", (4574, 4610), True, 'from pyarrow import parquet as pq\n'), ((3857, 3876), 'pyarrow.array', 'pa.array', (['test_stat'], {}), '(test_stat)\n', (3865, 3876), True, 'import pyarrow as pa\n'), ((2561, 2598), 'numpy.full', 'np.full', (['test_stat.shape[0]', 'fill_col'], {}), '(test_stat.shape[0], fill_col)\n', (2568, 2598), True, 'import numpy as np\n'), ((2626, 2663), 'numpy.full', 'np.full', (['test_stat.shape[0]', 'test_num'], {}), '(test_stat.shape[0], test_num)\n', (2633, 2663), True, 'import numpy as np\n'), ((2691, 2729), 'numpy.full', 'np.full', (['test_stat.shape[0]', 'epoch_num'], {}), '(test_stat.shape[0], epoch_num)\n', (2698, 2729), True, 'import numpy as np\n'), ((2808, 2833), 'pyarrow.array', 'pa.array', (['test_stat[:, j]'], {}), '(test_stat[:, j])\n', (2816, 2833), True, 'import 
pyarrow as pa\n'), ((3671, 3708), 'numpy.full', 'np.full', (['test_stat.shape[0]', 'fill_col'], {}), '(test_stat.shape[0], fill_col)\n', (3678, 3708), True, 'import numpy as np\n'), ((3736, 3773), 'numpy.full', 'np.full', (['test_stat.shape[0]', 'test_num'], {}), '(test_stat.shape[0], test_num)\n', (3743, 3773), True, 'import numpy as np\n'), ((3801, 3833), 'numpy.arange', 'np.arange', (['(0)', 'test_stat.shape[0]'], {}), '(0, test_stat.shape[0])\n', (3810, 3833), True, 'import numpy as np\n'), ((5114, 5152), 'pyarrow.parquet.read_table', 'pq.read_table', (['path'], {'nthreads': 'nthreads'}), '(path, nthreads=nthreads)\n', (5127, 5152), True, 'from pyarrow import parquet as pq\n')] |
"""
1. Crop( _crop1 ~ _crop4 )
2. Random RGB Scaling( _rgb )
3. weak Gaussian Blur( _blur )
4. rotation( _rot1 ~ _rot2 )
"""
import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
import copy
import random
def filtering(img, number):
    """Apply one of six random perturbations chosen by *number* (0-5).

    0/1: weak/strong Gaussian blur; 2/3: gamma adjustment (>1 / <1);
    4/5: light/heavy salt-and-pepper noise.  Any other number returns None
    implicitly (callers draw number from randint(0, 5)).
    """
    if number == 0:
        return cv2.GaussianBlur(img, ksize=(13, 13), sigmaX=0)
    elif number == 1:
        return cv2.GaussianBlur(img, ksize=(25, 25), sigmaX=0)
    elif number == 2:
        gamma = random.randint(11, 16) / 10  # gamma in [1.1, 1.6]
        return adjust_gamma(img, gamma)
    elif number == 3:
        gamma = random.randint(6, 9) / 10  # gamma in [0.6, 0.9]
        return adjust_gamma(img, gamma)
    elif number == 4:
        noise_p = random.randint(0, 7) / 100  # p in [0.00, 0.07]
        return salt_and_pepper(img, p=noise_p)
    elif number == 5:
        noise_p = random.randint(8, 15) / 100  # p in [0.08, 0.15]
        return salt_and_pepper(img, p=noise_p)
# def dropout(image, count, rate_row, rate_col):
# output = np.zeros(image.shape, np.uint8)
# row, col, ch = image.shape
# width = int(row * rate_row)
# height = int(col * rate_col)
# print(width, height)
# for i in range(count):
# random.seed(random.random())
# rand_x = random.randint(1, row)
# rand_y = random.randint(1, col)
#
# xmin = int(rand_x - (width / 2))
# ymin = int(rand_y - (height / 2))
# xmax = int(rand_x + (width / 2))
# ymax = int(rand_y + (height / 2))
# print(xmin, ymin, xmax, ymax)
#
# for r in range(image.shape[0]):
# for c in range(image.shape[1]):
# if r >= xmin and r <= xmax and c >= ymin and c <= ymax:
# output[r, c, :] = 0
# else:
# output[r, c, :] = image[r, c, :]
# return output
def salt_and_pepper(image, p):
    """Apply salt-and-pepper noise to *image*.

    Each pixel (iterated over the first two axes) is set to 0 with
    probability p, to 255 with roughly probability p, and otherwise keeps
    its original value.  The result is always uint8.
    """
    noisy = np.zeros(image.shape, np.uint8)
    salt_cutoff = 1 - p
    height = image.shape[0]
    width = image.shape[1]
    for r in range(height):
        for c in range(width):
            roll = random.random()
            if roll < p:
                noisy[r][c] = 0          # pepper
            elif roll > salt_cutoff:
                noisy[r][c] = 255        # salt
            else:
                noisy[r][c] = image[r][c]
    return noisy
def adjust_gamma(image, gamma=1.0):
    """Gamma-correct *image* through a 256-entry lookup table.

    With this formulation gamma > 1 brightens and gamma < 1 darkens,
    since each intensity v maps to 255 * (v/255) ** (1/gamma).
    """
    exponent = 1.0 / gamma
    lut = np.array([(v / 255.0) ** exponent * 255
                    for v in range(256)]).astype("uint8")
    # apply gamma correction using the lookup table
    return cv2.LUT(image, lut)
def augment_for_classification(image_path, new_image_path, crop_rate):
    """Augment every image in *image_path* and write results to *new_image_path*.

    For each source image this writes two 90-degree rotations, three flips,
    and (when crop_rate != 0) four corner crops covering a (1 - crop_rate)
    fraction of each axis.  Every output is passed through a random filter
    (see filtering) before being saved as "<stem><suffix>.jpg".
    """
    for fname in os.listdir(image_path):
        img = cv2.imread(image_path + fname)  # Original Image
        col, row, ch = img.shape
        # Suffix/variant pairs in the same order as the original writes,
        # so the random filter sequence is unchanged.
        variants = [
            ("_rot_1", cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)),
            ("_rot_2", cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)),
            ("_flip_1", cv2.flip(img, -1)),
            ("_flip_2", cv2.flip(img, 1)),
            ("_flip_3", cv2.flip(img, 0)),
        ]
        if crop_rate != 0:
            y_hi = int(col * (1 - crop_rate))
            x_hi = int(row * (1 - crop_rate))
            y_lo = int(col * crop_rate)
            x_lo = int(row * crop_rate)
            variants.extend([
                ("_crop_1", copy.deepcopy(img[0:y_hi, 0:x_hi, :])),
                ("_crop_2", copy.deepcopy(img[0:y_hi, x_lo:row, :])),
                ("_crop_3", copy.deepcopy(img[y_lo:col, 0:x_hi, :])),
                ("_crop_4", copy.deepcopy(img[y_lo:col, x_lo:row, :])),
            ])
        stem = fname[:-4]
        for suffix, variant in variants:
            filtered = filtering(variant, random.randint(0, 5))
            cv2.imwrite(new_image_path + stem + suffix + ".jpg", filtered)
if __name__ == "__main__":
image_path = "C:\\dataset\\MyDataset\\classifier_colon\\1\\"
new_image_path = "C:\\dataset\\MyDataset\\classifier_colon\\new1\\"
crop_rate = 0.15
augment_for_classification(image_path=image_path,
new_image_path=new_image_path,
crop_rate=crop_rate)
exit(0)
| [
"cv2.GaussianBlur",
"random.randint",
"cv2.rotate",
"cv2.imwrite",
"numpy.zeros",
"cv2.imread",
"random.random",
"cv2.LUT",
"numpy.arange",
"cv2.flip",
"os.listdir"
] | [((1760, 1791), 'numpy.zeros', 'np.zeros', (['image.shape', 'np.uint8'], {}), '(image.shape, np.uint8)\n', (1768, 1791), True, 'import numpy as np\n'), ((2375, 2396), 'cv2.LUT', 'cv2.LUT', (['image', 'table'], {}), '(image, table)\n', (2382, 2396), False, 'import cv2\n'), ((2491, 2513), 'os.listdir', 'os.listdir', (['image_path'], {}), '(image_path)\n', (2501, 2513), False, 'import os\n'), ((308, 355), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img'], {'ksize': '(13, 13)', 'sigmaX': '(0)'}), '(img, ksize=(13, 13), sigmaX=0)\n', (324, 355), False, 'import cv2\n'), ((393, 440), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img'], {'ksize': '(25, 25)', 'sigmaX': '(0)'}), '(img, ksize=(25, 25), sigmaX=0)\n', (409, 440), False, 'import cv2\n'), ((2574, 2614), 'cv2.imread', 'cv2.imread', (['(image_path + image_list[num])'], {}), '(image_path + image_list[num])\n', (2584, 2614), False, 'import cv2\n'), ((2736, 2776), 'cv2.rotate', 'cv2.rotate', (['img', 'cv2.ROTATE_90_CLOCKWISE'], {}), '(img, cv2.ROTATE_90_CLOCKWISE)\n', (2746, 2776), False, 'import cv2\n'), ((2797, 2844), 'cv2.rotate', 'cv2.rotate', (['img', 'cv2.ROTATE_90_COUNTERCLOCKWISE'], {}), '(img, cv2.ROTATE_90_COUNTERCLOCKWISE)\n', (2807, 2844), False, 'import cv2\n'), ((2866, 2883), 'cv2.flip', 'cv2.flip', (['img', '(-1)'], {}), '(img, -1)\n', (2874, 2883), False, 'import cv2\n'), ((2905, 2921), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (2913, 2921), False, 'import cv2\n'), ((2943, 2959), 'cv2.flip', 'cv2.flip', (['img', '(0)'], {}), '(img, 0)\n', (2951, 2959), False, 'import cv2\n'), ((3087, 3172), 'cv2.imwrite', 'cv2.imwrite', (["(new_image_path + image_list[num][:-4] + '_rot_1' + '.jpg')", 'img_rot1'], {}), "(new_image_path + image_list[num][:-4] + '_rot_1' + '.jpg', img_rot1\n )\n", (3098, 3172), False, 'import cv2\n'), ((3241, 3326), 'cv2.imwrite', 'cv2.imwrite', (["(new_image_path + image_list[num][:-4] + '_rot_2' + '.jpg')", 'img_rot2'], {}), "(new_image_path + image_list[num][:-4] + 
'_rot_2' + '.jpg', img_rot2\n )\n", (3252, 3326), False, 'import cv2\n'), ((3397, 3483), 'cv2.imwrite', 'cv2.imwrite', (["(new_image_path + image_list[num][:-4] + '_flip_1' + '.jpg')", 'img_flip1'], {}), "(new_image_path + image_list[num][:-4] + '_flip_1' + '.jpg',\n img_flip1)\n", (3408, 3483), False, 'import cv2\n'), ((3555, 3641), 'cv2.imwrite', 'cv2.imwrite', (["(new_image_path + image_list[num][:-4] + '_flip_2' + '.jpg')", 'img_flip2'], {}), "(new_image_path + image_list[num][:-4] + '_flip_2' + '.jpg',\n img_flip2)\n", (3566, 3641), False, 'import cv2\n'), ((3713, 3799), 'cv2.imwrite', 'cv2.imwrite', (["(new_image_path + image_list[num][:-4] + '_flip_3' + '.jpg')", 'img_flip3'], {}), "(new_image_path + image_list[num][:-4] + '_flip_3' + '.jpg',\n img_flip3)\n", (3724, 3799), False, 'import cv2\n'), ((1908, 1923), 'random.random', 'random.random', ([], {}), '()\n', (1921, 1923), False, 'import random\n'), ((3056, 3076), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (3070, 3076), False, 'import random\n'), ((3210, 3230), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (3224, 3230), False, 'import random\n'), ((3366, 3386), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (3380, 3386), False, 'import random\n'), ((3524, 3544), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (3538, 3544), False, 'import random\n'), ((3682, 3702), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (3696, 3702), False, 'import random\n'), ((4321, 4407), 'cv2.imwrite', 'cv2.imwrite', (["(new_image_path + image_list[num][:-4] + '_crop_1' + '.jpg')", 'img_crop1'], {}), "(new_image_path + image_list[num][:-4] + '_crop_1' + '.jpg',\n img_crop1)\n", (4332, 4407), False, 'import cv2\n'), ((4487, 4573), 'cv2.imwrite', 'cv2.imwrite', (["(new_image_path + image_list[num][:-4] + '_crop_2' + '.jpg')", 'img_crop2'], {}), "(new_image_path + image_list[num][:-4] + '_crop_2' + '.jpg',\n 
img_crop2)\n", (4498, 4573), False, 'import cv2\n'), ((4653, 4739), 'cv2.imwrite', 'cv2.imwrite', (["(new_image_path + image_list[num][:-4] + '_crop_3' + '.jpg')", 'img_crop3'], {}), "(new_image_path + image_list[num][:-4] + '_crop_3' + '.jpg',\n img_crop3)\n", (4664, 4739), False, 'import cv2\n'), ((4819, 4905), 'cv2.imwrite', 'cv2.imwrite', (["(new_image_path + image_list[num][:-4] + '_crop_4' + '.jpg')", 'img_crop4'], {}), "(new_image_path + image_list[num][:-4] + '_crop_4' + '.jpg',\n img_crop4)\n", (4830, 4905), False, 'import cv2\n'), ((496, 518), 'random.randint', 'random.randint', (['(11)', '(16)'], {}), '(11, 16)\n', (510, 518), False, 'import random\n'), ((580, 600), 'random.randint', 'random.randint', (['(6)', '(9)'], {}), '(6, 9)\n', (594, 600), False, 'import random\n'), ((4286, 4306), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (4300, 4306), False, 'import random\n'), ((4452, 4472), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (4466, 4472), False, 'import random\n'), ((4618, 4638), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (4632, 4638), False, 'import random\n'), ((4784, 4804), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (4798, 4804), False, 'import random\n'), ((667, 687), 'random.randint', 'random.randint', (['(0)', '(7)'], {}), '(0, 7)\n', (681, 687), False, 'import random\n'), ((755, 776), 'random.randint', 'random.randint', (['(8)', '(15)'], {}), '(8, 15)\n', (769, 776), False, 'import random\n'), ((2274, 2291), 'numpy.arange', 'np.arange', (['(0)', '(256)'], {}), '(0, 256)\n', (2283, 2291), True, 'import numpy as np\n')] |
import mmcv
import numpy as np
from pycocotools_local.coco import *
import os.path as osp
from .utils import to_tensor, random_scale
from mmcv.parallel import DataContainer as DC
from .custom import CustomDataset
from .forkedpdb import ForkedPdb
from skimage.transform import resize
class Coco3D2ScalesDataset(CustomDataset):
    # Bug fix: ('microbleed') is just the string 'microbleed' — parentheses
    # alone do not make a tuple.  A one-element tuple needs the trailing
    # comma so class-name lookups index whole names, not single characters.
    CLASSES = ('microbleed',)
def load_annotations(self, ann_file, ann_file_2=None):
if ann_file_2 is None:
ann_file_2 = ann_file
self.coco = COCO(ann_file)
self.coco_2 = COCO(ann_file_2)
self.cat_ids = self.coco.getCatIds()
self.cat_ids_2 = self.coco_2.getCatIds()
self.cat2label = {
cat_id: i + 1
for i, cat_id in enumerate(self.cat_ids)
}
self.cat2label_2 = {
cat_id: i + 1
for i, cat_id in enumerate(self.cat_ids_2)
}
self.img_ids = self.coco.getImgIds()
self.img_ids_2 = self.coco_2.getImgIds()
img_infos, img_infos_2 = [], []
for i in self.img_ids:
info = self.coco.loadImgs([i])[0]
info['filename'] = info['file_name']
img_infos.append(info)
for i in self.img_ids_2:
info = self.coco_2.loadImgs([i])[0]
info['filename'] = info['file_name']
img_infos_2.append(info)
return img_infos, img_infos_2
def get_ann_infos(self, idx):
img_id = self.img_infos[idx]['id']
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
ann_info = self.coco.loadAnns(ann_ids)
img_id_2 = self.img_infos_2[idx]['id']
ann_ids_2 = self.coco_2.getAnnIds(imgIds=[img_id_2])
ann_info_2 = self.coco_2.loadAnns(ann_ids_2)
# return self._parse_ann_info(ann_info, False), self._parse_ann_info(ann_info_2, self.with_mask)
ann = self._parse_ann_info(ann_info, idx, self.with_mask, isOriginalScale=True)
ann_2 = self._parse_ann_info(ann_info_2, idx, self.with_mask, isOriginalScale=False)
return ann, ann_2
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
for i, img_info in enumerate(self.img_infos):
if self.img_ids[i] not in ids_with_ann:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_inds_2 = []
ids_with_ann_2 = set(_['image_id'] for _ in self.coco_2.anns.values())
for i, img_info in enumerate(self.img_infos_2):
if self.img_ids_2[i] not in ids_with_ann_2:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds_2.append(i)
return valid_inds, valid_inds_2
    def _parse_ann_info(self, ann_info, idx, with_mask=True, isOriginalScale=False):
        """Parse bbox and mask annotation.
        Args:
            ann_info (list[dict]): Annotation info of an image.
            idx (int): Dataset index of the image; used as the key into the
                in-memory mask cache.
            with_mask (bool): Whether to parse mask annotations.
            isOriginalScale (bool): True when parsing the original-scale
                annotations; masks are only loaded at the original scale.
        Returns:
            dict: A dict containing the following keys: bboxes, bboxes_ignore,
                labels, masks, mask_polys, poly_lens.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        # Two formats are provided.
        # 1. mask: a binary map of the same size of the image.
        # 2. polys: each mask consists of one or several polys, each poly is a
        # list of float.
        if with_mask:
            gt_masks = []
        for i, ann in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            # 3D box: (x, y, width, height, z, depth); converted below to
            # inclusive corners [x1, y1, x2, y2, zmin, zmax].
            x1, y1, w, h, z1, depth = ann['bbox']
            if ann['area'] <= 0 or w < 1 or h < 1 or depth < 1:
                continue
            bbox = [x1, y1, x1 + w - 1, y1 + h - 1, z1, z1 + depth - 1]
            if ann['iscrowd']:
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
            if with_mask:
                # error: ValueError: Cannot load file containing pickled data when allow_pickle=False
                # Three cache states when isOriginalScale:
                #  - caching enabled, not yet loaded: load from disk, binarize
                #    against this annotation's label, and store in the cache;
                #  - caching enabled, already loaded: read back from the cache;
                #  - caching disabled (None): load and binarize every time.
                # NOTE(review): np.load(..., allow_pickle=True) on annotation
                # paths is unsafe for untrusted data — confirm the source.
                if isOriginalScale:
                    if self.load_mask_from_memory and self.seg_masks_memory_isloaded[idx] is False:
                        mask = np.load(ann['segmentation'], allow_pickle=True)
                        mask[mask != ann['segmentation_label']] = 0
                        mask[mask == ann['segmentation_label']] = 1
                        self.seg_masks_memory[idx][i, :, :, :] = mask
                    elif self.load_mask_from_memory and self.seg_masks_memory_isloaded[idx] is True:
                        mask = self.seg_masks_memory[idx][i, :, :, :]
                    elif self.load_mask_from_memory is None:
                        mask = np.load(ann['segmentation'], allow_pickle=True)
                        mask[mask != ann['segmentation_label']] = 0
                        mask[mask == ann['segmentation_label']] = 1
                else:
                    # Second-scale masks are not loaded here.
                    mask = None
                gt_masks.append(mask)
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            # Empty annotation set: keep the (0, 6) box shape so downstream
            # code can still concatenate/transform.
            gt_bboxes = np.zeros((0, 6), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 6), dtype=np.float32)
        ann = dict(
            bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore)
        if with_mask:
            ann['masks'] = gt_masks
            # Mark the cache as populated after the first full pass.
            if self.load_mask_from_memory:
                self.seg_masks_memory_isloaded[idx] = True
        return ann
def find_indices_of_slice(self, gt_bboxes, slice_num):
indices = []
for i in range(len(gt_bboxes)):
_, _, _, _, zmin, zmax = gt_bboxes[i]
if zmin <= slice_num and slice_num <= zmax:
indices.append(i)
return indices
def insert_to_dict(self, data, key, tensors):
if key in data:
data[key].append(tensors)
else:
data[key] = [tensors]
    def prepare_train_img(self, idx):
        """Build the training sample for image *idx* at two scales.

        Loads the same 3D volume at both resolutions, applies augmentation,
        converts every slice to a 3-channel image, and packs images, metas,
        boxes, labels, masks, and (optionally) precomputed proposals into a
        DataContainer dict.  Returns None when either scale has no valid
        ground-truth box.
        """
        scale_factor = 1.0
        flip = False
        img_info = self.img_infos[idx]
        img_info_2 = self.img_infos_2[idx]
        # ForkedPdb().set_trace()
        # ensure it's the same image but at different scales
        assert img_info['filename'] == img_info_2['filename']
        img_file_path = osp.join(self.img_prefix, img_info['filename'])
        img_file_path_2 = osp.join(self.img_prefix_2, img_info_2['filename'])
        # NOTE(review): allow_pickle=True on image paths is unsafe for
        # untrusted data — confirm the source of these .npy volumes.
        orig_img = np.load(img_file_path, allow_pickle=True)
        orig_img_2 = np.load(img_file_path_2, allow_pickle=True)
        # Ratio between the two stored resolutions (e.g. 1.5 for a 1.5x set).
        upscale_factor = orig_img_2.shape[0]/orig_img.shape[0]
        total_num_slices = orig_img.shape[2]
        ann, ann_2 = self.get_ann_infos(idx)
        gt_bboxes = ann['bboxes']
        gt_bboxes_2 = ann_2['bboxes']
        gt_labels = ann['labels']
        gt_labels_2 = ann_2['labels']
        # Prefer original-scale masks when both are present; fall back to the
        # second-scale masks when only those exist.
        if 'masks' not in ann and 'masks' in ann_2:
            gt_masks = ann_2['masks']
            gt_masks_2 = None
        elif 'masks' in ann and 'masks' in ann_2:
            gt_masks = ann['masks']
            gt_masks_2 = ann_2['masks']
        else:
            gt_masks = None
            gt_masks_2 = None
        # skip the image if there is no valid gt bbox
        if len(gt_bboxes) == 0 or len(gt_bboxes_2) == 0:
            return None
        # extra augmentation
        if self.extra_aug is not None:
            # orig_img, gt_bboxes, gt_labels, _ = self.extra_aug(orig_img, gt_bboxes, gt_labels, None)
            # img_scale = (orig_img.shape[0], orig_img.shape[1]) # disable scaling...
            # orig_img_2, gt_bboxes_2, gt_labels_2, gt_masks = self.extra_aug(orig_img_2, gt_bboxes_2, gt_labels_2, gt_masks)
            # img_scale_2 = (orig_img_2.shape[0], orig_img_2.shape[1]) # disable scaling...
            orig_img, gt_bboxes, gt_labels, gt_masks = self.extra_aug(orig_img, gt_bboxes, gt_labels, gt_masks)
            img_scale = (orig_img.shape[0], orig_img.shape[1]) # disable scaling...
            # upscale original scale patch so that during training original scale data and upscaled data are the same
            # patch but with different scale
            orig_img_2 = resize(orig_img, (orig_img.shape[0]*upscale_factor, orig_img.shape[1]*upscale_factor, orig_img.shape[2]*upscale_factor))
            # TODO: Better way to toggle on/off gt_masks_2. Currently it is turned off.
            # if gt_masks is not None:
            #     gt_masks_2 = []
            #     for cur_mask in gt_masks:
            #         gt_masks_2.append(resize(cur_mask, (cur_mask.shape[0]*upscale_factor, cur_mask.shape[1]*upscale_factor, cur_mask.shape[2]*upscale_factor)))
            #     gt_masks_2 = np.array(gt_masks_2)
            gt_masks_2 = None
            # Second-scale targets are derived from the augmented original
            # scale so both branches see the same patch.
            gt_bboxes_2 = gt_bboxes * upscale_factor
            gt_labels_2 = gt_labels
            # original code
            # orig_img_2, gt_bboxes_2, gt_labels_2, gt_masks_2 = self.extra_aug(orig_img_2, gt_bboxes_2, gt_labels_2, gt_masks_2)
            img_scale_2 = (orig_img_2.shape[0], orig_img_2.shape[1]) # disable scaling...
        else:
            # randomly sample a scale
            img_scale = random_scale(self.img_scales, self.multiscale_mode)
            img_scale_2 = random_scale(self.img_scales_2, self.multiscale_mode)
        # Recompute after augmentation may have changed the volumes.
        total_num_slices = orig_img.shape[2]
        total_num_slices_2 = orig_img_2.shape[2]
        data = None
        # Original-scale slices: transform each 2D slice; the meta dict is
        # built once from the first transformed slice.
        for cur_slice in range(total_num_slices):
            img = orig_img[:,:,cur_slice]
            # convert Greyscale to RGB
            img = np.repeat(img[:, :, np.newaxis], 3, axis=2)
            img, img_shape, pad_shape, _ = self.img_transform(
                img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
            img = img.copy()
            if data is None:
                ori_shape = (img.shape[1], img.shape[2], 3)
                img_shape = (*img_shape, total_num_slices)
                pad_shape = (*pad_shape, total_num_slices)
                img_meta = dict(
                    ori_shape=ori_shape,
                    img_shape=img_shape,
                    pad_shape=pad_shape,
                    scale_factor=scale_factor,
                    flip=flip,
                    image_id=img_info['id'])
                data = dict(img_meta=DC(img_meta, cpu_only=True))
            self.insert_to_dict(data, 'imgs', img)
        first_iter = True
        # Second-scale slices: same procedure against img_scale_2.
        for cur_slice in range(total_num_slices_2):
            img = orig_img_2[:,:,cur_slice]
            # convert Greyscale to RGB
            img = np.repeat(img[:, :, np.newaxis], 3, axis=2)
            img, img_shape, pad_shape, _ = self.img_transform(
                img, img_scale_2, flip, keep_ratio=self.resize_keep_ratio)
            img = img.copy()
            if first_iter:
                ori_shape = (img.shape[1], img.shape[2], 3)
                img_shape_2 = (*img_shape, total_num_slices_2)
                pad_shape = (*pad_shape, total_num_slices_2)
                img_meta = dict(
                    ori_shape=ori_shape,
                    img_shape=img_shape_2,
                    pad_shape=pad_shape,
                    scale_factor=scale_factor,
                    flip=flip,
                    image_id=img_info_2['id'])
                data['img_meta_2'] = DC(img_meta, cpu_only=True)
                first_iter = False
            self.insert_to_dict(data, 'imgs_2', img)
        gt_bboxes = self.bbox_transform(gt_bboxes, (*img_shape, total_num_slices), scale_factor, flip)
        gt_bboxes_2 = self.bbox_transform(gt_bboxes_2, (*img_shape_2, total_num_slices_2), scale_factor, flip)
        data['gt_bboxes'] = DC(to_tensor(gt_bboxes))
        data['gt_bboxes_2'] = DC(to_tensor(gt_bboxes_2))
        if self.with_label:
            data['gt_labels'] = DC(to_tensor(gt_labels))
            data['gt_labels_2'] = DC(to_tensor(gt_labels_2))
        if gt_masks is not None:
            gt_masks = self.mask_transform(gt_masks, pad_shape, scale_factor, flip, is3D=True)
            # (N, H, W, D) -> (N, D, H, W): depth becomes the slice axis.
            gt_masks = gt_masks.transpose(0,3,1,2)
            data['gt_masks'] = DC(to_tensor(gt_masks.astype(np.uint8)), cpu_only=True)
        if gt_masks_2 is not None:
            gt_masks_2 = self.mask_transform(gt_masks_2, pad_shape, scale_factor, flip, is3D=True)
            gt_masks_2 = gt_masks_2.transpose(0,3,1,2)
            data['gt_masks_2'] = DC(to_tensor(gt_masks_2.astype(np.uint8)), cpu_only=True)
        # Stack per-slice images into (C, D, H, W) volumes.
        imgs = np.array(data['imgs'])
        imgs = imgs.transpose(1, 0, 2, 3)
        data['imgs'] = DC(to_tensor(imgs), stack=True)
        imgs_2 = np.array(data['imgs_2'])
        imgs_2 = imgs_2.transpose(1, 0, 2, 3)
        data['imgs_2'] = DC(to_tensor(imgs_2), stack=True)
        if self.with_precomp_proposals:
            # load unsupervised learning's proposals
            # NOTE(review): hard-coded pickle paths relative to the working
            # directory; confirm these files ship with the training setup.
            pp = np.load('precomputed-proposals.pickle', allow_pickle=True)
            pp_2 = np.load('precomputed-proposals1.5.pickle', allow_pickle=True)
            pp = pp[img_info['filename'].split('.')[0]]
            pp_2 = pp_2[img_info_2['filename'].split('.')[0]]
            data['pp'] = DC(to_tensor(pp))
            data['pp_2'] = DC(to_tensor(pp_2))
        return data
def prepare_test_img(self, idx):
"""Prepare an image for testing (multi-scale and flipping)"""
img_info = self.img_infos[idx]
# find corresponding img_info for 1.5x dataset
index_2 = -1
for i, cur_info in enumerate(self.img_infos_2):
if cur_info['filename'] == img_info['filename']:
index_2 = i
img_info_2 = self.img_infos_2[index_2]
patient_imgs = np.load((osp.join(self.img_prefix, img_info['filename'])), allow_pickle=True)
patient_imgs_2 = np.load((osp.join(self.img_prefix_2, img_info_2['filename'])), allow_pickle=True)
# scale_factor = 1.0 / (img_info_2['width'] / img_info['width']) # scale up to img_info_2's resolution
scale_factor = 1 # remain the same
scale_factor_2 = (patient_imgs_2.shape[0] / patient_imgs.shape[0]) # scale up to img_info_2's resolution
total_num_slices = patient_imgs.shape[2]
total_num_slices_2 = patient_imgs_2.shape[2]
def prepare_single(img, scale, flip, img_info, cur_total_num_slices, scale_factor, proposal=None):
_img, img_shape, pad_shape, _ = self.img_transform(
img, scale, flip, keep_ratio=self.resize_keep_ratio)
# old code without resizing depth
# _img, img_shape, pad_shape, scale_factor = self.img_transform(
# img, scale, flip, keep_ratio=self.resize_keep_ratio)
img_shape = (*img_shape, cur_total_num_slices)
pad_shape = (*pad_shape, cur_total_num_slices)
_img_meta = dict(
ori_shape=(_img.shape[1], _img.shape[2], 3),
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=flip)
if proposal is not None:
breakpoint()
if proposal.shape[1] == 5:
score = proposal[:, 4, None]
proposal = proposal[:, :4]
else:
score = None
_proposal = self.bbox_transform(proposal, img_shape,
scale_factor, flip)
_proposal = np.hstack(
[_proposal, score]) if score is not None else _proposal
_proposal = to_tensor(_proposal)
else:
_proposal = None
return _img, _img_meta, _proposal
proposal = None
imgs = []
img_metas = []
for cur_slice in range(total_num_slices):
img = patient_imgs[:,:,cur_slice]
# convert Greyscale to RGB
img = np.repeat(img[:, :, np.newaxis], 3, axis=2)
for scale in self.img_scales:
_img, _img_meta, _proposal = prepare_single(
img, scale, False, img_info, total_num_slices, scale_factor)
imgs.append(_img)
if len(img_metas) == 0:
img_metas.append(DC(_img_meta, cpu_only=True))
imgs_2 = []
img_metas_2 = []
for cur_slice in range(total_num_slices_2):
img_2 = patient_imgs_2[:,:,cur_slice]
# convert Greyscale to RGB
img_2 = np.repeat(img_2[:, :, np.newaxis], 3, axis=2)
for scale in self.img_scales_2:
_img, _img_meta, _proposal = prepare_single(
img_2, scale, False, img_info_2, total_num_slices_2, scale_factor_2)
imgs_2.append(_img)
if len(img_metas_2) == 0:
img_metas_2.append(DC(_img_meta, cpu_only=True))
imgs = np.array(imgs)
imgs_2 = np.array(imgs_2)
imgs = np.transpose(imgs, (1, 0, 2, 3))
imgs_2 = np.transpose(imgs_2, (1, 0, 2, 3))
assert imgs.shape[0] == 3 and imgs_2.shape[0] == 3# make sure [0] is the number of channels
data = dict(imgs=to_tensor(imgs), img_meta=img_metas, imgs_2=to_tensor(imgs_2), img_meta_2=img_metas_2)
if self.with_precomp_proposals:
# load unsupervised learning's proposals
pp = np.load('precomputed-proposals.pickle', allow_pickle=True)
pp_2 = np.load('precomputed-proposals1.5.pickle', allow_pickle=True)
pp = pp[img_info['filename'].split('.')[0]]
pp_2 = pp_2[img_info_2['filename'].split('.')[0]]
data['pp'] = to_tensor(pp)
data['pp_2'] = to_tensor(pp_2)
return data | [
"numpy.load",
"numpy.transpose",
"numpy.zeros",
"numpy.hstack",
"numpy.array",
"skimage.transform.resize",
"mmcv.parallel.DataContainer",
"os.path.join",
"numpy.repeat"
] | [((6808, 6855), 'os.path.join', 'osp.join', (['self.img_prefix', "img_info['filename']"], {}), "(self.img_prefix, img_info['filename'])\n", (6816, 6855), True, 'import os.path as osp\n'), ((6882, 6933), 'os.path.join', 'osp.join', (['self.img_prefix_2', "img_info_2['filename']"], {}), "(self.img_prefix_2, img_info_2['filename'])\n", (6890, 6933), True, 'import os.path as osp\n'), ((6953, 6994), 'numpy.load', 'np.load', (['img_file_path'], {'allow_pickle': '(True)'}), '(img_file_path, allow_pickle=True)\n', (6960, 6994), True, 'import numpy as np\n'), ((7016, 7059), 'numpy.load', 'np.load', (['img_file_path_2'], {'allow_pickle': '(True)'}), '(img_file_path_2, allow_pickle=True)\n', (7023, 7059), True, 'import numpy as np\n'), ((12966, 12988), 'numpy.array', 'np.array', (["data['imgs']"], {}), "(data['imgs'])\n", (12974, 12988), True, 'import numpy as np\n'), ((13104, 13128), 'numpy.array', 'np.array', (["data['imgs_2']"], {}), "(data['imgs_2'])\n", (13112, 13128), True, 'import numpy as np\n'), ((17391, 17405), 'numpy.array', 'np.array', (['imgs'], {}), '(imgs)\n', (17399, 17405), True, 'import numpy as np\n'), ((17423, 17439), 'numpy.array', 'np.array', (['imgs_2'], {}), '(imgs_2)\n', (17431, 17439), True, 'import numpy as np\n'), ((17456, 17488), 'numpy.transpose', 'np.transpose', (['imgs', '(1, 0, 2, 3)'], {}), '(imgs, (1, 0, 2, 3))\n', (17468, 17488), True, 'import numpy as np\n'), ((17506, 17540), 'numpy.transpose', 'np.transpose', (['imgs_2', '(1, 0, 2, 3)'], {}), '(imgs_2, (1, 0, 2, 3))\n', (17518, 17540), True, 'import numpy as np\n'), ((5318, 5355), 'numpy.array', 'np.array', (['gt_bboxes'], {'dtype': 'np.float32'}), '(gt_bboxes, dtype=np.float32)\n', (5326, 5355), True, 'import numpy as np\n'), ((5380, 5415), 'numpy.array', 'np.array', (['gt_labels'], {'dtype': 'np.int64'}), '(gt_labels, dtype=np.int64)\n', (5388, 5415), True, 'import numpy as np\n'), ((5454, 5488), 'numpy.zeros', 'np.zeros', (['(0, 6)'], {'dtype': 'np.float32'}), '((0, 6), 
dtype=np.float32)\n', (5462, 5488), True, 'import numpy as np\n'), ((5513, 5541), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (5521, 5541), True, 'import numpy as np\n'), ((5603, 5647), 'numpy.array', 'np.array', (['gt_bboxes_ignore'], {'dtype': 'np.float32'}), '(gt_bboxes_ignore, dtype=np.float32)\n', (5611, 5647), True, 'import numpy as np\n'), ((5693, 5727), 'numpy.zeros', 'np.zeros', (['(0, 6)'], {'dtype': 'np.float32'}), '((0, 6), dtype=np.float32)\n', (5701, 5727), True, 'import numpy as np\n'), ((8684, 8814), 'skimage.transform.resize', 'resize', (['orig_img', '(orig_img.shape[0] * upscale_factor, orig_img.shape[1] * upscale_factor, \n orig_img.shape[2] * upscale_factor)'], {}), '(orig_img, (orig_img.shape[0] * upscale_factor, orig_img.shape[1] *\n upscale_factor, orig_img.shape[2] * upscale_factor))\n', (8690, 8814), False, 'from skimage.transform import resize\n'), ((10069, 10112), 'numpy.repeat', 'np.repeat', (['img[:, :, np.newaxis]', '(3)'], {'axis': '(2)'}), '(img[:, :, np.newaxis], 3, axis=2)\n', (10078, 10112), True, 'import numpy as np\n'), ((11063, 11106), 'numpy.repeat', 'np.repeat', (['img[:, :, np.newaxis]', '(3)'], {'axis': '(2)'}), '(img[:, :, np.newaxis], 3, axis=2)\n', (11072, 11106), True, 'import numpy as np\n'), ((13345, 13403), 'numpy.load', 'np.load', (['"""precomputed-proposals.pickle"""'], {'allow_pickle': '(True)'}), "('precomputed-proposals.pickle', allow_pickle=True)\n", (13352, 13403), True, 'import numpy as np\n'), ((13423, 13484), 'numpy.load', 'np.load', (['"""precomputed-proposals1.5.pickle"""'], {'allow_pickle': '(True)'}), "('precomputed-proposals1.5.pickle', allow_pickle=True)\n", (13430, 13484), True, 'import numpy as np\n'), ((14167, 14214), 'os.path.join', 'osp.join', (['self.img_prefix', "img_info['filename']"], {}), "(self.img_prefix, img_info['filename'])\n", (14175, 14214), True, 'import os.path as osp\n'), ((14270, 14321), 'os.path.join', 'osp.join', (['self.img_prefix_2', 
"img_info_2['filename']"], {}), "(self.img_prefix_2, img_info_2['filename'])\n", (14278, 14321), True, 'import os.path as osp\n'), ((16385, 16428), 'numpy.repeat', 'np.repeat', (['img[:, :, np.newaxis]', '(3)'], {'axis': '(2)'}), '(img[:, :, np.newaxis], 3, axis=2)\n', (16394, 16428), True, 'import numpy as np\n'), ((16962, 17007), 'numpy.repeat', 'np.repeat', (['img_2[:, :, np.newaxis]', '(3)'], {'axis': '(2)'}), '(img_2[:, :, np.newaxis], 3, axis=2)\n', (16971, 17007), True, 'import numpy as np\n'), ((17865, 17923), 'numpy.load', 'np.load', (['"""precomputed-proposals.pickle"""'], {'allow_pickle': '(True)'}), "('precomputed-proposals.pickle', allow_pickle=True)\n", (17872, 17923), True, 'import numpy as np\n'), ((17943, 18004), 'numpy.load', 'np.load', (['"""precomputed-proposals1.5.pickle"""'], {'allow_pickle': '(True)'}), "('precomputed-proposals1.5.pickle', allow_pickle=True)\n", (17950, 18004), True, 'import numpy as np\n'), ((11807, 11834), 'mmcv.parallel.DataContainer', 'DC', (['img_meta'], {'cpu_only': '(True)'}), '(img_meta, cpu_only=True)\n', (11809, 11834), True, 'from mmcv.parallel import DataContainer as DC\n'), ((15932, 15961), 'numpy.hstack', 'np.hstack', (['[_proposal, score]'], {}), '([_proposal, score])\n', (15941, 15961), True, 'import numpy as np\n'), ((4478, 4525), 'numpy.load', 'np.load', (["ann['segmentation']"], {'allow_pickle': '(True)'}), "(ann['segmentation'], allow_pickle=True)\n", (4485, 4525), True, 'import numpy as np\n'), ((10803, 10830), 'mmcv.parallel.DataContainer', 'DC', (['img_meta'], {'cpu_only': '(True)'}), '(img_meta, cpu_only=True)\n', (10805, 10830), True, 'from mmcv.parallel import DataContainer as DC\n'), ((16725, 16753), 'mmcv.parallel.DataContainer', 'DC', (['_img_meta'], {'cpu_only': '(True)'}), '(_img_meta, cpu_only=True)\n', (16727, 16753), True, 'from mmcv.parallel import DataContainer as DC\n'), ((17320, 17348), 'mmcv.parallel.DataContainer', 'DC', (['_img_meta'], {'cpu_only': '(True)'}), '(_img_meta, 
cpu_only=True)\n', (17322, 17348), True, 'from mmcv.parallel import DataContainer as DC\n'), ((4995, 5042), 'numpy.load', 'np.load', (["ann['segmentation']"], {'allow_pickle': '(True)'}), "(ann['segmentation'], allow_pickle=True)\n", (5002, 5042), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu May 21 19:48:17 2020
@author: zhang
"""
import os
import numpy as np
import xgboost as xgb
from sklearn.metrics import accuracy_score
from xgboost import plot_importance #显示特征重要性
from matplotlib import pyplot
import random
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
# NOTE(review): label_class is defined but never read below -- presumably the
# intended multi-label targets per sample; confirm before deleting.
label_class = [np.array([0,0,0,0,1]),np.array([1,0,0,1,0]),np.array([1,0,1,1,0]),np.array([0,0,0,1,0]),np.array([1,1,1,0,0]),np.array([1,1,0,0,0]),np.array([1,0,0,1,0]),np.array([1,0,1,0,0]),np.array([1,0,1,0,0]),np.array([1,1,0,0,0]),np.array([1,1,0,0,0])]
labels = []      # per-task label arrays loaded from class<i>.npy
accuracy = []    # best CV accuracy found for each task
param = []       # best hyper-parameter dict found for each task
save_path = 'Q:\\大学\\毕业设计\\代码\\'
# Load the label array for each of the five binary tasks (class0.npy ... class4.npy).
for i in range(0,5):
    labels.append(np.load(save_path+'class'+str(i)+'.npy',allow_pickle=True))
task_list = ['Vowel only vs consonant','non-nasal vs nasal', 'non-bilabial vs bilabial ','non-iy vs iy ','non-uw vs uw']
# Hyper-parameter grid explored by GridSearchCV below; fixed values in xlf
# are overridden by whatever the grid specifies.
parameters = {
            'max_depth': [10],
            'learning_rate': [ 0.1],
            'n_estimators': [5000],
            'min_child_weight': [ 2],
            'max_delta_step': [0.3],
            'subsample': [0.8],
            'colsample_bytree': [0.4,0.7],
            'reg_alpha': [ 0,0.25]
}
for task in range(0,5):
    # DAE-encoded features for this task
    all_data = np.load(save_path+task_list[task]+'DAE.npy',allow_pickle=True)
    train_x, valid_x, train_y, valid_y = train_test_split(all_data, labels[task], test_size=0.1, random_state=1) # split into training and validation sets
    # No DMatrix needed here: the sklearn wrapper API takes numpy arrays directly.
    xlf = xgb.XGBClassifier(max_depth=10,
                        learning_rate=0.1,
                        n_estimators=2000,
                        silent=True,
                        objective='binary:logistic',
                        nthread=-1,
                        gamma=0,
                        min_child_weight=1,
                        max_delta_step=0,
                        subsample=0.85,
                        colsample_bytree=0.7,
                        colsample_bylevel=1,
                        reg_alpha=0,
                        reg_lambda=1,
                        scale_pos_weight=1,
                        seed=1440,
                        missing=None)
    # With GridSearchCV we do not need to call fit() on xlf ourselves.
    gsearch = GridSearchCV(xlf, param_grid=parameters, scoring='accuracy', cv=3)
    gsearch.fit(train_x, train_y)
    print("Best score: %0.3f" % gsearch.best_score_)
    accuracy.append(gsearch.best_score_)
    print("Best parameters set:")
    best_parameters = gsearch.best_estimator_.get_params()
    for param_name in sorted(parameters.keys()):
        print("\t%s: %r" % (param_name, best_parameters[param_name]))
param.append(best_parameters) | [
"sklearn.model_selection.GridSearchCV",
"numpy.load",
"sklearn.model_selection.train_test_split",
"numpy.array",
"xgboost.XGBClassifier"
] | [((399, 424), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 1]'], {}), '([0, 0, 0, 0, 1])\n', (407, 424), True, 'import numpy as np\n'), ((421, 446), 'numpy.array', 'np.array', (['[1, 0, 0, 1, 0]'], {}), '([1, 0, 0, 1, 0])\n', (429, 446), True, 'import numpy as np\n'), ((443, 468), 'numpy.array', 'np.array', (['[1, 0, 1, 1, 0]'], {}), '([1, 0, 1, 1, 0])\n', (451, 468), True, 'import numpy as np\n'), ((465, 490), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 0]'], {}), '([0, 0, 0, 1, 0])\n', (473, 490), True, 'import numpy as np\n'), ((487, 512), 'numpy.array', 'np.array', (['[1, 1, 1, 0, 0]'], {}), '([1, 1, 1, 0, 0])\n', (495, 512), True, 'import numpy as np\n'), ((509, 534), 'numpy.array', 'np.array', (['[1, 1, 0, 0, 0]'], {}), '([1, 1, 0, 0, 0])\n', (517, 534), True, 'import numpy as np\n'), ((531, 556), 'numpy.array', 'np.array', (['[1, 0, 0, 1, 0]'], {}), '([1, 0, 0, 1, 0])\n', (539, 556), True, 'import numpy as np\n'), ((553, 578), 'numpy.array', 'np.array', (['[1, 0, 1, 0, 0]'], {}), '([1, 0, 1, 0, 0])\n', (561, 578), True, 'import numpy as np\n'), ((575, 600), 'numpy.array', 'np.array', (['[1, 0, 1, 0, 0]'], {}), '([1, 0, 1, 0, 0])\n', (583, 600), True, 'import numpy as np\n'), ((597, 622), 'numpy.array', 'np.array', (['[1, 1, 0, 0, 0]'], {}), '([1, 1, 0, 0, 0])\n', (605, 622), True, 'import numpy as np\n'), ((619, 644), 'numpy.array', 'np.array', (['[1, 1, 0, 0, 0]'], {}), '([1, 1, 0, 0, 0])\n', (627, 644), True, 'import numpy as np\n'), ((1236, 1303), 'numpy.load', 'np.load', (["(save_path + task_list[task] + 'DAE.npy')"], {'allow_pickle': '(True)'}), "(save_path + task_list[task] + 'DAE.npy', allow_pickle=True)\n", (1243, 1303), True, 'import numpy as np\n'), ((1341, 1412), 'sklearn.model_selection.train_test_split', 'train_test_split', (['all_data', 'labels[task]'], {'test_size': '(0.1)', 'random_state': '(1)'}), '(all_data, labels[task], test_size=0.1, random_state=1)\n', (1357, 1412), False, 'from sklearn.model_selection import train_test_split\n'), 
((1452, 1770), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {'max_depth': '(10)', 'learning_rate': '(0.1)', 'n_estimators': '(2000)', 'silent': '(True)', 'objective': '"""binary:logistic"""', 'nthread': '(-1)', 'gamma': '(0)', 'min_child_weight': '(1)', 'max_delta_step': '(0)', 'subsample': '(0.85)', 'colsample_bytree': '(0.7)', 'colsample_bylevel': '(1)', 'reg_alpha': '(0)', 'reg_lambda': '(1)', 'scale_pos_weight': '(1)', 'seed': '(1440)', 'missing': 'None'}), "(max_depth=10, learning_rate=0.1, n_estimators=2000,\n silent=True, objective='binary:logistic', nthread=-1, gamma=0,\n min_child_weight=1, max_delta_step=0, subsample=0.85, colsample_bytree=\n 0.7, colsample_bylevel=1, reg_alpha=0, reg_lambda=1, scale_pos_weight=1,\n seed=1440, missing=None)\n", (1469, 1770), True, 'import xgboost as xgb\n'), ((2260, 2326), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['xlf'], {'param_grid': 'parameters', 'scoring': '"""accuracy"""', 'cv': '(3)'}), "(xlf, param_grid=parameters, scoring='accuracy', cv=3)\n", (2272, 2326), False, 'from sklearn.model_selection import GridSearchCV\n')] |
#/usr/bin/env python
# system import
import os
import pkg_resources
import yaml
import pprint
import numpy as np
import pandas as pd
import itertools
import matplotlib.pyplot as plt
# 3rd party
import torch
from torch_geometric.data import Data
from trackml.dataset import load_event
# local import
from heptrkx.dataset import event as master
from exatrkx import config_dict # for accessing predefined configuration files
from exatrkx import outdir_dict # for accessing predefined output directories
from exatrkx.src import utils_dir
from exatrkx.src import utils_torch
from exatrkx import LayerlessEmbedding
def start_view(args):
    """Visualise the hit embedding learned by a LayerlessEmbedding model.

    Reads one event, keeps ``args.npids`` randomly chosen particles with more
    than 5 hits, runs their hits through the embedding network restored from
    ``args.embed_ckpt_dir`` and writes diagnostic figures (embedding-space
    scatter plots, edge candidates in 3D and in x-y / z-r projections) to
    ``args.outdir``.

    Fixes: figures are now closed after saving (the previous ``del fig`` does
    not close matplotlib figures, leaking all of them), and the duplicated
    ``e_spatial_np.T`` computation is hoisted.
    """
    outdir = args.outdir
    event = master.Event(utils_dir.inputdir)
    event.read(args.evtid)
    # randomly select N particles with each having at least 6 hits
    pids = event.particles[(event.particles.nhits) > 5]
    np.random.seed(args.seed)
    rnd = np.random.randint(0, pids.shape[0], args.npids)
    sel_pids = pids.particle_id.values[rnd]
    event._hits = event.hits[event.hits.particle_id.isin(sel_pids)]
    hits = event.cluster_info(utils_dir.detector_path)
    # track labeling -- determine true edges: order each particle's hits by
    # distance R from its production vertex and connect consecutive layers.
    hits = hits.assign(R=np.sqrt((hits.x - hits.vx)**2 + (hits.y - hits.vy)**2 + (hits.z - hits.vz)**2))
    hits = hits.sort_values('R').reset_index(drop=True).reset_index(drop=False)
    hit_list = hits.groupby(['particle_id', 'layer'], sort=False)['index'].agg(lambda x: list(x)).groupby(level=0).agg(lambda x: list(x))
    e = []
    for row in hit_list.values:
        for i, j in zip(row[0:-1], row[1:]):
            e.extend(list(itertools.product(i, j)))
    layerless_true_edges = np.array(e).T
    # input data for embedding; r/phi/z scaled to roughly unit range
    data = Data(x=torch.from_numpy(hits[['r', 'phi', 'z']].to_numpy()/np.array([1000, np.pi, 1000])).float(),\
           pid=torch.from_numpy(hits.particle_id.to_numpy()),
           layers=torch.from_numpy(hits.layer.to_numpy()), hid=torch.from_numpy(hits.hit_id.to_numpy()))
    cell_features = ['cell_count', 'cell_val', 'leta', 'lphi', 'lx', 'ly', 'lz', 'geta', 'gphi']
    data.layerless_true_edges = torch.from_numpy(layerless_true_edges)
    data.cell_data = torch.from_numpy(hits[cell_features].values).float()
    # load the embedding configuration shipped with the exatrkx package
    action = 'embedding'
    config_file = pkg_resources.resource_filename(
        "exatrkx",
        os.path.join('configs', config_dict[action]))
    with open(config_file) as f:
        e_config = yaml.load(f, Loader=yaml.FullLoader)
    e_config['train_split'] = [1, 0, 0]
    e_config['r_val'] = 2.0
    e_model = LayerlessEmbedding(e_config)
    e_model = e_model.load_from_checkpoint(args.embed_ckpt_dir, hparams=e_config)
    e_model.eval()
    spatial = e_model(torch.cat([data.cell_data, data.x], axis=-1))
    spatial_np = spatial.detach().numpy()
    # plot hits in the embedding space, two embedding dimensions at a time
    embedding_dims = [(0, 1), (2, 3), (4, 5), (6, 7)]
    for id1, id2 in embedding_dims:
        fig = plt.figure(figsize=(6,6))
        for pid in sel_pids:
            idx = hits.particle_id == pid
            plt.scatter(spatial_np[idx, id1], spatial_np[idx, id2])
        plt.savefig(os.path.join(outdir, "embedding_{}_{}.pdf".format(id1, id2)))
        plt.close(fig)  # was `del fig`, which leaks the open figure
    # build edges from the embedding space
    e_spatial = utils_torch.build_edges(spatial, e_model.hparams['r_val'], e_model.hparams['knn_val'])
    e_spatial_np = e_spatial.detach().numpy()
    e_spatial_np_t = e_spatial_np.T  # hoisted: used by the 3D view and plot_edges
    # view hits with or without edge candidates...
    fig = plt.figure(figsize=(8,8))
    ax = fig.add_subplot(111, projection='3d')
    for pid in sel_pids:
        ax.scatter(hits[hits.particle_id == pid].x.values, hits[hits.particle_id == pid].y.values, hits[hits.particle_id == pid].z.values)
    # add edges
    for iedge in range(e_spatial_np.shape[1]):
        ax.plot(hits.iloc[e_spatial_np_t[iedge]].x.values, hits.iloc[e_spatial_np_t[iedge]].y.values, hits.iloc[e_spatial_np_t[iedge]].z.values, color='k', alpha=0.3, lw=1.)
    ax.set_xlabel('X Label')
    ax.set_ylabel('Y Label')
    ax.set_zlabel('Z Label')
    plt.savefig(os.path.join(outdir, "emedding_edges_3d.pdf"))
    plt.close(fig)  # was `del fig` / `del ax`
    layerless_true_edges_t = layerless_true_edges.T # same as e
    def plot_edges(xname, yname, xlabel, ylabel, outname, with_edges=True, no_axis=False, edges=e_spatial_np_t):
        # 2D projection of the selected hits, optionally overlaid with edges.
        fig = plt.figure(figsize=(8,8))
        ax = fig.add_subplot(111)
        for pid in sel_pids:
            ax.scatter(hits[hits.particle_id == pid][xname].values, hits[hits.particle_id == pid][yname].values)
        # add edges
        if with_edges:
            for iedge in range(edges.shape[0]):
                ax.plot(hits.iloc[edges[iedge]][xname].values,\
                        hits.iloc[edges[iedge]][yname].values, color='k', alpha=0.3, lw=1.)
        ax.set_xlabel(xlabel, fontsize=16)
        ax.set_ylabel(ylabel, fontsize=16)
        if xname=='z':
            ax.set_xlim(-3000, 3000)
        trans=False
        if no_axis:
            ax.set_axis_off()
            trans=True
        plt.savefig(os.path.join(outdir, "{}.png".format(outname)), transparent=trans)
        plt.savefig(os.path.join(outdir, "{}.pdf".format(outname)), transparent=trans)
        plt.close(fig)  # avoid accumulating open figures across many calls
    def plot_hits(xname, yname, outname):
        # 2D projection of all selected hits, no edges.
        fig = plt.figure(figsize=(8,8))
        ax = fig.add_subplot(111)
        ax.scatter(hits[xname].values, hits[yname].values)
        if xname=='z':
            ax.set_xlim(-3000, 3000)
        ax.set_xlabel(xname, fontsize=16)
        ax.set_ylabel(yname, fontsize=16)
        plt.savefig(os.path.join(outdir, "{}.pdf".format(outname)))
        plt.close(fig)
    plot_edges("x", 'y', 'x', 'y', 'embedding_edges_x_y')
    plot_edges("z", 'r', 'z', 'r', 'embedding_edges_z_r')
    plot_edges("x", 'y', 'x', 'y', 'embedding_edges_truth_x_y', edges=layerless_true_edges_t)
    plot_edges("z", 'r', 'z', 'r', 'embedding_edges_truth_z_r', edges=layerless_true_edges_t)
    plot_edges("x", 'y', 'x', 'y', 'embedding_hits_truth_x_y', with_edges=False)
    plot_edges("z", 'r', 'z', 'r', 'embedding_hits_truth_z_r', with_edges=False)
    plot_hits("x", 'y', 'embedding_hits_x_y')
    plot_hits("z", 'r', 'embedding_hits_z_r')
    plot_edges("x", 'y', 'x', 'y', 'embedding_front', no_axis=True)
if __name__ == "__main__":
    # Command-line entry point: parse the arguments consumed by start_view.
    import argparse
    parser = argparse.ArgumentParser(description="view embedding results")
    parser.add_argument("embed_ckpt_dir", help="embedding checkpoint")
    parser.add_argument("outdir", help="output directory")
    parser.add_argument("--evtid", default=8000, type=int, help='event id')
    parser.add_argument("--npids", default=10, type=int, help='number of particles')
    parser.add_argument("--seed", default=456, type=int, help='seeding for selecting particles')
    args = parser.parse_args()
start_view(args) | [
"exatrkx.LayerlessEmbedding",
"yaml.load",
"numpy.random.seed",
"argparse.ArgumentParser",
"matplotlib.pyplot.scatter",
"torch.cat",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"numpy.array",
"heptrkx.dataset.event.Event",
"exatrkx.src.utils_torch.build_edges",
"itertools.product",
"... | [((673, 705), 'heptrkx.dataset.event.Event', 'master.Event', (['utils_dir.inputdir'], {}), '(utils_dir.inputdir)\n', (685, 705), True, 'from heptrkx.dataset import event as master\n'), ((861, 886), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (875, 886), True, 'import numpy as np\n'), ((897, 944), 'numpy.random.randint', 'np.random.randint', (['(0)', 'pids.shape[0]', 'args.npids'], {}), '(0, pids.shape[0], args.npids)\n', (914, 944), True, 'import numpy as np\n'), ((2108, 2146), 'torch.from_numpy', 'torch.from_numpy', (['layerless_true_edges'], {}), '(layerless_true_edges)\n', (2124, 2146), False, 'import torch\n'), ((2576, 2604), 'exatrkx.LayerlessEmbedding', 'LayerlessEmbedding', (['e_config'], {}), '(e_config)\n', (2594, 2604), False, 'from exatrkx import LayerlessEmbedding\n'), ((3296, 3387), 'exatrkx.src.utils_torch.build_edges', 'utils_torch.build_edges', (['spatial', "e_model.hparams['r_val']", "e_model.hparams['knn_val']"], {}), "(spatial, e_model.hparams['r_val'], e_model.hparams[\n 'knn_val'])\n", (3319, 3387), False, 'from exatrkx.src import utils_torch\n'), ((3491, 3517), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (3501, 3517), True, 'import matplotlib.pyplot as plt\n'), ((6346, 6407), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""view embedding results"""'}), "(description='view embedding results')\n", (6369, 6407), False, 'import argparse\n'), ((1652, 1663), 'numpy.array', 'np.array', (['e'], {}), '(e)\n', (1660, 1663), True, 'import numpy as np\n'), ((2358, 2402), 'os.path.join', 'os.path.join', (['"""configs"""', 'config_dict[action]'], {}), "('configs', config_dict[action])\n", (2370, 2402), False, 'import os\n'), ((2456, 2492), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (2465, 2492), False, 'import yaml\n'), ((2728, 2772), 'torch.cat', 'torch.cat', (['[data.cell_data, 
data.x]'], {'axis': '(-1)'}), '([data.cell_data, data.x], axis=-1)\n', (2737, 2772), False, 'import torch\n'), ((2960, 2986), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (2970, 2986), True, 'import matplotlib.pyplot as plt\n'), ((4105, 4150), 'os.path.join', 'os.path.join', (['outdir', '"""emedding_edges_3d.pdf"""'], {}), "(outdir, 'emedding_edges_3d.pdf')\n", (4117, 4150), False, 'import os\n'), ((4403, 4429), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (4413, 4429), True, 'import matplotlib.pyplot as plt\n'), ((5326, 5352), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (5336, 5352), True, 'import matplotlib.pyplot as plt\n'), ((1187, 1276), 'numpy.sqrt', 'np.sqrt', (['((hits.x - hits.vx) ** 2 + (hits.y - hits.vy) ** 2 + (hits.z - hits.vz) ** 2)'], {}), '((hits.x - hits.vx) ** 2 + (hits.y - hits.vy) ** 2 + (hits.z - hits.\n vz) ** 2)\n', (1194, 1276), True, 'import numpy as np\n'), ((2168, 2212), 'torch.from_numpy', 'torch.from_numpy', (['hits[cell_features].values'], {}), '(hits[cell_features].values)\n', (2184, 2212), False, 'import torch\n'), ((3069, 3124), 'matplotlib.pyplot.scatter', 'plt.scatter', (['spatial_np[idx, id1]', 'spatial_np[idx, id2]'], {}), '(spatial_np[idx, id1], spatial_np[idx, id2])\n', (3080, 3124), True, 'import matplotlib.pyplot as plt\n'), ((1599, 1622), 'itertools.product', 'itertools.product', (['i', 'j'], {}), '(i, j)\n', (1616, 1622), False, 'import itertools\n'), ((1769, 1798), 'numpy.array', 'np.array', (['[1000, np.pi, 1000]'], {}), '([1000, np.pi, 1000])\n', (1777, 1798), True, 'import numpy as np\n')] |
from numpy.random import randn
import ref
import torch
import numpy as np
def adjust_learning_rate(optimizer, epoch, LR, LR_param):
    """Set the learning rate of every param group for the given epoch.

    Uses a step schedule: ``steppoints`` are epoch upper bounds and ``lrs``
    holds one learning rate per interval plus a final one for all epochs
    beyond the last steppoint.

    Args:
        optimizer: object with a ``param_groups`` list of dicts (each holding
            an ``'lr'`` key), e.g. a torch optimizer.
        epoch (int): current epoch (1-based; ``epoch <= steppoint`` selects
            that interval's lr).
        LR: unused; kept for signature compatibility with existing callers.
        LR_param (dict): schedule config; keys ``'lr_policy'`` (only
            ``'step'`` is supported), ``'steppoints'`` and ``'lrs'``.

    Raises:
        ValueError: if ``lr_policy`` is not ``'step'`` (the old code crashed
            with an unrelated NameError in that case).
    """
    LR_policy = LR_param.get('lr_policy', 'step')
    if LR_policy != 'step':
        raise ValueError("unsupported lr_policy: {!r}".format(LR_policy))
    steppoints = LR_param.get('steppoints', [4, 7, 9, 10])
    lrs = LR_param.get('lrs', [0.01, 0.001, 0.0001, 0.00001, 0.000001])
    assert len(lrs) == len(steppoints) + 1
    # Pick the lr of the first interval covering `epoch`; epochs past the
    # last steppoint fall through to the final lr.
    lr = lrs[-1]
    for idx, steppoint in enumerate(steppoints):
        if epoch <= steppoint:
            lr = lrs[idx]
            break
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
class AverageMeter(object):
    """Track the latest value, running sum, count and mean of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every statistic."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Fold in ``val`` observed ``n`` times and refresh the mean."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
def Rnd(x):
    """Draw a Gaussian sample scaled by ``x``, clamped to [-2*x, 2*x]."""
    sample = randn() * x
    lo, hi = -2 * x, 2 * x
    return max(lo, min(hi, sample))
def Flip(img):
    """Return a copy of ``img`` with axis 2 reversed (horizontal mirror for HWC/CHW+ layouts)."""
    return np.flip(img, axis=2).copy()
def ShuffleLR(x):
    """Swap each left/right keypoint pair listed in ``ref.shuffleRef``, in place, and return ``x``."""
    for left, right in ref.shuffleRef:
        x[left], x[right] = x[right].copy(), x[left].copy()
    return x
| [
"numpy.random.randn"
] | [((1243, 1250), 'numpy.random.randn', 'randn', ([], {}), '()\n', (1248, 1250), False, 'from numpy.random import randn\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
# Hack so you don't have to put the library containing this script in the PYTHONPATH.
sys.path = [os.path.abspath(os.path.join(__file__, '..', '..'))] + sys.path
import theano
import theano.tensor as T
import numpy as np
import tempfile
from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal
import smartlearner.initializers as initer
from smartlearner import Trainer, Dataset, Model
from smartlearner import tasks
from smartlearner import views
from smartlearner import stopping_criteria
import smartlearner.initializers as initer
from smartlearner.utils import sharedX
from smartlearner.optimizers import SGD
from smartlearner.direction_modifiers import ConstantLearningRate
#from smartlearner.batch_schedulers import MiniBatchScheduler, FullBatchScheduler
#from smartlearner.losses.classification_losses import NegativeLogLikelihood as NLL
#from smartlearner.losses.classification_losses import ClassificationError
from convnade.utils import Timer, cartesian
from convnade.datasets import load_binarized_mnist
from convnade import DeepConvNADE, DeepConvNADEBuilder
from convnade import generate_blueprints
#from convnade.tasks import DeepNadeOrderingTask
from convnade.batch_schedulers import MiniBatchSchedulerWithAutoregressiveMask
from convnade.losses import BinaryCrossEntropyEstimateWithAutoRegressiveMask
# Widen numpy's printed line width so long arrays (e.g. pixel orderings) stay on one line.
np.set_printoptions(linewidth=220)
def test_simple_convnade():
    """End-to-end sanity check for a small ConvNADE.

    Trains on the 4x4 center patch of binarized MNIST for a few epochs, then
    verifies that the modelled probabilities p(x) summed over all 2^16 binary
    inputs are numerically 1 for a random pixel ordering.

    Fix: the ordering is now printed *after* it is reshuffled, so the printed
    ordering matches the one actually used (the old code printed the stale
    one from before the reshuffle).
    """
    # NOTE(review): nb_kernels / kernel_shape / hidden_activation are unused;
    # the architecture is fully determined by the blueprints below.
    nb_kernels = 8
    kernel_shape = (2, 2)
    hidden_activation = "sigmoid"
    use_mask_as_input = True
    batch_size = 1024
    ordering_seed = 1234
    max_epoch = 3
    nb_orderings = 1
    print("Will train Convoluational Deep NADE for a total of {0} epochs.".format(max_epoch))
    with Timer("Loading/processing binarized MNIST"):
        trainset, validset, testset = load_binarized_mnist()
        # Extract the center patch (4x4 pixels) of each image.
        indices_to_keep = [348, 349, 350, 351, 376, 377, 378, 379, 404, 405, 406, 407, 432, 433, 434, 435]
        trainset = Dataset(trainset.inputs.get_value()[:, indices_to_keep], trainset.inputs.get_value()[:, indices_to_keep], name="trainset")
        validset = Dataset(validset.inputs.get_value()[:, indices_to_keep], validset.inputs.get_value()[:, indices_to_keep], name="validset")
        testset = Dataset(testset.inputs.get_value()[:, indices_to_keep], testset.inputs.get_value()[:, indices_to_keep], name="testset")
        image_shape = (4, 4)
        nb_channels = 1
    with Timer("Building model"):
        builder = DeepConvNADEBuilder(image_shape=image_shape,
                                      nb_channels=nb_channels,
                                      use_mask_as_input=use_mask_as_input)
        convnet_blueprint = "64@2x2(valid) -> 1@2x2(full)"
        fullnet_blueprint = "5 -> 16"
        print("Convnet:", convnet_blueprint)
        print("Fullnet:", fullnet_blueprint)
        builder.build_convnet_from_blueprint(convnet_blueprint)
        builder.build_fullnet_from_blueprint(fullnet_blueprint)
        model = builder.build()
        model.initialize()  # By default, uniform initialization.
    with Timer("Building optimizer"):
        loss = BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, trainset)
        optimizer = SGD(loss=loss)
        optimizer.append_direction_modifier(ConstantLearningRate(0.001))
    with Timer("Building trainer"):
        batch_scheduler = MiniBatchSchedulerWithAutoregressiveMask(trainset, batch_size)
        trainer = Trainer(optimizer, batch_scheduler)
        trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))
        # Print time for one epoch
        trainer.append_task(tasks.PrintEpochDuration())
        trainer.append_task(tasks.PrintTrainingDuration())
        # Log training error
        loss_monitor = views.MonitorVariable(loss.loss)
        avg_loss = tasks.AveragePerEpoch(loss_monitor)
        accum = tasks.Accumulator(loss_monitor)
        logger = tasks.Logger(loss_monitor, avg_loss)
        trainer.append_task(logger, avg_loss, accum)
        # Print average training loss.
        trainer.append_task(tasks.Print("Avg. training loss: : {}", avg_loss))
        # Print NLL mean/stderror on the validation set.
        nll = views.LossView(loss=BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, validset),
                             batch_scheduler=MiniBatchSchedulerWithAutoregressiveMask(validset, batch_size=len(validset)))
        trainer.append_task(tasks.Print("Validset - NLL : {0:.2f} ± {1:.2f}", nll.mean, nll.stderror))
        trainer.build_theano_graph()
    with Timer("Training"):
        trainer.train()
    with Timer("Checking the probs for all possible inputs sum to 1"):
        rng = np.random.RandomState(ordering_seed)
        D = np.prod(image_shape)
        # All 2^D binary configurations of the D-pixel patch.
        inputs = cartesian([[0, 1]]*int(D), dtype=np.float32)
        ordering = np.arange(D, dtype=np.int32)
        rng.shuffle(ordering)
        symb_input = T.vector("input")
        symb_input.tag.test_value = inputs[-len(inputs)//4]
        symb_ordering = T.ivector("ordering")
        symb_ordering.tag.test_value = ordering
        nll_of_x_given_o = theano.function([symb_input, symb_ordering], model.nll_of_x_given_o(symb_input, symb_ordering), name="nll_of_x_given_o")
        for i in range(nb_orderings):
            ordering = np.arange(D, dtype=np.int32)
            rng.shuffle(ordering)
            # Print the ordering actually used for this pass.
            print("Ordering:", ordering)
            nlls = []
            for no, x in enumerate(inputs):  # renamed from `input` (shadowed builtin)
                print("{}/{}".format(no, len(inputs)), end='\r')
                nlls.append(nll_of_x_given_o(x, ordering))
            print("{}/{} Done".format(len(inputs), len(inputs)))
            # Sum of p(x) over all x, computed in log space for stability.
            p_x = np.exp(np.logaddexp.reduce(-np.array(nlls)))
            print("Sum of p(x) for all x:", p_x)
            assert_almost_equal(p_x, 1., decimal=5)
def test_convnade_with_max_pooling():
    """Train a small ConvNADE whose convnet includes max-pooling/upsampling.

    Trains on the 4x4 center patch of binarized MNIST for a few epochs, then
    enumerates every possible binary input and checks that the modeled
    probabilities p(x) sum to 1 (up to numerical precision), i.e. the model
    defines a properly normalized distribution.
    """
    use_mask_as_input = True
    batch_size = 1024
    ordering_seed = 1234
    max_epoch = 3
    nb_orderings = 1

    print("Will train Convoluational Deep NADE for a total of {0} epochs.".format(max_epoch))

    with Timer("Loading/processing binarized MNIST"):
        trainset, validset, testset = load_binarized_mnist()

        # Extract the center patch (4x4 pixels) of each image.
        indices_to_keep = [348, 349, 350, 351, 376, 377, 378, 379, 404, 405, 406, 407, 432, 433, 434, 435]

        trainset = Dataset(trainset.inputs.get_value()[:, indices_to_keep], trainset.inputs.get_value()[:, indices_to_keep], name="trainset")
        validset = Dataset(validset.inputs.get_value()[:, indices_to_keep], validset.inputs.get_value()[:, indices_to_keep], name="validset")
        testset = Dataset(testset.inputs.get_value()[:, indices_to_keep], testset.inputs.get_value()[:, indices_to_keep], name="testset")

        image_shape = (4, 4)
        nb_channels = 1

    with Timer("Building model"):
        builder = DeepConvNADEBuilder(image_shape=image_shape,
                                      nb_channels=nb_channels,
                                      use_mask_as_input=use_mask_as_input)

        # This blueprint exercises the max-pooling (max@2x2) and
        # upsampling (up@2x2) layers.
        convnet_blueprint = "64@3x3(valid) -> max@2x2 -> up@2x2 -> 1@3x3(full)"
        fullnet_blueprint = "5 -> 16"
        print("Convnet:", convnet_blueprint)
        print("Fullnet:", fullnet_blueprint)
        builder.build_convnet_from_blueprint(convnet_blueprint)
        builder.build_fullnet_from_blueprint(fullnet_blueprint)

        model = builder.build()
        model.initialize()  # By default, uniform initialization.

    with Timer("Building optimizer"):
        loss = BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, trainset)
        optimizer = SGD(loss=loss)
        optimizer.append_direction_modifier(ConstantLearningRate(0.001))

    with Timer("Building trainer"):
        batch_scheduler = MiniBatchSchedulerWithAutoregressiveMask(trainset, batch_size)

        trainer = Trainer(optimizer, batch_scheduler)
        trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))

        # Print time for one epoch
        trainer.append_task(tasks.PrintEpochDuration())
        trainer.append_task(tasks.PrintTrainingDuration())

        # Log training error
        loss_monitor = views.MonitorVariable(loss.loss)
        avg_loss = tasks.AveragePerEpoch(loss_monitor)
        accum = tasks.Accumulator(loss_monitor)
        logger = tasks.Logger(loss_monitor, avg_loss)
        trainer.append_task(logger, avg_loss, accum)

        # Print average training loss.
        trainer.append_task(tasks.Print("Avg. training loss: : {}", avg_loss))

        # Print NLL mean/stderror.
        nll = views.LossView(loss=BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, validset),
                             batch_scheduler=MiniBatchSchedulerWithAutoregressiveMask(validset, batch_size=len(validset)))
        trainer.append_task(tasks.Print("Validset - NLL : {0:.2f} ± {1:.2f}", nll.mean, nll.stderror))

        trainer.build_theano_graph()

    with Timer("Training"):
        trainer.train()

    with Timer("Checking the probs for all possible inputs sum to 1"):
        rng = np.random.RandomState(ordering_seed)
        D = np.prod(image_shape)
        # All 2**D possible binary inputs.
        inputs = cartesian([[0, 1]]*int(D), dtype=np.float32)
        ordering = np.arange(D, dtype=np.int32)
        rng.shuffle(ordering)

        symb_input = T.vector("input")
        symb_input.tag.test_value = inputs[-len(inputs)//4]
        symb_ordering = T.ivector("ordering")
        symb_ordering.tag.test_value = ordering
        nll_of_x_given_o = theano.function([symb_input, symb_ordering], model.nll_of_x_given_o(symb_input, symb_ordering), name="nll_of_x_given_o")

        for i in range(nb_orderings):
            # Draw the ordering *before* printing it so the printed ordering
            # is the one actually used below (the original printed a stale one).
            ordering = np.arange(D, dtype=np.int32)
            rng.shuffle(ordering)
            print("Ordering:", ordering)

            nlls = []
            for no, input in enumerate(inputs):
                print("{}/{}".format(no, len(inputs)), end='\r')
                nlls.append(nll_of_x_given_o(input, ordering))

            print("{}/{} Done".format(len(inputs), len(inputs)))

            # Sum probabilities in log-space for numerical stability.
            p_x = np.exp(np.logaddexp.reduce(-np.array(nlls)))
            print("Sum of p(x) for all x:", p_x)
            assert_almost_equal(p_x, 1., decimal=5)
def test_convnade_with_mask_as_input_channel():
    """Train a ConvNADE that receives the autoregressive mask as an extra
    input channel, then check the model defines a normalized distribution.

    Trains on the 4x4 center patch of binarized MNIST for a few epochs, then
    enumerates every possible binary input and checks that the modeled
    probabilities p(x) sum to 1 (up to numerical precision).
    """
    use_mask_as_input = True
    batch_size = 1024
    ordering_seed = 1234
    max_epoch = 3
    nb_orderings = 1

    print("Will train Convoluational Deep NADE for a total of {0} epochs.".format(max_epoch))

    with Timer("Loading/processing binarized MNIST"):
        trainset, validset, testset = load_binarized_mnist()

        # Extract the center patch (4x4 pixels) of each image.
        indices_to_keep = [348, 349, 350, 351, 376, 377, 378, 379, 404, 405, 406, 407, 432, 433, 434, 435]

        trainset = Dataset(trainset.inputs.get_value()[:, indices_to_keep], trainset.inputs.get_value()[:, indices_to_keep], name="trainset")
        validset = Dataset(validset.inputs.get_value()[:, indices_to_keep], validset.inputs.get_value()[:, indices_to_keep], name="validset")
        testset = Dataset(testset.inputs.get_value()[:, indices_to_keep], testset.inputs.get_value()[:, indices_to_keep], name="testset")

        image_shape = (4, 4)

    # We consider the mask as an input channel so we do the necessary modification to the datasets.
    nb_channels = 1 + (use_mask_as_input is True)
    batch_scheduler = MiniBatchSchedulerWithAutoregressiveMask(trainset, batch_size, use_mask_as_input=use_mask_as_input)

    with Timer("Building model"):
        # The mask arrives as a second channel, so the builder is NOT told
        # to append the mask itself (no use_mask_as_input here).
        builder = DeepConvNADEBuilder(image_shape=image_shape, nb_channels=nb_channels)

        convnet_blueprint = "64@2x2(valid) -> 1@2x2(full)"
        fullnet_blueprint = "5 -> 16"
        print("Convnet:", convnet_blueprint)
        print("Fullnet:", fullnet_blueprint)
        builder.build_convnet_from_blueprint(convnet_blueprint)
        builder.build_fullnet_from_blueprint(fullnet_blueprint)

        model = builder.build()
        model.initialize()  # By default, uniform initialization.

    with Timer("Building optimizer"):
        loss = BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, trainset)
        optimizer = SGD(loss=loss)
        optimizer.append_direction_modifier(ConstantLearningRate(0.001))

    with Timer("Building trainer"):
        trainer = Trainer(optimizer, batch_scheduler)
        trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))

        # Print time for one epoch
        trainer.append_task(tasks.PrintEpochDuration())
        trainer.append_task(tasks.PrintTrainingDuration())

        # Log training error
        loss_monitor = views.MonitorVariable(loss.loss)
        avg_loss = tasks.AveragePerEpoch(loss_monitor)
        accum = tasks.Accumulator(loss_monitor)
        logger = tasks.Logger(loss_monitor, avg_loss)
        trainer.append_task(logger, avg_loss, accum)

        # Print average training loss.
        trainer.append_task(tasks.Print("Avg. training loss: : {}", avg_loss))

        # Print NLL mean/stderror.
        nll = views.LossView(loss=BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, validset),
                             batch_scheduler=MiniBatchSchedulerWithAutoregressiveMask(validset, batch_size=len(validset), use_mask_as_input=use_mask_as_input))
        trainer.append_task(tasks.Print("Validset - NLL : {0:.2f} ± {1:.2f}", nll.mean, nll.stderror))

        trainer.build_theano_graph()

    with Timer("Training"):
        trainer.train()

    with Timer("Checking the probs for all possible inputs sum to 1"):
        rng = np.random.RandomState(ordering_seed)
        D = np.prod(image_shape)
        # All 2**D possible binary inputs.
        inputs = cartesian([[0, 1]]*int(D), dtype=np.float32)
        ordering = np.arange(D, dtype=np.int32)
        rng.shuffle(ordering)

        d = rng.randint(D, size=(D, 1))
        masks_o_lt_d = np.arange(D) < d
        # BUGFIX: `map` is lazy in Python 3, so the original
        # `map(rng.shuffle, masks_o_lt_d)` never actually shuffled anything.
        # Use an explicit loop to shuffle each row in place as intended.
        for row in masks_o_lt_d:
            rng.shuffle(row)

        symb_input = T.vector("input")
        symb_input.tag.test_value = inputs[-len(inputs)//4]
        symb_ordering = T.ivector("ordering")
        symb_ordering.tag.test_value = ordering
        nll_of_x_given_o = theano.function([symb_input, symb_ordering], model.nll_of_x_given_o(symb_input, symb_ordering), name="nll_of_x_given_o")

        for i in range(nb_orderings):
            # Draw the ordering *before* printing it so the printed ordering
            # is the one actually used below (the original printed a stale one).
            ordering = np.arange(D, dtype=np.int32)
            rng.shuffle(ordering)
            print("Ordering:", ordering)

            nlls = []
            for no, input in enumerate(inputs):
                print("{}/{}".format(no, len(inputs)), end='\r')
                nlls.append(nll_of_x_given_o(input, ordering))

            print("{}/{} Done".format(len(inputs), len(inputs)))

            # Sum probabilities in log-space for numerical stability.
            p_x = np.exp(np.logaddexp.reduce(-np.array(nlls)))
            print("Sum of p(x) for all x:", p_x)
            assert_almost_equal(p_x, 1., decimal=5)
def test_check_init():
    """Check that model construction and training are fully deterministic.

    Builds two trainers with the same fixed initialization seed (1234),
    verifies both models start with identical parameters, trains both for
    the same number of epochs, and verifies the parameters are still equal
    afterwards.
    """
    use_mask_as_input = True
    batch_size = 1024

    with Timer("Loading/processing binarized MNIST"):
        trainset, validset, testset = load_binarized_mnist()

        # Extract the center patch (4x4 pixels) of each image.
        indices_to_keep = [348, 349, 350, 351, 376, 377, 378, 379, 404, 405, 406, 407, 432, 433, 434, 435]

        trainset = Dataset(trainset.inputs.get_value()[:, indices_to_keep], trainset.inputs.get_value()[:, indices_to_keep], name="trainset")
        validset = Dataset(validset.inputs.get_value()[:, indices_to_keep], validset.inputs.get_value()[:, indices_to_keep], name="validset")
        testset = Dataset(testset.inputs.get_value()[:, indices_to_keep], testset.inputs.get_value()[:, indices_to_keep], name="testset")

        image_shape = (4, 4)
        nb_channels = 1

    # Nested function to build a trainer with a deterministically seeded model.
    def _build_trainer(nb_epochs):
        print("Will train Convoluational Deep NADE for a total of {0} epochs.".format(nb_epochs))

        with Timer("Building model"):
            builder = DeepConvNADEBuilder(image_shape=image_shape,
                                          nb_channels=nb_channels,
                                          use_mask_as_input=use_mask_as_input)

            convnet_blueprint = "64@2x2(valid) -> 1@2x2(full)"
            fullnet_blueprint = "5 -> 16"
            print("Convnet:", convnet_blueprint)
            print("Fullnet:", fullnet_blueprint)
            builder.build_convnet_from_blueprint(convnet_blueprint)
            builder.build_fullnet_from_blueprint(fullnet_blueprint)

            model = builder.build()
            # Fixed seed so two builds yield identical initial parameters.
            model.initialize(initer.UniformInitializer(random_seed=1234))

        with Timer("Building optimizer"):
            loss = BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, trainset)
            optimizer = SGD(loss=loss)
            optimizer.append_direction_modifier(ConstantLearningRate(0.001))

        with Timer("Building trainer"):
            batch_scheduler = MiniBatchSchedulerWithAutoregressiveMask(trainset, batch_size)

            trainer = Trainer(optimizer, batch_scheduler)

            # Print time for one epoch
            trainer.append_task(tasks.PrintEpochDuration())
            trainer.append_task(tasks.PrintTrainingDuration())

            # Log training error
            loss_monitor = views.MonitorVariable(loss.loss)
            avg_loss = tasks.AveragePerEpoch(loss_monitor)
            accum = tasks.Accumulator(loss_monitor)
            logger = tasks.Logger(loss_monitor, avg_loss)
            trainer.append_task(logger, avg_loss, accum)

            # Print average training loss.
            trainer.append_task(tasks.Print("Avg. training loss: : {}", avg_loss))

            # Print NLL mean/stderror.
            nll = views.LossView(loss=BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, validset),
                                 batch_scheduler=MiniBatchSchedulerWithAutoregressiveMask(validset, batch_size=len(validset),
                                                                                          keep_mask=True))
            trainer.append_task(tasks.Print("Validset - NLL : {0:.2f} ± {1:.2f}", nll.mean, nll.stderror))

            trainer.append_task(stopping_criteria.MaxEpochStopping(nb_epochs))

        return trainer, nll

    trainer1, nll1 = _build_trainer(nb_epochs=5)
    with Timer("Compiling training graph"):
        trainer1.build_theano_graph()

    with Timer("Compiling training graph"):
        trainer2, nll2 = _build_trainer(nb_epochs=5)

    # Check the two models have been initialized the same way.
    assert_equal(len(trainer1._optimizer.loss.model.parameters),
                 len(trainer2._optimizer.loss.model.parameters))

    for param1, param2 in zip(trainer1._optimizer.loss.model.parameters,
                              trainer2._optimizer.loss.model.parameters):
        assert_array_equal(param1.get_value(), param2.get_value(), err_msg=param1.name)

    with Timer("Training"):
        trainer1.train()
        trainer2.train()

    # Check the two models are the same after training for 5 epochs.
    assert_equal(len(trainer1._optimizer.loss.model.parameters),
                 len(trainer2._optimizer.loss.model.parameters))

    for param1, param2 in zip(trainer1._optimizer.loss.model.parameters,
                              trainer2._optimizer.loss.model.parameters):
        # I tested it, they are equal when using float64.
        assert_array_almost_equal(param1.get_value(), param2.get_value(), err_msg=param1.name)
def test_save_load_convnade():
    """Check that training can be checkpointed and resumed exactly.

    Trains one trainer for 10 epochs straight; trains a second one for 5
    epochs, saves it, reloads it into a fresh 10-epoch trainer, resumes
    training, and verifies the resumed model's parameters, NLL statistics
    and logged history match the uninterrupted run.
    """
    use_mask_as_input = True
    batch_size = 1024

    with Timer("Loading/processing binarized MNIST"):
        trainset, validset, testset = load_binarized_mnist()

        # Extract the center patch (4x4 pixels) of each image.
        indices_to_keep = [348, 349, 350, 351, 376, 377, 378, 379, 404, 405, 406, 407, 432, 433, 434, 435]

        trainset = Dataset(trainset.inputs.get_value()[:, indices_to_keep], trainset.inputs.get_value()[:, indices_to_keep], name="trainset")
        validset = Dataset(validset.inputs.get_value()[:, indices_to_keep], validset.inputs.get_value()[:, indices_to_keep], name="validset")
        testset = Dataset(testset.inputs.get_value()[:, indices_to_keep], testset.inputs.get_value()[:, indices_to_keep], name="testset")

        image_shape = (4, 4)
        nb_channels = 1

    # Nested function to build a trainer with a deterministically seeded model.
    def _build_trainer(nb_epochs):
        print("Will train Convoluational Deep NADE for a total of {0} epochs.".format(nb_epochs))

        with Timer("Building model"):
            builder = DeepConvNADEBuilder(image_shape=image_shape,
                                          nb_channels=nb_channels,
                                          use_mask_as_input=use_mask_as_input)

            convnet_blueprint = "64@2x2(valid) -> 1@2x2(full)"
            fullnet_blueprint = "5 -> 16"
            print("Convnet:", convnet_blueprint)
            print("Fullnet:", fullnet_blueprint)
            builder.build_convnet_from_blueprint(convnet_blueprint)
            builder.build_fullnet_from_blueprint(fullnet_blueprint)

            model = builder.build()
            # Fixed seed so every build yields identical initial parameters.
            model.initialize(initer.UniformInitializer(random_seed=1234))

        with Timer("Building optimizer"):
            loss = BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, trainset)
            optimizer = SGD(loss=loss)
            optimizer.append_direction_modifier(ConstantLearningRate(0.001))

        with Timer("Building trainer"):
            batch_scheduler = MiniBatchSchedulerWithAutoregressiveMask(trainset, batch_size)

            trainer = Trainer(optimizer, batch_scheduler)

            # Print time for one epoch
            trainer.append_task(tasks.PrintEpochDuration())
            trainer.append_task(tasks.PrintTrainingDuration())

            # Log training error
            loss_monitor = views.MonitorVariable(loss.loss)
            avg_loss = tasks.AveragePerEpoch(loss_monitor)
            accum = tasks.Accumulator(loss_monitor)
            logger = tasks.Logger(loss_monitor, avg_loss)
            trainer.append_task(logger, avg_loss, accum)

            # Print average training loss.
            trainer.append_task(tasks.Print("Avg. training loss: : {}", avg_loss))

            # Print NLL mean/stderror.
            nll = views.LossView(loss=BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, validset),
                                 batch_scheduler=MiniBatchSchedulerWithAutoregressiveMask(validset, batch_size=len(validset),
                                                                                          keep_mask=True))
            trainer.append_task(tasks.Print("Validset - NLL : {0:.2f} ± {1:.2f}", nll.mean, nll.stderror))

            trainer.append_task(stopping_criteria.MaxEpochStopping(nb_epochs))

        return trainer, nll, logger

    # Reference run: 10 epochs without interruption.
    trainer1, nll1, logger1 = _build_trainer(nb_epochs=10)
    with Timer("Compiling training graph"):
        trainer1.build_theano_graph()

    with Timer("Training"):
        trainer1.train()

    # Interrupted run: first half (5 epochs).
    trainer2a, nll2a, logger2a = _build_trainer(5)
    with Timer("Compiling training graph"):
        trainer2a.build_theano_graph()

    with Timer("Training"):
        trainer2a.train()

    # Save model halfway during training and resume it.
    with tempfile.TemporaryDirectory() as experiment_dir:
        with Timer("Saving"):
            # Save current state of the model (i.e. after 5 epochs).
            trainer2a.save(experiment_dir)

        with Timer("Loading"):
            # Load previous state from which training will resume.
            trainer2b, nll2b, logger2b = _build_trainer(10)
            trainer2b.load(experiment_dir)

    # Check we correctly reloaded the model.
    assert_equal(len(trainer2a._optimizer.loss.model.parameters),
                 len(trainer2b._optimizer.loss.model.parameters))

    for param1, param2 in zip(trainer2a._optimizer.loss.model.parameters,
                              trainer2b._optimizer.loss.model.parameters):
        assert_array_equal(param1.get_value(), param2.get_value(), err_msg=param1.name)

    with Timer("Compiling training graph"):
        trainer2b.build_theano_graph()

    with Timer("Training"):
        trainer2b.train()

    # Check we correctly resumed training.
    assert_equal(len(trainer1._optimizer.loss.model.parameters),
                 len(trainer2b._optimizer.loss.model.parameters))

    for param1, param2 in zip(trainer1._optimizer.loss.model.parameters,
                              trainer2b._optimizer.loss.model.parameters):
        # I tested it, they are exactly equal when using float64.
        assert_array_almost_equal(param1.get_value(), param2.get_value(), err_msg=param1.name)

    # I tested it, they are exactly equal when using float64.
    assert_array_almost_equal(nll1.mean.view(trainer1.status), nll2b.mean.view(trainer2b.status))
    assert_array_almost_equal(nll1.stderror.view(trainer1.status), nll2b.stderror.view(trainer2b.status))

    # The uninterrupted history must equal the two halves concatenated.
    # I tested it, they are exactly equal when using float64.
    assert_array_almost_equal(logger1.get_variable_history(0), logger2a.get_variable_history(0)+logger2b.get_variable_history(0))
    assert_array_almost_equal(logger1.get_variable_history(1), logger2a.get_variable_history(1)+logger2b.get_variable_history(1))
def test_new_fprop_matches_old_fprop():
    """Check `model.fprop` matches the legacy `model.get_output` path.

    After a short training run, the pre-activation output returned by
    `fprop(inputs, mask, return_output_preactivation=True)` must equal the
    output of `get_output` applied to the masked input concatenated with the
    mask along the channel axis.
    """
    use_mask_as_input = True
    batch_size = 1024
    max_epoch = 10

    print("Will train Convoluational Deep NADE for a total of {0} epochs.".format(max_epoch))

    with Timer("Loading/processing binarized MNIST"):
        trainset, validset, testset = load_binarized_mnist()

        # Extract the center patch (4x4 pixels) of each image.
        indices_to_keep = [348, 349, 350, 351, 376, 377, 378, 379, 404, 405, 406, 407, 432, 433, 434, 435]

        trainset = Dataset(trainset.inputs.get_value()[:, indices_to_keep], trainset.inputs.get_value()[:, indices_to_keep], name="trainset")
        validset = Dataset(validset.inputs.get_value()[:, indices_to_keep], validset.inputs.get_value()[:, indices_to_keep], name="validset")
        testset = Dataset(testset.inputs.get_value()[:, indices_to_keep], testset.inputs.get_value()[:, indices_to_keep], name="testset")

        image_shape = (4, 4)
        # The mask is fed as an extra input channel.
        nb_channels = 1 + (use_mask_as_input is True)

    with Timer("Building model"):
        builder = DeepConvNADEBuilder(image_shape=image_shape,
                                      nb_channels=nb_channels,
                                      use_mask_as_input=use_mask_as_input)

        convnet_blueprint = "64@2x2(valid) -> 1@2x2(full)"
        fullnet_blueprint = "5 -> 16"
        print("Convnet:", convnet_blueprint)
        print("Fullnet:", fullnet_blueprint)
        builder.build_convnet_from_blueprint(convnet_blueprint)
        builder.build_fullnet_from_blueprint(fullnet_blueprint)

        model = builder.build()
        model.initialize()  # By default, uniform initialization.

    with Timer("Building optimizer"):
        loss = BinaryCrossEntropyEstimateWithAutoRegressiveMask(model, trainset)
        optimizer = SGD(loss=loss)
        optimizer.append_direction_modifier(ConstantLearningRate(0.001))

    with Timer("Building trainer"):
        batch_scheduler = MiniBatchSchedulerWithAutoregressiveMask(trainset, batch_size,
                                                                 use_mask_as_input=use_mask_as_input)

        trainer = Trainer(optimizer, batch_scheduler)

        # Print time for one epoch
        trainer.append_task(tasks.PrintEpochDuration())
        trainer.append_task(tasks.PrintTrainingDuration())

        # Log training error
        loss_monitor = views.MonitorVariable(loss.loss)
        avg_loss = tasks.AveragePerEpoch(loss_monitor)
        accum = tasks.Accumulator(loss_monitor)
        logger = tasks.Logger(loss_monitor, avg_loss)
        trainer.append_task(logger, avg_loss, accum)

        # Print average training loss.
        trainer.append_task(tasks.Print("Avg. training loss: : {}", avg_loss))

        trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))
        trainer.build_theano_graph()

    with Timer("Training"):
        trainer.train()

    mask_o_lt_d = batch_scheduler._shared_batch_mask
    _, fprop_pre_output = model.fprop(trainset.inputs, mask_o_lt_d, return_output_preactivation=True)
    model_output = model.get_output(T.concatenate([trainset.inputs * mask_o_lt_d, mask_o_lt_d], axis=1))

    # Print the discrepancy *before* asserting so it is visible even when
    # the assertion fails (the original printed it after, so a failure hid it).
    print(np.sum(abs(model_output.eval() - fprop_pre_output.eval())))
    assert_array_equal(model_output.eval(), fprop_pre_output.eval())
if __name__ == '__main__':
    # Only a subset of the tests is run by default; the commented-out ones
    # can be re-enabled manually.
    # test_simple_convnade()
    # test_convnade_with_mask_as_input_channel()
    # test_convnade_with_max_pooling()
    test_save_load_convnade()
    test_check_init()
    test_new_fprop_matches_old_fprop()
| [
"smartlearner.Trainer",
"smartlearner.optimizers.SGD",
"numpy.arange",
"os.path.join",
"numpy.prod",
"convnade.batch_schedulers.MiniBatchSchedulerWithAutoregressiveMask",
"numpy.set_printoptions",
"tempfile.TemporaryDirectory",
"theano.tensor.concatenate",
"smartlearner.tasks.Print",
"numpy.test... | [((1443, 1477), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(220)'}), '(linewidth=220)\n', (1462, 1477), True, 'import numpy as np\n'), ((1807, 1850), 'convnade.utils.Timer', 'Timer', (['"""Loading/processing binarized MNIST"""'], {}), "('Loading/processing binarized MNIST')\n", (1812, 1850), False, 'from convnade.utils import Timer, cartesian\n'), ((1890, 1912), 'convnade.datasets.load_binarized_mnist', 'load_binarized_mnist', ([], {}), '()\n', (1910, 1912), False, 'from convnade.datasets import load_binarized_mnist\n'), ((2571, 2594), 'convnade.utils.Timer', 'Timer', (['"""Building model"""'], {}), "('Building model')\n", (2576, 2594), False, 'from convnade.utils import Timer, cartesian\n'), ((2614, 2724), 'convnade.DeepConvNADEBuilder', 'DeepConvNADEBuilder', ([], {'image_shape': 'image_shape', 'nb_channels': 'nb_channels', 'use_mask_as_input': 'use_mask_as_input'}), '(image_shape=image_shape, nb_channels=nb_channels,\n use_mask_as_input=use_mask_as_input)\n', (2633, 2724), False, 'from convnade import DeepConvNADE, DeepConvNADEBuilder\n'), ((3222, 3249), 'convnade.utils.Timer', 'Timer', (['"""Building optimizer"""'], {}), "('Building optimizer')\n", (3227, 3249), False, 'from convnade.utils import Timer, cartesian\n'), ((3266, 3331), 'convnade.losses.BinaryCrossEntropyEstimateWithAutoRegressiveMask', 'BinaryCrossEntropyEstimateWithAutoRegressiveMask', (['model', 'trainset'], {}), '(model, trainset)\n', (3314, 3331), False, 'from convnade.losses import BinaryCrossEntropyEstimateWithAutoRegressiveMask\n'), ((3353, 3367), 'smartlearner.optimizers.SGD', 'SGD', ([], {'loss': 'loss'}), '(loss=loss)\n', (3356, 3367), False, 'from smartlearner.optimizers import SGD\n'), ((3451, 3476), 'convnade.utils.Timer', 'Timer', (['"""Building trainer"""'], {}), "('Building trainer')\n", (3456, 3476), False, 'from convnade.utils import Timer, cartesian\n'), ((3504, 3566), 
'convnade.batch_schedulers.MiniBatchSchedulerWithAutoregressiveMask', 'MiniBatchSchedulerWithAutoregressiveMask', (['trainset', 'batch_size'], {}), '(trainset, batch_size)\n', (3544, 3566), False, 'from convnade.batch_schedulers import MiniBatchSchedulerWithAutoregressiveMask\n'), ((3586, 3621), 'smartlearner.Trainer', 'Trainer', (['optimizer', 'batch_scheduler'], {}), '(optimizer, batch_scheduler)\n', (3593, 3621), False, 'from smartlearner import Trainer, Dataset, Model\n'), ((3902, 3934), 'smartlearner.views.MonitorVariable', 'views.MonitorVariable', (['loss.loss'], {}), '(loss.loss)\n', (3923, 3934), False, 'from smartlearner import views\n'), ((3954, 3989), 'smartlearner.tasks.AveragePerEpoch', 'tasks.AveragePerEpoch', (['loss_monitor'], {}), '(loss_monitor)\n', (3975, 3989), False, 'from smartlearner import tasks\n'), ((4006, 4037), 'smartlearner.tasks.Accumulator', 'tasks.Accumulator', (['loss_monitor'], {}), '(loss_monitor)\n', (4023, 4037), False, 'from smartlearner import tasks\n'), ((4055, 4091), 'smartlearner.tasks.Logger', 'tasks.Logger', (['loss_monitor', 'avg_loss'], {}), '(loss_monitor, avg_loss)\n', (4067, 4091), False, 'from smartlearner import tasks\n'), ((4688, 4705), 'convnade.utils.Timer', 'Timer', (['"""Training"""'], {}), "('Training')\n", (4693, 4705), False, 'from convnade.utils import Timer, cartesian\n'), ((4741, 4801), 'convnade.utils.Timer', 'Timer', (['"""Checking the probs for all possible inputs sum to 1"""'], {}), "('Checking the probs for all possible inputs sum to 1')\n", (4746, 4801), False, 'from convnade.utils import Timer, cartesian\n'), ((4817, 4853), 'numpy.random.RandomState', 'np.random.RandomState', (['ordering_seed'], {}), '(ordering_seed)\n', (4838, 4853), True, 'import numpy as np\n'), ((4866, 4886), 'numpy.prod', 'np.prod', (['image_shape'], {}), '(image_shape)\n', (4873, 4886), True, 'import numpy as np\n'), ((4968, 4996), 'numpy.arange', 'np.arange', (['D'], {'dtype': 'np.int32'}), '(D, dtype=np.int32)\n', (4977, 
4996), True, 'import numpy as np\n'), ((5049, 5066), 'theano.tensor.vector', 'T.vector', (['"""input"""'], {}), "('input')\n", (5057, 5066), True, 'import theano.tensor as T\n'), ((5151, 5172), 'theano.tensor.ivector', 'T.ivector', (['"""ordering"""'], {}), "('ordering')\n", (5160, 5172), True, 'import theano.tensor as T\n'), ((6457, 6500), 'convnade.utils.Timer', 'Timer', (['"""Loading/processing binarized MNIST"""'], {}), "('Loading/processing binarized MNIST')\n", (6462, 6500), False, 'from convnade.utils import Timer, cartesian\n'), ((6540, 6562), 'convnade.datasets.load_binarized_mnist', 'load_binarized_mnist', ([], {}), '()\n', (6560, 6562), False, 'from convnade.datasets import load_binarized_mnist\n'), ((7221, 7244), 'convnade.utils.Timer', 'Timer', (['"""Building model"""'], {}), "('Building model')\n", (7226, 7244), False, 'from convnade.utils import Timer, cartesian\n'), ((7264, 7374), 'convnade.DeepConvNADEBuilder', 'DeepConvNADEBuilder', ([], {'image_shape': 'image_shape', 'nb_channels': 'nb_channels', 'use_mask_as_input': 'use_mask_as_input'}), '(image_shape=image_shape, nb_channels=nb_channels,\n use_mask_as_input=use_mask_as_input)\n', (7283, 7374), False, 'from convnade import DeepConvNADE, DeepConvNADEBuilder\n'), ((7893, 7920), 'convnade.utils.Timer', 'Timer', (['"""Building optimizer"""'], {}), "('Building optimizer')\n", (7898, 7920), False, 'from convnade.utils import Timer, cartesian\n'), ((7937, 8002), 'convnade.losses.BinaryCrossEntropyEstimateWithAutoRegressiveMask', 'BinaryCrossEntropyEstimateWithAutoRegressiveMask', (['model', 'trainset'], {}), '(model, trainset)\n', (7985, 8002), False, 'from convnade.losses import BinaryCrossEntropyEstimateWithAutoRegressiveMask\n'), ((8024, 8038), 'smartlearner.optimizers.SGD', 'SGD', ([], {'loss': 'loss'}), '(loss=loss)\n', (8027, 8038), False, 'from smartlearner.optimizers import SGD\n'), ((8122, 8147), 'convnade.utils.Timer', 'Timer', (['"""Building trainer"""'], {}), "('Building trainer')\n", 
(8127, 8147), False, 'from convnade.utils import Timer, cartesian\n'), ((8175, 8237), 'convnade.batch_schedulers.MiniBatchSchedulerWithAutoregressiveMask', 'MiniBatchSchedulerWithAutoregressiveMask', (['trainset', 'batch_size'], {}), '(trainset, batch_size)\n', (8215, 8237), False, 'from convnade.batch_schedulers import MiniBatchSchedulerWithAutoregressiveMask\n'), ((8257, 8292), 'smartlearner.Trainer', 'Trainer', (['optimizer', 'batch_scheduler'], {}), '(optimizer, batch_scheduler)\n', (8264, 8292), False, 'from smartlearner import Trainer, Dataset, Model\n'), ((8573, 8605), 'smartlearner.views.MonitorVariable', 'views.MonitorVariable', (['loss.loss'], {}), '(loss.loss)\n', (8594, 8605), False, 'from smartlearner import views\n'), ((8625, 8660), 'smartlearner.tasks.AveragePerEpoch', 'tasks.AveragePerEpoch', (['loss_monitor'], {}), '(loss_monitor)\n', (8646, 8660), False, 'from smartlearner import tasks\n'), ((8677, 8708), 'smartlearner.tasks.Accumulator', 'tasks.Accumulator', (['loss_monitor'], {}), '(loss_monitor)\n', (8694, 8708), False, 'from smartlearner import tasks\n'), ((8726, 8762), 'smartlearner.tasks.Logger', 'tasks.Logger', (['loss_monitor', 'avg_loss'], {}), '(loss_monitor, avg_loss)\n', (8738, 8762), False, 'from smartlearner import tasks\n'), ((9359, 9376), 'convnade.utils.Timer', 'Timer', (['"""Training"""'], {}), "('Training')\n", (9364, 9376), False, 'from convnade.utils import Timer, cartesian\n'), ((9412, 9472), 'convnade.utils.Timer', 'Timer', (['"""Checking the probs for all possible inputs sum to 1"""'], {}), "('Checking the probs for all possible inputs sum to 1')\n", (9417, 9472), False, 'from convnade.utils import Timer, cartesian\n'), ((9488, 9524), 'numpy.random.RandomState', 'np.random.RandomState', (['ordering_seed'], {}), '(ordering_seed)\n', (9509, 9524), True, 'import numpy as np\n'), ((9537, 9557), 'numpy.prod', 'np.prod', (['image_shape'], {}), '(image_shape)\n', (9544, 9557), True, 'import numpy as np\n'), ((9639, 9667), 
'numpy.arange', 'np.arange', (['D'], {'dtype': 'np.int32'}), '(D, dtype=np.int32)\n', (9648, 9667), True, 'import numpy as np\n'), ((9720, 9737), 'theano.tensor.vector', 'T.vector', (['"""input"""'], {}), "('input')\n", (9728, 9737), True, 'import theano.tensor as T\n'), ((9822, 9843), 'theano.tensor.ivector', 'T.ivector', (['"""ordering"""'], {}), "('ordering')\n", (9831, 9843), True, 'import theano.tensor as T\n'), ((11138, 11181), 'convnade.utils.Timer', 'Timer', (['"""Loading/processing binarized MNIST"""'], {}), "('Loading/processing binarized MNIST')\n", (11143, 11181), False, 'from convnade.utils import Timer, cartesian\n'), ((11221, 11243), 'convnade.datasets.load_binarized_mnist', 'load_binarized_mnist', ([], {}), '()\n', (11241, 11243), False, 'from convnade.datasets import load_binarized_mnist\n'), ((12053, 12156), 'convnade.batch_schedulers.MiniBatchSchedulerWithAutoregressiveMask', 'MiniBatchSchedulerWithAutoregressiveMask', (['trainset', 'batch_size'], {'use_mask_as_input': 'use_mask_as_input'}), '(trainset, batch_size,\n use_mask_as_input=use_mask_as_input)\n', (12093, 12156), False, 'from convnade.batch_schedulers import MiniBatchSchedulerWithAutoregressiveMask\n'), ((12163, 12186), 'convnade.utils.Timer', 'Timer', (['"""Building model"""'], {}), "('Building model')\n", (12168, 12186), False, 'from convnade.utils import Timer, cartesian\n'), ((12206, 12275), 'convnade.DeepConvNADEBuilder', 'DeepConvNADEBuilder', ([], {'image_shape': 'image_shape', 'nb_channels': 'nb_channels'}), '(image_shape=image_shape, nb_channels=nb_channels)\n', (12225, 12275), False, 'from convnade import DeepConvNADE, DeepConvNADEBuilder\n'), ((12701, 12728), 'convnade.utils.Timer', 'Timer', (['"""Building optimizer"""'], {}), "('Building optimizer')\n", (12706, 12728), False, 'from convnade.utils import Timer, cartesian\n'), ((12745, 12810), 'convnade.losses.BinaryCrossEntropyEstimateWithAutoRegressiveMask', 'BinaryCrossEntropyEstimateWithAutoRegressiveMask', (['model', 
'trainset'], {}), '(model, trainset)\n', (12793, 12810), False, 'from convnade.losses import BinaryCrossEntropyEstimateWithAutoRegressiveMask\n'), ((12832, 12846), 'smartlearner.optimizers.SGD', 'SGD', ([], {'loss': 'loss'}), '(loss=loss)\n', (12835, 12846), False, 'from smartlearner.optimizers import SGD\n'), ((12930, 12955), 'convnade.utils.Timer', 'Timer', (['"""Building trainer"""'], {}), "('Building trainer')\n", (12935, 12955), False, 'from convnade.utils import Timer, cartesian\n'), ((12975, 13010), 'smartlearner.Trainer', 'Trainer', (['optimizer', 'batch_scheduler'], {}), '(optimizer, batch_scheduler)\n', (12982, 13010), False, 'from smartlearner import Trainer, Dataset, Model\n'), ((13291, 13323), 'smartlearner.views.MonitorVariable', 'views.MonitorVariable', (['loss.loss'], {}), '(loss.loss)\n', (13312, 13323), False, 'from smartlearner import views\n'), ((13343, 13378), 'smartlearner.tasks.AveragePerEpoch', 'tasks.AveragePerEpoch', (['loss_monitor'], {}), '(loss_monitor)\n', (13364, 13378), False, 'from smartlearner import tasks\n'), ((13395, 13426), 'smartlearner.tasks.Accumulator', 'tasks.Accumulator', (['loss_monitor'], {}), '(loss_monitor)\n', (13412, 13426), False, 'from smartlearner import tasks\n'), ((13444, 13480), 'smartlearner.tasks.Logger', 'tasks.Logger', (['loss_monitor', 'avg_loss'], {}), '(loss_monitor, avg_loss)\n', (13456, 13480), False, 'from smartlearner import tasks\n'), ((14114, 14131), 'convnade.utils.Timer', 'Timer', (['"""Training"""'], {}), "('Training')\n", (14119, 14131), False, 'from convnade.utils import Timer, cartesian\n'), ((14167, 14227), 'convnade.utils.Timer', 'Timer', (['"""Checking the probs for all possible inputs sum to 1"""'], {}), "('Checking the probs for all possible inputs sum to 1')\n", (14172, 14227), False, 'from convnade.utils import Timer, cartesian\n'), ((14243, 14279), 'numpy.random.RandomState', 'np.random.RandomState', (['ordering_seed'], {}), '(ordering_seed)\n', (14264, 14279), True, 'import numpy as 
np\n'), ((14292, 14312), 'numpy.prod', 'np.prod', (['image_shape'], {}), '(image_shape)\n', (14299, 14312), True, 'import numpy as np\n'), ((14394, 14422), 'numpy.arange', 'np.arange', (['D'], {'dtype': 'np.int32'}), '(D, dtype=np.int32)\n', (14403, 14422), True, 'import numpy as np\n'), ((14626, 14643), 'theano.tensor.vector', 'T.vector', (['"""input"""'], {}), "('input')\n", (14634, 14643), True, 'import theano.tensor as T\n'), ((14728, 14749), 'theano.tensor.ivector', 'T.ivector', (['"""ordering"""'], {}), "('ordering')\n", (14737, 14749), True, 'import theano.tensor as T\n'), ((15922, 15965), 'convnade.utils.Timer', 'Timer', (['"""Loading/processing binarized MNIST"""'], {}), "('Loading/processing binarized MNIST')\n", (15927, 15965), False, 'from convnade.utils import Timer, cartesian\n'), ((16005, 16027), 'convnade.datasets.load_binarized_mnist', 'load_binarized_mnist', ([], {}), '()\n', (16025, 16027), False, 'from convnade.datasets import load_binarized_mnist\n'), ((19273, 19306), 'convnade.utils.Timer', 'Timer', (['"""Compiling training graph"""'], {}), "('Compiling training graph')\n", (19278, 19306), False, 'from convnade.utils import Timer, cartesian\n'), ((19356, 19389), 'convnade.utils.Timer', 'Timer', (['"""Compiling training graph"""'], {}), "('Compiling training graph')\n", (19361, 19389), False, 'from convnade.utils import Timer, cartesian\n'), ((19884, 19901), 'convnade.utils.Timer', 'Timer', (['"""Training"""'], {}), "('Training')\n", (19889, 19901), False, 'from convnade.utils import Timer, cartesian\n'), ((20689, 20732), 'convnade.utils.Timer', 'Timer', (['"""Loading/processing binarized MNIST"""'], {}), "('Loading/processing binarized MNIST')\n", (20694, 20732), False, 'from convnade.utils import Timer, cartesian\n'), ((20772, 20794), 'convnade.datasets.load_binarized_mnist', 'load_binarized_mnist', ([], {}), '()\n', (20792, 20794), False, 'from convnade.datasets import load_binarized_mnist\n'), ((24058, 24091), 'convnade.utils.Timer', 
'Timer', (['"""Compiling training graph"""'], {}), "('Compiling training graph')\n", (24063, 24091), False, 'from convnade.utils import Timer, cartesian\n'), ((24141, 24158), 'convnade.utils.Timer', 'Timer', (['"""Training"""'], {}), "('Training')\n", (24146, 24158), False, 'from convnade.utils import Timer, cartesian\n'), ((24246, 24279), 'convnade.utils.Timer', 'Timer', (['"""Compiling training graph"""'], {}), "('Compiling training graph')\n", (24251, 24279), False, 'from convnade.utils import Timer, cartesian\n'), ((24330, 24347), 'convnade.utils.Timer', 'Timer', (['"""Training"""'], {}), "('Training')\n", (24335, 24347), False, 'from convnade.utils import Timer, cartesian\n'), ((24441, 24470), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (24468, 24470), False, 'import tempfile\n'), ((25307, 25340), 'convnade.utils.Timer', 'Timer', (['"""Compiling training graph"""'], {}), "('Compiling training graph')\n", (25312, 25340), False, 'from convnade.utils import Timer, cartesian\n'), ((25391, 25408), 'convnade.utils.Timer', 'Timer', (['"""Training"""'], {}), "('Training')\n", (25396, 25408), False, 'from convnade.utils import Timer, cartesian\n'), ((26853, 26896), 'convnade.utils.Timer', 'Timer', (['"""Loading/processing binarized MNIST"""'], {}), "('Loading/processing binarized MNIST')\n", (26858, 26896), False, 'from convnade.utils import Timer, cartesian\n'), ((26936, 26958), 'convnade.datasets.load_binarized_mnist', 'load_binarized_mnist', ([], {}), '()\n', (26956, 26958), False, 'from convnade.datasets import load_binarized_mnist\n'), ((27647, 27670), 'convnade.utils.Timer', 'Timer', (['"""Building model"""'], {}), "('Building model')\n", (27652, 27670), False, 'from convnade.utils import Timer, cartesian\n'), ((27690, 27800), 'convnade.DeepConvNADEBuilder', 'DeepConvNADEBuilder', ([], {'image_shape': 'image_shape', 'nb_channels': 'nb_channels', 'use_mask_as_input': 'use_mask_as_input'}), '(image_shape=image_shape, 
nb_channels=nb_channels,\n use_mask_as_input=use_mask_as_input)\n', (27709, 27800), False, 'from convnade import DeepConvNADE, DeepConvNADEBuilder\n'), ((28298, 28325), 'convnade.utils.Timer', 'Timer', (['"""Building optimizer"""'], {}), "('Building optimizer')\n", (28303, 28325), False, 'from convnade.utils import Timer, cartesian\n'), ((28342, 28407), 'convnade.losses.BinaryCrossEntropyEstimateWithAutoRegressiveMask', 'BinaryCrossEntropyEstimateWithAutoRegressiveMask', (['model', 'trainset'], {}), '(model, trainset)\n', (28390, 28407), False, 'from convnade.losses import BinaryCrossEntropyEstimateWithAutoRegressiveMask\n'), ((28429, 28443), 'smartlearner.optimizers.SGD', 'SGD', ([], {'loss': 'loss'}), '(loss=loss)\n', (28432, 28443), False, 'from smartlearner.optimizers import SGD\n'), ((28527, 28552), 'convnade.utils.Timer', 'Timer', (['"""Building trainer"""'], {}), "('Building trainer')\n", (28532, 28552), False, 'from convnade.utils import Timer, cartesian\n'), ((28580, 28683), 'convnade.batch_schedulers.MiniBatchSchedulerWithAutoregressiveMask', 'MiniBatchSchedulerWithAutoregressiveMask', (['trainset', 'batch_size'], {'use_mask_as_input': 'use_mask_as_input'}), '(trainset, batch_size,\n use_mask_as_input=use_mask_as_input)\n', (28620, 28683), False, 'from convnade.batch_schedulers import MiniBatchSchedulerWithAutoregressiveMask\n'), ((28766, 28801), 'smartlearner.Trainer', 'Trainer', (['optimizer', 'batch_scheduler'], {}), '(optimizer, batch_scheduler)\n', (28773, 28801), False, 'from smartlearner import Trainer, Dataset, Model\n'), ((29006, 29038), 'smartlearner.views.MonitorVariable', 'views.MonitorVariable', (['loss.loss'], {}), '(loss.loss)\n', (29027, 29038), False, 'from smartlearner import views\n'), ((29058, 29093), 'smartlearner.tasks.AveragePerEpoch', 'tasks.AveragePerEpoch', (['loss_monitor'], {}), '(loss_monitor)\n', (29079, 29093), False, 'from smartlearner import tasks\n'), ((29110, 29141), 'smartlearner.tasks.Accumulator', 'tasks.Accumulator', 
(['loss_monitor'], {}), '(loss_monitor)\n', (29127, 29141), False, 'from smartlearner import tasks\n'), ((29159, 29195), 'smartlearner.tasks.Logger', 'tasks.Logger', (['loss_monitor', 'avg_loss'], {}), '(loss_monitor, avg_loss)\n', (29171, 29195), False, 'from smartlearner import tasks\n'), ((29496, 29513), 'convnade.utils.Timer', 'Timer', (['"""Training"""'], {}), "('Training')\n", (29501, 29513), False, 'from convnade.utils import Timer, cartesian\n'), ((29742, 29809), 'theano.tensor.concatenate', 'T.concatenate', (['[trainset.inputs * mask_o_lt_d, mask_o_lt_d]'], {'axis': '(1)'}), '([trainset.inputs * mask_o_lt_d, mask_o_lt_d], axis=1)\n', (29755, 29809), True, 'import theano.tensor as T\n'), ((184, 218), 'os.path.join', 'os.path.join', (['__file__', '""".."""', '""".."""'], {}), "(__file__, '..', '..')\n", (196, 218), False, 'import os\n'), ((3412, 3439), 'smartlearner.direction_modifiers.ConstantLearningRate', 'ConstantLearningRate', (['(0.001)'], {}), '(0.001)\n', (3432, 3439), False, 'from smartlearner.direction_modifiers import ConstantLearningRate\n'), ((3651, 3696), 'smartlearner.stopping_criteria.MaxEpochStopping', 'stopping_criteria.MaxEpochStopping', (['max_epoch'], {}), '(max_epoch)\n', (3685, 3696), False, 'from smartlearner import stopping_criteria\n'), ((3762, 3788), 'smartlearner.tasks.PrintEpochDuration', 'tasks.PrintEpochDuration', ([], {}), '()\n', (3786, 3788), False, 'from smartlearner import tasks\n'), ((3818, 3847), 'smartlearner.tasks.PrintTrainingDuration', 'tasks.PrintTrainingDuration', ([], {}), '()\n', (3845, 3847), False, 'from smartlearner import tasks\n'), ((4213, 4266), 'smartlearner.tasks.Print', 'tasks.Print', (['"""Avg. training loss: : {}"""', 'avg_loss'], {}), "('Avg. 
training loss: : {}', avg_loss)\n", (4224, 4266), False, 'from smartlearner import tasks\n'), ((4556, 4643), 'smartlearner.tasks.Print', 'tasks.Print', (['"""Validset - NLL : {0:.2f} ± {1:.2f}"""', 'nll.mean', 'nll.stderror'], {}), "('Validset - NLL : {0:.2f} ± {1:.2f}', nll.mean, nll.\n stderror)\n", (4567, 4643), False, 'from smartlearner import tasks\n'), ((5625, 5653), 'numpy.arange', 'np.arange', (['D'], {'dtype': 'np.int32'}), '(D, dtype=np.int32)\n', (5634, 5653), True, 'import numpy as np\n'), ((6078, 6118), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['p_x', '(1.0)'], {'decimal': '(5)'}), '(p_x, 1.0, decimal=5)\n', (6097, 6118), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((8083, 8110), 'smartlearner.direction_modifiers.ConstantLearningRate', 'ConstantLearningRate', (['(0.001)'], {}), '(0.001)\n', (8103, 8110), False, 'from smartlearner.direction_modifiers import ConstantLearningRate\n'), ((8322, 8367), 'smartlearner.stopping_criteria.MaxEpochStopping', 'stopping_criteria.MaxEpochStopping', (['max_epoch'], {}), '(max_epoch)\n', (8356, 8367), False, 'from smartlearner import stopping_criteria\n'), ((8433, 8459), 'smartlearner.tasks.PrintEpochDuration', 'tasks.PrintEpochDuration', ([], {}), '()\n', (8457, 8459), False, 'from smartlearner import tasks\n'), ((8489, 8518), 'smartlearner.tasks.PrintTrainingDuration', 'tasks.PrintTrainingDuration', ([], {}), '()\n', (8516, 8518), False, 'from smartlearner import tasks\n'), ((8884, 8937), 'smartlearner.tasks.Print', 'tasks.Print', (['"""Avg. training loss: : {}"""', 'avg_loss'], {}), "('Avg. 
training loss: : {}', avg_loss)\n", (8895, 8937), False, 'from smartlearner import tasks\n'), ((9227, 9314), 'smartlearner.tasks.Print', 'tasks.Print', (['"""Validset - NLL : {0:.2f} ± {1:.2f}"""', 'nll.mean', 'nll.stderror'], {}), "('Validset - NLL : {0:.2f} ± {1:.2f}', nll.mean, nll.\n stderror)\n", (9238, 9314), False, 'from smartlearner import tasks\n'), ((10296, 10324), 'numpy.arange', 'np.arange', (['D'], {'dtype': 'np.int32'}), '(D, dtype=np.int32)\n', (10305, 10324), True, 'import numpy as np\n'), ((10749, 10789), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['p_x', '(1.0)'], {'decimal': '(5)'}), '(p_x, 1.0, decimal=5)\n', (10768, 10789), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((12891, 12918), 'smartlearner.direction_modifiers.ConstantLearningRate', 'ConstantLearningRate', (['(0.001)'], {}), '(0.001)\n', (12911, 12918), False, 'from smartlearner.direction_modifiers import ConstantLearningRate\n'), ((13040, 13085), 'smartlearner.stopping_criteria.MaxEpochStopping', 'stopping_criteria.MaxEpochStopping', (['max_epoch'], {}), '(max_epoch)\n', (13074, 13085), False, 'from smartlearner import stopping_criteria\n'), ((13151, 13177), 'smartlearner.tasks.PrintEpochDuration', 'tasks.PrintEpochDuration', ([], {}), '()\n', (13175, 13177), False, 'from smartlearner import tasks\n'), ((13207, 13236), 'smartlearner.tasks.PrintTrainingDuration', 'tasks.PrintTrainingDuration', ([], {}), '()\n', (13234, 13236), False, 'from smartlearner import tasks\n'), ((13602, 13655), 'smartlearner.tasks.Print', 'tasks.Print', (['"""Avg. training loss: : {}"""', 'avg_loss'], {}), "('Avg. 
training loss: : {}', avg_loss)\n", (13613, 13655), False, 'from smartlearner import tasks\n'), ((13982, 14069), 'smartlearner.tasks.Print', 'tasks.Print', (['"""Validset - NLL : {0:.2f} ± {1:.2f}"""', 'nll.mean', 'nll.stderror'], {}), "('Validset - NLL : {0:.2f} ± {1:.2f}', nll.mean, nll.\n stderror)\n", (13993, 14069), False, 'from smartlearner import tasks\n'), ((14517, 14529), 'numpy.arange', 'np.arange', (['D'], {}), '(D)\n', (14526, 14529), True, 'import numpy as np\n'), ((15202, 15230), 'numpy.arange', 'np.arange', (['D'], {'dtype': 'np.int32'}), '(D, dtype=np.int32)\n', (15211, 15230), True, 'import numpy as np\n'), ((15655, 15695), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['p_x', '(1.0)'], {'decimal': '(5)'}), '(p_x, 1.0, decimal=5)\n', (15674, 15695), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((16866, 16889), 'convnade.utils.Timer', 'Timer', (['"""Building model"""'], {}), "('Building model')\n", (16871, 16889), False, 'from convnade.utils import Timer, cartesian\n'), ((16913, 17023), 'convnade.DeepConvNADEBuilder', 'DeepConvNADEBuilder', ([], {'image_shape': 'image_shape', 'nb_channels': 'nb_channels', 'use_mask_as_input': 'use_mask_as_input'}), '(image_shape=image_shape, nb_channels=nb_channels,\n use_mask_as_input=use_mask_as_input)\n', (16932, 17023), False, 'from convnade import DeepConvNADE, DeepConvNADEBuilder\n'), ((17569, 17596), 'convnade.utils.Timer', 'Timer', (['"""Building optimizer"""'], {}), "('Building optimizer')\n", (17574, 17596), False, 'from convnade.utils import Timer, cartesian\n'), ((17617, 17682), 'convnade.losses.BinaryCrossEntropyEstimateWithAutoRegressiveMask', 'BinaryCrossEntropyEstimateWithAutoRegressiveMask', (['model', 'trainset'], {}), '(model, trainset)\n', (17665, 17682), False, 'from convnade.losses import BinaryCrossEntropyEstimateWithAutoRegressiveMask\n'), ((17708, 17722), 'smartlearner.optimizers.SGD', 'SGD', ([], 
{'loss': 'loss'}), '(loss=loss)\n', (17711, 17722), False, 'from smartlearner.optimizers import SGD\n'), ((17814, 17839), 'convnade.utils.Timer', 'Timer', (['"""Building trainer"""'], {}), "('Building trainer')\n", (17819, 17839), False, 'from convnade.utils import Timer, cartesian\n'), ((17871, 17933), 'convnade.batch_schedulers.MiniBatchSchedulerWithAutoregressiveMask', 'MiniBatchSchedulerWithAutoregressiveMask', (['trainset', 'batch_size'], {}), '(trainset, batch_size)\n', (17911, 17933), False, 'from convnade.batch_schedulers import MiniBatchSchedulerWithAutoregressiveMask\n'), ((17957, 17992), 'smartlearner.Trainer', 'Trainer', (['optimizer', 'batch_scheduler'], {}), '(optimizer, batch_scheduler)\n', (17964, 17992), False, 'from smartlearner import Trainer, Dataset, Model\n'), ((18217, 18249), 'smartlearner.views.MonitorVariable', 'views.MonitorVariable', (['loss.loss'], {}), '(loss.loss)\n', (18238, 18249), False, 'from smartlearner import views\n'), ((18273, 18308), 'smartlearner.tasks.AveragePerEpoch', 'tasks.AveragePerEpoch', (['loss_monitor'], {}), '(loss_monitor)\n', (18294, 18308), False, 'from smartlearner import tasks\n'), ((18329, 18360), 'smartlearner.tasks.Accumulator', 'tasks.Accumulator', (['loss_monitor'], {}), '(loss_monitor)\n', (18346, 18360), False, 'from smartlearner import tasks\n'), ((18382, 18418), 'smartlearner.tasks.Logger', 'tasks.Logger', (['loss_monitor', 'avg_loss'], {}), '(loss_monitor, avg_loss)\n', (18394, 18418), False, 'from smartlearner import tasks\n'), ((21633, 21656), 'convnade.utils.Timer', 'Timer', (['"""Building model"""'], {}), "('Building model')\n", (21638, 21656), False, 'from convnade.utils import Timer, cartesian\n'), ((21680, 21790), 'convnade.DeepConvNADEBuilder', 'DeepConvNADEBuilder', ([], {'image_shape': 'image_shape', 'nb_channels': 'nb_channels', 'use_mask_as_input': 'use_mask_as_input'}), '(image_shape=image_shape, nb_channels=nb_channels,\n use_mask_as_input=use_mask_as_input)\n', (21699, 21790), False, 
'from convnade import DeepConvNADE, DeepConvNADEBuilder\n'), ((22336, 22363), 'convnade.utils.Timer', 'Timer', (['"""Building optimizer"""'], {}), "('Building optimizer')\n", (22341, 22363), False, 'from convnade.utils import Timer, cartesian\n'), ((22384, 22449), 'convnade.losses.BinaryCrossEntropyEstimateWithAutoRegressiveMask', 'BinaryCrossEntropyEstimateWithAutoRegressiveMask', (['model', 'trainset'], {}), '(model, trainset)\n', (22432, 22449), False, 'from convnade.losses import BinaryCrossEntropyEstimateWithAutoRegressiveMask\n'), ((22475, 22489), 'smartlearner.optimizers.SGD', 'SGD', ([], {'loss': 'loss'}), '(loss=loss)\n', (22478, 22489), False, 'from smartlearner.optimizers import SGD\n'), ((22581, 22606), 'convnade.utils.Timer', 'Timer', (['"""Building trainer"""'], {}), "('Building trainer')\n", (22586, 22606), False, 'from convnade.utils import Timer, cartesian\n'), ((22638, 22700), 'convnade.batch_schedulers.MiniBatchSchedulerWithAutoregressiveMask', 'MiniBatchSchedulerWithAutoregressiveMask', (['trainset', 'batch_size'], {}), '(trainset, batch_size)\n', (22678, 22700), False, 'from convnade.batch_schedulers import MiniBatchSchedulerWithAutoregressiveMask\n'), ((22724, 22759), 'smartlearner.Trainer', 'Trainer', (['optimizer', 'batch_scheduler'], {}), '(optimizer, batch_scheduler)\n', (22731, 22759), False, 'from smartlearner import Trainer, Dataset, Model\n'), ((22984, 23016), 'smartlearner.views.MonitorVariable', 'views.MonitorVariable', (['loss.loss'], {}), '(loss.loss)\n', (23005, 23016), False, 'from smartlearner import views\n'), ((23040, 23075), 'smartlearner.tasks.AveragePerEpoch', 'tasks.AveragePerEpoch', (['loss_monitor'], {}), '(loss_monitor)\n', (23061, 23075), False, 'from smartlearner import tasks\n'), ((23096, 23127), 'smartlearner.tasks.Accumulator', 'tasks.Accumulator', (['loss_monitor'], {}), '(loss_monitor)\n', (23113, 23127), False, 'from smartlearner import tasks\n'), ((23149, 23185), 'smartlearner.tasks.Logger', 'tasks.Logger', 
(['loss_monitor', 'avg_loss'], {}), '(loss_monitor, avg_loss)\n', (23161, 23185), False, 'from smartlearner import tasks\n'), ((24503, 24518), 'convnade.utils.Timer', 'Timer', (['"""Saving"""'], {}), "('Saving')\n", (24508, 24518), False, 'from convnade.utils import Timer, cartesian\n'), ((24646, 24662), 'convnade.utils.Timer', 'Timer', (['"""Loading"""'], {}), "('Loading')\n", (24651, 24662), False, 'from convnade.utils import Timer, cartesian\n'), ((28488, 28515), 'smartlearner.direction_modifiers.ConstantLearningRate', 'ConstantLearningRate', (['(0.001)'], {}), '(0.001)\n', (28508, 28515), False, 'from smartlearner.direction_modifiers import ConstantLearningRate\n'), ((28866, 28892), 'smartlearner.tasks.PrintEpochDuration', 'tasks.PrintEpochDuration', ([], {}), '()\n', (28890, 28892), False, 'from smartlearner import tasks\n'), ((28922, 28951), 'smartlearner.tasks.PrintTrainingDuration', 'tasks.PrintTrainingDuration', ([], {}), '()\n', (28949, 28951), False, 'from smartlearner import tasks\n'), ((29317, 29370), 'smartlearner.tasks.Print', 'tasks.Print', (['"""Avg. training loss: : {}"""', 'avg_loss'], {}), "('Avg. 
training loss: : {}', avg_loss)\n", (29328, 29370), False, 'from smartlearner import tasks\n'), ((29401, 29446), 'smartlearner.stopping_criteria.MaxEpochStopping', 'stopping_criteria.MaxEpochStopping', (['max_epoch'], {}), '(max_epoch)\n', (29435, 29446), False, 'from smartlearner import stopping_criteria\n'), ((4338, 4403), 'convnade.losses.BinaryCrossEntropyEstimateWithAutoRegressiveMask', 'BinaryCrossEntropyEstimateWithAutoRegressiveMask', (['model', 'validset'], {}), '(model, validset)\n', (4386, 4403), False, 'from convnade.losses import BinaryCrossEntropyEstimateWithAutoRegressiveMask\n'), ((9009, 9074), 'convnade.losses.BinaryCrossEntropyEstimateWithAutoRegressiveMask', 'BinaryCrossEntropyEstimateWithAutoRegressiveMask', (['model', 'validset'], {}), '(model, validset)\n', (9057, 9074), False, 'from convnade.losses import BinaryCrossEntropyEstimateWithAutoRegressiveMask\n'), ((13727, 13792), 'convnade.losses.BinaryCrossEntropyEstimateWithAutoRegressiveMask', 'BinaryCrossEntropyEstimateWithAutoRegressiveMask', (['model', 'validset'], {}), '(model, validset)\n', (13775, 13792), False, 'from convnade.losses import BinaryCrossEntropyEstimateWithAutoRegressiveMask\n'), ((17510, 17553), 'smartlearner.initializers.UniformInitializer', 'initer.UniformInitializer', ([], {'random_seed': '(1234)'}), '(random_seed=1234)\n', (17535, 17553), True, 'import smartlearner.initializers as initer\n'), ((17771, 17798), 'smartlearner.direction_modifiers.ConstantLearningRate', 'ConstantLearningRate', (['(0.001)'], {}), '(0.001)\n', (17791, 17798), False, 'from smartlearner.direction_modifiers import ConstantLearningRate\n'), ((18065, 18091), 'smartlearner.tasks.PrintEpochDuration', 'tasks.PrintEpochDuration', ([], {}), '()\n', (18089, 18091), False, 'from smartlearner import tasks\n'), ((18125, 18154), 'smartlearner.tasks.PrintTrainingDuration', 'tasks.PrintTrainingDuration', ([], {}), '()\n', (18152, 18154), False, 'from smartlearner import tasks\n'), ((18552, 18605), 
'smartlearner.tasks.Print', 'tasks.Print', (['"""Avg. training loss: : {}"""', 'avg_loss'], {}), "('Avg. training loss: : {}', avg_loss)\n", (18563, 18605), False, 'from smartlearner import tasks\n'), ((19017, 19104), 'smartlearner.tasks.Print', 'tasks.Print', (['"""Validset - NLL : {0:.2f} ± {1:.2f}"""', 'nll.mean', 'nll.stderror'], {}), "('Validset - NLL : {0:.2f} ± {1:.2f}', nll.mean, nll.\n stderror)\n", (19028, 19104), False, 'from smartlearner import tasks\n'), ((19134, 19179), 'smartlearner.stopping_criteria.MaxEpochStopping', 'stopping_criteria.MaxEpochStopping', (['nb_epochs'], {}), '(nb_epochs)\n', (19168, 19179), False, 'from smartlearner import stopping_criteria\n'), ((22277, 22320), 'smartlearner.initializers.UniformInitializer', 'initer.UniformInitializer', ([], {'random_seed': '(1234)'}), '(random_seed=1234)\n', (22302, 22320), True, 'import smartlearner.initializers as initer\n'), ((22538, 22565), 'smartlearner.direction_modifiers.ConstantLearningRate', 'ConstantLearningRate', (['(0.001)'], {}), '(0.001)\n', (22558, 22565), False, 'from smartlearner.direction_modifiers import ConstantLearningRate\n'), ((22832, 22858), 'smartlearner.tasks.PrintEpochDuration', 'tasks.PrintEpochDuration', ([], {}), '()\n', (22856, 22858), False, 'from smartlearner import tasks\n'), ((22892, 22921), 'smartlearner.tasks.PrintTrainingDuration', 'tasks.PrintTrainingDuration', ([], {}), '()\n', (22919, 22921), False, 'from smartlearner import tasks\n'), ((23319, 23372), 'smartlearner.tasks.Print', 'tasks.Print', (['"""Avg. training loss: : {}"""', 'avg_loss'], {}), "('Avg. 
training loss: : {}', avg_loss)\n", (23330, 23372), False, 'from smartlearner import tasks\n'), ((23784, 23871), 'smartlearner.tasks.Print', 'tasks.Print', (['"""Validset - NLL : {0:.2f} ± {1:.2f}"""', 'nll.mean', 'nll.stderror'], {}), "('Validset - NLL : {0:.2f} ± {1:.2f}', nll.mean, nll.\n stderror)\n", (23795, 23871), False, 'from smartlearner import tasks\n'), ((23901, 23946), 'smartlearner.stopping_criteria.MaxEpochStopping', 'stopping_criteria.MaxEpochStopping', (['nb_epochs'], {}), '(nb_epochs)\n', (23935, 23946), False, 'from smartlearner import stopping_criteria\n'), ((18685, 18750), 'convnade.losses.BinaryCrossEntropyEstimateWithAutoRegressiveMask', 'BinaryCrossEntropyEstimateWithAutoRegressiveMask', (['model', 'validset'], {}), '(model, validset)\n', (18733, 18750), False, 'from convnade.losses import BinaryCrossEntropyEstimateWithAutoRegressiveMask\n'), ((23452, 23517), 'convnade.losses.BinaryCrossEntropyEstimateWithAutoRegressiveMask', 'BinaryCrossEntropyEstimateWithAutoRegressiveMask', (['model', 'validset'], {}), '(model, validset)\n', (23500, 23517), False, 'from convnade.losses import BinaryCrossEntropyEstimateWithAutoRegressiveMask\n'), ((6000, 6014), 'numpy.array', 'np.array', (['nlls'], {}), '(nlls)\n', (6008, 6014), True, 'import numpy as np\n'), ((10671, 10685), 'numpy.array', 'np.array', (['nlls'], {}), '(nlls)\n', (10679, 10685), True, 'import numpy as np\n'), ((15577, 15591), 'numpy.array', 'np.array', (['nlls'], {}), '(nlls)\n', (15585, 15591), True, 'import numpy as np\n')] |
from pyrep.objects.vision_sensor import VisionSensor
import numpy as np
import cv2
class Camera(VisionSensor):
    """PyRep 'camera' vision sensor wrapped with pinhole-model intrinsics.

    On construction the intrinsic parameters (ax, ay, u0, v0) are derived
    from the sensor's perspective angle and resolution, and a fixed
    homogeneous transform ``H`` is stored.
    """

    def __init__(self):
        super().__init__('camera')
        # enable camera sensor
        #self.set_explicit_handling(1)
        #self.handle_explicitly()
        # Intrinsic matrix parameters of the pinhole model:
        # [ax  0 u0
        #   0 ay v0
        #   0  0  1]
        width, height = self.get_resolution()
        half_fov = np.radians(self.get_perspective_angle() / 2)
        self.ax = 2 * np.tan(half_fov) / width  # f/dx
        self.ay = self.ax                        # f/dy (square pixels)
        self.u0 = width / 2                      # principal point u0
        self.v0 = height / 2                     # principal point v0
        # Fixed homogeneous transform associated with this camera.
        self.H = np.array([[0, 1, 0, 1.1],
                           [1, 0, 0, 0],
                           [0, 0, -1, 1.8],
                           [0, 0, 0, 1]])

    def capture_bgr(self):
        """Capture one frame and return it as an 8-bit BGR image."""
        rgb = self.capture_rgb()
        bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR) * 255
        return np.array(bgr, dtype=np.uint8)

    def uv2XYZ(self, depth_img, u, v):
        """Back-project pixel (u, v) with its depth into homogeneous 3D coordinates.

        Assumes depth_img is indexed as [row, col] = [v, u] — TODO confirm
        against the sensor's depth-capture convention.
        """
        depth = depth_img[v, u]
        x = depth * (u - self.u0) * self.ax
        y = depth * (v - self.v0) * self.ay
        return np.array([x, y, depth, 1])
| [
"numpy.array"
] | [((637, 708), 'numpy.array', 'np.array', (['[[0, 1, 0, 1.1], [1, 0, 0, 0], [0, 0, -1, 1.8], [0, 0, 0, 1]]'], {}), '([[0, 1, 0, 1.1], [1, 0, 0, 0], [0, 0, -1, 1.8], [0, 0, 0, 1]])\n', (645, 708), True, 'import numpy as np\n'), ((889, 918), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (897, 918), True, 'import numpy as np\n'), ((997, 1071), 'numpy.array', 'np.array', (['[Z * (u - self.u0) * self.ax, Z * (v - self.v0) * self.ay, Z, 1]'], {}), '([Z * (u - self.u0) * self.ax, Z * (v - self.v0) * self.ay, Z, 1])\n', (1005, 1071), True, 'import numpy as np\n')] |
"""
This file contains classes which have functions to optimize the queries to ask the human.
"""
from typing import Callable, List, Tuple
import itertools
import numpy as np
from scipy.spatial import ConvexHull
import warnings
from aprel.basics import Trajectory, TrajectorySet
from aprel.learning import Belief, SamplingBasedBelief, User, SoftmaxUser
from aprel.learning import Query, PreferenceQuery, WeakComparisonQuery, FullRankingQuery
from aprel.querying import mutual_information, volume_removal, disagreement, regret, random, thompson
from aprel.utils import kMedoids, dpp_mode, default_query_distance
class QueryOptimizer:
    """
    Abstract base class for query optimization frameworks.

    Attributes:
        acquisition_functions (Dict): a name-to-function registry of the
            available acquisition functions. Newly implemented acquisition
            functions must be registered in this dictionary.
    """
    def __init__(self):
        # Registry of every acquisition function this optimizer can use.
        self.acquisition_functions = {
            'mutual_information': mutual_information,
            'volume_removal': volume_removal,
            'disagreement': disagreement,
            'regret': regret,
            'random': random,
            'thompson': thompson,
        }
class QueryOptimizerDiscreteTrajectorySet(QueryOptimizer):
"""
Query optimization framework that assumes a discrete set of trajectories is available. The query optimization
is then performed over this discrete set.
Parameters:
trajectory_set (TrajectorySet): The set of trajectories from which the queries will be optimized. This set
defines the possible set of trajectories that may show up in the optimized query.
Attributes:
trajectory_set (TrajectorySet): The set of trajectories from which the queries are optimized. This set
defines the possible set of trajectories that may show up in the optimized query.
"""
def __init__(self, trajectory_set: TrajectorySet):
super(QueryOptimizerDiscreteTrajectorySet, self).__init__()
self.trajectory_set = trajectory_set
def argplanner(self, user: User) -> int:
"""
Given a user model, returns the index of the trajectory that best fits the user in the trajectory set.
Args:
user (User): The user object for whom the optimal trajectory is being searched.
Returns:
int: The index of the optimal trajectory in the trajectory set.
"""
if isinstance(user, SoftmaxUser):
return np.asscalar(np.argmax(user.reward(self.trajectory_set)))
raise NotImplementedError("The planner has not been implemented for the given user model.")
def planner(self, user: User) -> Trajectory:
"""
Given a user model, returns the trajectory in the trajectory set that best fits the user.
Args:
user (User): The user object for whom the optimal trajectory is being searched.
Returns:
Trajectory: The optimal trajectory in the trajectory set.
"""
return self.trajectory_set[self.argplanner(user)]
def optimize(self,
acquisition_func_str: str,
belief: Belief,
initial_query: Query,
batch_size: int = 1,
optimization_method: str = 'exhaustive_search',
**kwargs) -> Tuple[List[Query], np.array]:
"""
This function generates the optimal query or the batch of queries to ask to the user given a belief
distribution about them. It also returns the acquisition function values of the optimized queries.
Args:
acquisition_func_str (str): the name of the acquisition function used to decide the value of each query.
Currently implemented options are:
- `disagreement`: Based on `Katz. et al. (2019) <https://arxiv.org/abs/1907.05575>`_.
- `mutual_information`: Based on `Bıyık et al. (2019) <https://arxiv.org/abs/1910.04365>`_.
- `random`: Randomly chooses a query.
- `regret`: Based on `Wilde et al. (2020) <https://arxiv.org/abs/2005.04067>`_.
- `thompson`: Based on `Tucker et al. (2019) <https://arxiv.org/abs/1909.12316>`_.
- `volume_removal`: Based on `Sadigh et al. (2017) <http://m.roboticsproceedings.org/rss13/p53.pdf>`_ and `Bıyık et al. <https://arxiv.org/abs/1904.02209>`_.
belief (Belief): the current belief distribution over the user.
initial_query (Query): an initial query such that the output query will have the same type.
batch_size (int): the number of queries to return.
optimization_method (str): the name of the method used to select queries. Currently implemented options are:
- `exhaustive_search`: Used for exhaustively searching a single query.
- `greedy`: Exhaustively searches for the top :py:attr:`batch_size` queries in terms of the acquisition function.
- `medoids`: Batch generation method based on `Bıyık et al. (2018) <https://arxiv.org/abs/1810.04303>`_.
- `boundary_medoids`: Batch generation method based on `Bıyık et al. (2018) <https://arxiv.org/abs/1810.04303>`_.
- `successive_elimination`: Batch generation method based on `Bıyık et al. (2018) <https://arxiv.org/abs/1810.04303>`_.
- `dpp`: Batch generation method based on `Bıyık et al. (2019) <https://arxiv.org/abs/1906.07975>`_.
**kwargs: extra arguments needed for specific optimization methods or acquisition functions.
Returns:
2-tuple:
- List[Query]: The list of optimized queries. **Note**: Even if :py:attr:`batch_size` is 1, a list is returned.
- numpy.array: An array of floats that keep the acquisition function values corresponding to the output queries.
"""
assert(acquisition_func_str in self.acquisition_functions), 'Unknown acquisition function.'
acquisition_func = self.acquisition_functions[acquisition_func_str]
assert(batch_size > 0 and isinstance(batch_size, int)), 'Invalid batch_size ' + str(batch_size)
assert(optimization_method in ['exhaustive_search', 'greedy', 'medoids', 'boundary_medoids', 'successive_elimination', 'dpp']), 'Unknown optimization_method ' + str(optimization_method)
if batch_size > 1 and optimization_method == 'exhaustive_search':
warnings.warn('Since batch size > 1, ignoring exhaustive search and using greedy batch selection instead.')
optimization_method = 'greedy'
elif batch_size == 1 and optimization_method in ['greedy', 'medoids', 'boundary_medoids', 'successive_elimination', 'dpp']:
warnings.warn('Since batch size == 1, ignoring the batch selection method and using exhaustive search instead.')
optimization_method = 'exhaustive_search'
if optimization_method == 'exhaustive_search':
return self.exhaustive_search(acquisition_func, belief, initial_query, **kwargs)
elif optimization_method == 'greedy':
return self.greedy_batch(acquisition_func, belief, initial_query, batch_size, **kwargs)
elif optimization_method == 'medoids':
return self.medoids_batch(acquisition_func, belief, initial_query, batch_size, **kwargs)
elif optimization_method == 'boundary_medoids':
return self.boundary_medoids_batch(acquisition_func, belief, initial_query, batch_size, **kwargs)
elif optimization_method == 'successive_elimination':
return self.successive_elimination_batch(acquisition_func, belief, initial_query, batch_size, **kwargs)
elif optimization_method == 'dpp':
return self.dpp_batch(acquisition_func, belief, initial_query, batch_size, **kwargs)
raise NotImplementedError('unknown optimization method for QueryOptimizerDiscreteTrajectorySet: ' + optimization_method + '.')
def exhaustive_search(self,
acquisition_func: Callable,
belief: Belief,
initial_query: Query,
**kwargs) -> Tuple[List[Query], np.array]:
"""
Searches over the possible queries to find the singular most optimal query.
Args:
acquisition_func (Callable): the acquisition function to be maximized.
belief (Belief): the current belief distribution over the user.
initial_query (Query): an initial query such that the output query will have the same type.
**kwargs: extra arguments needed for specific acquisition functions.
Returns:
2-tuple:
- List[Query]: The optimal query as a list of one :class:`.Query`.
- numpy.array: An array of floats that keep the acquisition function value corresponding to the output query.
"""
return self.greedy_batch(acquisition_func, belief, initial_query, batch_size=1, **kwargs)
def greedy_batch(self,
                 acquisition_func: Callable,
                 belief: Belief,
                 initial_query: Query,
                 batch_size: int,
                 **kwargs) -> Tuple[List[Query], np.array]:
    """
    Uses the greedy method to find a batch of queries by selecting the :py:attr:`batch_size` individually most optimal queries.

    Args:
        acquisition_func (Callable): the acquisition function to be maximized by each individual query.
        belief (Belief): the current belief distribution over the user.
        initial_query (Query): an initial query such that the output query will have the same type.
        batch_size (int): the batch size of the output.
        **kwargs: extra arguments needed for specific acquisition functions.

    Returns:
        2-tuple:
            - List[Query]: The optimized batch of queries as a list.
            - numpy.array: An array of floats that keep the acquisition function values corresponding to the output queries.
    """
    if isinstance(initial_query, PreferenceQuery) or isinstance(initial_query, WeakComparisonQuery) or isinstance(initial_query, FullRankingQuery):
        if acquisition_func is random:
            # Random baseline: each query gets a uniformly random slate of K
            # distinct trajectories; the returned values (all 1.) are dummies.
            best_batch = [initial_query.copy() for _ in range(batch_size)]
            for i in range(batch_size):
                best_batch[i].slate = self.trajectory_set[np.random.choice(self.trajectory_set.size, size=initial_query.K, replace=False)]
            return best_batch, np.array([1. for _ in range(batch_size)])
        elif acquisition_func is thompson and isinstance(belief, SamplingBasedBelief):
            # Thompson sampling: plan the best trajectory for each belief
            # sample, then assemble slates from the planned trajectories.
            subsets = np.array([list(tup) for tup in itertools.combinations(np.arange(belief.num_samples), initial_query.K)])
            if len(subsets) < batch_size:
                batch_size = len(subsets)
                warnings.warn('The number of possible queries is smaller than the batch size. Automatically reducing the batch size.')
            temp_user = belief.user_model.copy()
            planned_traj_ids = []
            for sample in belief.samples:
                temp_user.params = sample
                planned_traj_ids.append(self.argplanner(temp_user))
            belief_logprobs = np.array(belief.logprobs)
            best_batch = [initial_query.copy() for _ in range(batch_size)]
            unique_traj_ids, inverse = np.unique(planned_traj_ids, return_inverse=True)
            if len(unique_traj_ids) < initial_query.K:
                # Too few distinct planned trajectories to fill a K-slate:
                # pad each slate with random unplanned trajectories.
                remaining_ids = np.setdiff1d(np.arange(self.trajectory_set.size), unique_traj_ids)
                missing_count = initial_query.K - len(unique_traj_ids)
                for i in range(batch_size):
                    ids = np.append(unique_traj_ids, np.random.choice(remaining_ids, size=missing_count, replace=False))
                    best_batch[i].slate = self.trajectory_set[ids]
            else:
                # Sample slates with probability proportional to the total
                # belief mass behind each planned trajectory.
                unique_probs = np.array([np.exp(belief_logprobs[inverse==i]).sum() for i in range(len(unique_traj_ids))])
                if np.isclose(unique_probs.sum(), 0):
                    # Numerical underflow of exp(logprob): fall back to uniform.
                    unique_probs = np.ones_like(unique_probs)
                unique_probs /= unique_probs.sum()
                for i in range(batch_size):
                    best_batch[i].slate = self.trajectory_set[np.random.choice(unique_traj_ids, size=initial_query.K, replace=False, p=unique_probs)]
            return best_batch, np.array([1. for _ in range(batch_size)])
        elif acquisition_func is mutual_information or acquisition_func is volume_removal:
            # Score every K-subset of trajectories and keep the batch_size best.
            subsets = np.array([list(tup) for tup in itertools.combinations(np.arange(self.trajectory_set.size), initial_query.K)])
            if len(subsets) < batch_size:
                batch_size = len(subsets)
                warnings.warn('The number of possible queries is smaller than the batch size. Automatically reducing the batch size.')
            vals = []
            for ids in subsets:
                curr_query = initial_query.copy()
                curr_query.slate = self.trajectory_set[ids]
                vals.append(acquisition_func(belief, curr_query, **kwargs))
            vals = np.array(vals)
            # argpartition yields the indices of the batch_size largest values.
            inds = np.argpartition(vals, -batch_size)[-batch_size:]
            best_batch = [initial_query.copy() for _ in range(batch_size)]
            for i in range(batch_size):
                best_batch[i].slate = self.trajectory_set[subsets[inds[i]]]
            return best_batch, vals[inds]
        elif acquisition_func is disagreement and isinstance(belief, SamplingBasedBelief):
            assert(initial_query.K == 2), 'disagreement acquisition function works only with pairwise comparison queries, i.e., K must be 2.'
            # Score every pair of belief samples by their disagreement, then
            # plan one trajectory per sample in each winning pair.
            subsets = np.array([list(tup) for tup in itertools.combinations(np.arange(belief.num_samples), initial_query.K)])
            if len(subsets) < batch_size:
                batch_size = len(subsets)
                warnings.warn('The number of possible queries is smaller than the batch size. Automatically reducing the batch size.')
            vals = []
            belief_samples = np.array(belief.samples)
            belief_logprobs = np.array(belief.logprobs)
            for ids in subsets:
                weights = np.array([sample['weights'] for sample in belief_samples[ids]])
                vals.append(acquisition_func(weights, belief_logprobs[ids], **kwargs))
            vals = np.array(vals)
            inds = np.argpartition(vals, -batch_size)[-batch_size:]
            best_batch = [initial_query.copy() for _ in range(batch_size)]
            temp_user = belief.user_model.copy()
            for i in range(batch_size):
                trajectories = []
                for best_id in subsets[inds[i]]:
                    temp_user.params = belief.samples[best_id]
                    trajectories.append(self.planner(temp_user))
                best_batch[i].slate = TrajectorySet(trajectories)
            return best_batch, vals[inds]
        elif acquisition_func is regret and isinstance(belief, SamplingBasedBelief):
            assert(initial_query.K == 2), 'regret acquisition function works only with pairwise comparison queries, i.e., K must be 2.'
            # Plan one trajectory per belief sample up front, then score every
            # pair of planned trajectories with the regret acquisition.
            subsets = np.array([list(tup) for tup in itertools.combinations(np.arange(belief.num_samples), initial_query.K)])
            if len(subsets) < batch_size:
                batch_size = len(subsets)
                warnings.warn('The number of possible queries is smaller than the batch size. Automatically reducing the batch size.')
            temp_user = belief.user_model.copy()
            trajectories = []
            for sample in belief.samples:
                temp_user.params = sample
                trajectories.append(self.planner(temp_user))
            planned_trajs = TrajectorySet(trajectories)
            vals = []
            belief_samples = np.array(belief.samples)
            belief_logprobs = np.array(belief.logprobs)
            for ids in subsets:
                weights = np.array([sample['weights'] for sample in belief_samples[ids]])
                vals.append(acquisition_func(weights, belief_logprobs[ids], planned_trajs[ids], **kwargs))
            vals = np.array(vals)
            inds = np.argpartition(vals, -batch_size)[-batch_size:]
            best_batch = [initial_query.copy() for _ in range(batch_size)]
            for i in range(batch_size):
                best_batch[i].slate = planned_trajs[subsets[inds[i]]]
            return best_batch, vals[inds]
    raise NotImplementedError('greedy batch has not been implemented for the given query and belief types.')
def medoids_batch(self,
                  acquisition_func: Callable,
                  belief: Belief,
                  initial_query: Query,
                  batch_size: int,
                  **kwargs) -> Tuple[List[Query], np.array]:
    """
    Selects a batch of queries with the medoids method of
    `Batch Active Preference-Based Learning of Reward Functions <https://arxiv.org/abs/1810.04303>`_.

    Args:
        acquisition_func (Callable): the acquisition function to be maximized by each individual query.
        belief (Belief): the current belief distribution over the user.
        initial_query (Query): an initial query such that the output query will have the same type.
        batch_size (int): the batch size of the output.
        **kwargs: Hyperparameters `reduced_size`, `distance`, and extra arguments needed for specific acquisition functions.
            - `reduced_size` (int): the `B` hyperparameter of the paper; size of the greedily pre-selected candidate pool. Defaults to 100.
            - `distance` (Callable): returns a pairwise distance matrix (numpy.array) for a list of queries. Defaults to :py:meth:`aprel.utils.batch_utils.default_query_distance`.

    Returns:
        2-tuple:
            - List[Query]: The optimized batch of queries as a list.
            - numpy.array: An array of floats that keep the acquisition function values corresponding to the output queries.
    """
    kwargs.setdefault('reduced_size', 100)
    kwargs.setdefault('distance', default_query_distance)
    # Stage 1: greedily pre-select a pool of `reduced_size` strong candidates.
    top_queries, vals = self.greedy_batch(acquisition_func, belief, initial_query,
                                          batch_size=kwargs['reduced_size'], **kwargs)
    del kwargs['reduced_size']
    # Stage 2: pick the medoids of the candidate pool for diversity.
    distances = kwargs['distance'](top_queries, **kwargs)
    selected = kMedoids(distances, batch_size)
    shortfall = batch_size - len(selected)
    if shortfall > 0:
        # Duplicate candidates can collapse into fewer medoids than needed;
        # pad the selection with the highest-valued queries not yet chosen.
        leftover = np.setdiff1d(np.arange(len(vals)), selected)
        best_leftover = leftover[np.argpartition(vals[leftover], -shortfall)[-shortfall:]]
        selected = np.concatenate((selected, best_leftover))
    return [top_queries[idx] for idx in selected], vals[selected]
def boundary_medoids_batch(self,
                           acquisition_func: Callable,
                           belief: Belief,
                           initial_query: Query,
                           batch_size: int,
                           **kwargs) -> Tuple[List[Query], np.array]:
    """
    Uses the boundary medoids method to find a batch of queries. See
    `Batch Active Preference-Based Learning of Reward Functions <https://arxiv.org/abs/1810.04303>`_ for
    more information about the method.

    Args:
        acquisition_func (Callable): the acquisition function to be maximized by each individual query.
        belief (Belief): the current belief distribution over the user.
        initial_query (Query): an initial query such that the output query will have the same type.
        batch_size (int): the batch size of the output.
        **kwargs: Hyperparameters `reduced_size`, `distance`, and extra arguments needed for specific acquisition functions.
            - `reduced_size` (int): The hyperparameter `B` in the original method. This method first greedily chooses `B` queries from the feasible set of queries out of the trajectory set, and then applies the boundary medoids selection. Defaults to 100.
            - `distance` (Callable): A distance function which returns a pairwise distance matrix (numpy.array) when inputted a list of queries. Defaults to :py:meth:`aprel.utils.batch_utils.default_query_distance`.

    Returns:
        2-tuple:
            - List[Query]: The optimized batch of queries as a list.
            - numpy.array: An array of floats that keep the acquisition function values corresponding to the output queries.
    """
    kwargs.setdefault('reduced_size', 100)
    kwargs.setdefault('distance', default_query_distance)
    # Greedily pre-select a pool of `reduced_size` candidate queries.
    top_queries, vals = self.greedy_batch(acquisition_func, belief, initial_query, batch_size=kwargs['reduced_size'], **kwargs)
    del kwargs['reduced_size']
    assert initial_query.K == 2, 'Boundary medoids batch selection method does not support large slates, use K = 2.'
    feature_dim = initial_query.slate.features_matrix.shape[1]
    if feature_dim > 7:
        # Convex hull computation scales poorly with dimensionality.
        warnings.warn('Feature dimensionality is too high: ' + str(feature_dim) + '. Boundary medoids might be too slow.')
    # Represent each pairwise query by the difference of its two feature vectors.
    features_diff = [query.slate.features_matrix[0] - query.slate.features_matrix[1] for query in top_queries]
    hull = ConvexHull(features_diff)
    simplices = np.unique(hull.simplices)
    if len(simplices) < batch_size:
        # If boundary has fewer points than the batch, then fill it with greedy queries
        medoid_ids = simplices
        remaining_ids = np.setdiff1d(np.arange(len(vals)), medoid_ids)
        remaining_vals = vals[remaining_ids]
        missing_count = batch_size - len(simplices)
        # Take the highest-valued non-boundary queries to fill the gap.
        ids_to_add = remaining_ids[np.argpartition(remaining_vals, -missing_count)[-missing_count:]]
        medoid_ids = np.concatenate((medoid_ids, ids_to_add))
    else:
        # Otherwise, select the medoids among the boundary queries
        distances = kwargs['distance']([top_queries[i] for i in simplices], **kwargs)
        temp_ids = kMedoids(distances, batch_size)
        medoid_ids = simplices[temp_ids]
    return [top_queries[idx] for idx in medoid_ids], vals[medoid_ids]
def successive_elimination_batch(self,
                                 acquisition_func: Callable,
                                 belief: Belief,
                                 initial_query: Query,
                                 batch_size: int,
                                 **kwargs) -> Tuple[List[Query], np.array]:
    """
    Uses the successive elimination method to find a batch of queries. See
    `Batch Active Preference-Based Learning of Reward Functions <https://arxiv.org/abs/1810.04303>`_ for
    more information about the method.

    Args:
        acquisition_func (Callable): the acquisition function to be maximized by each individual query.
        belief (Belief): the current belief distribution over the user.
        initial_query (Query): an initial query such that the output query will have the same type.
        batch_size (int): the batch size of the output.
        **kwargs: Hyperparameters `reduced_size`, `distance`, and extra arguments needed for specific acquisition functions.
            - `reduced_size` (int): The hyperparameter `B` in the original method. This method first greedily chooses `B` queries from the feasible set of queries out of the trajectory set, and then applies the boundary medoids selection. Defaults to 100.
            - `distance` (Callable): A distance function which returns a pairwise distance matrix (numpy.array) when inputted a list of queries. Defaults to :py:meth:`aprel.utils.batch_utils.default_query_distance`.

    Returns:
        2-tuple:
            - List[Query]: The optimized batch of queries as a list.
            - numpy.array: An array of floats that keep the acquisition function values corresponding to the output queries.
    """
    kwargs.setdefault('reduced_size', 100)
    kwargs.setdefault('distance', default_query_distance)
    # Greedily pre-select a pool of `reduced_size` candidate queries.
    top_queries, vals = self.greedy_batch(acquisition_func, belief, initial_query, batch_size=kwargs['reduced_size'], **kwargs)
    del kwargs['reduced_size']
    distances = kwargs['distance'](top_queries, **kwargs)
    # Zero distances (duplicates / self-distances) must never be the closest pair.
    distances[np.isclose(distances, 0)] = np.inf
    # Repeatedly drop the lower-valued query of the closest remaining pair.
    while len(top_queries) > batch_size:
        ij_min = np.where(distances == np.min(distances))
        # np.where returns a tuple of index arrays; reduce it to one (i, j) pair.
        if len(ij_min) > 1 and len(ij_min[0]) > 1:
            ij_min = ij_min[0]
        elif len(ij_min) > 1:
            ij_min = np.array([ij_min[0],ij_min[1]])
        if vals[ij_min[0]] < vals[ij_min[1]]:
            delete_id = ij_min[1]
        else:
            delete_id = ij_min[0]
        distances = np.delete(distances, delete_id, axis=0)
        distances = np.delete(distances, delete_id, axis=1)
        vals = np.delete(vals, delete_id)
        top_queries = np.delete(top_queries, delete_id, axis=0)
    return list(top_queries), vals
def dpp_batch(self,
              acquisition_func: Callable,
              belief: Belief,
              initial_query: Query,
              batch_size: int,
              **kwargs) -> Tuple[List[Query], np.array]:
    """
    Selects a batch of queries with the determinantal point process (DPP)
    method of `Batch Active Learning Using Determinantal Point Processes
    <https://arxiv.org/abs/1906.07975>`_.

    Args:
        acquisition_func (Callable): the acquisition function to be maximized by each individual query.
        belief (Belief): the current belief distribution over the user.
        initial_query (Query): an initial query such that the output query will have the same type.
        batch_size (int): the batch size of the output.
        **kwargs: Hyperparameters `reduced_size`, `distance`, `gamma`, and extra arguments needed for specific acquisition functions.
            - `reduced_size` (int): size of the greedily pre-selected candidate pool (`B` in the paper). Defaults to 100.
            - `distance` (Callable): returns a pairwise distance matrix (numpy.array) for a list of queries. Defaults to :py:meth:`aprel.utils.batch_utils.default_query_distance`.
            - `gamma` (float): trades off acquisition value (high gamma) against query diversity (low gamma). Defaults to 1.

    Returns:
        2-tuple:
            - List[Query]: The optimized batch of queries as a list.
            - numpy.array: An array of floats that keep the acquisition function values corresponding to the output queries.
    """
    kwargs.setdefault('reduced_size', 100)
    kwargs.setdefault('distance', default_query_distance)
    kwargs.setdefault('gamma', 1.)
    # Greedily pre-select a pool of strong candidate queries.
    top_queries, vals = self.greedy_batch(acquisition_func, belief, initial_query,
                                          batch_size=kwargs['reduced_size'], **kwargs)
    del kwargs['reduced_size']
    # Exponentiate the values: gamma weights quality against diversity.
    vals = vals ** kwargs['gamma']
    del kwargs['gamma']
    pairwise_distances = kwargs['distance'](top_queries, **kwargs)
    chosen = dpp_mode(pairwise_distances, vals, batch_size)
    return [top_queries[i] for i in chosen], vals[chosen]
"aprel.utils.kMedoids",
"aprel.utils.dpp_mode",
"numpy.ones_like",
"numpy.concatenate",
"aprel.basics.TrajectorySet",
"numpy.argpartition",
"numpy.isclose",
"numpy.min",
"numpy.array",
"numpy.arange",
"numpy.exp",
"numpy.random.choice",
"warnings.warn",
"scipy.spatial.ConvexHull",
"numpy... | [((19786, 19817), 'aprel.utils.kMedoids', 'kMedoids', (['distances', 'batch_size'], {}), '(distances, batch_size)\n', (19794, 19817), False, 'from aprel.utils import kMedoids, dpp_mode, default_query_distance\n'), ((22988, 23013), 'scipy.spatial.ConvexHull', 'ConvexHull', (['features_diff'], {}), '(features_diff)\n', (22998, 23013), False, 'from scipy.spatial import ConvexHull\n'), ((23034, 23059), 'numpy.unique', 'np.unique', (['hull.simplices'], {}), '(hull.simplices)\n', (23043, 23059), True, 'import numpy as np\n'), ((29396, 29433), 'aprel.utils.dpp_mode', 'dpp_mode', (['distances', 'vals', 'batch_size'], {}), '(distances, vals, batch_size)\n', (29404, 29433), False, 'from aprel.utils import kMedoids, dpp_mode, default_query_distance\n'), ((6771, 6888), 'warnings.warn', 'warnings.warn', (['"""Since batch size > 1, ignoring exhaustive search and using greedy batch selection instead."""'], {}), "(\n 'Since batch size > 1, ignoring exhaustive search and using greedy batch selection instead.'\n )\n", (6784, 6888), False, 'import warnings\n'), ((20272, 20312), 'numpy.concatenate', 'np.concatenate', (['(medoid_ids, ids_to_add)'], {}), '((medoid_ids, ids_to_add))\n', (20286, 20312), True, 'import numpy as np\n'), ((23537, 23577), 'numpy.concatenate', 'np.concatenate', (['(medoid_ids, ids_to_add)'], {}), '((medoid_ids, ids_to_add))\n', (23551, 23577), True, 'import numpy as np\n'), ((23776, 23807), 'aprel.utils.kMedoids', 'kMedoids', (['distances', 'batch_size'], {}), '(distances, batch_size)\n', (23784, 23807), False, 'from aprel.utils import kMedoids, dpp_mode, default_query_distance\n'), ((26160, 26184), 'numpy.isclose', 'np.isclose', (['distances', '(0)'], {}), '(distances, 0)\n', (26170, 26184), True, 'import numpy as np\n'), ((26652, 26691), 'numpy.delete', 'np.delete', (['distances', 'delete_id'], {'axis': '(0)'}), '(distances, delete_id, axis=0)\n', (26661, 26691), True, 'import numpy as np\n'), ((26716, 26755), 'numpy.delete', 'np.delete', 
(['distances', 'delete_id'], {'axis': '(1)'}), '(distances, delete_id, axis=1)\n', (26725, 26755), True, 'import numpy as np\n'), ((26775, 26801), 'numpy.delete', 'np.delete', (['vals', 'delete_id'], {}), '(vals, delete_id)\n', (26784, 26801), True, 'import numpy as np\n'), ((26828, 26869), 'numpy.delete', 'np.delete', (['top_queries', 'delete_id'], {'axis': '(0)'}), '(top_queries, delete_id, axis=0)\n', (26837, 26869), True, 'import numpy as np\n'), ((7066, 7188), 'warnings.warn', 'warnings.warn', (['"""Since batch size == 1, ignoring the batch selection method and using exhaustive search instead."""'], {}), "(\n 'Since batch size == 1, ignoring the batch selection method and using exhaustive search instead.'\n )\n", (7079, 7188), False, 'import warnings\n'), ((11862, 11887), 'numpy.array', 'np.array', (['belief.logprobs'], {}), '(belief.logprobs)\n', (11870, 11887), True, 'import numpy as np\n'), ((12027, 12075), 'numpy.unique', 'np.unique', (['planned_traj_ids'], {'return_inverse': '(True)'}), '(planned_traj_ids, return_inverse=True)\n', (12036, 12075), True, 'import numpy as np\n'), ((20181, 20228), 'numpy.argpartition', 'np.argpartition', (['remaining_vals', '(-missing_count)'], {}), '(remaining_vals, -missing_count)\n', (20196, 20228), True, 'import numpy as np\n'), ((23446, 23493), 'numpy.argpartition', 'np.argpartition', (['remaining_vals', '(-missing_count)'], {}), '(remaining_vals, -missing_count)\n', (23461, 23493), True, 'import numpy as np\n'), ((26283, 26300), 'numpy.min', 'np.min', (['distances'], {}), '(distances)\n', (26289, 26300), True, 'import numpy as np\n'), ((26451, 26483), 'numpy.array', 'np.array', (['[ij_min[0], ij_min[1]]'], {}), '([ij_min[0], ij_min[1]])\n', (26459, 26483), True, 'import numpy as np\n'), ((10946, 11025), 'numpy.random.choice', 'np.random.choice', (['self.trajectory_set.size'], {'size': 'initial_query.K', 'replace': '(False)'}), '(self.trajectory_set.size, size=initial_query.K, replace=False)\n', (10962, 11025), True, 
'import numpy as np\n'), ((11454, 11582), 'warnings.warn', 'warnings.warn', (['"""The number of possible queries is smaller than the batch size. Automatically reducing the batch size."""'], {}), "(\n 'The number of possible queries is smaller than the batch size. Automatically reducing the batch size.'\n )\n", (11467, 11582), False, 'import warnings\n'), ((13909, 13923), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (13917, 13923), True, 'import numpy as np\n'), ((12184, 12219), 'numpy.arange', 'np.arange', (['self.trajectory_set.size'], {}), '(self.trajectory_set.size)\n', (12193, 12219), True, 'import numpy as np\n'), ((12802, 12828), 'numpy.ones_like', 'np.ones_like', (['unique_probs'], {}), '(unique_probs)\n', (12814, 12828), True, 'import numpy as np\n'), ((13507, 13635), 'warnings.warn', 'warnings.warn', (['"""The number of possible queries is smaller than the batch size. Automatically reducing the batch size."""'], {}), "(\n 'The number of possible queries is smaller than the batch size. 
Automatically reducing the batch size.'\n )\n", (13520, 13635), False, 'import warnings\n'), ((13947, 13981), 'numpy.argpartition', 'np.argpartition', (['vals', '(-batch_size)'], {}), '(vals, -batch_size)\n', (13962, 13981), True, 'import numpy as np\n'), ((14940, 14964), 'numpy.array', 'np.array', (['belief.samples'], {}), '(belief.samples)\n', (14948, 14964), True, 'import numpy as np\n'), ((14999, 15024), 'numpy.array', 'np.array', (['belief.logprobs'], {}), '(belief.logprobs)\n', (15007, 15024), True, 'import numpy as np\n'), ((15269, 15283), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (15277, 15283), True, 'import numpy as np\n'), ((12418, 12484), 'numpy.random.choice', 'np.random.choice', (['remaining_ids'], {'size': 'missing_count', 'replace': '(False)'}), '(remaining_ids, size=missing_count, replace=False)\n', (12434, 12484), True, 'import numpy as np\n'), ((12998, 13089), 'numpy.random.choice', 'np.random.choice', (['unique_traj_ids'], {'size': 'initial_query.K', 'replace': '(False)', 'p': 'unique_probs'}), '(unique_traj_ids, size=initial_query.K, replace=False, p=\n unique_probs)\n', (13014, 13089), True, 'import numpy as np\n'), ((14762, 14890), 'warnings.warn', 'warnings.warn', (['"""The number of possible queries is smaller than the batch size. Automatically reducing the batch size."""'], {}), "(\n 'The number of possible queries is smaller than the batch size. 
Automatically reducing the batch size.'\n )\n", (14775, 14890), False, 'import warnings\n'), ((15091, 15154), 'numpy.array', 'np.array', (["[sample['weights'] for sample in belief_samples[ids]]"], {}), "([sample['weights'] for sample in belief_samples[ids]])\n", (15099, 15154), True, 'import numpy as np\n'), ((15307, 15341), 'numpy.argpartition', 'np.argpartition', (['vals', '(-batch_size)'], {}), '(vals, -batch_size)\n', (15322, 15341), True, 'import numpy as np\n'), ((15802, 15829), 'aprel.basics.TrajectorySet', 'TrajectorySet', (['trajectories'], {}), '(trajectories)\n', (15815, 15829), False, 'from aprel.basics import Trajectory, TrajectorySet\n'), ((16759, 16786), 'aprel.basics.TrajectorySet', 'TrajectorySet', (['trajectories'], {}), '(trajectories)\n', (16772, 16786), False, 'from aprel.basics import Trajectory, TrajectorySet\n'), ((16846, 16870), 'numpy.array', 'np.array', (['belief.samples'], {}), '(belief.samples)\n', (16854, 16870), True, 'import numpy as np\n'), ((16905, 16930), 'numpy.array', 'np.array', (['belief.logprobs'], {}), '(belief.logprobs)\n', (16913, 16930), True, 'import numpy as np\n'), ((17195, 17209), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (17203, 17209), True, 'import numpy as np\n'), ((11292, 11321), 'numpy.arange', 'np.arange', (['belief.num_samples'], {}), '(belief.num_samples)\n', (11301, 11321), True, 'import numpy as np\n'), ((16364, 16492), 'warnings.warn', 'warnings.warn', (['"""The number of possible queries is smaller than the batch size. Automatically reducing the batch size."""'], {}), "(\n 'The number of possible queries is smaller than the batch size. 
Automatically reducing the batch size.'\n )\n", (16377, 16492), False, 'import warnings\n'), ((16997, 17060), 'numpy.array', 'np.array', (["[sample['weights'] for sample in belief_samples[ids]]"], {}), "([sample['weights'] for sample in belief_samples[ids]])\n", (17005, 17060), True, 'import numpy as np\n'), ((17233, 17267), 'numpy.argpartition', 'np.argpartition', (['vals', '(-batch_size)'], {}), '(vals, -batch_size)\n', (17248, 17267), True, 'import numpy as np\n'), ((12624, 12661), 'numpy.exp', 'np.exp', (['belief_logprobs[inverse == i]'], {}), '(belief_logprobs[inverse == i])\n', (12630, 12661), True, 'import numpy as np\n'), ((13339, 13374), 'numpy.arange', 'np.arange', (['self.trajectory_set.size'], {}), '(self.trajectory_set.size)\n', (13348, 13374), True, 'import numpy as np\n'), ((14600, 14629), 'numpy.arange', 'np.arange', (['belief.num_samples'], {}), '(belief.num_samples)\n', (14609, 14629), True, 'import numpy as np\n'), ((16202, 16231), 'numpy.arange', 'np.arange', (['belief.num_samples'], {}), '(belief.num_samples)\n', (16211, 16231), True, 'import numpy as np\n')] |
import os.path
import numpy as np
from sklearn.impute import KNNImputer
from torch.autograd import Variable
import torch
import torch.optim as optim
import pandas as pd
from utils import *
from neural_network import AutoEncoder
import item_response as irt
import random
def load_data(base_path="../data"):
    """ Load the training/validation/test splits in numpy/dictionary form.

    :param base_path: directory containing the data files.
    :return: (train_matrix, valid_data, test_data)
        WHERE:
        train_matrix: 2D dense numpy array (num_students x num_questions)
            obtained from the sparse training matrix; unobserved entries are
            presumably NaN, matching the np.isnan masking used downstream.
        valid_data: A dictionary {user_id: list,
        user_id: list, is_correct: list}
        test_data: A dictionary {user_id: list,
        user_id: list, is_correct: list}
    """
    train_matrix = load_train_sparse(base_path).toarray()
    valid_data = load_valid_csv(base_path)
    test_data = load_public_test_csv(base_path)
    return train_matrix, valid_data, test_data
def bagging(train_matrix, path="../data/train_data.csv"):
    """ Build one bootstrap-resampled training matrix.

    Reads the raw training responses from ``path``, samples 2/3 as many
    observations as the full set *with replacement* (bootstrap), and scatters
    the sampled responses into a fresh student-by-question matrix.

    :param train_matrix: 2D array whose shape (num_students x num_questions)
        determines the shape of the bagged matrix; its values are not used.
    :param path: CSV file with user_id, question_id and is_correct columns.
    :return: (train_data, bagged_matrix) where train_data is the full
        DataFrame read from ``path`` and bagged_matrix holds the sampled
        responses, with np.nan marking entries that were not sampled.
    """
    train_data = pd.read_csv(path)
    num_std, num_q = train_matrix.shape
    # Bootstrap: draw 2/3 of the dataset size, with replacement.
    num_std_bagged = int(len(train_data) * 2 / 3)
    train_data_sampled = train_data.sample(num_std_bagged, replace=True)
    bagged_matrix = np.full((num_std, num_q), np.nan)
    for index in train_data_sampled.index:
        row = train_data["user_id"][index]
        col = train_data["question_id"][index]
        bagged_matrix[row][col] = train_data["is_correct"][index]
    return train_data, bagged_matrix
def eval_knn_base_models(k, train_matrix_bagged, valid_data):
    """
    Fit a k-nearest-neighbour imputer on the bagged training matrix and
    return its thresholded predictions on the validation data.

    :param k: number of neighbours for the imputer (k = 11 performed best).
    :param train_matrix_bagged: bagged student-by-question matrix with NaNs.
    :param valid_data: dictionary with user_id / question_id / is_correct lists.
    :return: predictions for the validation entries, thresholded at 0.5.
    """
    imputer = KNNImputer(n_neighbors=k)
    completed_matrix = imputer.fit_transform(train_matrix_bagged)
    return sparse_matrix_predictions(valid_data, completed_matrix, threshold=0.5)
def eval_neural_net_base_model(train_matrix_bagged, valid_data, test_data, epoch, k):
    """
    Train an AutoEncoder on the bagged matrix and return its raw prediction
    scores on the validation and test sets.

    :param train_matrix_bagged: numpy matrix with NaN for unobserved entries.
    :param valid_data: validation dictionary (user_id/question_id/is_correct).
    :param test_data: test dictionary of the same form.
    :param epoch: number of training epochs.
    :param k: latent dimension of the AutoEncoder.
    :return: (validation predictions, test predictions), each a list of floats.
    """
    # Zero-fill NaNs for the network input; the unfilled tensor keeps the
    # NaN mask that train_nn uses to exclude unobserved entries from the loss.
    zero_filled = train_matrix_bagged.copy()
    zero_filled[np.isnan(train_matrix_bagged)] = 0
    nan_tensor = torch.FloatTensor(train_matrix_bagged)
    zero_tensor = torch.FloatTensor(zero_filled)
    model = AutoEncoder(nan_tensor.shape[1], k)
    # Hyperparameters fixed by prior tuning.
    train_nn(model, lr=0.05, lamb=0.001, train_data=nan_tensor,
             zero_train_data=zero_tensor, epoch=epoch)
    valid_predictions, _valid_acc = evaluate_nn(model, zero_tensor, valid_data)
    test_predictions, _test_acc = evaluate_nn(model, zero_tensor, test_data)
    return valid_predictions, test_predictions
def train_nn(model, lr, lamb, train_data, zero_train_data, epoch):
    """ Train the neural network with an L2-regularized reconstruction loss.

    For each student, the model reconstructs the zero-filled response row.
    Entries that are unobserved (NaN in ``train_data``) are excluded from the
    loss by copying the model's own output into the target at those positions,
    which makes their squared error exactly zero.

    :param model: Module exposing a ``get_weight_norm()`` method used for the
        regularization term.
    :param lr: float, SGD learning rate.
    :param lamb: float, L2 regularization strength.
    :param train_data: 2D FloatTensor with NaN marking unobserved entries.
    :param zero_train_data: 2D FloatTensor, same data with NaNs replaced by 0.
    :param epoch: int, number of passes over all students.
    :return: None
    """
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=lr)
    num_student = train_data.shape[0]
    # NOTE: the original loop reused the name `epoch` as the loop variable,
    # shadowing the argument; `_` avoids that.
    for _ in range(epoch):
        for user_id in range(num_student):
            # Variable() is a deprecated no-op since PyTorch 0.4; plain
            # tensors are used directly.
            inputs = zero_train_data[user_id].unsqueeze(0)
            target = inputs.clone()

            optimizer.zero_grad()
            output = model(inputs)

            # Mask out unobserved entries from the reconstruction loss.
            nan_mask = np.isnan(train_data[user_id].unsqueeze(0).numpy())
            target[0][nan_mask] = output[0][nan_mask]

            loss = torch.sum((output - target) ** 2.) + \
                (lamb / 2) * model.get_weight_norm()
            loss.backward()
            optimizer.step()
def evaluate_nn(model, train_data, valid_data):
    """ Evaluate the valid_data on the current model.

    :param model: Module (called on one zero-filled student row at a time).
    :param train_data: 2D FloatTensor of zero-filled training inputs.
    :param valid_data: A dictionary {user_id: list,
    question_id: list, is_correct: list}
    :return: (predictions, accuracy) where predictions is the list of raw
        model outputs for each queried (user, question) pair, and accuracy is
        the fraction of 0.5-thresholded predictions matching ``is_correct``.
    """
    # Tell PyTorch you are evaluating the model.
    model.eval()

    result = []
    correct = 0
    total = 0

    # Inference only: skip autograd bookkeeping.
    with torch.no_grad():
        for i, u in enumerate(valid_data["user_id"]):
            inputs = train_data[u].unsqueeze(0)
            output = model(inputs)

            # Extract the score once (the original called .item() twice).
            score = output[0][valid_data["question_id"][i]].item()
            if (score >= 0.5) == bool(valid_data["is_correct"][i]):
                correct += 1
            result.append(score)
            total += 1
    return result, correct / total
def evaluate_ensemble(data, prediction):
    """
    Compute the accuracy of thresholded predictions against ``data``.

    A prediction >= 0.5 is read as "answered correctly"; it counts as accurate
    when it matches the truthiness of the corresponding ``is_correct`` entry.

    :param data: dictionary with an "is_correct" list of 0/1 labels.
    :param prediction: sequence of prediction scores aligned with ``data``.
    :return: accuracy in [0, 1]; 0.0 for empty input (the original raised
        ZeroDivisionError in that case).
    """
    labels = data["is_correct"]
    if len(labels) == 0:
        return 0.0
    total_accurate = sum(
        1 for label, score in zip(labels, prediction)
        if (score >= 0.5) == bool(label)
    )
    return total_accurate / float(len(labels))
def predict_irt(data, theta, beta):
    """
    Return boolean IRT predictions for each (user, question) pair in ``data``.

    :param data: dictionary with aligned user_id and question_id lists.
    :param theta: per-student ability parameters.
    :param beta: per-question difficulty parameters.
    :return: numpy array of booleans, True where sigmoid(theta - beta) >= 0.5.
    """
    probabilities = [
        irt.sigmoid((theta[user] - beta[question]).sum())
        for user, question in zip(data["user_id"], data["question_id"])
    ]
    return np.array([p >= 0.5 for p in probabilities])
if __name__ == "__main__":
    # Ensemble of three base models (one neural net + two IRT runs), each
    # trained on its own bootstrap resample of the training data.
    train_matrix, valid_data, test_data = load_data()

    # Base model 1: autoencoder neural network on a bagged sample.
    training_data_bagged, train_matrix_bagged = bagging(train_matrix)
    result_nn1_valid, result_nn1_test = eval_neural_net_base_model(train_matrix_bagged, valid_data, test_data, epoch=18, k=10)
    print(f"Neural Net 1 valid acc: {evaluate_ensemble(valid_data, result_nn1_valid)}")
    print(f"Neural Net 1 test acc: {evaluate_ensemble(test_data, result_nn1_test)}")

    # Base model 2: item response theory on a fresh bagged sample.
    training_data_bagged, train_matrix_bagged = bagging(train_matrix)
    theta, beta, val_acc_lst = irt.irt(training_data_bagged, valid_data, 0.01, 20)
    itr1_valid_pred = predict_irt(valid_data, theta, beta)
    itr1_test_pred = predict_irt(test_data, theta, beta)
    print(f"IRT valid accuracy: {evaluate_ensemble(valid_data, itr1_valid_pred)}")
    print(f"IRT test accuracy: {evaluate_ensemble(test_data, itr1_test_pred)}")

    # Base model 3: a second IRT run on yet another bagged sample.
    training_data_bagged, train_matrix_bagged = bagging(train_matrix)
    theta, beta, val_acc_lst = irt.irt(training_data_bagged, valid_data, 0.01, 20)
    itr2_valid_pred = predict_irt(valid_data, theta, beta)
    itr2_test_pred = predict_irt(test_data, theta, beta)
    print(f"IRT valid accuracy: {evaluate_ensemble(valid_data, itr2_valid_pred)}")
    print(f"IRT test accuracy: {evaluate_ensemble(test_data, itr2_test_pred)}")

    # Ensemble: average the three base-model predictions per query.
    ensemble_predictions = np.asmatrix([result_nn1_valid, itr1_valid_pred, itr2_valid_pred])
    average_predictions = np.asarray(ensemble_predictions.mean(axis=0))[0]
    validation_accuracy = evaluate_ensemble(valid_data, average_predictions)
    print(f"Valid acc with 2*IRT + NN: {validation_accuracy}")

    ensemble_predictions = np.asmatrix([result_nn1_test, itr1_test_pred, itr2_test_pred])
    average_predictions = np.asarray(ensemble_predictions.mean(axis=0))[0]
    test_accuracy = evaluate_ensemble(test_data, average_predictions)
    print(f"Test acc with 2*IRT + NN: {test_accuracy}")
| [
"pandas.read_csv",
"numpy.empty",
"torch.autograd.Variable",
"torch.FloatTensor",
"neural_network.AutoEncoder",
"item_response.irt",
"numpy.isnan",
"sklearn.impute.KNNImputer",
"numpy.array",
"numpy.asmatrix",
"item_response.sigmoid",
"torch.sum"
] | [((1103, 1140), 'pandas.read_csv', 'pd.read_csv', (['"""../data/train_data.csv"""'], {}), "('../data/train_data.csv')\n", (1114, 1140), True, 'import pandas as pd\n'), ((1321, 1347), 'numpy.empty', 'np.empty', (['(num_std, num_q)'], {}), '((num_std, num_q))\n', (1329, 1347), True, 'import numpy as np\n'), ((2049, 2074), 'sklearn.impute.KNNImputer', 'KNNImputer', ([], {'n_neighbors': 'k'}), '(n_neighbors=k)\n', (2059, 2074), False, 'from sklearn.impute import KNNImputer\n'), ((2590, 2628), 'torch.FloatTensor', 'torch.FloatTensor', (['train_matrix_bagged'], {}), '(train_matrix_bagged)\n', (2607, 2628), False, 'import torch\n'), ((2653, 2689), 'torch.FloatTensor', 'torch.FloatTensor', (['zero_train_matrix'], {}), '(zero_train_matrix)\n', (2670, 2689), False, 'import torch\n'), ((2752, 2781), 'neural_network.AutoEncoder', 'AutoEncoder', (['num_questions', 'k'], {}), '(num_questions, k)\n', (2763, 2781), False, 'from neural_network import AutoEncoder\n'), ((5822, 5836), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (5830, 5836), True, 'import numpy as np\n'), ((6394, 6445), 'item_response.irt', 'irt.irt', (['training_data_bagged', 'valid_data', '(0.01)', '(20)'], {}), '(training_data_bagged, valid_data, 0.01, 20)\n', (6401, 6445), True, 'import item_response as irt\n'), ((6828, 6879), 'item_response.irt', 'irt.irt', (['training_data_bagged', 'valid_data', '(0.01)', '(20)'], {}), '(training_data_bagged, valid_data, 0.01, 20)\n', (6835, 6879), True, 'import item_response as irt\n'), ((7188, 7253), 'numpy.asmatrix', 'np.asmatrix', (['[result_nn1_valid, itr1_valid_pred, itr2_valid_pred]'], {}), '([result_nn1_valid, itr1_valid_pred, itr2_valid_pred])\n', (7199, 7253), True, 'import numpy as np\n'), ((7498, 7560), 'numpy.asmatrix', 'np.asmatrix', (['[result_nn1_test, itr1_test_pred, itr2_test_pred]'], {}), '([result_nn1_test, itr1_test_pred, itr2_test_pred])\n', (7509, 7560), True, 'import numpy as np\n'), ((2529, 2558), 'numpy.isnan', 'np.isnan', 
(['train_matrix_bagged'], {}), '(train_matrix_bagged)\n', (2537, 2558), True, 'import numpy as np\n'), ((5764, 5778), 'item_response.sigmoid', 'irt.sigmoid', (['x'], {}), '(x)\n', (5775, 5778), True, 'import item_response as irt\n'), ((4001, 4036), 'torch.sum', 'torch.sum', (['((output - target) ** 2.0)'], {}), '((output - target) ** 2.0)\n', (4010, 4036), False, 'import torch\n'), ((4684, 4707), 'torch.autograd.Variable', 'Variable', (['train_data[u]'], {}), '(train_data[u])\n', (4692, 4707), False, 'from torch.autograd import Variable\n'), ((3699, 3733), 'torch.autograd.Variable', 'Variable', (['zero_train_data[user_id]'], {}), '(zero_train_data[user_id])\n', (3707, 3733), False, 'from torch.autograd import Variable\n')] |
import numpy as np
import torch
from torch.nn import functional as F
from collections import Counter
from sklearn.metrics import f1_score,precision_score,recall_score
import skimage.transform
import matplotlib.pyplot as plt
import pandas as pd
from core.frame_base_measurement import compute_align_MoF_UoI,compute_align_MoF_UoI_bg, compute_align_MoF_UoI_no_align, compute_align_MoF_UoI_bg_no_align
import pdb
import os
from multiprocessing import Process,Queue
def get_list_param_norm(params):
    """Return the L2 norm of each tensor in *params* as a list of Python floats."""
    with torch.no_grad():
        return [torch.norm(p).cpu().item() for p in params]
def find_folder_with_pattern(pattern, path_dir):
    """Return ``path_dir + entry + '/'`` for the first directory entry whose
    name contains *pattern*, or None when nothing matches.

    Note: *path_dir* is concatenated directly, so callers pass it with a
    trailing slash (matches the original usage).
    """
    candidates = (entry for entry in os.listdir(path_dir) if pattern in entry)
    match = next(candidates, None)
    if match is None:
        return None
    return path_dir + match + '/'
def aggregated_keysteps(subsampled_segment_list, key_step_list):
    """
    Aggregate subsampled keysteps by assigning to each segment the keystep
    label that occurs most often among its frames (majority vote).

    Parameters
    ----------
    subsampled_segment_list: [B, T] tensor; segment id of each frame
    key_step_list: [B, T] tensor; keystep id of each frame

    Returns
    -------
    [B, n_segments] tensor; the majority keystep of every segment
    """
    batch_aggregated_key_list = []
    for b in range(subsampled_segment_list.shape[0]):
        segments = np.unique(subsampled_segment_list[b])
        aggregated_key_list = []
        for s in segments:
            indicator = subsampled_segment_list[b] == s
            segment_keysteps = key_step_list[b][indicator]
            unique_keys, key_freq = torch.unique(segment_keysteps, return_counts=True)
            # Majority vote; argmax returns the first maximal count, which
            # matches the strict-'>' scan the original manual loop performed.
            aggregated_key_list.append(unique_keys[torch.argmax(key_freq)])
        batch_aggregated_key_list.append(aggregated_key_list)
    return torch.tensor(batch_aggregated_key_list)  # [B, n_segments]
def fcsn_preprocess_fbar(fbar_seg, verbose=False):
    """Zero-pad the time axis of a [B, 512, T] feature tensor so T is a
    multiple of 32, and at least 64 when padding happens (for batchnorm).

    NOTE(review): when T is already an exact multiple of 32 (including
    T == 32) no padding is applied at all — confirm this is intended.
    """
    if verbose:
        print('before padding fbar {}'.format(fbar_seg.size()))
    n_frames = fbar_seg.size(2)
    remainder = n_frames % 32
    if remainder != 0:
        n_pad = 32 - remainder
        if n_frames + n_pad < 64:
            n_pad += 32
        fbar_seg = F.pad(fbar_seg, (0, n_pad))
    if verbose:
        print('after padding fbar {}'.format(fbar_seg.size()))
    return fbar_seg
def fcsn_preprocess_keystep(keystep_labels, verbose=False):
    """Zero-pad the time axis of a [B, T] keystep-label tensor so T is a
    multiple of 32, and at least 64 when padding happens (for batchnorm).

    NOTE(review): like fcsn_preprocess_fbar, an exact multiple of 32
    (including T == 32) is returned unpadded — confirm this is intended.
    """
    if verbose:
        print('before padding keystep {}'.format(keystep_labels.size()))
    n_frames = keystep_labels.size(1)
    remainder = n_frames % 32
    if remainder != 0:
        n_pad = 32 - remainder
        if n_frames + n_pad < 64:
            n_pad += 32
        keystep_labels = F.pad(keystep_labels, (0, n_pad))
    if verbose:
        print('after padding keystep {}'.format(keystep_labels.size()))
    return keystep_labels
def get_parameters(model, verbose=True):
    """Collect the trainable parameters of *model*.

    Parameters
    ----------
    model: a torch.nn.Module (anything exposing named_parameters()).
    verbose: when True, print the name of every trainable parameter.

    Returns
    -------
    list of the parameters with requires_grad set.
    """
    params_to_update = []
    for name, param in model.named_parameters():
        # Idiomatic truthiness test instead of `== True`.
        if param.requires_grad:
            params_to_update.append(param)
            if verbose:
                print("\t", name)
    return params_to_update
def get_lr(optimizer):
    """Return the learning rate of every param group in *optimizer*."""
    return [group['lr'] for group in optimizer.param_groups]
def get_weight_decay(optimizer):
    """Return the weight-decay value of every param group in *optimizer*."""
    return [group['weight_decay'] for group in optimizer.param_groups]
def convert_keystep_2_keyframe(keystep_labels):
    """Binarize keystep ids: 0 stays 0 (background), any id >= 1 becomes 1."""
    return keystep_labels.clamp(0, 1).long()
def top_k_acc(cat_labels, cat_preds, k):
    """Fraction of samples whose true class appears in the top-k classes of
    *cat_preds* ([N, C] scores). Labels and preds are moved to CPU first."""
    labels_np = cat_labels.cpu().numpy()
    ranked = torch.argsort(cat_preds.cpu(), dim=1, descending=True)
    top_k = ranked[:, :k].numpy()
    assert top_k.shape[0] == len(labels_np)
    hits = sum(1 for i, label in enumerate(labels_np) if label in top_k[i])
    return hits / len(labels_np)
def compute_per_class_acc(test_label, predicted_label):
    """Macro accuracy: mean over classes of per-class accuracy.

    test_label: [N] tensor of ground-truth class ids.
    predicted_label: [N, C] tensor of class scores (argmax over last dim).
    """
    labels = test_label.cpu().numpy()
    preds = np.argmax(predicted_label.cpu().numpy(), -1)
    per_class = []
    for cls in np.unique(labels):
        mask = labels == cls
        per_class.append(np.sum(preds[mask] == labels[mask]) / np.sum(mask))
    return np.mean(per_class)
def evaluation_align(model,ss_model,dataset_loader_tst,device):
    """Evaluate keystep alignment and category prediction over a test loader.

    Iterates the loader one video at a time, collects the model's keystep
    predictions (and, when ss_model is given, its pseudo keystep labels),
    then scores the concatenated predictions with the alignment MoF/IoU
    metrics plus top-k and per-class category accuracy.

    Parameters
    ----------
    model: network called as model(features, segments, hof) -> (keysteps, cats, _, _);
        model.forward_middle(...) is used only when ss_model is given.
    ss_model: optional model exposing .predict(fbar_seg, cat) and .M;
        pass None to skip the pseudo-label evaluation (those metrics become -1).
    dataset_loader_tst: iterable of data_package dicts (keys unpacked below).
    device: torch device the features are moved to.

    Returns
    -------
    dict with keys 'list_top_k_acc', 'per_class_acc', 'R_pred', 'P_pred',
    'R_pseudo', 'P_pseudo'.
    """
    k=1  # top-k used for the category-accuracy metric
    batch_size = 1  # evaluation assumes one video per batch
    list_ks_pred = []
    list_ks_pseudo = []
    list_ks_label = []
    list_top_k_acc = []
    all_cat_labels = []
    all_cat_preds = []
    segment_per_video = []
    counter = 0
    print('EVALUATION')
    with torch.no_grad():
        for data_package in dataset_loader_tst:
            model.eval()
            counter += 1
            cat_labels, cat_names, video, subsampled_feature, subsampled_segment_list, key_step_list, n_og_keysteps \
                = data_package['cat_labels'],data_package['cat_names'],data_package['video'],data_package['subsampled_feature'],data_package['subsampled_segment_list'],data_package['key_step_list'],data_package['n_og_keysteps']
            # print(video)
            # Optional hand-crafted motion feature; absent for some datasets.
            if 'feature_hof' in data_package:
                feature_hof=data_package['feature_hof'].to(device)
            else:
                feature_hof = None
            # Reshape to [B, T, 512, 49]; assumes 7x7 spatial feature maps
            # with 512 channels — TODO confirm against the feature extractor.
            flatten_feature = subsampled_feature.view(batch_size,-1,512,7*7).to(device)
            #Transposing the flattened features
            flatten_feature = torch.transpose(flatten_feature, dim0 = 2, dim1 = 3)
            # Ground-truth keystep per segment (majority vote), padded for FCSN.
            keystep_labels = aggregated_keysteps(subsampled_segment_list, key_step_list)
            keystep_labels = fcsn_preprocess_keystep(keystep_labels)
            keysteps,cats,_,_ = model(flatten_feature,subsampled_segment_list,feature_hof)
            # +1 accounts for the background class.
            n_keystep_background = n_og_keysteps.item()+1
            ### evaluation for each video ###
            if ss_model is not None:
                # Pseudo keystep labels predicted from the middle features,
                # conditioned on the predicted category.
                pred_cat = np.argmax(cats.cpu().numpy(),-1)
                fbar_seg = model.forward_middle(flatten_feature,subsampled_segment_list)
                _,keystep_pseudo_labels = ss_model.predict(fbar_seg,pred_cat.item())
                M = ss_model.M
                print("pseudo: N_gt {} M {}".format(n_keystep_background,M))
                list_ks_pseudo.append(keystep_pseudo_labels)
                # P_pseudo,R_pseudo,F1_pseudo = [-1.0,-1.0,-1.0]
                pass
            else:
                pass
            keysteps_pred = torch.argmax(keysteps,dim = 1)
            # NOTE(review): M is re-assigned here from the model output, so the
            # ss_model.M value above is overwritten for the metrics below, and
            # both M and n_keystep_background keep only the *last* video's
            # values after the loop — confirm they are constant across videos.
            M = keysteps.size(1)
            list_ks_pred.append(keysteps_pred)
            list_ks_label.append(keystep_labels)
            list_top_k_acc.append(top_k_acc(cat_labels,cats,k))
            all_cat_labels.append(cat_labels)
            all_cat_preds.append(cats)
            segment_per_video.append(keysteps_pred.size(1))
    out_package = {}
    # Concatenate all videos along the time axis and score jointly.
    arr_ks_pred = torch.cat(list_ks_pred,dim=1)
    arr_ks_label = torch.cat(list_ks_label,dim=1)
    if ss_model is not None:
        arr_ks_pseudo = torch.cat(list_ks_pseudo,dim=1)
    ######## evaluate ########
    MoF_pred, IoU_pred, P_pred = compute_align_MoF_UoI(keystep_pred=arr_ks_pred,keystep_gt=arr_ks_label,
                        n_keystep=n_keystep_background,M=M)
    MoF_pred_bg, IoU_pred_bg = compute_align_MoF_UoI_bg(keystep_pred=arr_ks_pred,keystep_gt=arr_ks_label,
                        n_keystep=n_keystep_background,M=M)
    if ss_model is not None:
        assert ss_model.M == M
        MoF_pseudo, IoU_pseudo, P_pseudo = compute_align_MoF_UoI(keystep_pred=arr_ks_pseudo,keystep_gt=arr_ks_label,
                            n_keystep=n_keystep_background,M=ss_model.M)
        MoF_pseudo_bg, IoU_pseudo_bg = compute_align_MoF_UoI_bg(keystep_pred=arr_ks_pseudo,keystep_gt=arr_ks_label,
                            n_keystep=n_keystep_background,M=ss_model.M)
    else:
        # Sentinel values when no pseudo-label model was supplied.
        MoF_pseudo, IoU_pseudo, P_pseudo,MoF_pseudo_bg, IoU_pseudo_bg = [-1,-1,-1,-1,-1]
    all_cat_labels = torch.cat(all_cat_labels,dim = 0)
    all_cat_preds = torch.cat(all_cat_preds,dim = 0)
    per_class_acc = compute_per_class_acc(all_cat_labels,all_cat_preds)
    ######## evaluate ########
    out_package['list_top_k_acc'] = list_top_k_acc
    out_package['per_class_acc'] = per_class_acc
    # Note: the 'P_*' entries actually carry the background-aware MoF values.
    out_package['R_pred'] = MoF_pred
    out_package['P_pred'] = MoF_pred_bg
    out_package['R_pseudo'] = MoF_pseudo
    out_package['P_pseudo'] = MoF_pseudo_bg
    return out_package
class Logger:
    """Accumulate rows of metric values in a DataFrame and optionally save to CSV.

    Parameters
    ----------
    filename: CSV path written by save().
    cols: column names for the rows passed to add().
    is_save: when False, save() is a no-op.
    """
    def __init__(self, filename, cols, is_save=True):
        self.df = pd.DataFrame()
        self.cols = cols
        self.filename = filename
        self.is_save = is_save
    def add(self, values):
        """Append one row of values (aligned with self.cols)."""
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # supported replacement and is behaviorally equivalent here.
        row = pd.DataFrame([values], columns=self.cols)
        self.df = pd.concat([self.df, row], ignore_index=True)
    def get_len(self):
        """Number of rows logged so far."""
        return len(self.df)
    def save(self):
        """Write the accumulated rows to self.filename (if is_save)."""
        if self.is_save:
            self.df.to_csv(self.filename)
    def get_max(self, col):
        """Maximum value logged in column *col*."""
        return np.max(self.df[col])
    def is_max(self, col):
        """True when the most recent value of *col* ties or beats the maximum."""
        return self.df[col].iloc[-1] >= np.max(self.df[col])
"numpy.sum",
"numpy.argmax",
"torch.argmax",
"torch.cat",
"numpy.mean",
"torch.no_grad",
"core.frame_base_measurement.compute_align_MoF_UoI_bg",
"numpy.unique",
"torch.nn.functional.pad",
"pandas.DataFrame",
"numpy.max",
"core.frame_base_measurement.compute_align_MoF_UoI",
"torch.unique",
... | [((720, 740), 'os.listdir', 'os.listdir', (['path_dir'], {}), '(path_dir)\n', (730, 740), False, 'import os\n'), ((2358, 2397), 'torch.tensor', 'torch.tensor', (['batch_aggregated_key_list'], {}), '(batch_aggregated_key_list)\n', (2370, 2397), False, 'import torch\n'), ((2892, 2919), 'torch.nn.functional.pad', 'F.pad', (['fbar_seg', '(0, n_pad)'], {}), '(fbar_seg, (0, n_pad))\n', (2897, 2919), True, 'from torch.nn import functional as F\n'), ((3539, 3572), 'torch.nn.functional.pad', 'F.pad', (['keystep_labels', '(0, n_pad)'], {}), '(keystep_labels, (0, n_pad))\n', (3544, 3572), True, 'from torch.nn import functional as F\n'), ((4527, 4575), 'torch.argsort', 'torch.argsort', (['cat_preds'], {'dim': '(1)', 'descending': '(True)'}), '(cat_preds, dim=1, descending=True)\n', (4540, 4575), False, 'import torch\n'), ((5068, 5098), 'numpy.argmax', 'np.argmax', (['predicted_label', '(-1)'], {}), '(predicted_label, -1)\n', (5077, 5098), True, 'import numpy as np\n'), ((5124, 5145), 'numpy.unique', 'np.unique', (['test_label'], {}), '(test_label)\n', (5133, 5145), True, 'import numpy as np\n'), ((5173, 5206), 'numpy.zeros', 'np.zeros', (['target_classes.shape[0]'], {}), '(target_classes.shape[0])\n', (5181, 5206), True, 'import numpy as np\n'), ((5428, 5457), 'numpy.mean', 'np.mean', (['per_class_accuracies'], {}), '(per_class_accuracies)\n', (5435, 5457), True, 'import numpy as np\n'), ((8211, 8241), 'torch.cat', 'torch.cat', (['list_ks_pred'], {'dim': '(1)'}), '(list_ks_pred, dim=1)\n', (8220, 8241), False, 'import torch\n'), ((8260, 8291), 'torch.cat', 'torch.cat', (['list_ks_label'], {'dim': '(1)'}), '(list_ks_label, dim=1)\n', (8269, 8291), False, 'import torch\n'), ((8455, 8568), 'core.frame_base_measurement.compute_align_MoF_UoI', 'compute_align_MoF_UoI', ([], {'keystep_pred': 'arr_ks_pred', 'keystep_gt': 'arr_ks_label', 'n_keystep': 'n_keystep_background', 'M': 'M'}), '(keystep_pred=arr_ks_pred, keystep_gt=arr_ks_label,\n n_keystep=n_keystep_background, M=M)\n', 
(8476, 8568), False, 'from core.frame_base_measurement import compute_align_MoF_UoI, compute_align_MoF_UoI_bg, compute_align_MoF_UoI_no_align, compute_align_MoF_UoI_bg_no_align\n'), ((8652, 8768), 'core.frame_base_measurement.compute_align_MoF_UoI_bg', 'compute_align_MoF_UoI_bg', ([], {'keystep_pred': 'arr_ks_pred', 'keystep_gt': 'arr_ks_label', 'n_keystep': 'n_keystep_background', 'M': 'M'}), '(keystep_pred=arr_ks_pred, keystep_gt=arr_ks_label,\n n_keystep=n_keystep_background, M=M)\n', (8676, 8768), False, 'from core.frame_base_measurement import compute_align_MoF_UoI, compute_align_MoF_UoI_bg, compute_align_MoF_UoI_no_align, compute_align_MoF_UoI_bg_no_align\n'), ((9452, 9484), 'torch.cat', 'torch.cat', (['all_cat_labels'], {'dim': '(0)'}), '(all_cat_labels, dim=0)\n', (9461, 9484), False, 'import torch\n'), ((9506, 9537), 'torch.cat', 'torch.cat', (['all_cat_preds'], {'dim': '(0)'}), '(all_cat_preds, dim=0)\n', (9515, 9537), False, 'import torch\n'), ((526, 541), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (539, 541), False, 'import torch\n'), ((1512, 1549), 'numpy.unique', 'np.unique', (['subsampled_segment_list[b]'], {}), '(subsampled_segment_list[b])\n', (1521, 1549), True, 'import numpy as np\n'), ((5781, 5796), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5794, 5796), False, 'import torch\n'), ((8344, 8376), 'torch.cat', 'torch.cat', (['list_ks_pseudo'], {'dim': '(1)'}), '(list_ks_pseudo, dim=1)\n', (8353, 8376), False, 'import torch\n'), ((8924, 9048), 'core.frame_base_measurement.compute_align_MoF_UoI', 'compute_align_MoF_UoI', ([], {'keystep_pred': 'arr_ks_pseudo', 'keystep_gt': 'arr_ks_label', 'n_keystep': 'n_keystep_background', 'M': 'ss_model.M'}), '(keystep_pred=arr_ks_pseudo, keystep_gt=arr_ks_label,\n n_keystep=n_keystep_background, M=ss_model.M)\n', (8945, 9048), False, 'from core.frame_base_measurement import compute_align_MoF_UoI, compute_align_MoF_UoI_bg, compute_align_MoF_UoI_no_align, compute_align_MoF_UoI_bg_no_align\n'), 
((9148, 9276), 'core.frame_base_measurement.compute_align_MoF_UoI_bg', 'compute_align_MoF_UoI_bg', ([], {'keystep_pred': 'arr_ks_pseudo', 'keystep_gt': 'arr_ks_label', 'n_keystep': 'n_keystep_background', 'M': 'ss_model.M'}), '(keystep_pred=arr_ks_pseudo, keystep_gt=\n arr_ks_label, n_keystep=n_keystep_background, M=ss_model.M)\n', (9172, 9276), False, 'from core.frame_base_measurement import compute_align_MoF_UoI, compute_align_MoF_UoI_bg, compute_align_MoF_UoI_no_align, compute_align_MoF_UoI_bg_no_align\n'), ((10036, 10050), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10048, 10050), True, 'import pandas as pd\n'), ((10460, 10480), 'numpy.max', 'np.max', (['self.df[col]'], {}), '(self.df[col])\n', (10466, 10480), True, 'import numpy as np\n'), ((1984, 2034), 'torch.unique', 'torch.unique', (['segment_keysteps'], {'return_counts': '(True)'}), '(segment_keysteps, return_counts=True)\n', (1996, 2034), False, 'import torch\n'), ((4339, 4372), 'torch.clamp', 'torch.clamp', (['keystep_labels', '(0)', '(1)'], {}), '(keystep_labels, 0, 1)\n', (4350, 4372), False, 'import torch\n'), ((5343, 5400), 'numpy.sum', 'np.sum', (['(predicted_label[is_class] == test_label[is_class])'], {}), '(predicted_label[is_class] == test_label[is_class])\n', (5349, 5400), True, 'import numpy as np\n'), ((5399, 5415), 'numpy.sum', 'np.sum', (['is_class'], {}), '(is_class)\n', (5405, 5415), True, 'import numpy as np\n'), ((6644, 6692), 'torch.transpose', 'torch.transpose', (['flatten_feature'], {'dim0': '(2)', 'dim1': '(3)'}), '(flatten_feature, dim0=2, dim1=3)\n', (6659, 6692), False, 'import torch\n'), ((7746, 7775), 'torch.argmax', 'torch.argmax', (['keysteps'], {'dim': '(1)'}), '(keysteps, dim=1)\n', (7758, 7775), False, 'import torch\n'), ((10202, 10243), 'pandas.DataFrame', 'pd.DataFrame', (['[values]'], {'columns': 'self.cols'}), '([values], columns=self.cols)\n', (10214, 10243), True, 'import pandas as pd\n'), ((10552, 10572), 'numpy.max', 'np.max', (['self.df[col]'], {}), 
'(self.df[col])\n', (10558, 10572), True, 'import numpy as np\n'), ((598, 611), 'torch.norm', 'torch.norm', (['p'], {}), '(p)\n', (608, 611), False, 'import torch\n')] |
""" For use in dumping single frame ground truths of Apollo training Dataset
Adapted from https://github.com/ClementPinard/SfmLearner-Pytorch/blob/0caec9ed0f83cb65ba20678a805e501439d2bc25/data/kitti_raw_loader.py
Authors:
<NAME>, <EMAIL>, 2020
<NAME>, <EMAIL>, 2019
Date:
2020/07/15
"""
from __future__ import division
import numpy as np
from pathlib import Path
from tqdm import tqdm
# import scipy.misc
from collections import Counter
from pebble import ProcessPool
import multiprocessing as mp
ratio_CPU = 0.8  # fraction of available CPU cores to devote to worker processes
default_number_of_process = int(ratio_CPU * mp.cpu_count())
import os, sys
# Make the repository root importable when this file is run as a script.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
import traceback
import coloredlogs, logging
# Module-wide logger with colored, INFO-level console output.
logging.basicConfig()
logger = logging.getLogger()
coloredlogs.install(level="INFO", logger=logger)
import cv2
from dump_tools.utils_kitti import (
scale_P,
)
# import dsac_tools.utils_misc as utils_misc
# from dsac_tools.utils_misc import crop_or_pad_choice
# from utils_good import *
from glob import glob
# from utils_kitti import load_as_float, load_as_array, load_sift, load_SP
import yaml
# DEEPSFM_PATH = "/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils/deepSfm"
# sys.path.append(DEEPSFM_PATH)
import torch
# from kitti_odo_loader import KittiOdoLoader
from kitti_seq_loader import kitti_seq_loader
# from kitti_odo_loader import (
# dump_sift_match_idx,
# get_sift_match_idx_pair,
# dump_SP_match_idx,
# get_SP_match_idx_pair,
# read_odo_calib_file,
# )
## apollo specific
from apollo.eval_pose import eval_pose
# from apollo_seq_loader import apollo_seq_loader
class apollo_train_loader(kitti_seq_loader):
    """Loader that adapts the ApolloScape training sequences to the KITTI-style
    sequence-dumping pipeline (kitti_seq_loader).

    It reads train/val split files per road sequence, globs the Camera-5 jpg
    frames, parses ground-truth pose files into 3x4 matrices, and builds the
    per-scene metadata (images, poses, calibration) consumed by the dumper.
    """
    def __init__(
        self,
        dataset_dir,
        img_height=2710,
        img_width=3384,
        cam_ids=["5"], # no usage in TUM; NOTE(review): mutable default argument
        get_X=False,
        get_pose=False,
        get_sift=False,
        get_SP=False,
        sift_num=2000,
        if_BF_matcher=False,
        save_npy=True,
        delta_ijs=[1]  # NOTE(review): mutable default argument
    ):
        """Configure the loader and gather the train/test sequence folders.

        dataset_dir: root of the ApolloScape data (contains RoadXX folders).
        img_height/img_width: target image size; frames are resized to this.
        cam_ids: only ["5"] (left camera) is supported — asserted below.
        get_X/get_pose/get_sift/get_SP: which extra data to dump.
        save_npy: dump as .npy when True, as h5 otherwise.
        """
        # original size: (H2710, W3384)
        self.dataset_dir = Path(dataset_dir)
        self.img_height = img_height
        self.img_width = img_width
        self.cam_ids = cam_ids[0] # ["1"] # no use in TUM
        logging.info(f"cam_id: {cam_ids}")
        assert self.cam_ids in ["5"], "Support left camera only!"
        self.cid_to_num = {"1": 1, "2": 2, "5": 5, "6": 6}
        ## testing set maps to val.txt
        self.split_mapping = {"train": "train", "test": "val"}
        self.debug = False
        if self.debug:
            coloredlogs.install(level="DEBUG", logger=logger) # original info
        flat_list = lambda x: [item for sublist in x for item in sublist]
        if self.debug: # you can edit the split txt
            ## small dataset for debugging
            split_folder = "split_small"
            self.train_seqs = ["Road11"] # the folders after the dataset_dir
            self.test_seqs = ["Road11"]
        else:
            split_folder = "split"
            ## dataset names
            # self.train_seqs = ["Road16"] # the folders after the dataset_dir
            # self.test_seqs = ["Road16"]
            self.train_seqs = ["Road11"] # the folders after the dataset_dir
            self.test_seqs = ["Road11"]
        ## prepare training seqs: one record array per sequence, then flattened
        self.train_rel_records = [
            np.genfromtxt(
                f"{dataset_dir}/{seq}/{split_folder}/{self.split_mapping['train']}.txt",
                dtype="str",
            )
            for seq in self.train_seqs
        ]
        print(f"self.train_rel_records: {self.train_rel_records}")
        self.train_rel_records = flat_list(self.train_rel_records)
        ## prepare testing seqs
        self.test_rel_records = [
            np.genfromtxt(
                f"{dataset_dir}/{seq}/{split_folder}/{self.split_mapping['test']}.txt",
                dtype="str",
            )
            for seq in self.test_seqs
        ]
        self.test_rel_records = flat_list(self.test_rel_records)
        logging.info(f"train_seqs: {self.train_seqs}, test_seqs: {self.test_seqs}, train_rel_records: {self.train_rel_records}, test_rel_records: {self.test_rel_records}")
        self.get_X = get_X
        self.get_pose = get_pose
        self.get_sift = get_sift
        self.get_SP = get_SP
        self.save_npy = save_npy
        self.delta_ijs = delta_ijs
        if self.save_npy:
            logging.info("+++ Dumping as npy")
        else:
            logging.info("+++ Dumping as h5")
        if self.get_sift:
            self.sift_num = sift_num
            self.if_BF_matcher = if_BF_matcher
            self.sift = cv2.xfeatures2d.SIFT_create(
                nfeatures=self.sift_num, contrastThreshold=1e-5
            )
        self.scenes = {
            "train": [],
            "test": [],
            "train_rel_records": self.train_rel_records,
            "test_rel_records": self.test_rel_records,
        }
        if self.get_SP:
            # NOTE(review): 'prapare_SP' looks like a typo for 'prepare_SP';
            # presumably defined on the base class — verify the name matches.
            self.prapare_SP()
        # no need two functions
        self.collect_train_folders()
        self.collect_test_folders()
    @staticmethod
    def filter_list(list, select_word=""):
        """Keep only the entries of *list* that contain *select_word*."""
        return [l for l in list if select_word in l]
    def read_images_files_from_folder(
        self, drive_path, seq_folders, file="train.txt", cam_id=1
    ):
        """Glob and sort all jpg frames of camera *cam_id* under the given
        sequence folders.

        seq_folders: list of relative paths from drive_path to the image folders
        Returns a sorted flat list of absolute image paths.
        """
        flat_list = lambda x: [item for sublist in x for item in sublist]
        print(f"drive_path: {drive_path}")
        ## given that we have matched time stamps
        # arr = np.genfromtxt(f'{drive_path}/{file}',dtype='str') # [N, 1(path)]
        img_folders = np.char.add(str(drive_path) + "/image/", seq_folders)
        img_folders = np.char.add(img_folders, f"/Camera {cam_id}")
        logging.info(f"img_folders: {img_folders}")
        img_files = [glob(f"{folder}/*.jpg") for folder in img_folders]
        img_files = flat_list(img_files)
        img_files = sorted(img_files)
        ## no time stamps
        print(f"img_files: {img_files[0]}")
        return img_files
    def collect_train_folders(self):
        """Register the absolute path of every training sequence folder."""
        for seq in self.train_seqs:
            seq_dir = os.path.join(self.dataset_dir, seq)
            self.scenes["train"].append(seq_dir)
    def collect_test_folders(self):
        """Register the absolute path of every test sequence folder."""
        for seq in self.test_seqs:
            seq_dir = os.path.join(self.dataset_dir, seq)
            self.scenes["test"].append(seq_dir)
    def load_image(self, scene_data, tgt_idx, show_zoom_info=True):
        """Load frame *tgt_idx* of a scene as RGB and resize it to the
        configured (img_height, img_width).

        Returns (resized_img, (zoom_x, zoom_y), original_img), or
        (None, None, None) when the file is missing.
        """
        # use different image filename
        img_file = Path(scene_data["img_files"][tgt_idx])
        if not img_file.is_file():
            logging.warning("Image %s not found!" % img_file)
            return None, None, None
        img_ori = cv2.imread(str(img_file))
        # OpenCV loads BGR; convert to RGB for the rest of the pipeline.
        img_ori = cv2.cvtColor(img_ori, cv2.COLOR_BGR2RGB)
        if [self.img_height, self.img_width] == [img_ori.shape[0], img_ori.shape[1]]:
            return img_ori, (1.0, 1.0), img_ori
        else:
            zoom_y = self.img_height / img_ori.shape[0]
            zoom_x = self.img_width / img_ori.shape[1]
            if show_zoom_info:
                logging.warning(
                    "[%s] Zooming the image (H%d, W%d) with zoom_yH=%f, zoom_xW=%f to (H%d, W%d)."
                    % (
                        img_file,
                        img_ori.shape[0],
                        img_ori.shape[1],
                        zoom_y,
                        zoom_x,
                        self.img_height,
                        self.img_width,
                    )
                )
            # img = scipy.misc.imresize(img_ori, (self.img_height, self.img_width))
            img = cv2.resize(img_ori, (self.img_width, self.img_height))
            return img, (zoom_x, zoom_y), img_ori
    def get_calib_file_from_folder(self, foldername):
        """ get camera intrinsics file
        NOTE(review): self.cam_ids is a string (e.g. "5"), so `i` iterates
        characters and `i == 1` (int compare) is never true — only the else
        branch runs; confirm this is the intended behavior. Also the f-string
        contains a literal backslash before the space ("Camera\\ 5.cam") —
        verify against the actual file names on disk.
        """
        for i in self.cam_ids:
            if i == 1 or i == 2:
                calib_file = f"{foldername}/camera_params/Camera_{i}.cam"
            else:
                calib_file = f"{foldername}/camera_params/Camera\ {i}.cam"
        return calib_file
    @staticmethod
    def get_pose_to_dict(pose_path, cam_id=1):
        """ get ground truth camera poses
        Globs every Camera-{cam_id} pose txt under *pose_path* and merges the
        parsed entries into one dict (keyed as produced by
        eval_pose.load_pose_file; presumably image name -> pose — verify).
        """
        print(f"pose_path: {pose_path}")
        eval_agent = eval_pose({})
        if cam_id == 1 or cam_id == 2:
            pose_files = glob(f"{pose_path}/**/Camera_{cam_id}.txt")
        else:
            pose_files = glob(f"{pose_path}/**/Camera {cam_id}.txt")
        logging.debug(f"pose: {pose_files[0]}")
        # calib_data = []
        pose_dict = {}
        for i, f in enumerate(pose_files):
            print(f"file: {f}")
            data = eval_agent.load_pose_file(f, sep=" ")
            # print(f"data: {data}")
            pose_dict.update(data)
        return pose_dict
    @staticmethod
    def get_pose_from_pose_dict(pose_dict, img_files):
        from apollo.utils import euler_angles_to_rotation_matrix
        """
        Convert per-image [roll, pitch, yaw, x, y, z] entries into flattened
        3x4 [R|t] matrices, in the order of *img_files*.
        input:
            pose: list of poses(np) [[[roll,pitch,yaw,x,y,z]], ...]
        Returns np array of shape [N, 12].
        """
        # poses = [pose_dict[Path(f).name] for f in img_files]
        poses = []
        for f in img_files:
            # Look up each image's pose by its file name.
            pose = pose_dict[Path(f).name].flatten()
            # print(f"pose: {pose}")
            rot = euler_angles_to_rotation_matrix(pose[:3])
            trans = pose[3:6]
            pose_mat = np.concatenate((rot, trans.reshape(-1, 1)), axis=1)
            poses.append(pose_mat.flatten())
        return np.array(poses)
    def collect_scene_from_drive(self, drive_path, split="train", skip_dumping=False):
        """Build the scene_data dict (images, poses, calibration) for one
        drive, verifying image shapes and frame/pose counts along the way.

        Returns a list with one scene_data per camera id (empty list when the
        first image is missing).
        """
        # adapt for Euroc dataset
        train_scenes = []
        split_mapping = self.split_mapping
        # split_mapping = {'train': 'train', 'test': 'val'}
        logging.info(f"Gathering {split} for {drive_path} ...")
        for c in self.cam_ids:
            scene_data = {
                "cid": c,
                "cid_num": self.cid_to_num[c],
                "dir": Path(drive_path),
                "rel_path": f"{split}/{Path(drive_path).name}_{c}",
            }
            # img_dir = os.path.join(drive_path, 'image_%d'%scene_data['cid_num'])
            # scene_data['img_files'] = sorted(glob(img_dir + '/*.png'))
            split_folder = "trainval_split" if self.debug else "split"
            scene_data["img_files"] = self.read_images_files_from_folder(
                self.scenes[f"{split}"][0],
                self.scenes[f"{split}_rel_records"],
                file=f"",
                cam_id=c,
            )
            # scene_data["depth_files"] = self.read_images_files_from_folder(
            #     drive_path, scene_data, folder="depth"
            # )
            scene_data["N_frames"] = len(scene_data["img_files"])
            assert scene_data["N_frames"] != 0, "No file found for %s!" % drive_path
            scene_data["frame_ids"] = [
                "{:06d}".format(i) for i in range(scene_data["N_frames"])
            ]
            ## Get gt poses
            pose_dict = self.get_pose_to_dict(
                f"{str(drive_path)}/pose/{Path(self.scenes[f'{split}_rel_records'][0]).parent.name}",
                cam_id=c,
            )
            poses = self.get_pose_from_pose_dict(pose_dict, scene_data["img_files"])
            assert scene_data["N_frames"] == poses.shape[0], (
                "scene_data[N_frames]!=poses.shape[0], %d!=%d"
                % (scene_data["N_frames"], poses.shape[0])
            )
            logging.info(f"N_frames: {scene_data['N_frames']}, n_poses: {poses.shape[0]}")
            scene_data["poses"] = poses
            ## read images (to learn/verify their shape and the resize factor)
            img_shape = None
            zoom_xy = None
            show_zoom_info = True
            if not skip_dumping:
                for idx in tqdm(range(scene_data["N_frames"])):
                    img, zoom_xy, img_ori = self.load_image(scene_data, idx, show_zoom_info)
                    # print(f"zoom_xy: {zoom_xy}")
                    if idx % 100 == 0:
                        logging.info(
                            f"img: {img.shape}, img_ori: {img_ori.shape}, zoom_xy: {zoom_xy}"
                        )
                    show_zoom_info = False
                    if img is None and idx == 0:
                        logging.warning("0 images in %s. Skipped." % drive_path)
                        return []
                    else:
                        if img_shape is not None:
                            assert img_shape == img.shape, (
                                "Inconsistent image shape in seq %s!" % drive_path
                            )
                        else:
                            img_shape = img.shape
            else:
                logging.warning(f"skip dumping images!!")
                img_shape = [1,1,3] ## dummy shape
                img_ori = np.zeros((1,1,3)) ## dummy image
                zoom_xy = [1,1]
            logging.debug(f"img_shape: {img_shape}")
            scene_data["calibs"] = {
                "im_shape": [img_shape[0], img_shape[1]],
                "zoom_xy": zoom_xy,
                "rescale": True if zoom_xy != (1.0, 1.0) else False,
            }
            # Get geo params from the RAW dataset calibs
            if c == "1" or c == "2": # for kitti
                calib_file = os.path.join(self.get_calib_file_from_folder(drive_path))
                logging.info(f"calibration file: {calib_file}")
                P_rect_noScale = self.get_cam_cali(calib_file)
            elif c == "5" or c == "6": # for apollo
                from apollo.data import ApolloScape
                from apollo.utils import intrinsic_vec_to_mat
                apo_data = ApolloScape()
                intr_vect = apo_data.get_intrinsic(
                    image_name=False, camera_name=f"Camera_{c}"
                )
                K = intrinsic_vec_to_mat(intr_vect, img_ori.shape)
                P_rect_noScale = np.concatenate((K, [[0], [0], [0]]), axis=1)
            logging.info(f"P_rect_noScale: {P_rect_noScale}")
            P_rect_noScale, P_rect_scale = self.get_P_rect(
                P_rect_noScale, scene_data["calibs"]
            )
            P_rect_ori_dict = {c: P_rect_scale}
            intrinsics = P_rect_ori_dict[c][:, :3]
            logging.debug(f"intrinsics: {intrinsics}")
            # calibs_rects = self.get_rect_cams(intrinsics, P_rect_ori_dict[c])
            calibs_rects = {"Rtl_gt": np.eye(4)} # only one camera, no extrinsics
            ## dummy matrices (identity placeholders for the KITTI-style fields)
            cam_2rect_mat = np.eye(4) # extrinsics for cam2
            velo2cam_mat = np.eye(4)
            cam2body_mat = np.eye(3)
            scene_data["calibs"].update(
                {
                    "K": intrinsics,
                    "P_rect_ori_dict": P_rect_ori_dict,
                    "P_rect_noScale": P_rect_noScale, # add for read and process 3d points
                    "cam_2rect": cam_2rect_mat,
                    "velo2cam": velo2cam_mat,
                    "cam2body_mat": cam2body_mat,
                }
            )
            scene_data["calibs"].update(calibs_rects)
            # extrinsic matrix for cameraN to this camera
            scene_data["Rt_cam2_gt"] = scene_data["calibs"]["Rtl_gt"]
            logging.debug(f'scene_data["Rt_cam2_gt"]: {scene_data["Rt_cam2_gt"]}')
            train_scenes.append(scene_data)
        return train_scenes
    def get_cam_cali(self, calib_file):
        """ get calibration matrix
        Parses fu/fv/cu/cv from a .cam file (fixed row positions) and returns
        the 3x4 projection [K|0].
        """
        calib_data = np.genfromtxt(calib_file, delimiter="=", comments="[", dtype=str)
        fu, fv, cu, cv = (
            float(calib_data[3, 1]),
            float(calib_data[4, 1]),
            float(calib_data[5, 1]),
            float(calib_data[6, 1]),
        )
        K = np.array([[fu, 0, cu], [0, fv, cv], [0, 0, 1]])
        P_rect_ori = np.concatenate((K, [[0], [0], [0]]), axis=1)
        return P_rect_ori
    def get_P_rect(self, P_rect_ori, calibs):
        """ rescale the camera calibration matrix
        Returns (original P, rescaled P); they are the same object when no
        rescaling is needed.
        """
        # rescale the camera matrix
        if calibs["rescale"]:
            P_rect_scale = scale_P(
                P_rect_ori, calibs["zoom_xy"][0], calibs["zoom_xy"][1]
            )
        else:
            P_rect_scale = P_rect_ori
        return P_rect_ori, P_rect_scale
    @staticmethod
    def load_velo(scene_data, tgt_idx, calib_K=None):
        """
        create point clouds from depth image, return array of points
        return:
            np [N, 3] (3d points)
        NOTE(review): depth is divided by scalingFactor=5000, the TUM RGB-D
        depth convention — confirm it matches this dataset's depth maps.
        """
        depth_file = scene_data["depth_files"][tgt_idx]
        color_file = scene_data["img_files"][tgt_idx]
        def get_point_cloud_from_images(color_file, depth_file, calib_K=None):
            # Back-project every valid depth pixel to camera coordinates.
            from PIL import Image
            depth = Image.open(depth_file)
            rgb = Image.open(color_file)
            points = []
            ## parameters (defaults are the classic TUM intrinsics)
            if calib_K is None:
                focalLength = 525.0
                centerX = 319.5
                centerY = 239.5
            else:
                focalLength = (calib_K[0, 0] + calib_K[1, 1]) / 2
                centerX = calib_K[0, 2]
                centerY = calib_K[1, 2]
            logging.debug(
                f"get calibration matrix for retrieving points: focalLength = {focalLength}, centerX = {centerX}, centerY = {centerY}"
            )
            scalingFactor = 5000.0
            for v in range(rgb.size[1]):
                for u in range(rgb.size[0]):
                    color = rgb.getpixel((u, v))
                    Z = depth.getpixel((u, v)) / scalingFactor
                    if Z == 0:
                        continue
                    X = (u - centerX) * Z / focalLength
                    Y = (v - centerY) * Z / focalLength
                    # points.append("%f %f %f %d %d %d 0\n"%(X,Y,Z,color[0],color[1],color[2]))
                    points.append([X, Y, Z])
            logging.debug(f"points: {points[:3]}")
            return np.array(points)
        pass
        ###
        if Path(color_file).is_file() is False or Path(depth_file).is_file() is False:
            logging.warning(
                f"color file {color_file} or depth file {depth_file} not found!"
            )
            return None
        xyz_points = get_point_cloud_from_images(
            color_file, depth_file, calib_K=calib_K
        )
        # xyz_points = np.ones((10,3)) ######!!!
        logging.debug(f"xyz: {xyz_points[0]}, {xyz_points.shape}")
        return xyz_points
def loadConfig(filename):
    """Load a YAML configuration file and return the parsed object.

    Uses yaml.safe_load: the original bare ``yaml.load(f)`` raises
    TypeError on PyYAML >= 6.0 (the Loader argument became mandatory),
    and safe_load also refuses to construct arbitrary Python objects
    from untrusted files.
    """
    import yaml

    with open(filename, "r") as f:
        config = yaml.safe_load(f)
    return config
if __name__ == "__main__":
    # Smoke test: build a loader over a local ApolloScape sequence and parse
    # its ground-truth poses. Paths below are machine-specific.
    from apollo_train_loader import apollo_train_loader as seq_loader
    # test pose
    # from apollo_train_loader import get_pose_to_dict
    dataset_dir = "/newfoundland/yyjau/apollo/train_seq_1/"
    data_loader = seq_loader(dataset_dir)
    pose_path = "/newfoundland/yyjau/apollo/train_seq_1/Road11/pose/GZ20180310B"
    pose_dict = data_loader.get_pose_to_dict(pose_path, cam_id=5)
    pass
| [
"yaml.load",
"pathlib.Path",
"glob.glob",
"os.path.join",
"multiprocessing.cpu_count",
"sys.path.append",
"os.path.abspath",
"cv2.cvtColor",
"logging.warning",
"os.path.dirname",
"numpy.genfromtxt",
"cv2.resize",
"dump_tools.utils_kitti.scale_P",
"numpy.char.add",
"apollo.eval_pose.eval_... | [((671, 696), 'os.path.dirname', 'os.path.dirname', (['BASE_DIR'], {}), '(BASE_DIR)\n', (686, 696), False, 'import os, sys\n'), ((697, 722), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (712, 722), False, 'import os, sys\n'), ((770, 791), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (789, 791), False, 'import coloredlogs, logging\n'), ((801, 820), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (818, 820), False, 'import coloredlogs, logging\n'), ((821, 869), 'coloredlogs.install', 'coloredlogs.install', ([], {'level': '"""INFO"""', 'logger': 'logger'}), "(level='INFO', logger=logger)\n", (840, 869), False, 'import coloredlogs, logging\n'), ((633, 658), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (648, 658), False, 'import os, sys\n'), ((19113, 19136), 'apollo_train_loader.apollo_train_loader', 'seq_loader', (['dataset_dir'], {}), '(dataset_dir)\n', (19123, 19136), True, 'from apollo_train_loader import apollo_train_loader as seq_loader\n'), ((573, 587), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (585, 587), True, 'import multiprocessing as mp\n'), ((2135, 2152), 'pathlib.Path', 'Path', (['dataset_dir'], {}), '(dataset_dir)\n', (2139, 2152), False, 'from pathlib import Path\n'), ((2293, 2327), 'logging.info', 'logging.info', (['f"""cam_id: {cam_ids}"""'], {}), "(f'cam_id: {cam_ids}')\n", (2305, 2327), False, 'import coloredlogs, logging\n'), ((4089, 4262), 'logging.info', 'logging.info', (['f"""train_seqs: {self.train_seqs}, test_seqs: {self.test_seqs}, train_rel_records: {self.train_rel_records}, test_rel_records: {self.test_rel_records}"""'], {}), "(\n f'train_seqs: {self.train_seqs}, test_seqs: {self.test_seqs}, train_rel_records: {self.train_rel_records}, test_rel_records: {self.test_rel_records}'\n )\n", (4101, 4262), False, 'import coloredlogs, logging\n'), ((5853, 5898), 'numpy.char.add', 'np.char.add', (['img_folders', 
'f"""/Camera {cam_id}"""'], {}), "(img_folders, f'/Camera {cam_id}')\n", (5864, 5898), True, 'import numpy as np\n'), ((5907, 5950), 'logging.info', 'logging.info', (['f"""img_folders: {img_folders}"""'], {}), "(f'img_folders: {img_folders}')\n", (5919, 5950), False, 'import coloredlogs, logging\n'), ((6684, 6722), 'pathlib.Path', 'Path', (["scene_data['img_files'][tgt_idx]"], {}), "(scene_data['img_files'][tgt_idx])\n", (6688, 6722), False, 'from pathlib import Path\n'), ((6918, 6958), 'cv2.cvtColor', 'cv2.cvtColor', (['img_ori', 'cv2.COLOR_BGR2RGB'], {}), '(img_ori, cv2.COLOR_BGR2RGB)\n', (6930, 6958), False, 'import cv2\n'), ((8460, 8473), 'apollo.eval_pose.eval_pose', 'eval_pose', (['{}'], {}), '({})\n', (8469, 8473), False, 'from apollo.eval_pose import eval_pose\n'), ((8673, 8712), 'logging.debug', 'logging.debug', (['f"""pose: {pose_files[0]}"""'], {}), "(f'pose: {pose_files[0]}')\n", (8686, 8712), False, 'import coloredlogs, logging\n'), ((9664, 9679), 'numpy.array', 'np.array', (['poses'], {}), '(poses)\n', (9672, 9679), True, 'import numpy as np\n'), ((9939, 9994), 'logging.info', 'logging.info', (['f"""Gathering {split} for {drive_path} ..."""'], {}), "(f'Gathering {split} for {drive_path} ...')\n", (9951, 9994), False, 'import coloredlogs, logging\n'), ((15707, 15772), 'numpy.genfromtxt', 'np.genfromtxt', (['calib_file'], {'delimiter': '"""="""', 'comments': '"""["""', 'dtype': 'str'}), "(calib_file, delimiter='=', comments='[', dtype=str)\n", (15720, 15772), True, 'import numpy as np\n'), ((15970, 16017), 'numpy.array', 'np.array', (['[[fu, 0, cu], [0, fv, cv], [0, 0, 1]]'], {}), '([[fu, 0, cu], [0, fv, cv], [0, 0, 1]])\n', (15978, 16017), True, 'import numpy as np\n'), ((16040, 16084), 'numpy.concatenate', 'np.concatenate', (['(K, [[0], [0], [0]])'], {'axis': '(1)'}), '((K, [[0], [0], [0]]), axis=1)\n', (16054, 16084), True, 'import numpy as np\n'), ((18649, 18707), 'logging.debug', 'logging.debug', (['f"""xyz: {xyz_points[0]}, 
{xyz_points.shape}"""'], {}), "(f'xyz: {xyz_points[0]}, {xyz_points.shape}')\n", (18662, 18707), False, 'import coloredlogs, logging\n'), ((18832, 18844), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (18841, 18844), False, 'import yaml\n'), ((2619, 2668), 'coloredlogs.install', 'coloredlogs.install', ([], {'level': '"""DEBUG"""', 'logger': 'logger'}), "(level='DEBUG', logger=logger)\n", (2638, 2668), False, 'import coloredlogs, logging\n'), ((3414, 3522), 'numpy.genfromtxt', 'np.genfromtxt', (['f"""{dataset_dir}/{seq}/{split_folder}/{self.split_mapping[\'train\']}.txt"""'], {'dtype': '"""str"""'}), '(\n f"{dataset_dir}/{seq}/{split_folder}/{self.split_mapping[\'train\']}.txt",\n dtype=\'str\')\n', (3427, 3522), True, 'import numpy as np\n'), ((3822, 3929), 'numpy.genfromtxt', 'np.genfromtxt', (['f"""{dataset_dir}/{seq}/{split_folder}/{self.split_mapping[\'test\']}.txt"""'], {'dtype': '"""str"""'}), '(\n f"{dataset_dir}/{seq}/{split_folder}/{self.split_mapping[\'test\']}.txt",\n dtype=\'str\')\n', (3835, 3929), True, 'import numpy as np\n'), ((4482, 4516), 'logging.info', 'logging.info', (['"""+++ Dumping as npy"""'], {}), "('+++ Dumping as npy')\n", (4494, 4516), False, 'import coloredlogs, logging\n'), ((4543, 4576), 'logging.info', 'logging.info', (['"""+++ Dumping as h5"""'], {}), "('+++ Dumping as h5')\n", (4555, 4576), False, 'import coloredlogs, logging\n'), ((4711, 4788), 'cv2.xfeatures2d.SIFT_create', 'cv2.xfeatures2d.SIFT_create', ([], {'nfeatures': 'self.sift_num', 'contrastThreshold': '(1e-05)'}), '(nfeatures=self.sift_num, contrastThreshold=1e-05)\n', (4738, 4788), False, 'import cv2\n'), ((5972, 5995), 'glob.glob', 'glob', (['f"""{folder}/*.jpg"""'], {}), "(f'{folder}/*.jpg')\n", (5976, 5995), False, 'from glob import glob\n'), ((6294, 6329), 'os.path.join', 'os.path.join', (['self.dataset_dir', 'seq'], {}), '(self.dataset_dir, seq)\n', (6306, 6329), False, 'import os, sys\n'), ((6473, 6508), 'os.path.join', 'os.path.join', (['self.dataset_dir', 
'seq'], {}), '(self.dataset_dir, seq)\n', (6485, 6508), False, 'import os, sys\n'), ((6770, 6819), 'logging.warning', 'logging.warning', (["('Image %s not found!' % img_file)"], {}), "('Image %s not found!' % img_file)\n", (6785, 6819), False, 'import coloredlogs, logging\n'), ((7810, 7864), 'cv2.resize', 'cv2.resize', (['img_ori', '(self.img_width, self.img_height)'], {}), '(img_ori, (self.img_width, self.img_height))\n', (7820, 7864), False, 'import cv2\n'), ((8538, 8581), 'glob.glob', 'glob', (['f"""{pose_path}/**/Camera_{cam_id}.txt"""'], {}), "(f'{pose_path}/**/Camera_{cam_id}.txt')\n", (8542, 8581), False, 'from glob import glob\n'), ((8621, 8664), 'glob.glob', 'glob', (['f"""{pose_path}/**/Camera {cam_id}.txt"""'], {}), "(f'{pose_path}/**/Camera {cam_id}.txt')\n", (8625, 8664), False, 'from glob import glob\n'), ((9457, 9498), 'apollo.utils.euler_angles_to_rotation_matrix', 'euler_angles_to_rotation_matrix', (['pose[:3]'], {}), '(pose[:3])\n', (9488, 9498), False, 'from apollo.utils import euler_angles_to_rotation_matrix\n'), ((11657, 11735), 'logging.info', 'logging.info', (['f"""N_frames: {scene_data[\'N_frames\']}, n_poses: {poses.shape[0]}"""'], {}), '(f"N_frames: {scene_data[\'N_frames\']}, n_poses: {poses.shape[0]}")\n', (11669, 11735), False, 'import coloredlogs, logging\n'), ((13102, 13142), 'logging.debug', 'logging.debug', (['f"""img_shape: {img_shape}"""'], {}), "(f'img_shape: {img_shape}')\n", (13115, 13142), False, 'import coloredlogs, logging\n'), ((14178, 14227), 'logging.info', 'logging.info', (['f"""P_rect_noScale: {P_rect_noScale}"""'], {}), "(f'P_rect_noScale: {P_rect_noScale}')\n", (14190, 14227), False, 'import coloredlogs, logging\n'), ((14467, 14509), 'logging.debug', 'logging.debug', (['f"""intrinsics: {intrinsics}"""'], {}), "(f'intrinsics: {intrinsics}')\n", (14480, 14509), False, 'import coloredlogs, logging\n'), ((14731, 14740), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (14737, 14740), True, 'import numpy as np\n'), ((14791, 
14800), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (14797, 14800), True, 'import numpy as np\n'), ((14828, 14837), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (14834, 14837), True, 'import numpy as np\n'), ((15454, 15526), 'logging.debug', 'logging.debug', (['f"""scene_data["Rt_cam2_gt"]: {scene_data[\'Rt_cam2_gt\']}"""'], {}), '(f\'scene_data["Rt_cam2_gt"]: {scene_data[\\\'Rt_cam2_gt\\\']}\')\n', (15467, 15526), False, 'import coloredlogs, logging\n'), ((16314, 16377), 'dump_tools.utils_kitti.scale_P', 'scale_P', (['P_rect_ori', "calibs['zoom_xy'][0]", "calibs['zoom_xy'][1]"], {}), "(P_rect_ori, calibs['zoom_xy'][0], calibs['zoom_xy'][1])\n", (16321, 16377), False, 'from dump_tools.utils_kitti import scale_P\n'), ((16963, 16985), 'PIL.Image.open', 'Image.open', (['depth_file'], {}), '(depth_file)\n', (16973, 16985), False, 'from PIL import Image\n'), ((17004, 17026), 'PIL.Image.open', 'Image.open', (['color_file'], {}), '(color_file)\n', (17014, 17026), False, 'from PIL import Image\n'), ((18139, 18177), 'logging.debug', 'logging.debug', (['f"""points: {points[:3]}"""'], {}), "(f'points: {points[:3]}')\n", (18152, 18177), False, 'import coloredlogs, logging\n'), ((18197, 18213), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (18205, 18213), True, 'import numpy as np\n'), ((18343, 18429), 'logging.warning', 'logging.warning', (['f"""color file {color_file} or depth file {depth_file} not found!"""'], {}), "(\n f'color file {color_file} or depth file {depth_file} not found!')\n", (18358, 18429), False, 'import coloredlogs, logging\n'), ((7265, 7473), 'logging.warning', 'logging.warning', (["('[%s] Zooming the image (H%d, W%d) with zoom_yH=%f, zoom_xW=%f to (H%d, W%d).'\n % (img_file, img_ori.shape[0], img_ori.shape[1], zoom_y, zoom_x, self.\n img_height, self.img_width))"], {}), "(\n '[%s] Zooming the image (H%d, W%d) with zoom_yH=%f, zoom_xW=%f to (H%d, W%d).'\n % (img_file, img_ori.shape[0], img_ori.shape[1], zoom_y, zoom_x, self.\n img_height, 
self.img_width))\n", (7280, 7473), False, 'import coloredlogs, logging\n'), ((10149, 10165), 'pathlib.Path', 'Path', (['drive_path'], {}), '(drive_path)\n', (10153, 10165), False, 'from pathlib import Path\n'), ((12903, 12944), 'logging.warning', 'logging.warning', (['f"""skip dumping images!!"""'], {}), "(f'skip dumping images!!')\n", (12918, 12944), False, 'import coloredlogs, logging\n'), ((13007, 13026), 'numpy.zeros', 'np.zeros', (['(1, 1, 3)'], {}), '((1, 1, 3))\n', (13015, 13026), True, 'import numpy as np\n'), ((13567, 13614), 'logging.info', 'logging.info', (['f"""calibration file: {calib_file}"""'], {}), "(f'calibration file: {calib_file}')\n", (13579, 13614), False, 'import coloredlogs, logging\n'), ((14628, 14637), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (14634, 14637), True, 'import numpy as np\n'), ((17390, 17533), 'logging.debug', 'logging.debug', (['f"""get calibration matrix for retrieving points: focalLength = {focalLength}, centerX = {centerX}, centerY = {centerY}"""'], {}), "(\n f'get calibration matrix for retrieving points: focalLength = {focalLength}, centerX = {centerX}, centerY = {centerY}'\n )\n", (17403, 17533), False, 'import coloredlogs, logging\n'), ((13872, 13885), 'apollo.data.ApolloScape', 'ApolloScape', ([], {}), '()\n', (13883, 13885), False, 'from apollo.data import ApolloScape\n'), ((14040, 14086), 'apollo.utils.intrinsic_vec_to_mat', 'intrinsic_vec_to_mat', (['intr_vect', 'img_ori.shape'], {}), '(intr_vect, img_ori.shape)\n', (14060, 14086), False, 'from apollo.utils import intrinsic_vec_to_mat\n'), ((14120, 14164), 'numpy.concatenate', 'np.concatenate', (['(K, [[0], [0], [0]])'], {'axis': '(1)'}), '((K, [[0], [0], [0]]), axis=1)\n', (14134, 14164), True, 'import numpy as np\n'), ((12198, 12277), 'logging.info', 'logging.info', (['f"""img: {img.shape}, img_ori: {img_ori.shape}, zoom_xy: {zoom_xy}"""'], {}), "(f'img: {img.shape}, img_ori: {img_ori.shape}, zoom_xy: {zoom_xy}')\n", (12210, 12277), False, 'import 
coloredlogs, logging\n'), ((12448, 12504), 'logging.warning', 'logging.warning', (["('0 images in %s. Skipped.' % drive_path)"], {}), "('0 images in %s. Skipped.' % drive_path)\n", (12463, 12504), False, 'import coloredlogs, logging\n'), ((18255, 18271), 'pathlib.Path', 'Path', (['color_file'], {}), '(color_file)\n', (18259, 18271), False, 'from pathlib import Path\n'), ((18294, 18310), 'pathlib.Path', 'Path', (['depth_file'], {}), '(depth_file)\n', (18298, 18310), False, 'from pathlib import Path\n'), ((9378, 9385), 'pathlib.Path', 'Path', (['f'], {}), '(f)\n', (9382, 9385), False, 'from pathlib import Path\n'), ((10206, 10222), 'pathlib.Path', 'Path', (['drive_path'], {}), '(drive_path)\n', (10210, 10222), False, 'from pathlib import Path\n'), ((11261, 11305), 'pathlib.Path', 'Path', (["self.scenes[f'{split}_rel_records'][0]"], {}), "(self.scenes[f'{split}_rel_records'][0])\n", (11265, 11305), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 22 07:09:33 2022
@author: owner
"""
# Most recent version: 23 May 2022 by <NAME>
# University of Colorado Boulder
# Advisors: <NAME> and <NAME>
# Major working functions for analysis of SDO/EVE 304 Angstrom light curves,
# SDO/AIA 1600 Angstrom images and ribbon masks, and SDO/HMI magnetograms.
# Includes algorithm for determination of separation and elongation of both
# ribbons relative to the polarity inversion line. Flares able to be analyzed
# are contained in the RibbonDB database (Kazachenko et al. 2017). Prior to
# running this script, the user should obtain flare light curves and times
# corresponding to the modeled flares in this database, for which the
# impulsiveness index has been determined previously.
# The polarity inversion line is determined by convolving the HMI masks
# associated for each flare with a Gaussian of predetermined length, then
# finding the major region of intersection between these and using the
# resulting heatmap to fit a fourth-order polynomial. Details of separation
# and elongation methods relative to the PIL are included below.
# Reconnection rates and ribbon areas for both positive and negative ribbons
# are determined, and the values corresponding to the rise phase of the flare
# (with some flare-specific variation) are fit to an exponential model, in
# order to prepare for modeling efforts of flare heating particularly in the
# rise phase.
# Separation and elongation values (perpendicular and parallel PIL-relative
# motion, respectively) are used to find separation and elongation rates,
# through which significant periods of these two phases of ribbon motion can
# be identified.
# Plotting and data presentation routines are also below, which includes an
# animation showing the timing of separation, elongation, and chromospheric
# line light curves.
# Addition of shear quantification code, 29 April 2022
# Addition of four-paneled figure comparing HXR, AIA, EVE, and shear
# quantification values with coordinated timestamps included.
from os.path import dirname, join as pjoin
import scipy.io as sio
from scipy.io import readsav
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animat
import datetime
from scipy.spatial.distance import cdist
import scipy.signal
import matplotlib.dates as mdates
from astropy.convolution import convolve, Gaussian2DKernel
import time as timepkg
from matplotlib import font_manager
def conv_facts(npix=800):
    """
    Conversion factors and physical coordinate grids for images.

    Parameters
    ----------
    npix : int, optional
        Number of pixels along each image axis. The default is 800, matching
        the original hard-coded image size.

    Returns
    -------
    X : ndarray
        Meshgrid of x values for image coordinates, in megameters.
    Y : ndarray
        Meshgrid of y values for image coordinates, in megameters.
    conv_f : float
        Conversion factor between pixels and megameters (Mm/pix).
    xarr_Mm : ndarray
        x-coordinates in megameters, centered on the image midpoint.
    yarr_Mm : ndarray
        y-coordinates in megameters, centered on the image midpoint.
    """
    pix_to_arcsec = 0.6  # asec/pix
    arcsec_to_radians = 1 / 206265  # rad/asec
    radians_to_Mm = 149598  # Mm/rad (1 AU expressed in Mm)
    conv_f = pix_to_arcsec * arcsec_to_radians * radians_to_Mm  # Mm/pix
    # Vectorized replacement of the original per-pixel loop; coordinates are
    # centered on the image midpoint (pixel npix // 2).
    xarr_Mm = (np.arange(npix) - npix // 2) * conv_f
    yarr_Mm = (np.arange(npix) - npix // 2) * conv_f
    X, Y = np.meshgrid(xarr_Mm, yarr_Mm)
    return X, Y, conv_f, xarr_Mm, yarr_Mm
def exponential(x, a, b):
    """
    Exponential model a * exp(b * x).

    Parameters
    ----------
    x : float
        Point at which to evaluate the model.
    a : float
        Amplitude of the exponential.
    b : float
        Exponential growth (b > 0) or decay (b < 0) rate.

    Returns
    -------
    float
        Model value a * exp(b * x).
    """
    growth = np.exp(b * x)
    return a * growth
def exponential_neg(x, a, b):
    """
    Negative-amplitude exponential model -a * exp(b * x).

    Parameters
    ----------
    x : float
        Point at which to evaluate the model.
    a : float
        Magnitude of the (negated) amplitude.
    b : float
        Exponential growth (b > 0) or decay (b < 0) rate.

    Returns
    -------
    float
        Model value -a * exp(b * x).
    """
    growth = np.exp(b * x)
    return -a * growth
def curve_length(curve):
    """
    Total arc length of a polyline: the sum of Euclidean distances between
    consecutive points of *curve* (an (N, d) array of coordinates).
    """
    segments = np.diff(curve, axis=0)
    return np.sum(np.sqrt(np.sum(segments ** 2, axis=1)))
def datenum_to_datetime(datenum):
    """
    Convert a Matlab datenum into a Python datetime.

    Parameters
    ----------
    datenum : float
        Matlab serial date number (days since Matlab's epoch).

    Returns
    -------
    datetime
        Equivalent Python datetime.
    """
    frac_days = datenum % 1
    # Matlab's epoch is offset by 366 days from Python's proleptic ordinal.
    return (datetime.datetime.fromordinal(int(datenum))
            + datetime.timedelta(days=frac_days)
            - datetime.timedelta(days=366))
def datenum(d):
    """
    Convert a Python datetime *d* into a Matlab serial date number.
    """
    ordinal = d.toordinal()
    midnight = datetime.datetime.fromordinal(ordinal)
    # Fraction of the day elapsed since midnight, in units of days.
    frac = (d - midnight).total_seconds() / (24 * 60 * 60)
    return 366 + ordinal + frac
def find_nearest(array, value):
    """
    Return the element of *array* closest to *value*.

    Parameters
    ----------
    array : list
        Values to search through.
    value : float
        Target value.

    Returns
    -------
    float
        The element of *array* nearest to *value*.
    """
    arr = np.asarray(array)
    nearest_idx = np.abs(arr - value).argmin()
    return arr[nearest_idx]
def format_time():
    """
    Current local time formatted as 'YYYY-MM-DD HH:MM:SS.mmm'.

    Returns
    -------
    str
        Millisecond-precision timestamp string.
    """
    now = datetime.datetime.now()
    # strftime's %f gives microseconds; drop the last three digits for ms.
    return now.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
def find_nearest_ind(array, value):
    """
    Return the index of the element of *array* closest to *value*,
    ignoring NaN entries.

    Parameters
    ----------
    array : list
        Values to search through.
    value : float
        Target value.

    Returns
    -------
    idx : int
        Index of the nearest element.
    """
    deviations = np.abs(np.asarray(array) - value)
    return np.nanargmin(deviations)
def load_variables(bestflarefile, year, mo, day, sthr, stmin, arnum, xclnum,
                   xcl):
    """
    Load variables from HMI and AIA files.

    Parameters
    ----------
    bestflarefile : string
        Path to file containing information about the best-performing flares.
    year : int
        Year of event.
    mo : int
        Month of event.
    day : int
        Day of event.
    sthr : int
        Hour of event start.
    stmin : int
        Minute of event start.
    arnum : int
        Active region number.
    xclnum : int
        X-ray class number.
    xcl : str
        X-ray class.

    Returns
    -------
    sav_data_aia : AttrDict
        Dictionary containing all of the saved parameters in the AIA file.
    sav_data : AttrDict
        Dictionary containing all of the saved parameters in the HMI file.
    best304 : dict
        Dictionary containing the SDO/EVE 304 Angstrom data of the
        best-performing flares in ribbonDB.
    start304 : list
        Array containing the start times for the flares in best304.
    peak304 : list
        Array containing the peak times for the flares in best304.
    end304 : list
        Array containing the end times for the flares in best304.
    eventindices : list
        Indices of best flares in best304.
    times304 : list
        Time points for all flares in best304.
    curves304 : list
        Light curves for all flares in best304.
    aia_cumul8 : list
        Cumulative ribbon masks from AIA.
    aia_step8 : list
        Instantaneous ribbon masks from AIA.
    last_cumul8 : list
        The last image in the cumulative mask array.
    hmi_dat : list
        HMI image prior to the flare, assumed to be the same configuration
        throughout the flare.
    last_mask : list
        The last ribbon mask, multiplied by the HMI image for polarity.
    """
    # NOTE(review): data_dir is effectively unused below — pjoin discards it
    # because the second argument is an absolute path.
    data_dir = pjoin(dirname(sio.__file__), 'tests', 'data')
    # Load matlab file, get 304 light curves and start/peak/end times for flare
    best304 = sio.loadmat(bestflarefile)
    # Column 0 holds times; column 1 of the start-time array holds indices.
    start304 = best304['starttimes_corr_more'][:, 0]
    peak304 = best304['maxtimes_corr_more'][:, 0]
    end304 = best304['endtimes_corr_more'][:, 0]
    eventindices = best304['starttimes_corr_more'][:, 1]
    times304 = best304['event_times_more']
    curves304 = best304['event_curves_more']
    # NOTE(review): the AIA and HMI paths below are hard-coded to one user's
    # machine; parameterizing them would make this function portable.
    sav_fname_aia = pjoin(data_dir, "/Users/owner/Desktop/Final_Selection/"
                          "AIA_Files/aia1600blos" + str(year).zfill(4) +
                          str(mo).zfill(2) + str(day).zfill(2) + "_" +
                          str(sthr).zfill(2) + str(stmin).zfill(2) + "_" +
                          str(arnum).zfill(5) + "_"+xcl + str(xclnum) + ".sav")
    sav_data_aia = readsav(sav_fname_aia)
    sav_fname = ("/Users/owner/Desktop/CU_Research/HMI_files/posfile" +
                 str(year).zfill(4) + str(mo).zfill(2) + str(day).zfill(2) +
                 "_" + str(sthr).zfill(2) + str(stmin).zfill(2) + "_" +
                 str(arnum).zfill(5) + "_"+xcl + str(xclnum) +
                 "_cut08_sat5000.00_brad.sav")
    sav_data = readsav(sav_fname)
    aia_cumul8 = sav_data.pos8
    last_cumul8 = aia_cumul8[-1, :, :]  # Last frame
    hmi_dat = sav_data.hmi
    # Signed mask: cumulative ribbon footprint weighted by HMI polarity.
    last_mask = last_cumul8 * hmi_dat
    aia_step8 = sav_data.inst_pos8
    return sav_data_aia, sav_data, best304, start304, peak304, end304, \
        eventindices, times304, curves304, aia_cumul8, aia_step8, \
        last_cumul8, hmi_dat, last_mask
def pos_neg_masking(aia_cumul8, aia_step8, hmi_dat, last_mask):
    """
    Masking of positive and negative ribbons according to HMI polarity.

    Parameters
    ----------
    aia_cumul8 : list
        Cumulative ribbon masks.
    aia_step8 : list
        Instantaneous ribbon masks.
    hmi_dat : list
        HMI image prior to the flare, assumed to be the same configuration
        throughout the flare.
    last_mask : list
        The last ribbon mask, multiplied by the HMI image for polarity.

    Returns
    -------
    hmi_cumul_mask1 : list
        Cumulative magnetic field polarity masks (+1/-1/0) for all flare
        images.
    hmi_step_mask1 : list
        Instantaneous magnetic field polarity masks (+1/-1/0) for all flare
        images.
    hmi_pos_mask_c : list
        Single-frame mask for positive HMI magnetic field, populated with 1.
    hmi_neg_mask_c : list
        Single-frame mask for negative HMI magnetic field, populated with -1.
    """
    def _polarity(masked):
        # Map >0 -> 1, <0 -> -1, everything else (including 0 and NaN) -> 0,
        # exactly as the original per-pixel comparisons did.
        return np.where(masked > 0, 1.0, np.where(masked < 0, -1.0, 0.0))

    # Vectorized replacement of the original O(T*N^2) Python loops, which
    # also assumed square images (they used len(arr[1]) as the column count).
    # Broadcasting applies the single HMI frame to every time step.
    hmi_cumul_mask1 = _polarity(aia_cumul8 * hmi_dat)
    hmi_step_mask1 = _polarity(aia_step8 * hmi_dat)
    # Single-frame masks for positive and negative ribbons
    hmi_pos_mask_c = np.where(last_mask > 0, 1.0, 0.0)
    hmi_neg_mask_c = np.where(last_mask < 0, -1.0, 0.0)
    return hmi_cumul_mask1, hmi_step_mask1, hmi_pos_mask_c, hmi_neg_mask_c
def spur_removal_sep(hmi_neg_mask_c, hmi_pos_mask_c, pos_crit=3,
                     neg_crit=3, pt_range=[-2, -1, 1, 2], ihi=800, ilo=0,
                     jhi=800, jlo=0, ihi2=800, ilo2=0, jhi2=800,
                     jlo2=0, ihi3=800, jlo3=0):
    """
    Spur removal in ribbon masks for the perpendicular motion identification.
    Removes regions where both negative and positive pixels exist.

    Parameters
    ----------
    hmi_neg_mask_c : list
        Single-frame mask for negative HMI magnetic field, populated with -1.
    hmi_pos_mask_c : list
        Single-frame mask for positive HMI magnetic field, populated with 1.
    pos_crit : int, optional
        Number of negative points surrounding a positive pixel for which the
        positive pixel should be removed. The default is 3.
    neg_crit : int, optional
        Number of positive points surrounding a negative pixel for which the
        negative pixel should be removed. The default is 3.
    pt_range : list, optional
        Pixel offsets to search around each pixel for opposite polarity. The
        default is [-2, -1, 1, 2].
    ihi : int, optional
        Upper i-limit for allowance of pixel masks, negative. The default is
        800.
    ilo : int, optional
        Lower i-limit for allowance of pixel masks, negative. The default is
        0.
    jhi : int, optional
        Upper j-limit for allowance of pixel masks, negative. The default is
        800.
    jlo : int, optional
        Lower j-limit for allowance of pixel masks, negative. The default is
        0.
    ihi2 : int, optional
        Upper i-limit for allowance of pixel masks, positive. The default is
        800.
    ilo2 : int, optional
        Lower i-limit for allowance of pixel masks, positive. The default is
        0.
    jhi2 : int, optional
        Upper j-limit for allowance of pixel masks, positive. The default is
        800.
    jlo2 : int, optional
        Lower j-limit for allowance of pixel masks, positive. The default is
        0.
    ihi3 : int, optional
        Special limit for highest impulsiveness flare. The default is 800.
    jlo3 : int, optional
        Special limit for highest impulsiveness flare. The default is 0.

    Returns
    -------
    neg_rem : list
        The negative polarity HMI image, with spurs removed.
    pos_rem : list
        The positive polarity HMI image, with spurs removed.
    """
    # NOTE(review): the pt_range default is a mutable list shared across
    # calls; harmless here because it is only read, never mutated.
    neg_rem = np.zeros(np.shape(hmi_neg_mask_c))
    pos_rem = np.zeros(np.shape(hmi_pos_mask_c))
    # If > neg_crit positive pixels surround a negative pixel, remove negative
    # pixel.
    for i in range(len(neg_rem) - 2):
        for j in range(len(neg_rem[0]) - 2):
            # n counts opposite-polarity pixels in the pt_range neighborhood
            n = 0
            if hmi_neg_mask_c[i, j] == -1:
                for k in pt_range:
                    for h in pt_range:
                        # NOTE(review): near the low edges, i + k or j - h can
                        # go negative and wrap to the far side of the array
                        # (Python negative indexing) — confirm this is
                        # acceptable for the masks used here.
                        if hmi_pos_mask_c[i + k, j - h] == 1:
                            n = n + 1
                # Drop the pixel if it has too many opposite-polarity
                # neighbors or lies outside the allowed window (including the
                # special ihi3/jlo3 corner cut for the most impulsive flare).
                if n > neg_crit or i > ihi or i < ilo or j < jlo or j > jhi\
                        or (i > ihi3 and j < jlo3):
                    neg_rem[i, j] = 0
                else:
                    neg_rem[i, j] = -1
            else:
                neg_rem[i, j] = 0
    # If > pos_crit negative pixels surround a positive pixel, remove positive
    # pixel.
    for i in range(len(pos_rem) - 2):
        for j in range(len(pos_rem[0]) - 2):
            n = 0
            if hmi_pos_mask_c[i, j] == 1:
                for k in pt_range:
                    for h in pt_range:
                        if hmi_neg_mask_c[i + k, j - h] == -1:
                            n = n + 1
                if n > pos_crit or j > jhi2 or j < jlo2 or i < ilo2 or\
                        i > ihi2:
                    pos_rem[i, j] = 0
                else:
                    pos_rem[i, j] = 1
            else:
                pos_rem[i, j] = 0
    return neg_rem, pos_rem
def gauss_conv(pos_rem, neg_rem, sigma=10):
    """
    Smooth the opposite-polarity masks with a 2-D Gaussian and form the PIL
    weighting map from their overlap.

    Parameters
    ----------
    pos_rem : list
        The positive polarity HMI image, with spurs removed.
    neg_rem : list
        The negative polarity HMI image, with spurs removed.
    sigma : int, optional
        Width of the Gaussian to convolve with the images. The default is 10.

    Returns
    -------
    hmi_con_pos_c : list
        Positive HMI, convolved with the Gaussian.
    hmi_con_neg_c : list
        Negative HMI, convolved with the Gaussian.
    pil_mask_c : list
        PIL mask: the product of the two smoothed maps, nonzero only where
        the smoothed polarities overlap.
    """
    kernel = Gaussian2DKernel(sigma)
    hmi_con_pos_c = convolve(pos_rem, kernel)
    hmi_con_neg_c = convolve(neg_rem, kernel)
    # The PIL lives where the smoothed positive and negative maps intersect
    pil_mask_c = hmi_con_pos_c * hmi_con_neg_c
    return hmi_con_pos_c, hmi_con_neg_c, pil_mask_c
def pil_gen(pil_mask_c, hmi_dat, threshperc=0.05, lx=800, ly=800,
            polyor=4):
    """
    Fit a polynomial polarity inversion line to the polarity-overlap mask.

    Parameters
    ----------
    pil_mask_c : list
        PIL mask (product of convolved polarity maps; negative on input).
    hmi_dat : list
        Array of HMI values associated with the flare.
    threshperc : float, optional
        Fraction of the maximum PIL mask value a pixel must exceed to enter
        the polynomial fit. The default is 0.05.
    lx : int, optional
        Length of array in x direction. The default is 800.
    ly : int, optional
        Length of array in y direction. The default is 800.
    polyor : int, optional
        Order of fitting polynomial. The default is 4.

    Returns
    -------
    pil_mask_c : list
        PIL mask, sign-flipped so the overlap region is positive.
    ivs : list
        x-values for PIL polynomial.
    dvs : list
        y-values for PIL polynomial.
    hmik : list
        HMI image, divided by 1000 for unit conversion.
    """
    # Flip sign so the overlap region is positive
    pil_mask_c = -1.0 * pil_mask_c
    # Only pixels above this fraction of the peak enter the fit
    thresh = threshperc * np.amax(pil_mask_c)
    xc, yc = np.where(pil_mask_c > thresh)
    # Fit a polyor-order polynomial x(y) through the selected pixels
    x = np.linspace(0, lx, lx)
    y = np.linspace(0, ly, ly)
    coeffs = np.polyfit(y[yc], x[xc], polyor)
    ivs = y[yc]
    # Evaluate the polynomial term by term (highest power first)
    dvs = sum(c * ivs ** (polyor - i) for i, c in enumerate(coeffs))
    hmik = hmi_dat / 1000
    return pil_mask_c, ivs, dvs, hmik
def mask_sep(aia_step8, hmi_dat):
    """
    Masking of each image for each time step, for use in separation value
    determination.

    Parameters
    ----------
    aia_step8 : list
        Instantaneous AIA ribbon masks, c=8.
    hmi_dat : list
        SDO/HMI magnetic field data for flare.

    Returns
    -------
    aia8_pos : list
        Contains only the positive ribbon masks for each time step.
    aia8_neg : list
        Contains only the negative ribbon masks for each time step.
    """
    # Vectorized replacement of the original O(T*N^2) Python loops, which
    # also assumed square images (len(arr[1]) was used as the column count).
    # Broadcasting applies the single HMI frame to every time step.
    ribbon = aia_step8 == 1
    aia8_pos = np.where(ribbon & (hmi_dat > 0), 1.0, 0.0)
    aia8_neg = np.where(ribbon & (hmi_dat < 0), 1.0, 0.0)
    return aia8_pos, aia8_neg
def spur_removal_sep2(aia8_pos, aia8_neg, pos_crit=3, neg_crit=3,
                      pt_range=[-2, -1, 1, 2], jhi=800, jlo=0, khi=800,
                      klo=0, jhi2=800, jlo2=0, khi2=800, klo2=0):
    """
    Second step in removal of spurs from mask images for separation code.
    Limit window where ribbons are considered for PIL-relative perpendicular
    motion.

    Parameters
    ----------
    aia8_pos : list
        Output of mask_sep, containing positive mask isolated.
    aia8_neg : list
        Output of mask_sep, containing negative mask isolated.
    pos_crit : int, optional
        Number of points surrounding another which will be allowed in the
        positive ribbon. The default is 3.
    neg_crit : int, optional
        Number of points surrounding another which will be allowed in the
        negative ribbon. The default is 3.
    pt_range : list, optional
        Range of points around which to search for other pixels of the same
        polarity. The default is [-2, -1, 1, 2].
    jhi : int, optional
        Upper j-limit for allowance of pixel masks, negative. The default is
        800.
    jlo : int, optional
        Lower j-limit for allowance of pixel masks, negative. The default is
        0.
    khi : int, optional
        Upper k-limit for allowance of pixel masks, negative. The default is
        800.
    klo : int, optional
        Lower k-limit for allowance of pixel masks, negative. The default is
        0.
    jhi2 : int, optional
        Upper j-limit for allowance of pixel masks, positive. The default is
        800.
    jlo2 : int, optional
        Lower j-limit for allowance of pixel masks, positive. The default is
        0.
    khi2 : int, optional
        Upper k-limit for allowance of pixel masks, positive. The default is
        800.
    klo2 : int, optional
        Lower k-limit for allowance of pixel masks, positive. The default is
        0.

    Returns
    -------
    pos_rem0 : list
        Masks with spurious pixels removed, positive ribbon.
    neg_rem0 : list
        Masks with spurious pixels removed, negative ribbon.
    """
    # NOTE(review): shapes are taken from the opposite-polarity arrays
    # (neg from aia8_pos, pos from aia8_neg); harmless only because both
    # arrays have the same shape in practice.
    neg_rem0 = np.zeros(np.shape(aia8_pos))
    pos_rem0 = np.zeros(np.shape(aia8_neg))
    for i in range(len(neg_rem0)):
        # NOTE(review): len(neg_rem0[1]) - 2 uses frame 1's row count as the
        # column bound — correct only for square images. Near j = 0 or k = 0,
        # j + h / k + m can go negative and wrap (Python negative indexing).
        for j in range(len(neg_rem0[0]) - 2):
            for k in range(len(neg_rem0[1]) - 2):
                # n counts same-polarity neighbors within pt_range
                n = 0
                if aia8_neg[i, j, k] == 1:
                    for h in pt_range:
                        for m in pt_range:
                            if aia8_neg[i, j + h, k + m] == 1:
                                n = n + 1
                    # Keep the pixel only if well-connected and inside the
                    # allowed window
                    if n > neg_crit and j < jhi and j > jlo and k > klo \
                            and k < khi:
                        neg_rem0[i, j, k] = 1
                    else:
                        neg_rem0[i, j, k] = 0
                else:
                    neg_rem0[i, j, k] = 0
    for i in range(len(pos_rem0)):
        for j in range(len(pos_rem0[0]) - 2):
            for k in range(len(pos_rem0[1]) - 2):
                n = 0
                if aia8_pos[i, j, k] == 1:
                    for h in pt_range:
                        for m in pt_range:
                            if aia8_pos[i, j + h, k + m] == 1:
                                n = n + 1
                    if (n > pos_crit) and k < khi and k > klo and j > jlo and \
                            j < jhi:
                        pos_rem0[i, j, k] = 1
                    else:
                        pos_rem0[i, j, k] = 0
                else:
                    pos_rem0[i, j, k] = 0
    return pos_rem0, neg_rem0
def spur_removal_sepopt3(aia8_pos, aia8_neg, pos_crit=3, neg_crit=3,
                         pt_range=[-2, -1, 1, 2], jhi=800, jlo=0, khi=800,
                         klo=0, jhi2=800, jlo2=0, khi2=800, klo2=0):
    """
    Second step in removal of spurs from mask images for separation code -
    option for highest impulsiveness flare. Limit window where ribbons are
    considered for PIL-relative perpendicular motion.

    Parameters
    ----------
    aia8_pos : list
        Output of mask_sep, containing positive mask isolated.
    aia8_neg : list
        Output of mask_sep, containing negative mask isolated.
    pos_crit : int, optional
        Number of points surrounding another which will be allowed in the
        positive ribbon. The default is 3.
    neg_crit : int, optional
        Number of points surrounding another which will be allowed in the
        negative ribbon. The default is 3.
    pt_range : list, optional
        Range of points around which to search for other pixels of the same
        polarity. The default is [-2, -1, 1, 2].
    jhi : int, optional
        Upper j-limit for allowance of pixel masks, negative. The default is
        800.
    jlo : int, optional
        Lower j-limit for allowance of pixel masks, negative. The default is
        0.
    khi : int, optional
        Upper k-limit for allowance of pixel masks, negative. The default is
        800.
    klo : int, optional
        Lower k-limit for allowance of pixel masks, negative. The default is
        0.
    jhi2 : int, optional
        Upper j-limit for allowance of pixel masks, positive. The default is
        800.
    jlo2 : int, optional
        Lower j-limit for allowance of pixel masks, positive. The default is
        0.
    khi2 : int, optional
        Upper k-limit for allowance of pixel masks, positive. The default is
        800.
    klo2 : int, optional
        Lower k-limit for allowance of pixel masks, positive. The default is
        0.

    Returns
    -------
    pos_rem0 : list
        Masks with spurious pixels removed, positive ribbon.
    neg_rem0 : list
        Masks with spurious pixels removed, negative ribbon.
    """
    neg_rem0 = np.zeros(np.shape(aia8_pos))
    pos_rem0 = np.zeros(np.shape(aia8_neg))
    for i in range(len(neg_rem0)):
        # NOTE(review): len(neg_rem0[1]) - 2 uses frame 1's row count as the
        # column bound — correct only for square images. Near j = 0 or k = 0,
        # j + h / k + m can go negative and wrap (Python negative indexing).
        for j in range(len(neg_rem0[0]) - 2):
            for k in range(len(neg_rem0[1]) - 2):
                # n counts same-polarity neighbors within pt_range
                n = 0
                if aia8_neg[i, j, k] == 1:
                    for h in pt_range:
                        for m in pt_range:
                            if aia8_neg[i, j + h, k + m] == 1:
                                n = n + 1
                    if (n > neg_crit) and (j < jhi and j > jlo and k > klo and
                                           k < khi):
                        neg_rem0[i, j, k] = 1
                    else:
                        neg_rem0[i, j, k] = 0
                    # Hard-coded cut-out window specific to the highest
                    # impulsiveness flare: always zero this region.
                    if (j > 400 and k > 400 and k < 425):
                        neg_rem0[i, j, k] = 0
                else:
                    neg_rem0[i, j, k] = 0
    for i in range(len(pos_rem0)):
        for j in range(len(pos_rem0[0]) - 2):
            for k in range(len(pos_rem0[1]) - 2):
                n = 0
                if aia8_pos[i, j, k] == 1:
                    for h in pt_range:
                        for m in pt_range:
                            if aia8_pos[i, j + h, k + m] == 1:
                                n = n + 1
                    if n > pos_crit and k < khi and k > klo and j > jlo and\
                            j < jhi:
                        pos_rem0[i, j, k] = 1
                    else:
                        pos_rem0[i, j, k] = 0
                else:
                    pos_rem0[i, j, k] = 0
    return pos_rem0, neg_rem0
def separation(aia_step8, ivs, dvs, pos_rem0, neg_rem0):
    """
    Determine the perpendicular (separation) distance of the positive and
    negative ribbons from the PIL at each time step.

    Parameters
    ----------
    aia_step8 : list
        Instantaneous AIA ribbon masks, c=8 (used only for the frame count).
    ivs : list
        x-values for PIL polynomial.
    dvs : list
        y-values for PIL polynomial.
    pos_rem0 : list
        Despurred positive ribbon masks for each time step.
    neg_rem0 : list
        Despurred negative ribbon masks for each time step.

    Returns
    -------
    distpos_med : list
        Perpendicular distance of positive ribbon from PIL, median of all
        pixel distances, per frame.
    distpos_mean : list
        Perpendicular distance of positive ribbon from PIL, mean of all
        pixel distances, per frame.
    distneg_med : list
        Perpendicular distance of negative ribbon from PIL, median of all
        pixel distances, per frame.
    distneg_mean : list
        Perpendicular distance of negative ribbon from PIL, mean of all
        pixel distances, per frame.
    """
    # PIL as a list of (x, y) points
    pil = list(zip(ivs, dvs))
    nframes = len(aia_step8)
    distpos_med = np.zeros(nframes)
    distneg_med = np.zeros(nframes)
    distpos_mean = np.zeros(nframes)
    distneg_mean = np.zeros(nframes)
    for i in range(nframes):
        xpos, ypos = np.where(pos_rem0[i, :, :] == 1)
        xneg, yneg = np.where(neg_rem0[i, :, :] == 1)
        # (col, row) ordering to match the (x, y) ordering of the PIL points
        pos_ops = list(zip(ypos, xpos))
        neg_ops = list(zip(yneg, xneg))
        if len(pos_ops) > 0:
            # Distance from each positive pixel to its nearest PIL point
            allpos_min = np.amin(cdist(pos_ops, pil), axis=1)
            distpos_med[i] = np.median(allpos_min)
            distpos_mean[i] = np.mean(allpos_min)
        if len(neg_ops) > 0:
            allneg_min = np.amin(cdist(neg_ops, pil), axis=1)
            distneg_med[i] = np.median(allneg_min)
            distneg_mean[i] = np.mean(allneg_min)
    # Bug fix: the original returned distpos_mean twice and never returned
    # distneg_mean, contradicting its own docstring.
    return distpos_med, distpos_mean, distneg_med, distneg_mean
def mask_elon(aia_cumul8, hmi_dat):
    """
    Masking for elongation algorithm.

    Splits the cumulative ribbon masks by magnetic polarity: a masked pixel
    goes to the positive array where the HMI field is positive and to the
    negative array where it is negative (zero-field pixels go to neither).

    Parameters
    ----------
    aia_cumul8 : list
        Cumulative ribbon masks, c=8.
    hmi_dat : list
        SDO/HMI image data for flare.

    Returns
    -------
    aia8_pos_2 : list
        Contains only the positive cumulative ribbon masks for each time step.
    aia8_neg_2 : list
        Contains only the negative cumulative ribbon masks for each time step.
    """
    cumul = np.asarray(aia_cumul8)
    aia8_pos_2 = np.zeros(np.shape(cumul))
    aia8_neg_2 = np.zeros(np.shape(cumul))
    # Broadcast the single HMI frame across the time axis of the mask cube
    lit = cumul == 1
    aia8_pos_2[lit & (hmi_dat > 0)] = 1
    aia8_neg_2[lit & (hmi_dat < 0)] = 1
    return aia8_pos_2, aia8_neg_2
def _remove_spurs(mask, crit, pt_range, jlo, jhi, klo, khi):
    """Zero out pixels of a 3-D mask cube that have too few same-polarity
    neighbours at the pt_range offsets, or that fall outside the
    (jlo, jhi) x (klo, khi) window.

    NOTE(review): mirrors the original loop bounds — uses len(mask[0]) - 2
    and len(mask[1]) - 2 (both row counts), which assumes square frames and
    at least two time steps; negative offsets near the frame edge wrap
    around via Python negative indexing, as in the original.
    """
    out = np.zeros(np.shape(mask))
    for i in range(len(out)):
        for j in range(len(out[0]) - 2):
            for k in range(len(out[1]) - 2):
                if mask[i, j, k] != 1:
                    continue
                # Count lit neighbours at the configured offsets
                n = 0
                for h in pt_range:
                    for m in pt_range:
                        if mask[i, j + h, k + m] == 1:
                            n = n + 1
                if n > crit and klo < k < khi and jlo < j < jhi:
                    out[i, j, k] = 1
    return out


def spur_removal_elon(aia8_pos_2, aia8_neg_2, pos_crit=3, neg_crit=3,
                      pt_range=(-2, -1, 1, 2), jhi=800, jlo=0, khi=800,
                      klo=0, jhi2=800, jlo2=0, khi2=800, klo2=0):
    """
    Removal of isolated regions of very few pixels in all time step images.

    Parameters
    ----------
    aia8_pos_2 : list
        Contains only the positive cumulative ribbon masks for each time step.
    aia8_neg_2 : list
        Contains only the negative cumulative ribbon masks for each time step.
    pos_crit : int, optional
        The number of pixels in positive ribbon within a region above which the
        point is allowed to remain in the image. The default is 3.
    neg_crit : int, optional
        The number of pixels in negative ribbon within a region above which the
        point is allowed to remain in the image. The default is 3.
    pt_range : sequence of int, optional
        Pixel offsets to search around each pixel for the same polarity.
        The default is (-2, -1, 1, 2) (a tuple, so the default cannot be
        mutated between calls).
    jhi : int, optional
        Upper j-limit for allowance of pixel masks, negative. The default is
        800.
    jlo : int, optional
        Lower j-limit for allowance of pixel masks, negative. The default is
        0.
    khi : int, optional
        Upper k-limit for allowance of pixel masks, negative. The default is
        800.
    klo : int, optional
        Lower k-limit for allowance of pixel masks, negative. The default is
        0.
    jhi2 : int, optional
        Upper j-limit for allowance of pixel masks, positive. The default is
        800.
    jlo2 : int, optional
        Lower j-limit for allowance of pixel masks, positive. The default is
        0.
    khi2 : int, optional
        Upper k-limit for allowance of pixel masks, positive. The default is
        800.
    klo2 : int, optional
        Lower k-limit for allowance of pixel masks, positive. The default is
        0.

    Returns
    -------
    neg_rem1 : list
        Vetted negative ribbon with the above criteria for each pixel.
    pos_rem1 : list
        Vetted positive ribbon with the above criteria for each pixel.
    """
    # The two polarities use identical pruning logic; only the threshold and
    # window parameters differ (the original duplicated the loops inline).
    neg_rem1 = _remove_spurs(aia8_neg_2, neg_crit, pt_range, jlo, jhi, klo, khi)
    pos_rem1 = _remove_spurs(aia8_pos_2, pos_crit, pt_range,
                             jlo2, jhi2, klo2, khi2)
    return neg_rem1, pos_rem1
def lim_pil(ivs, dvs):
    """
    Limit of the inversion line within a certain number of pixels (200) from
    the median image value.

    Parameters
    ----------
    ivs : list
        x-values for PIL polynomial.
    dvs : list
        y-values for PIL polynomial.

    Returns
    -------
    ivs_lim : list
        Vetted x-values for PIL polynomial.
    dvs_lim : list
        Vetted y-values for PIL polynomial.
    med_x : int
        Median x pixel in image.
    med_y : int
        Median y pixel in image.
    """
    med_x = np.median(ivs)
    med_y = np.median(dvs)
    # Indices whose x-value lies within +/- 200 px of the median x
    # (condition written exactly as the original to keep NaN handling)
    keep = [idx for idx in range(len(ivs))
            if not (ivs[idx] < (med_x - 200))
            and not (ivs[idx] > (med_x + 200))]
    ivs_lim = [ivs[idx] for idx in keep]
    dvs_lim = [dvs[idx] for idx in keep]
    return ivs_lim, dvs_lim, med_x, med_y
def rib_lim_elon(aia8_pos_2, aia8_neg_2, pos_rem1, neg_rem1, med_x, med_y,
                 ylim0_pos, ylim1_pos, ylim0_neg, ylim1_neg, xlim0_pos,
                 xlim1_pos, xlim0_neg, xlim1_neg):
    """
    Limiting of ribbons for processing with elongation algorithm.

    Any nonzero pixel of the vetted ribbon that falls inside the supplied
    y/x window is kept (as 1); everything else is zeroed.  Window limits are
    assumed to lie within the image bounds.

    Parameters
    ----------
    aia8_pos_2 : list
        Contains only the positive cumulative ribbon masks for each time step.
    aia8_neg_2 : list
        Contains only the negative cumulative ribbon masks for each time step.
    pos_rem1 : list
        Vetted positive ribbon with spur-removal criteria applied.
    neg_rem1 : list
        Vetted negative ribbon with spur-removal criteria applied.
    med_x : int
        Median x pixel in image (unused; kept for interface compatibility).
    med_y : int
        Median y pixel in image (unused; kept for interface compatibility).
    ylim0_pos, ylim1_pos : int
        Lower/upper y-limits for positive ribbon.
    ylim0_neg, ylim1_neg : int
        Lower/upper y-limits for negative ribbon.
    xlim0_pos, xlim1_pos : int
        Lower/upper x-limits for positive ribbon.
    xlim0_neg, xlim1_neg : int
        Lower/upper x-limits for negative ribbon.

    Returns
    -------
    aia_pos_rem : list
        Isolated positive ribbon masks.
    aia_neg_rem : list
        Isolated negative ribbon masks.
    """
    aia_pos_rem = np.zeros(np.shape(aia8_pos_2))
    aia_neg_rem = np.zeros(np.shape(aia8_neg_2))
    # Vectorized window copy: within the limit window, keep a 1 wherever the
    # vetted ribbon is nonzero (the rest of the frame stays 0).
    for t in range(len(aia8_neg_2)):
        window = neg_rem1[t, ylim0_neg:ylim1_neg, xlim0_neg:xlim1_neg]
        aia_neg_rem[t, ylim0_neg:ylim1_neg, xlim0_neg:xlim1_neg] = window > 0
    for t in range(len(aia8_pos_2)):
        window = pos_rem1[t, ylim0_pos:ylim1_pos, xlim0_pos:xlim1_pos]
        aia_pos_rem[t, ylim0_pos:ylim1_pos, xlim0_pos:xlim1_pos] = window > 0
    return aia_pos_rem, aia_neg_rem
def split_rib(aia_pos_rem, aia_neg_rem, split_pos, split_neg):
    """
    Split each ribbon mask into two halves at a given column index.

    Lit pixels strictly left of the split column go into the first array,
    strictly right into the second; pixels exactly on the split column go
    to neither half.

    Parameters
    ----------
    aia_pos_rem : list
        Isolated positive ribbon masks.
    aia_neg_rem : list
        Isolated negative ribbon masks.
    split_pos : int
        Split column for the positive ribbon.
    split_neg : int
        Split column for the negative ribbon.

    Returns
    -------
    rib_pos_1, rib_pos_2 : list
        Left/right halves of the positive ribbon masks.
    rib_neg_1, rib_neg_2 : list
        Left/right halves of the negative ribbon masks.
    """
    rib_pos_1 = np.zeros(np.shape(aia_pos_rem))
    rib_pos_2 = np.zeros(np.shape(aia_pos_rem))
    # NOTE(review): column count taken as len(frame 1) = its row count,
    # matching the original loop bounds — assumes square frames.
    n_row = len(aia_pos_rem[0])
    n_col = len(aia_pos_rem[1])
    cols = np.arange(n_col)
    for t in range(len(aia_pos_rem)):
        patch = aia_pos_rem[t, :n_row, :n_col]
        lit = patch == 1
        rib_pos_1[t, :n_row, :n_col] = lit & (cols < split_pos)
        rib_pos_2[t, :n_row, :n_col] = lit & (cols > split_pos)
    rib_neg_1 = np.zeros(np.shape(aia_neg_rem))
    rib_neg_2 = np.zeros(np.shape(aia_neg_rem))
    n_row = len(aia_neg_rem[0])
    n_col = len(aia_neg_rem[1])
    cols = np.arange(n_col)
    for t in range(len(aia_neg_rem)):
        patch = aia_neg_rem[t, :n_row, :n_col]
        lit = patch == 1
        rib_neg_1[t, :n_row, :n_col] = lit & (cols < split_neg)
        rib_neg_2[t, :n_row, :n_col] = lit & (cols > split_neg)
    return rib_pos_1, rib_pos_2, rib_neg_1, rib_neg_2
def _edge_of_ribbon(frame, col_iter, n_rows):
    """Scan columns of a frame in the given order and return (col, row) of
    the first lit pixel found.

    Quirk preserved from the original: scanning continues while the found
    column is still 0, so a hit in column 0 can be overwritten by a hit in
    a later column; (0, 0) is returned when no pixel is found.
    """
    edge_x = 0
    edge_y = 0
    for col in col_iter:
        for row in range(n_rows):
            if frame[row, col] == 1:
                edge_x = col
                edge_y = row
                break
        if edge_x != 0:
            break
    return edge_x, edge_y


def find_rib_coordinates(aia_pos_rem, aia_neg_rem):
    """
    Find coordinates of extreme limits of positive and negative ribbons.

    Parameters
    ----------
    aia_pos_rem : list
        Isolated positive ribbon masks.
    aia_neg_rem : list
        Isolated negative ribbon masks.

    Returns
    -------
    lr_coord_neg : list
        Extreme limits [left_x, left_y, right_x, right_y] of negative ribbon
        for each time step.
    lr_coord_pos : list
        Extreme limits [left_x, left_y, right_x, right_y] of positive ribbon
        for each time step.
    """
    lr_coord_pos = np.zeros([len(aia_pos_rem), 4])
    lr_coord_neg = np.zeros([len(aia_neg_rem), 4])
    # NOTE(review): column count taken as len(frame 1) = its row count,
    # matching the original loop bounds — assumes square frames.
    rows_p = len(aia_pos_rem[0])
    cols_p = len(aia_pos_rem[1])
    for t in range(len(aia_pos_rem)):
        # Left edge: scan columns left-to-right; right edge: right-to-left
        # (the right scan stops before column 0, as in the original).
        lx, ly = _edge_of_ribbon(aia_pos_rem[t], range(cols_p), rows_p)
        rx, ry = _edge_of_ribbon(aia_pos_rem[t], range(cols_p - 1, 0, -1),
                                 rows_p)
        lr_coord_pos[t, :] = [lx, ly, rx, ry]
    rows_n = len(aia_neg_rem[0])
    cols_n = len(aia_neg_rem[1])
    for t in range(len(aia_neg_rem)):
        lx, ly = _edge_of_ribbon(aia_neg_rem[t], range(cols_n), rows_n)
        rx, ry = _edge_of_ribbon(aia_neg_rem[t], range(cols_n - 1, 0, -1),
                                 rows_n)
        lr_coord_neg[t, :] = [lx, ly, rx, ry]
    return lr_coord_neg, lr_coord_pos
def sort_pil(ivs_lim, dvs_lim):
    """
    Sort PIL coordinates in ascending order of x.

    Parameters
    ----------
    ivs_lim : list
        Vetted x-values for PIL polynomial.
    dvs_lim : list
        Vetted y-values for PIL polynomial.

    Returns
    -------
    ivs_sort : list
        Sorted x-values for PIL polynomial.
    dvs_sort : list
        y-values reordered to match the sorted x-values.
    sortedpil : list
        Sorted ordered pairs for PIL polynomial.
    """
    # Pair up (x, y) as rows, then reorder rows by the x column
    pairs = np.column_stack((ivs_lim, dvs_lim))
    order = np.argsort(pairs[:, 0])
    sortedpil = pairs[order]
    ivs_sort, dvs_sort = sortedpil[:, 0], sortedpil[:, 1]
    return ivs_sort, dvs_sort, sortedpil
def _nearest_pil_point(point_x, point_y, ivs_lim, ivs_sort, dvs_sort):
    """Return [x, y, index] of the sorted-PIL vertex closest to
    (point_x, point_y).

    Ties are broken by the first (lowest-index) minimum, matching the
    original np.where(... == np.min(...)) selection.

    NOTE(review): the x coordinate is taken from the *unsorted* ivs_lim
    while y comes from the *sorted* dvs_sort, exactly as in the original
    implementation — confirm this mixed indexing is intended (only the
    index column appears to be consumed downstream by elongation()).
    """
    dists = np.sqrt((point_x - np.asarray(ivs_sort)) ** 2 +
                    (point_y - np.asarray(dvs_sort)) ** 2)
    idx = int(np.argmin(dists))
    return [ivs_lim[idx], dvs_sort[idx], idx]


def elon_dist_arrays(lr_coord_pos, lr_coord_neg, ivs_lim, dvs_lim, ivs_sort,
                     dvs_sort):
    """
    Create array for distances of limits of ribbon masks from PIL for each
    time step.

    Parameters
    ----------
    lr_coord_pos : list
        Extreme limits of positive ribbon for each time step.
    lr_coord_neg : list
        Extreme limits of negative ribbon for each time step.
    ivs_lim : list
        Vetted x-values for PIL polynomial.
    dvs_lim : list
        Vetted y-values for PIL polynomial (unused; kept for interface
        compatibility).
    ivs_sort : list
        Sorted x-values for PIL polynomial.
    dvs_sort : list
        Sorted y-values for PIL polynomial.

    Returns
    -------
    pil_right_near_pos : list
        Closest PIL point [x, y, index] to the "right" edge of positive
        ribbon for each time step.
    pil_left_near_pos : list
        Closest PIL point to the "left" edge of positive ribbon for each time
        step.
    pil_right_near_neg : list
        Closest PIL point to the "right" edge of negative ribbon for each time
        step.
    pil_left_near_neg : list
        Closest PIL point to the "left" edge of negative ribbon for each time
        step.
    """
    n_pos = len(lr_coord_pos)
    n_neg = len(lr_coord_neg)
    pil_left_near_pos = np.zeros([n_pos, 3])
    pil_right_near_pos = np.zeros([n_pos, 3])
    pil_left_near_neg = np.zeros([n_neg, 3])
    pil_right_near_neg = np.zeros([n_neg, 3])
    # The original expanded four near-identical distance loops and kept full
    # (n_steps x n_pil) distance matrices it never returned; the shared
    # helper computes each nearest vertex directly.
    for i in range(n_pos):
        left_x, left_y, right_x, right_y = lr_coord_pos[i]
        pil_left_near_pos[i, :] = _nearest_pil_point(left_x, left_y,
                                                     ivs_lim, ivs_sort,
                                                     dvs_sort)
        pil_right_near_pos[i, :] = _nearest_pil_point(right_x, right_y,
                                                      ivs_lim, ivs_sort,
                                                      dvs_sort)
    for i in range(n_neg):
        left_x, left_y, right_x, right_y = lr_coord_neg[i]
        pil_left_near_neg[i, :] = _nearest_pil_point(left_x, left_y,
                                                     ivs_lim, ivs_sort,
                                                     dvs_sort)
        pil_right_near_neg[i, :] = _nearest_pil_point(right_x, right_y,
                                                      ivs_lim, ivs_sort,
                                                      dvs_sort)
    return pil_right_near_pos, pil_left_near_pos, pil_right_near_neg, \
        pil_left_near_neg
def elongation(pil_right_near_pos, pil_left_near_pos, pil_right_near_neg,
               pil_left_near_neg, sortedpil):
    """
    Determine the extent of positive and negative ribbons along the PIL for
    each time step.

    For each time step, the PIL curve length between the two vertices
    closest to the ribbon's left and right limits is used as the elongation
    value.

    Parameters
    ----------
    pil_right_near_pos : list
        Closest PIL point [x, y, index] to the "right" edge of positive
        ribbon for each time step.
    pil_left_near_pos : list
        Closest PIL point to the "left" edge of positive ribbon for each time
        step.
    pil_right_near_neg : list
        Closest PIL point to the "right" edge of negative ribbon for each time
        step.
    pil_left_near_neg : list
        Closest PIL point to the "left" edge of negative ribbon for each time
        step.
    sortedpil : list
        Sorted ordered pairs for PIL polynomial.

    Returns
    -------
    lens_pos : list
        Extent of positive ribbon along the PIL for each time step.
    lens_neg : list
        Extent of negative ribbon along the PIL for each time step.
    """
    lens_pos = []
    lens_neg = []
    # Same computation for both polarities: slice the sorted PIL between the
    # left and right nearest-vertex indices and measure its curve length.
    polarity_sets = ((pil_left_near_pos, pil_right_near_pos, lens_pos),
                     (pil_left_near_neg, pil_right_near_neg, lens_neg))
    for near_left, near_right, out in polarity_sets:
        for i in range(len(near_right)):
            lo = int(near_left[i, 2])
            hi = int(near_right[i, 2])
            out.append(curve_length(sortedpil[lo:hi, :]))
    return lens_pos, lens_neg
def convert_to_Mm(lens_pos, dist_pos, lens_neg, dist_neg, conv_f):
    """
    Convert elongation and separation values, determined beforehand, to units
    of Mm, and form the time derivative (rate of change) of each series.

    Parameters
    ----------
    lens_pos : list
        Parallel extent of positive ribbon for each time step.
    dist_pos : list
        Perpendicular motion values for positive ribbon, either median or mean.
    lens_neg : list
        Parallel extent of negative ribbon for each time step.
    dist_neg : list
        Perpendicular motion values for negative ribbon, either median or mean.
    conv_f : float
        Conversion factor from pixels to Mm.

    Returns
    -------
    lens_pos_Mm : list
        Positive ribbon elongation for each time step, in Mm.
    lens_neg_Mm : list
        Negative ribbon elongation for each time step, in Mm.
    distpos_Mm : list
        Positive ribbon separation for each time step, in Mm.
    distneg_Mm : list
        Negative ribbon separation for each time step, in Mm.
    dneg_len : list
        Time derivative of negative ribbon elongation.
    dpos_len : list
        Time derivative of positive ribbon elongation.
    dneg_dist : list
        Time derivative of negative ribbon separation.
    dpos_dist : list
        Time derivative of positive ribbon separation.
    """
    lens_pos_Mm = np.zeros(np.shape(lens_pos))
    lens_neg_Mm = np.zeros(np.shape(lens_neg))
    distpos_Mm = np.zeros(np.shape(dist_pos))
    distneg_Mm = np.zeros(np.shape(dist_neg))
    # Scale each series element-wise by the pixel-to-Mm factor; all four
    # series are assumed to share the length of lens_pos.
    for idx in range(len(lens_pos)):
        lens_pos_Mm[idx] = conv_f * lens_pos[idx]
        lens_neg_Mm[idx] = conv_f * lens_neg[idx]
        distpos_Mm[idx] = conv_f * dist_pos[idx]
        distneg_Mm[idx] = conv_f * dist_neg[idx]
    # Finite differences over the 24 s image cadence
    dneg_len, dpos_len, dneg_dist, dpos_dist = (
        np.diff(series) / 24.0
        for series in (lens_neg_Mm, lens_pos_Mm, distneg_Mm, distpos_Mm))
    return lens_pos_Mm, lens_neg_Mm, distpos_Mm, distneg_Mm, dneg_len, \
        dpos_len, dneg_dist, dpos_dist
def prep_304_1600_parameters(sav_data_aia, sav_data, eventindices, flnum,
                             start304, peak304, end304, times304, curves304,
                             outflag=0):
    """
    Preps parameters for 304 Angstrom images, in addition to some datetime
    processing for 1600 Angstrom SDO/AIA images.
    Parameters
    ----------
    sav_data_aia : list
        AIA 1600 images processed from .sav file.
    sav_data : list
        HMI images processed from .sav file.
    eventindices : list
        RibbonDB event indices for pre-determined best-performing flares
        relative to approximate rise and decay phase models.
    flnum : int
        Event index for flare in question.
    start304 : list
        Array containing the start times for the flares in best304.
    peak304 : list
        Array containing the peak times for the flares in best304.
    end304 : list
        Array containing the end times for the flares in best304.
    times304 : list
        Time points for all flares in best304.
    curves304 : list
        Light curves for all flares in best304.
    outflag: int, optional
        Flag if the flare is not in the original list of "best performing".
        The number corresponds to RibbonDB flare number. The default is 0, in
        which case the flare exists in the database.
    Returns
    -------
    startin : int
        Array index for the start of the flare.
    peakin : int
        Array index for the peak of the flare.
    endin : int
        Array index for the end of the flare.
    times : arr
        Array of times for the flare, from AIA datafile.
    s304 : int
        Nearest index in AIA data to start time of the flare from EUV 304 light
        curve.
    e304 : int
        Nearest index in AIA data to end time of the flare from EUV 304 light
        curve.
    filter_304 : list
        Smoothed 304 light curve using scipy's medfilt function, with kernel
        size of 5.
    med304 : float
        Median of 304 Angstrom light curve.
    std304 : float
        Standard deviation of 304 Angstrom light curve.
    timelab : list
        Preparation of time labels for future plotting of light curves.
    aiadat : list
        AIA data for each time step.
    nt : int
        Number of time steps (or images).
    dn1600 : list
        Datenum values for 1600 Angstrom data.
    time304 : list
        Times corresponding to selected flare from 304 Angstrom data.
    times1600 : list
        Times corresponding to selected flare from 1600 Angstrom data.
    """
    # Field-of-view limits and image cube from the IDL .sav structures
    xlo = sav_data_aia.x1los
    xhi = sav_data_aia.x2los
    ylo = sav_data_aia.y1los
    yhi = sav_data_aia.y2los
    aiadat = sav_data_aia.aia1600
    time = sav_data.tim
    nt = len(time)
    nx = aiadat.shape[1]
    ny = aiadat.shape[2]
    # First and last timestamps as strings (bytes repr from the .sav file)
    t1 = str(sav_data.tim[0])
    t2 = str(sav_data.tim[-1])
    # Conversion of string times into usable floats
    # NOTE(review): each slice here is a single character (e.g. t1[14:15:1]),
    # so hours/minutes appear to be read from one digit of the timestamp
    # string — confirm these offsets against the actual "tim" string format.
    tst = float(t1[14:15:1]) + (float(t1[17:18:1])/60) +\
        (float(t1[20:24:1])/3600)
    tend = float(t2[14:15:1]) + (float(t2[17:18:1])/60) +\
        (float(t2[20:24:1])/3600)
    # Evenly spaced decimal-hour axis between first and last frame times
    times = np.linspace(tst, tend, nt)
    # Pixel coordinate grids (x, y appear unused below; presumably kept for
    # parity with plotting code elsewhere)
    x = np.linspace(xlo, xhi, nx)
    y = np.linspace(ylo, yhi, ny)
    x, y = np.meshgrid(x, y)
    # Object array of datetimes, total 1600A counts, and datenum per frame
    times1600 = np.empty(nt, dtype=datetime.datetime)
    sum1600 = np.empty(nt)
    dn1600 = np.empty(nt)
    for i in range(nt):
        timechoi = str(sav_data.tim[i])
        # Slice [2:21] strips the bytes-literal prefix/suffix from str(b'...')
        times1600[i] = datetime.datetime.strptime(timechoi[2:21],
                                                  '20%y-%m-%dT%H:%M:%S')
        dn1600[i] = datenum(times1600[i])
        timestep = aiadat[i, :, :]
        sum1600[i] = timestep.sum()
    # if flare not in list
    if outflag == 1242:
        # Special-cased flare loaded from a hard-coded local .mat file
        file1242 = '/Users/owner/Desktop/CU_Research/twelvefortytwo.mat'
        ev304 = sio.loadmat(file1242)
        curve304_0 = ev304['smspl']
        time304_0 = ev304['windowthr']
        st304 = ev304['tst']
        peak304 = ev304['maxt']
        end304 = ev304['tend']
        curve304 = []
        time304 = []
        # Flatten the (n,1) / (1,n) MATLAB arrays into plain lists
        for i in range(len(curve304_0)):
            curve304.append(curve304_0[i][0])
            time304.append(time304_0[0][i])
        # Map 304A start/peak/end times onto the nearest 1600A frame index
        startin = np.where(dn1600 == find_nearest(dn1600, st304))
        peakin = np.where(dn1600 == find_nearest(dn1600, peak304))
        endin = np.where(dn1600 == find_nearest(dn1600, end304))
    elif outflag == 0:
        # Find index of nearest index to flare number in 304 flares array
        ind = (np.isclose(eventindices, flnum))
        index = np.where(ind)[0][0]
        # Light curve for selected flare
        curve304 = curves304[index]
        time304 = times304[index]
        # Time indices for 1600A data - time series not identical
        startin = np.where(dn1600 == find_nearest(dn1600, start304[ind][0]))
        peakin = np.where(dn1600 == find_nearest(dn1600, peak304[ind][0]))
        endin = np.where(dn1600 == find_nearest(dn1600, end304[ind][0]))
    # Integrate over all pixels in 1600A line
    # NOTE(review): these two loops recompute sum1600 and times1600 exactly
    # as the loop above already did — apparently redundant but harmless.
    for i in range(nt):
        timestep = aiadat[i, :, :]
        sum1600[i] = timestep.sum()
    for i in range(nt):
        timechoi = str(sav_data.tim[i])
        times1600[i] = datetime.datetime.strptime(timechoi[2:21],
                                                  '20%y-%m-%dT%H:%M:%S')
    # Time indices for 304 - nearest to dn1600 points found
    s304 = find_nearest_ind(time304, min(dn1600))
    e304 = find_nearest_ind(time304, max(dn1600))
    # Median-filter smoothing and summary statistics of the 304A curve
    filter_304 = scipy.signal.medfilt(curve304, kernel_size=5)
    med304 = np.median(curve304)
    std304 = np.std(curve304)
    # Remove 304 Angstrom pixels below a threshold - these will be outliers.
    # Only applies to one flare studied as of 14 March 2022
    # NOTE(review): assigns the string 'NaN' — numpy float arrays coerce this
    # to nan, but for a plain list the element becomes a str; confirm
    # curve304's type here.
    for i in range(len(curve304)):
        if curve304[i] < 0.54:
            curve304[i] = 'NaN'
    # Minute-valued tick labels, one per 24 s frame
    timelab = np.empty(nt)
    timelabs = range(0, 24 * len(times), 24)
    for i in range(len(timelabs)):
        timelab[i] = timelabs[i] / 60
    return startin, peakin, endin, times, s304, e304, filter_304, med304, \
        std304, timelab, aiadat, nt, dn1600, time304, times1600
def img_mask(aia8_pos, aia8_neg, aiadat, nt):
    """
    Map the positive and negative ribbon masks onto the AIA data and sum
    the masked pixel values for each ribbon at each time step.

    Parameters
    ----------
    aia8_pos : list
        Contains only the positive ribbon masks for each time step.
    aia8_neg : list
        Contains only the negative ribbon masks for each time step.
    aiadat : list
        AIA 1600 Angstrom data for each time step.
    nt : int
        Number of time steps for the flare.

    Returns
    -------
    posrib : list
        Masked AIA images (actual pixel values) for the positive ribbon.
    negrib : list
        Masked AIA images (actual pixel values) for the negative ribbon.
    pos1600 : list
        Summed pixel values in positive ribbon for each time step.
    neg1600 : list
        Summed pixel values in negative ribbon for each time step.
    """
    posrib = np.zeros(np.shape(aia8_pos))
    negrib = np.zeros(np.shape(aia8_neg))
    # Element-wise product keeps the real AIA values under the 0/1 masks
    for t in range(len(aia8_pos)):
        posrib[t] = aia8_pos[t] * aiadat[t]
    for t in range(len(aia8_neg)):
        negrib[t] = aia8_neg[t] * aiadat[t]
    pos1600 = np.empty(nt)
    neg1600 = np.empty(nt)
    for t in range(nt):
        pos1600[t] = np.sum(posrib[t])
        neg1600[t] = np.sum(negrib[t])
    return posrib, negrib, pos1600, neg1600
def load_from_file(flnum, pick=True):
    """
    Option to load separation and elongation values from saved values in file.

    Parameters
    ----------
    flnum : str
        Path to the saved (.npz) archive for the selected flare.
    pick : bool, optional
        allow_pickle input for np.load. The default is True.

    Returns
    -------
    dt1600 : list
        1600 Angstrom datetime values for flare.
    pos1600 : list
        Summed pixels in positive ribbon for 1600 Angstrom data.
    neg1600 : list
        Summed pixels in negative ribbon for 1600 Angstrom data.
    time304 : list
        Time series for 304 Angstrom data.
    filter_304 : list
        Smoothed 304 Angstrom light curve.
    distpos_Mm : list
        Separation values for positive ribbon in Mm.
    distneg_Mm : list
        Separation values for negative ribbon in Mm.
    lens_pos_Mm : list
        Elongation values for positive ribbon in Mm.
    lens_neg_Mm : list
        Elongation values for negative ribbon in Mm.
    ivs : list
        x-coordinates for PIL polynomial.
    dvs : list
        y-coordinates for PIL polynomial.
    """
    keys = ('dt1600', 'pos1600', 'neg1600', 'time304', 'filter_304',
            'distpos_Mm', 'distneg_Mm', 'lens_pos_Mm', 'lens_neg_Mm',
            'ivs', 'dvs')
    # Pickle is not ideal, but all data in these files are only variables
    # saved by <NAME>, Spring 2022 (allow_pickle on untrusted files would be
    # a security risk).
    ev = np.load(flnum, allow_pickle=pick)
    try:
        # Extract everything eagerly so the archive can be closed
        values = tuple(ev[key] for key in keys)
    finally:
        # Bug fix: the original never closed the NpzFile, leaking the file
        # handle; plain .npy loads return an ndarray with no close().
        if hasattr(ev, 'close'):
            ev.close()
    return values
def elon_periods(dpos_len, dneg_len, pos_crit=1, neg_crit=1, zer_pos_c=2,
                 zer_neg_c=2, n_min=1, m_min=1):
    """
    Determination of periods of elongation for positive and negative ribbons
    from the time derivatives of the elongation series.

    A period starts when the derivative stays positive for more than
    pos_crit/neg_crit consecutive points, and ends after more than
    zer_pos_c/zer_neg_c non-positive points follow a run longer than
    n_min/m_min.

    Parameters
    ----------
    dpos_len : list
        Time derivative of positive ribbon elongation.
    dneg_len : list
        Time derivative of negative ribbon elongation.
    pos_crit : int, optional
        Number of points beyond which an "extended period" of elongation is
        defined, positive ribbon. The default is 1.
    neg_crit : int, optional
        Number of points beyond which an "extended period" of elongation is
        defined, negative ribbon. The default is 1.
    zer_pos_c : int, optional
        Number of zero-derivative points beyond which an "extended period" of
        elongation is said to end, positive ribbon. The default is 2.
    zer_neg_c : int, optional
        Number of zero-derivative points beyond which an "extended period" of
        elongation is said to end, negative ribbon. The default is 2.
    n_min : int, optional
        Minimum positive-run length before non-positive points are counted
        toward ending a positive-ribbon period. The default is 1.
    m_min : int, optional
        As n_min, for the negative ribbon. The default is 1.

    Returns
    -------
    elonperiod_start_pos : list
        Determined start times for elongation in positive ribbon.
    elonperiod_end_pos : list
        Determined end times for elongation in positive ribbon.
    elonperiod_start_neg : list
        Determined start times for elongation in negative ribbon.
    elonperiod_end_neg : list
        Determined end times for elongation in negative ribbon.
    """
    elonfiltpos = dpos_len
    elonfiltneg = dneg_len
    elonperiod_start_pos = []
    elonperiod_end_pos = []
    elonperiod_start_neg = []
    elonperiod_end_neg = []
    # n/m: current positive-run length; zer_n/zer_m: trailing non-positive
    # points counted since the run
    n = 0
    m = 0
    zer_n = 0
    zer_m = 0
    for i in range(len(elonfiltpos)):
        if elonfiltpos[i] > 0:
            n += 1
            if n == 1:
                # Remember where this positive run began
                time = i
            # Tripped if extended period of elongation, not already recorded
            if n > pos_crit and time not in elonperiod_start_pos:
                elonperiod_start_pos.append(time)
        elif elonfiltpos[i] <= 0:
            # NaN derivatives match neither branch and leave state unchanged
            if n > n_min:
                zer_n += 1
                # If rate of change returns to 0 for several points
                if zer_n > zer_pos_c:
                    elonperiod_end_pos.append(i)
                    n = 0
                    zer_n = 0
            else:
                n = 0
            continue
    # Comments identical to above method
    for j in range(len(elonfiltneg)):
        if elonfiltneg[j] > 0:
            m += 1
            if m == 1:
                time = j
            if m > neg_crit and time not in elonperiod_start_neg:
                elonperiod_start_neg.append(time)
        elif elonfiltneg[j] <= 0:
            if m > m_min:
                zer_m += 1
                if zer_m > zer_neg_c:
                    elonperiod_end_neg.append(j)
                    m = 0
                    zer_m = 0
            # NOTE(review): asymmetric with the positive branch (which uses a
            # plain else) — short runs only reset m once zer_m > 1; confirm
            # this difference is intentional.
            elif zer_m > 1:
                m = 0
            continue
    # Remove repeated values
    elonperiod_start_pos = list(set(elonperiod_start_pos))
    elonperiod_end_pos = list(set(elonperiod_end_pos))
    elonperiod_start_neg = list(set(elonperiod_start_neg))
    elonperiod_end_neg = list(set(elonperiod_end_neg))
    elonperiod_start_pos.sort()
    elonperiod_end_pos.sort()
    elonperiod_start_neg.sort()
    elonperiod_end_neg.sort()
    return elonperiod_start_pos, elonperiod_end_pos, elonperiod_start_neg, \
        elonperiod_end_neg
def _sep_run_bounds(deriv, start, grow_crit, end_crit):
    """Scan a smoothed separation derivative and collect (starts, ends) of
    extended positive runs.

    A run's start index is recorded once the run exceeds grow_crit points;
    its end is the first non-positive index after a run longer than
    end_crit. Written with ``elif <= 0`` so NaN values leave the state
    unchanged, as in the original.
    """
    starts = []
    ends = []
    run = 0
    mark = 0
    for i in range(start, len(deriv)):
        if deriv[i] > 0:
            run += 1
            if run == 1:
                mark = i
            if run > grow_crit and mark not in starts:
                starts.append(mark)
        elif deriv[i] <= 0:
            if run > end_crit:
                ends.append(i)
            run = 0
    # Deduplicate and order
    return sorted(set(starts)), sorted(set(ends))


def sep_periods(dpos_dist, dneg_dist, start=20, kernel_size=3, pos_crit=3,
                neg_crit=3, zer_pos_c=3, zer_neg_c=3):
    """
    Determination of periods of separation for each ribbon from time
    derivatives of separation data.

    Parameters
    ----------
    dpos_dist : list
        Time derivative of positive ribbon separation.
    dneg_dist : list
        Time derivative of negative ribbon separation.
    start : int, optional
        Index where to start processing. The default is 20.
    kernel_size : int, optional
        Kernel size for scipy medfilt of separation curves. The default is 3.
    pos_crit : int, optional
        Run length beyond which a positive-ribbon separation period starts.
        The default is 3.
    neg_crit : int, optional
        Run length beyond which a negative-ribbon separation period starts.
        The default is 3.
    zer_pos_c : int, optional
        Minimum run length for a non-positive point to close a
        positive-ribbon period. The default is 3.
    zer_neg_c : int, optional
        Minimum run length for a non-positive point to close a
        negative-ribbon period. The default is 3.

    Returns
    -------
    sepperiod_start_pos : list
        Start times for periods of extended separation, positive ribbon.
    sepperiod_end_pos : list
        End times for periods of extended separation, positive ribbon.
    sepperiod_start_neg : list
        Start times for periods of extended separation, negative ribbon.
    sepperiod_end_neg : list
        End times for periods of extended separation, negative ribbon.
    """
    # Separation values are much more variable, so smoothing is necessary
    sepfiltpos = scipy.signal.medfilt(dpos_dist, kernel_size=kernel_size)
    sepfiltneg = scipy.signal.medfilt(dneg_dist, kernel_size=kernel_size)
    sepperiod_start_pos, sepperiod_end_pos = _sep_run_bounds(
        sepfiltpos, start, pos_crit, zer_pos_c)
    sepperiod_start_neg, sepperiod_end_neg = _sep_run_bounds(
        sepfiltneg, start, neg_crit, zer_neg_c)
    return sepperiod_start_pos, sepperiod_end_pos, sepperiod_start_neg, \
        sepperiod_end_neg
def prep_times(dn1600, time304):
    """
    Convert datenum to datetime values for animation and presentation.

    NaN entries in time304 are substituted with the first 304 Angstrom time
    point before conversion.

    Parameters
    ----------
    dn1600 : list
        Datenum values for 1600 Angstrom data.
    time304 : list
        Datenum values for 304 Angstrom data.

    Returns
    -------
    dt1600 : list
        Datetime values for 1600 Angstrom data.
    dt304 : list
        Datetime values for 304 Angstrom data.
    """
    dt1600 = [datenum_to_datetime(dn) for dn in dn1600]
    dt304 = [datenum_to_datetime(time304[0] if np.isnan(t) else t)
             for t in time304]
    return dt1600, dt304
# BEGIN PLOTTING ROUTINES #
def lc_plot(times, nt, time304, filter_304, s304, e304, dn1600, pos1600,
neg1600, lens_pos_Mm, lens_neg_Mm, distpos_Mm, distneg_Mm, aiadat,
hmi_cumul_mask1, dt304, timelab, conv_f, ivs, dvs, year, mo, day,
arnum, xcl, xclnum, X, Y, xarr_Mm, yarr_Mm, dt1600, flag=1,
stsep=25, stelon=1, lolim=0, hilim=1):
"""
Animation plotting, with 1600 Angstrom, 304 Angstrom, and HMI data.
Parameters
----------
times : list
Times corresponding to each AIA time step.
nt : list
Number of image times.
time304 : list
Series of times for 304 Angstrom data.
filter_304 : list
Smoothed 304 Angstrom data.
s304 : int
Nearest index in AIA data to start time of the flare from EUV 304 light
curve.
e304 : int
Nearest index in AIA data to end time of the flare from EUV 304 light
curve.
dn1600 : list
Datenum values for 1600 Angstrom data.
pos1600 : list
Summed pixel numbers in positive ribbon for each time step.
neg1600 : list
Summed pixel numbers in negative ribbon for each time step.
lens_pos_Mm : list
Perpendicular extent of positive ribbon for each time step, in Mm.
lens_neg_Mm : list
Parallel extent of positive ribbon for each time step in Mm.
distpos_Mm : list
Perpendicular extent of negative ribbon for each time step, in Mm.
distneg_Mm : list
Parallel extent of positive ribbon for each time step in Mm.
aiadat : list
AIA data for each time step.
hmi_cumul_mask1 : list
Cumulative magnetic field strength masking estimates for all flare
images.
dt304 : list
Datetime values for 304 Angstrom data.
timelab : list
Points for labels in time axis.
conv_f : float
Conversion factor from pixels to Mm.
ivs : list
x-coordinates for PIL polynomial
dvs : list
y-coordinates for PIL polynomial
year : int
Year of event.
mo : int
Month of event.
day : int
Day of event.
arnum : int
Active region number.
xcl : str
x-ray class identifier (C, M, X)
xclnum : float
x-ray class identifier, number.
X : list
Meshgrid of x values for image coordinates.
Y : list
Meshgrid of y values for image coordinates.
xarr_Mm : list
x-coordinates, in megameters.
yarr_Mm : list
y-coordinates, in megameters.
dt1600 : list
Datetime values for 1600 Angstrom data.
flag : int, optional
0 (plot only first five frames) or 1 (plot all frames). The default is
1.
stsep: int, optional
Start index for separation curve. Default is 25.
stelon: int, optional
Start index for elongation curve. Default is 1.
lolim: float, optional
Start value for second y-axis on sep/elon plot. Default is 0.
hilim: float, optional
End value for second y-axis on sep/elon plot. Default is 1, which
triggers 140*conv_f.
Returns
-------
col1 : list
List comprising AIA data plot.
col2 : list
List comprising AIA contourmap.
lc304 : list
List comprising 304 Angstrom light curve.
lc1600 : list
List comprising 1600 Angstrom light curve.
sep : list
List comprising positive ribbon separation plot.
sep2 : list
List comprising negative ribbon separation plot.
elon : list
List comprising positive ribbon elongation plot.
elon2 : list
List comprising negative ribbon elongation plot.
"""
if hilim == 1:
hilim = 140*conv_f
# Extremes of chromospheric line light curves
min304 = min(filter_304[s304: e304])
max304 = max(filter_304[s304: e304])
minpos1600 = min(pos1600)
maxpos1600 = max(pos1600)
minneg1600 = min(neg1600)
maxneg1600 = max(neg1600)
# Normalize for light curve comparison
norm304 = (filter_304 - min304) / (max304 - min304)
normpos1600 = (pos1600 - minpos1600) / (maxpos1600 - minpos1600)
normneg1600 = (neg1600 - minneg1600) / (maxneg1600 - minneg1600)
scalefac = max(pos1600) / max(neg1600)
# Initialize plot
fig = plt.figure(figsize=(25, 12))
gs = fig.add_gridspec(9, 9)
ax1 = fig.add_subplot(gs[:, 5:])
ax2 = fig.add_subplot(gs[0:4, 0:4])
ax0 = fig.add_subplot(gs[5:, 0:4])
# Elongation plots
lns1 = ax0.plot(dn1600[stelon:], lens_pos_Mm[stelon:], '-+', c='red',
markersize=10, label='Pos. Elongation')
lns2 = ax0.plot(dn1600[stelon:], lens_neg_Mm[stelon:], '-+', c='blue',
markersize=10, label='Neg. Elongation')
ax5 = ax0.twinx()
ax5.cla()
lns4 = ax5
# Separation plots
lns5 = ax0.plot(dt1600[stsep:], distpos_Mm[stsep:], '-.', c='red',
markersize=10, label='Pos. Separation')
ax0.plot(dt1600[stsep:], distneg_Mm[stsep:], '-.', c='blue',
markersize=10, label='Neg. Separation')
# Plot 1600 Angstrom pcolormesh images, as well as HMI images
col1 = ax1.pcolormesh(X, Y, np.log10(aiadat[0, :, :]), cmap='pink',
shading='auto')
col2 = ax1.contour(X, Y, hmi_cumul_mask1[0, :, :], cmap='seismic')
# Plot 304 Angstrom light curve
lc304 = ax2.plot(dt304, norm304, color='black', linewidth=1,
label=r'Norm. 304$\AA$ Light Curve')
ax3 = ax2.twinx()
# Plot 1600 Angstrom light curve
lc1600 = ax3.plot(dt1600, normpos1600, linewidth=3, color='red',
label=r'Norm. 1600$\AA$ Light Curve, +')
lc1600 = ax3.plot(dt1600, normneg1600, linewidth=3, color='blue',
label=r'Norm. 1600$\AA$ Light Curve, +')
ax1.set_title(str(year) + "-" + str(mo) + "-" + str(day) + ", AR" +
str(arnum)+"\n" + xcl + str(xclnum) + " Class Flare\n",
font='Times New Roman', fontsize=25)
ax2.set_title(r'304$\AA$ and 1600$\AA$ Light Curves', fontsize=25)
ax0.set_title('Ribbon Separation and Elongation', fontsize=25)
ax0.legend(fontsize=15)
ax0.grid()
ax2.set_xlim([dn1600[0], dn1600[-1]])
ax3.set_xlim([dn1600[0], dn1600[-1]])
ax0.set_xlim([timelab[0], timelab[-1]])
# Plot PIL on 1600 Angstrom and HMI panel
ax1.scatter(ivs, dvs, color='k', s=1)
lines, labels = ax0.get_legend_handles_labels()
lines2, labels2 = ax5.get_legend_handles_labels()
ax0.legend(lines + lines2, labels + labels2)
ax5.set_ylim([lolim, hilim])
def animate(t):
ax1.cla()
ax2.cla()
ax0.cla()
ax5 = ax0.twinx()
ax5.cla()
# Plot 1600 Angstrom image
col1 = ax1.pcolormesh(X, Y, np.log10(aiadat[t, :, :]), cmap='pink',
shading='auto')
# HMI contour over 1600 Angstrom image
col2 = ax1.contour(X, Y, hmi_cumul_mask1[t, :, :], cmap='seismic')
ax1.set_xlabel('Horizontal Distance from Image Center [Mm]',
fontsize=15)
ax1.set_ylabel('Vertical Distance from Image Center [Mm]',
fontsize=15)
# Separation curves
sep = ax0.plot(dt1600[stsep:], distpos_Mm[stsep:], '-.', c='red',
markersize=10, label='Pos. Separation')
sep2 = ax0.plot(dt1600[stsep:], distneg_Mm[stsep:], '-.', c='blue',
markersize=10, label='Neg. Separation')
ax1.scatter((ivs-400) * conv_f, (dvs-400) * conv_f, color='k', s=1)
# Elongation curves
elon = ax5.plot(dt1600[stelon:], lens_pos_Mm[stelon:], '-+', c='red',
markersize=10, label='Pos. Elongation')
elon2 = ax5.plot(dt1600[stelon:], lens_neg_Mm[stelon:], '-+',
c='blue', markersize=10, label='Neg. Elongation')
ax1.set_xlim([-250 * conv_f, 250 * conv_f])
ax1.set_ylim([-250 * conv_f, 250 * conv_f])
# 304 Angstrom light curve
lc304 = ax2.plot(dt304, norm304, '-x', color='black', linewidth=1,
label=r'304$\AA$')
ax3 = ax2.twinx()
# 1600 Angstrom light curve
lc1600 = ax3.plot(dt1600, normpos1600, linewidth=3, color='red',
label=r'1600$\AA$, +')
lc1600 = ax3.plot(dt1600, normneg1600, linewidth=3, color='blue',
label=r'1600$\AA$, -')
ax2.set_xlim([dt1600[0], dt1600[-1]])
ax2.set_ylim([-0.05, 1.05])
ax3.set_ylim([-0.05, 1.05])
myFmt = mdates.DateFormatter('%H:%M')
ax2.xaxis.set_major_formatter(myFmt)
ax3.xaxis.set_major_formatter(myFmt)
ax0.xaxis.set_major_formatter(myFmt)
ax5.xaxis.set_major_formatter(myFmt)
textstr = r'1600$\AA$ +/- Factor: ' + str(round(scalefac, 3))
ax2.text(2 * (max(dt1600) - min(dt1600)) / 5 + min(dt1600), 0.1,
textstr, fontsize=12, bbox=dict(boxstyle="square",
facecolor="white",
ec="k", lw=1,
pad=0.3))
ax2.set_xlabel(['Time since 00:00 UT [min], ' + year + '-' + mo + '-'
+ day], fontsize=15)
ax2.set_xlabel(['Time since 00:00 UT [min], ' + year + '-' + mo + '-'
+ day], fontsize=15)
ax2.set_ylabel(r'Norm. Integ. Count, 1600$\AA$', color='purple',
fontsize=15)
lines, labels = ax2.get_legend_handles_labels()
lines2, labels2 = ax3.get_legend_handles_labels()
ax2.legend(lines + lines2, labels + labels2, loc='lower right')
ax2.grid(linestyle='dashed')
ax3.grid(linestyle='dashdot')
ax2.axvline(dt1600[t], linewidth=4, color='black')
ax0.axvline(dt1600[t], linewidth=4, color='black')
ax0.axvline(dt1600[t], linewidth=4, color='black')
ax1.set_title(str(year) + "-" + str(mo) + "-" + str(day) + ", AR" +
str(arnum) + ", " + xcl + str(xclnum) + " Class Flare",
fontsize=25)
ax2.set_title(r'304$\AA$ and 1600$\AA$ Light Curves', fontsize=25)
ax0.set_xlim([dt1600[0], dt1600[-1]])
ax0.set_xlabel(['Time since 00:00 UT [min], ' + year + '-' + mo + '-' +
day], fontsize=15)
ax0.set_ylabel('Separation [Mm]', fontsize=15)
ax5.set_ylabel('Elongation [Mm]', fontsize=15)
ax0.set_title('Ribbon Separation and Elongation', fontsize=25)
ax0.legend(fontsize=15)
ax0.grid()
ax1.text(57, 95, str(dt1600[t].hour).zfill(2) + ':' +
str(dt1600[t].minute).zfill(2) + '.' +
str(dt1600[t].second).zfill(2) + ' UT', fontsize=20,
bbox=dict(boxstyle="square", facecolor="white", ec="k",
lw=1, pad=0.3))
lines, labels = ax0.get_legend_handles_labels()
lines2, labels2 = ax5.get_legend_handles_labels()
ax0.legend(lines + lines2, labels + labels2, loc='lower right')
ax5.set_ylim([lolim, hilim])
return col1, col2, lc304, lc1600, sep, sep2, elon, elon2
# Option to only include first few frames for debugging purposes
if flag == 1:
ani = animat.FuncAnimation(fig, animate, frames=np.shape(aiadat)[0],
interval=20, repeat_delay=0)
elif flag == 0:
ani = animat.FuncAnimation(fig, animate, frames=5, interval=20,
repeat_delay=0)
ani.save(['/Users/owner/Desktop/' + mo + '_' + day + '_' + year + '.gif'],
dpi=200)
return None
def mask_plotting(X, Y, pos_rem, neg_rem, xarr_Mm, yarr_Mm, flnum):
    """
    Plot the positive- and negative-polarity HMI image masks.

    Parameters
    ----------
    X : list
        Meshgrid of x values for image coordinates.
    Y : list
        Meshgrid of y values for image coordinates.
    pos_rem : list
        The positive polarity HMI image, with spurs removed.
    neg_rem : list
        The negative polarity HMI image, with spurs removed.
    xarr_Mm : list
        x-coordinates, in megameters.
    yarr_Mm : list
        y-coordinates, in megameters.
    flnum : int
        RibbonDB index of flare in question.

    Returns
    -------
    None.

    """
    # Both polarities get identical styling; only the data, the panel
    # title, and the output filename differ.
    panels = [(pos_rem, 'Positive Mask', '_pos_mask.png'),
              (neg_rem, 'Negative Mask', '_neg_mask.png')]
    for mask, title, suffix in panels:
        fig, ax = plt.subplots(figsize=(6, 6))
        ax.pcolormesh(X, Y, mask, cmap='bwr', vmin=-1, vmax=1)
        ax.set_title(title, font="Times New Roman", fontsize=22,
                     fontweight='bold')
        # Central 400x400-pixel window of the cutout, in Mm.
        ax.set_xlim([xarr_Mm[200], xarr_Mm[600]])
        ax.set_ylim([yarr_Mm[200], yarr_Mm[600]])
        ax.set_xlabel('Horizontal Distance from Image Center [Mm]',
                      fontsize=17)
        ax.set_ylabel('Vertical Distance from Image Center [Mm]',
                      fontsize=17)
        ax.tick_params(labelsize=15)
        fig.savefig(str(flnum) + suffix)
    return None
def convolution_mask_plotting(X, Y, hmi_con_pos_c, hmi_con_neg_c, pil_mask_c,
                              xarr_Mm, yarr_Mm, flnum, xlim=(200, 600),
                              ylim=(200, 600)):
    """
    Plot masks, convolved with a Gaussian of width specified above.

    Parameters
    ----------
    X : list
        Meshgrid of x values for image coordinates.
    Y : list
        Meshgrid of y values for image coordinates.
    hmi_con_pos_c : list
        Positive HMI, convolved with Gaussian.
    hmi_con_neg_c : list
        Negative HMI, convolved with Gaussian.
    pil_mask_c : list
        PIL mask.
    xarr_Mm : list
        x-coordinates, in megameters.
    yarr_Mm : list
        y-coordinates, in megameters.
    flnum : int
        RibbonDB index of flare in question.
    xlim : tuple, optional
        Index limits of x-coordinates to plot. The default is (200, 600).
    ylim : tuple, optional
        Index limits of y-coordinates to plot. The default is (200, 600).

    Returns
    -------
    None.

    """
    # Positive-polarity convolved mask.
    fig1, ax1 = plt.subplots(figsize=(6, 6))
    ax1.pcolormesh(X, Y, hmi_con_pos_c, cmap='bwr', vmin=-1, vmax=1)
    ax1.set_title('Positive Mask Convolution', font="Times New Roman",
                  fontsize=22, fontweight='bold')
    # Bug fix: honor the caller-supplied xlim/ylim here too; this panel
    # previously hard-coded indices 200/600 and ignored the arguments.
    ax1.set_xlim([xarr_Mm[xlim[0]], xarr_Mm[xlim[1]]])
    ax1.set_ylim([yarr_Mm[ylim[0]], yarr_Mm[ylim[1]]])
    ax1.set_xlabel('Horizontal Distance from Image Center [Mm]', fontsize=17)
    ax1.set_ylabel('Vertical Distance from Image Center [Mm]', fontsize=17)
    ax1.tick_params(labelsize=15)
    # Negative-polarity convolved mask.
    fig2, ax2 = plt.subplots(figsize=(6, 6))
    ax2.tick_params(labelsize=15)
    ax2.pcolormesh(X, Y, hmi_con_neg_c, cmap='bwr', vmin=-1, vmax=1)
    ax2.set_xlabel('Horizontal Distance from Image Center [Mm]', fontsize=17)
    ax2.set_ylabel('Vertical Distance from Image Center [Mm]', fontsize=17)
    ax2.set_title('Negative Mask Convolution', font="Times New Roman",
                  fontsize=22, fontweight='bold')
    ax2.set_xlim([xarr_Mm[xlim[0]], xarr_Mm[xlim[1]]])
    ax2.set_ylim([yarr_Mm[ylim[0]], yarr_Mm[ylim[1]]])
    # Polarity inversion line mask.
    fig3, ax3 = plt.subplots()
    ax3.pcolormesh(X, Y, pil_mask_c)
    ax3.set_title('Polarity Inversion Line Mask', font="Times New Roman",
                  fontsize=22, fontweight='bold')
    ax3.tick_params(labelsize=15)
    ax3.set_xlim([xarr_Mm[xlim[0]], xarr_Mm[xlim[1]]])
    ax3.set_ylim([yarr_Mm[ylim[0]], yarr_Mm[ylim[1]]])
    fig1.savefig(str(flnum) + '_pos_conv_mask.png')
    fig2.savefig(str(flnum) + '_neg_conv_mask.png')
    fig3.savefig(str(flnum) + '_PIL_conv_mask.png')
    return None
def pil_poly_plot(X, Y, pil_mask_c, hmi_dat, ivs, dvs, conv_f, xarr_Mm,
                  yarr_Mm, flnum, xlim=[200, 600], ylim=[200, 600]):
    """
    Plot the PIL polynomial over the PIL mask, with HMI contours.

    Parameters
    ----------
    X : list
        Meshgrid of x values for image coordinates.
    Y : list
        Meshgrid of y values for image coordinates.
    pil_mask_c : list
        PIL mask.
    hmi_dat : list
        HMI data image for flare in question.
    ivs : list
        x-coordinates for PIL polynomial.
    dvs : list
        y-coordinates for PIL polynomial.
    conv_f : float
        Conversion factor from pixels to Mm.
    xarr_Mm : list
        x-coordinates, in megameters.
    yarr_Mm : list
        y-coordinates, in megameters.
    flnum : int
        RibbonDB index of flare in question.
    xlim : list, optional
        Limits of x-coordinates to plot. The default is [200, 600].
    ylim : list, optional
        Limits of y-coordinates to plot. The default is [200, 600].

    Returns
    -------
    None.

    """
    fig, ax = plt.subplots(figsize=(7, 10))
    # PIL mask as the background image.
    ax.pcolormesh(X, Y, pil_mask_c, cmap='hot')
    # Fitted PIL polynomial, shifted to image-center pixel coordinates
    # and scaled to Mm.
    ax.scatter((ivs - 400) * conv_f, (dvs - 400) * conv_f, color='c', s=1)
    # HMI field contours in kilogauss.
    plt.contour(X, Y, hmi_dat / 1000, levels=[-3, -1.8, -.6, .6, 1.8, 3],
                cmap='seismic')
    ax.set_xlim([xarr_Mm[xlim[0]], xarr_Mm[xlim[1]]])
    ax.set_ylim([yarr_Mm[ylim[0]], yarr_Mm[ylim[1]]])
    tick_locs = [-80, -60, -40, -20, 0, 20, 40, 60, 80]
    ax.set_xticks(tick_locs)
    ax.set_yticks(tick_locs)
    cbar = plt.colorbar(orientation='horizontal')
    label_size = 15
    ax.tick_params(labelsize=label_size)
    cbar.ax.tick_params(labelsize=label_size)
    ax.set_xlabel('Horizontal Distance from Image Center [Mm]', fontsize=15)
    ax.set_ylabel('Vertical Distance from Image Center [Mm]', fontsize=15)
    cbar.ax.set_xlabel('HMI Contours [kG]', font='Times New Roman',
                       fontsize=17, fontweight='bold')
    ax.set_title('PIL Mask and Polynomial', font='Times New Roman',
                 fontsize=25, fontweight='bold')
    fig.savefig(str(flnum) + '_pilpolyplot.png')
    return None
def ribbon_sep_plot(dist_pos, dist_neg, times, flnum, pltstrt, dt1600):
    """
    Plot ribbon separation values throughout flare.

    Parameters
    ----------
    dist_pos : list
        Separation values, positive ribbon.
    dist_neg : list
        Separation values, negative ribbon.
    times : list
        Times corresponding to each AIA time step.
    flnum : int
        Flare index from RibbonDB database.
    pltstrt : int
        Index for where to start displaying separation values.
    dt1600 : list
        Datetimes for the 1600 Angstrom data; dt1600[0] labels the
        time axis.

    Returns
    -------
    None.

    """
    # Time axis: 24 s per frame.
    timelab = range(0, 24 * len(times), 24)
    start_label = str(dt1600[0])
    fig, axes = plt.subplots(2, 1, figsize=(13, 15))
    titles = ['Positive Ribbon Separation', 'Negative Ribbon Separation']
    # The two panels are identical apart from the data and the title.
    for ax, series, title in zip(axes, [dist_pos, dist_neg], titles):
        ax.plot(timelab[pltstrt:], series[pltstrt:], '-+', c='red',
                markersize=10, label='median')
        ax.legend(fontsize=15)
        ax.grid()
        ax.set_xlabel('Time [s since ' + start_label[5:-7] + ']',
                      font='Times New Roman', fontsize=15)
        ax.set_ylabel('Cartesian Pixel Distance', font='Times New Roman',
                      fontsize=15)
        ax.set_title(title, font='Times New Roman', fontsize=25)
    fig.savefig(str(flnum) + 'sep_raw_plt.png')
    return None
def ribbon_elon_plot(lens_pos, lens_neg, times, pltstrt, flnum, dt1600):
    """
    Plot ribbon elongation values throughout flare.

    Parameters
    ----------
    lens_pos : list
        Elongation values, positive ribbon.
    lens_neg : list
        Elongation values, negative ribbon.
    times : list
        Times corresponding to each AIA time step.
    pltstrt : int
        Index for where to start displaying elongation values.
    flnum : int
        Flare index from RibbonDB database.
    dt1600 : list
        Array of datetimes for SDO/AIA 1600 Angstrom data; dt1600[0]
        labels the time axis.

    Returns
    -------
    None.

    """
    # Time axis: 24 s per frame (the cadence used throughout this module).
    timelab = range(0, 24 * len(times), 24)
    fig, ax1 = plt.subplots(figsize=(13, 7))
    # Plot elongation, positive ribbon
    ax1.plot(timelab[pltstrt:], lens_pos[pltstrt:], '-+', c='red',
             markersize=10, label='+ Ribbon')
    # Plot elongation, negative ribbon
    ax1.plot(timelab[pltstrt:], lens_neg[pltstrt:], '-+', c='blue',
             markersize=10, label='- Ribbon')
    ax1.legend(fontsize=15)
    ax1.grid()
    # s[5:-7] trims the datetime string down to "MM-DD HH:MM" for the label.
    s = str(dt1600[0])
    ax1.set_xlabel('Time [s since ' + s[5:-7] + ']', font='Times New Roman',
                   fontsize=17)
    ax1.set_ylabel('Cartesian Pixel Distance', font='Times New Roman',
                   fontsize=17)
    ax1.set_title('Ribbon Elongation', font='Times New Roman', fontsize=25)
    fig.savefig(str(flnum) + 'elon_raw_plt.png')
    return None
def elon_period_plot(dpos_len, dneg_len, times, times1600, lens_pos_Mm,
                     lens_neg_Mm, flnum, elonperiod_start_neg,
                     elonperiod_start_pos, elonperiod_end_neg,
                     elonperiod_end_pos, indstart=1):
    """
    Elongation plotting, with periods of extended elongation included.

    Parameters
    ----------
    dpos_len : list
        Time derivative of positive ribbon elongation (one element
        shorter than the elongation series).
    dneg_len : list
        Time derivative of negative ribbon elongation.
    times : list
        Times corresponding to each AIA time step.
    times1600 : list
        Times corresponding to selected flare from 1600 Angstrom data.
    lens_pos_Mm : list
        Elongation of positive ribbon for each time step, in Mm.
    lens_neg_Mm : list
        Elongation of negative ribbon for each time step, in Mm.
    flnum : int
        Flare index from RibbonDB database.
    elonperiod_start_neg : list
        Determined start times for elongation in negative ribbon.
    elonperiod_start_pos : list
        Determined start times for elongation in positive ribbon.
    elonperiod_end_neg : list
        Determined end times for elongation in negative ribbon.
    elonperiod_end_pos : list
        Determined end times for elongation in positive ribbon.
    indstart : int, optional
        Start index for plotting. The default is 1.

    Returns
    -------
    None.

    """
    # Time axis: 24 s per frame, one sample per AIA time step.
    timelab = np.linspace(0, 24 * len(times), len(times))
    fig, [ax1, ax2, ax3] = plt.subplots(3, 1, figsize=(13, 20))
    # Bottom panel: elongation rates for both ribbons.
    ax3.plot(timelab[indstart:-1], dpos_len[indstart:], '-+', c='red',
             markersize=10, label='+ Ribbon')
    ax3.plot(timelab[indstart:-1], dneg_len[indstart:], '-+', c='blue',
             markersize=10, label='- Ribbon')
    ax3.legend(fontsize=15)
    ax3.grid()
    # Slices carve the date and clock time out of the datetime string.
    s = str(times1600[0])
    ax3.set_xlabel('Time [s since ' + s[2:13] + ', ' + s[13:-5] + ']',
                   font='Times New Roman', fontsize=17)
    ax3.set_ylabel('Elongation Rate [Mm/sec]', font='Times New Roman',
                   fontsize=17)
    ax3.set_title('Ribbon Elongation Rate', font='Times New Roman',
                  fontsize=25)
    # Top two panels: elongation of each ribbon separately.
    ax1.plot(timelab[indstart:-1], lens_pos_Mm[indstart:-1], '-o', c='red',
             markersize=6, label='mean')
    ax2.plot(timelab[indstart:-1], lens_neg_Mm[indstart:-1], '-o', c='blue',
             markersize=6, label='mean')
    ax1.grid()
    ax1.set_ylabel('Distance [Mm]', font='Times New Roman', fontsize=17)
    ax1.set_title('Ribbon Elongation, Positive Ribbon',
                  font='Times New Roman', fontsize=25)
    ax2.set_ylabel('Distance [Mm]', font='Times New Roman', fontsize=17)
    ax2.set_title('Ribbon Elongation, Negative Ribbon',
                  font='Times New Roman', fontsize=25)
    ax2.grid()
    # Shade each detected period of extended elongation: green line at
    # the start frame, red line at the end frame.
    for i, j in zip(elonperiod_start_pos, elonperiod_end_pos):
        ax1.axvline(timelab[i], c='green')
        ax1.axvline(timelab[j], c='red')
        ax1.axvspan(timelab[i], timelab[j], alpha=0.5, color='pink')
    for k, l in zip(elonperiod_start_neg, elonperiod_end_neg):
        ax2.axvline(timelab[k], c='green')
        ax2.axvline(timelab[l], c='red')
        ax2.axvspan(timelab[k], timelab[l], alpha=0.5, color='cyan')
    fig.savefig(str(flnum) + 'elon_timing_plt.png')
    return None
def sep_period_plot(dpos_dist, dneg_dist, times, distpos_Mm, distneg_Mm, flnum,
                    sepperiod_start_pos, sepperiod_end_pos,
                    sepperiod_start_neg, sepperiod_end_neg, indstrt=1):
    """
    Separation plots, including periods of extended perpendicular motion.

    Parameters
    ----------
    dpos_dist : list
        Time derivative of positive ribbon separation.
    dneg_dist : list
        Time derivative of negative ribbon separation.
    times : arr
        Array of times for the flare, from AIA datafile.
    distpos_Mm : list
        Separation of positive ribbon for each time step, in Mm.
    distneg_Mm : list
        Separation of negative ribbon for each time step, in Mm.
    flnum : int
        Flare index from RibbonDB database.
    sepperiod_start_pos : list
        Start times for periods of extended separation, positive ribbon.
    sepperiod_end_pos : list
        End times for periods of extended separation, positive ribbon.
    sepperiod_start_neg : list
        Start times for periods of extended separation, negative ribbon.
    sepperiod_end_neg : list
        End times for periods of extended separation, negative ribbon.
    indstrt : int, optional
        Start index for plotting. The default is 1.

    Returns
    -------
    None.

    """
    # Bug fix: the docstring always promised a default of 1 for the start
    # index, but the parameter had no default; indstrt=1 supplies it.
    # Time axis: 24 s per frame.
    timelab = range(0, 24 * len(times), 24)
    fig, [ax1, ax2, ax3] = plt.subplots(3, 1, figsize=(13, 20))
    # Median-filter the separation rates to suppress single-frame spikes.
    ax3.plot(timelab[indstrt:-1], scipy.signal.medfilt(dpos_dist[indstrt:],
                                                       kernel_size=3), '-+',
             c='red', markersize=10, label='+ Ribbon')
    ax3.plot(timelab[indstrt:-1], scipy.signal.medfilt(dneg_dist[indstrt:],
                                                       kernel_size=3), '-+',
             c='blue', markersize=10, label='- Ribbon')
    ax3.legend(fontsize=15)
    ax3.grid()
    # Slices carve the date and clock time out of the datetime string.
    s = str(times[0])
    ax3.set_xlabel('Time [s since ' + s[2:12] + ', ' + s[13:-5] + ']',
                   font='Times New Roman', fontsize=17)
    ax3.set_ylabel('Separation Rate [Mm/sec]', font='Times New Roman',
                   fontsize=17)
    ax3.set_title('Ribbon Separation Rate', font='Times New Roman',
                  fontsize=25)
    # Top two panels: separation of each ribbon separately.
    ax1.plot(timelab[indstrt:-1], distpos_Mm[indstrt:-1], '-o', c='red',
             markersize=6, label='mean')
    ax2.plot(timelab[indstrt:-1], distneg_Mm[indstrt:-1], '-o', c='blue',
             markersize=6, label='mean')
    ax1.grid()
    ax1.set_ylabel('Distance [Mm]', font='Times New Roman', fontsize=17)
    ax1.set_title('Ribbon Separation, Positive Ribbon',
                  font='Times New Roman', fontsize=25)
    ax2.set_ylabel('Distance [Mm]', font='Times New Roman', fontsize=17)
    ax2.set_title('Ribbon Separation, Negative Ribbon',
                  font='Times New Roman', fontsize=25)
    ax2.grid()
    # Shade each detected period of extended separation.
    for i, j in zip(sepperiod_start_pos, sepperiod_end_pos):
        ax1.axvline(timelab[i], c='green')
        ax1.axvline(timelab[j], c='red')
        ax1.axvspan(timelab[i], timelab[j], alpha=0.5, color='pink')
    for k, l in zip(sepperiod_start_neg, sepperiod_end_neg):
        ax2.axvline(timelab[k], c='green')
        ax2.axvline(timelab[l], c='red')
        ax2.axvspan(timelab[k], timelab[l], alpha=0.5, color='cyan')
    fig.savefig(str(flnum) + 'sep_timing_plt.png')
    return None
def flux_rec_mod_process(sav_data, dt1600, pos1600, neg1600):
    """
    Process data in order to produce reconnection flux and rate arrays in
    later functions.

    Parameters
    ----------
    sav_data : AttrDict
        Dictionary containing all of the saved parameters in the HMI file.
    dt1600 : list
        1600 Angstrom datetime values for flare.
    pos1600 : list
        Summed pixel numbers in positive ribbon for each time step.
    neg1600 : list
        Summed pixel numbers in negative ribbon for each time step.

    Returns
    -------
    hmi : list
        HMI data from file.
    aia8_pos : list
        Cumulative positive ribbon pixels for AIA, c=8.
    aia8_neg : list
        Cumulative negative ribbon pixels for AIA, c=8.
    aia8_inst_pos : list
        Instantaneous positive ribbon pixels for AIA, c=8.
    aia8_inst_neg : list
        Instantaneous negative ribbon pixels for AIA, c=8.
    peak_pos : list
        Time of peak counts from 1600 Angstrom data in positive ribbon.
    peak_neg : list
        Time of peak counts from 1600 Angstrom data in negative ribbon.
    """
    # Process data for reconnection flux, reconnection rate,
    # rise phase exponential modeling
    hmi = sav_data.hmi
    aia8 = sav_data.pos8
    aia8_inst = sav_data.inst_pos8
    # Split the (time, y, x) ribbon masks by the sign of the underlying
    # HMI field.  The 2-D sign masks broadcast across the time axis,
    # replacing the previous per-pixel np.ndindex Python loops
    # (O(T*H*W) interpreted iterations) with vectorized numpy ops.
    pos_field = hmi > 0
    neg_field = hmi < 0
    aia8_pos = np.where((aia8 == 1) & pos_field, 1.0, 0.0)
    aia8_neg = np.where((aia8 == 1) & neg_field, 1.0, 0.0)
    aia8_inst_pos = np.where((aia8_inst == 1) & pos_field, 1.0, 0.0)
    aia8_inst_neg = np.where((aia8_inst == 1) & neg_field, 1.0, 0.0)
    # Times of peak 1600 Angstrom counts in each ribbon.
    peak_pos = dt1600[np.argmax(pos1600)]
    peak_neg = dt1600[np.argmax(neg1600)]
    return hmi, aia8_pos, aia8_neg, aia8_inst_pos, aia8_inst_neg, peak_pos, \
        peak_neg
def inst_flux_process(aia8_inst_pos, aia8_inst_neg, flnum, conv_f, hmi,
                      dt1600, peak_pos, peak_neg):
    """
    Find and plot instantaneous reconnection flux values.

    Parameters
    ----------
    aia8_inst_pos : list
        Instantaneous positive ribbon pixels for AIA, c=8.
    aia8_inst_neg : list
        Instantaneous negative ribbon pixels for AIA, c=8.
    flnum : int
        Flare index from RibbonDB database.
    conv_f : float
        Conversion factor from pixels to Mm.
    hmi : list
        HMI data for flare in question.
    dt1600 : list
        Datetime values for 1600 Angstrom data.
    peak_pos : list
        Time of peak counts from 1600 Angstrom data in positive ribbon.
    peak_neg : list
        Time of peak counts from 1600 Angstrom data in negative ribbon.

    Returns
    -------
    rec_flux_pos_inst : list
        Instantaneous reconnection flux, positive ribbon.
    rec_flux_neg_inst : list
        Instantaneous reconnection flux, negative ribbon.
    pos_pix_inst : list
        Instantaneous ribbon pixel counts, positive ribbon.
    neg_pix_inst : list
        Instantaneous ribbon pixel counts, negative ribbon.
    ds2 : float
        Conversion factor for 2D size of pixel.
    """
    conv_f_cm = conv_f * 1e6 * 100  # conversion factor in cm
    ds2 = conv_f_cm**2  # area of one pixel grid, in cm^2
    # Signed HMI field under each instantaneous mask, summed per frame.
    # Vectorized over the time axis: replaces the per-frame Python loop,
    # avoids summing each masked image twice, and drops the per-frame
    # area arrays that were computed but never used or returned.
    pos_pix_inst = np.sum(aia8_inst_pos * hmi, axis=(1, 2), dtype=float)
    neg_pix_inst = np.sum(aia8_inst_neg * hmi, axis=(1, 2), dtype=float)
    rec_flux_pos_inst = pos_pix_inst * ds2
    rec_flux_neg_inst = neg_pix_inst * ds2
    fig, ax = plt.subplots(figsize=(10, 10))
    ax.scatter(dt1600, rec_flux_pos_inst, c='red', label='+')
    ax.scatter(dt1600, rec_flux_neg_inst, c='blue', label='-')
    ax.grid()
    ax.set_xlabel('Time', font='Times New Roman', fontsize=20)
    # Mark the 1600 Angstrom light-curve peaks of each ribbon.
    ax.axvline(peak_pos, c='red', linestyle=':')
    ax.axvline(peak_neg, c='blue', linestyle='-.')
    ax.set_ylabel('Reconnection Flux [Mx]', font='Times New Roman',
                  fontsize=20)
    ax.set_title('Reconnection Flux', font='Times New Roman', fontsize=25)
    ax.legend()
    fig.savefig(str(flnum) + '_inst_flx.png')
    return rec_flux_pos_inst, rec_flux_neg_inst, pos_pix_inst, neg_pix_inst,\
        ds2
def cumul_flux_process(aia8_pos, aia8_neg, conv_f, flnum, peak_pos, peak_neg,
                       hmi, dt1600):
    """
    Determine reconnection flux from cumulative ribbon masks.

    Parameters
    ----------
    aia8_pos : list
        Cumulative positive ribbon pixels for AIA, c=8.
    aia8_neg : list
        Cumulative negative ribbon pixels for AIA, c=8.
    conv_f : float
        Conversion factor from pixels to Mm.
    flnum : int
        Flare index from RibbonDB database.
    peak_pos : list
        Time of peak counts from 1600 Angstrom data in positive ribbon.
    peak_neg : list
        Time of peak counts from 1600 Angstrom data in negative ribbon.
    hmi : list
        HMI data for flare in question.
    dt1600 : list
        Datetime values for 1600 Angstrom data.

    Returns
    -------
    rec_flux_pos : list
        Cumulative reconnection flux, positive ribbon.
    rec_flux_neg : list
        Cumulative reconnection flux, negative ribbon.
    pos_pix : list
        Ribbon counts of cumulative masks, positive ribbon.
    neg_pix : list
        Ribbon counts of cumulative masks, negative ribbon.
    pos_area_pix : list
        Positive cumulative ribbon area.
    neg_area_pix : list
        Negative cumulative ribbon area.
    ds2 : float
        Conversion factor for 2D size of pixel.
    pos_area : list
        Positive cumulative ribbon area, Mm.
    neg_area : list
        Negative cumulative ribbon area, Mm.
    """
    conv_f_cm = conv_f * 1e6 * 100  # conversion factor in cm
    ds2 = conv_f_cm**2  # area of one pixel grid, in cm^2
    # Per-frame mask areas and signed HMI field under each cumulative
    # mask, vectorized over the time axis (replaces the per-frame Python
    # loop and the redundant second sum of each masked image).
    pos_area_pix = np.sum(aia8_pos, axis=(1, 2), dtype=float)
    neg_area_pix = np.sum(aia8_neg, axis=(1, 2), dtype=float)
    pos_pix = np.sum(aia8_pos * hmi, axis=(1, 2), dtype=float)
    neg_pix = np.sum(aia8_neg * hmi, axis=(1, 2), dtype=float)
    rec_flux_pos = pos_pix * ds2
    rec_flux_neg = neg_pix * ds2
    # pos_area/neg_area alias the pixel-area arrays, matching the
    # original aliasing so callers that unpack both still agree.
    pos_area = pos_area_pix
    neg_area = neg_area_pix
    fig, ax = plt.subplots(figsize=(10, 10))
    ax.scatter(dt1600, rec_flux_pos, c='red', label='+')
    ax.scatter(dt1600, rec_flux_neg, c='blue', label='-')
    ax.grid()
    ax.set_xlabel('Time', font='Times New Roman', fontsize=20)
    # Mark the 1600 Angstrom light-curve peaks of each ribbon.
    ax.axvline(peak_pos, c='red', linestyle=':')
    ax.axvline(peak_neg, c='blue', linestyle='-.')
    ax.set_ylabel('Reconnection Flux [Mx]', font='Times New Roman',
                  fontsize=20)
    ax.set_title('Reconnection Flux', font='Times New Roman', fontsize=25)
    ax.legend()
    fig.savefig(str(flnum) + '_cumul_flx.png')
    return rec_flux_pos, rec_flux_neg, pos_pix, neg_pix, pos_area_pix, \
        neg_area_pix, ds2, pos_area, neg_area
def exp_curve_fit(exp_ind, exp_ind_area, rec_flux_pos, rec_flux_neg,
                  exponential, exponential_neg, pos_area, neg_area):
    """
    Fit exponential curves to the rise-phase flux and ribbon-area series.

    Parameters
    ----------
    exp_ind : int
        Index where the exponential fit of the flux curves stops (usually
        the light-curve peak).
    exp_ind_area : int
        Index where the exponential fit of the area curves stops.
    rec_flux_pos : list
        Cumulative reconnection flux, positive ribbon.
    rec_flux_neg : list
        Cumulative reconnection flux, negative ribbon.
    exponential : function
        Exponential model function.
    exponential_neg : function
        Negative exponential model function.
    pos_area : list
        Positive cumulative ribbon area, Mm.
    neg_area : list
        Negative cumulative ribbon area, Mm.

    Returns
    -------
    poptposflx, pcovposflx : list
        Fit parameters and covariance, positive reconnection flux.
    poptnegflx, pcovnegflx : list
        Fit parameters and covariance, negative reconnection flux.
    poptpos, poptneg : list
        Fit parameters, positive/negative ribbon area.
    pcovpos, pcovneg : list
        Covariance matrices, positive/negative ribbon area.
    rise_pos_flx, rise_neg_flx : list
        Rise-phase flux series actually fitted.
    """
    # Restrict each series to its rise phase before fitting.
    rise_pos_flx = rec_flux_pos[:exp_ind]
    rise_neg_flx = rec_flux_neg[:exp_ind]
    rise_pos_area = pos_area[:exp_ind_area]
    rise_neg_area = neg_area[:exp_ind_area]
    # Fit each (model, series) pair against its sample index.
    fit = scipy.optimize.curve_fit
    jobs = [(exponential, rise_pos_flx),
            (exponential_neg, rise_neg_flx),
            (exponential, rise_pos_area),
            (exponential, rise_neg_area)]
    results = [fit(model, range(len(series)), series)
               for model, series in jobs]
    (poptposflx, pcovposflx), (poptnegflx, pcovnegflx), \
        (poptpos, pcovpos), (poptneg, pcovneg) = results
    return poptposflx, pcovposflx, poptnegflx, pcovnegflx, poptpos, poptneg, \
        pcovpos, pcovneg, rise_pos_flx, rise_neg_flx
def exp_curve_plt(dt1600, rec_flux_pos, rec_flux_neg, rise_pos_flx,
                  rise_neg_flx, peak_pos, peak_neg, exp_ind, ds2, exponential,
                  exponential_neg, poptposflx, poptnegflx, flnum):
    """
    Plot the reconnection flux curves together with their fitted
    exponential rise-phase models.

    Parameters
    ----------
    dt1600 : list
        Datetime values for 1600 Angstrom data.
    rec_flux_pos : list
        Cumulative reconnection flux, positive ribbon.
    rec_flux_neg : list
        Cumulative reconnection flux, negative ribbon.
    rise_pos_flx : list
        Rise phase reconnection flux, positive ribbon.
    rise_neg_flx : list
        Rise phase reconnection flux, negative ribbon.
    peak_pos : list
        Time of peak counts from 1600 Angstrom data in positive ribbon.
    peak_neg : list
        Time of peak counts from 1600 Angstrom data in negative ribbon.
    exp_ind : int
        The index where to stop the exponential fitting.
    ds2 : float
        Conversion factor for 2D size of pixel.
    exponential : function
        Exponential function definition.
    exponential_neg : function
        Negative exponential function definition.
    poptposflx : list
        Fitting parameters, positive reconnection flux.
    poptnegflx : list
        Fitting parameters, negative reconnection flux.
    flnum : int
        Flare index from RibbonDB database.

    Returns
    -------
    None.

    """
    # Timestamps of the rise-phase samples the models were fitted on.
    rise_pos_time = dt1600[0:exp_ind]
    rise_neg_time = dt1600[0:exp_ind]
    fig, ax = plt.subplots(figsize=(10, 10))
    ax.scatter(dt1600, rec_flux_pos, c='red', label='+')
    ax.scatter(dt1600, rec_flux_neg, c='blue', label='-')
    ax.grid()
    ax.set_xlabel('Time', font='Times New Roman', fontsize=20)
    # Mark the 1600 Angstrom light-curve peaks of each ribbon.
    ax.axvline(peak_pos, c='red', linestyle=':')
    ax.axvline(peak_neg, c='blue', linestyle='-.')
    ax.set_ylabel('Reconnection Flux [Mx]',
                  font='Times New Roman', fontsize=20)
    ax.set_title('Reconnection Flux', font='Times New Roman', fontsize=25)
    # NOTE(review): the fitted model is rescaled by ds2 here — assumes the
    # fit was performed on pixel sums rather than fluxes; confirm against
    # the exp_curve_fit caller.
    ax.plot(rise_pos_time, ds2*exponential(range(0, len(rise_pos_flx)),
                                           *poptposflx), 'r-',
            label='Exponential Model, +')
    ax.plot(rise_neg_time, ds2 * exponential_neg(range(0, len(rise_neg_flx)),
                                                 *poptnegflx), 'b-',
            label='Exponential Model, -')
    # Vertical line marks where the exponential modeling region ends.
    ax.axvline(dt1600[exp_ind])
    ax.legend()
    fig.savefig(str(flnum) + '_recflux_model.png')
    # Now plot log-log of just the impulsive phase
    fig2, [ax1, ax2] = plt.subplots(2, 1, figsize=(10, 20))
    # Negative fluxes are sign-flipped before the log so both panels show
    # monotone rises.
    ax1.scatter((dt1600), np.log(rec_flux_pos), c='red')
    ax2.scatter((dt1600), -np.log(- rec_flux_neg), c='blue')
    ax1.grid()
    ax2.grid()
    ax1.set_xlabel('Time', font='Times New Roman', fontsize=20)
    ax2.set_xlabel('Time', font='Times New Roman', fontsize=20)
    ax1.plot((rise_pos_time),
             np.log(ds2 * exponential(range(0, len(rise_pos_flx)),
                                      *poptposflx)), 'r-',
             label='Exponential Model, +')
    ax2.plot((rise_neg_time),
             -np.log(-ds2 * exponential_neg(range(0, len(rise_neg_flx)),
                                            *poptnegflx)), 'b-',
             label='Exponential Model, -')
    ax1.set_ylabel(r'Rec. Flx [Mx]', font='Times New Roman', fontsize=20)
    ax1.set_title('Reconnection Flux, Impulsive Phase',
                  font='Times New Roman', fontsize=25)
    ax1.set_xlim(dt1600[0], dt1600[exp_ind])
    ax1.legend(fontsize=15)
    ax2.set_ylabel(r'Rec. Flx [Mx]', font='Times New Roman', fontsize=20)
    ax2.set_title('Reconnection Flux, Impulsive Phase',
                  font='Times New Roman', fontsize=25)
    ax2.set_xlim(dt1600[0], dt1600[exp_ind])
    ax2.legend(fontsize=15)
    fig2.savefig(str(flnum) + '_rec_impphase_model.png')
    return None
def rib_area_plt(dt1600, poptpos, poptneg, flnum, pos_area_pix, neg_area_pix,
                 peak_pos, peak_neg, exp_ind):
    """
    Plotting ribbon areas with fitted models.

    Parameters
    ----------
    dt1600 : list
        Datetime values for 1600 Angstrom data.
    poptpos : list
        Fitting parameters, positive ribbon area.
    poptneg : list
        Fitting parameters, negative ribbon area.
    flnum : int
        Flare index from RibbonDB database.
    pos_area_pix : list
        Positive cumulative ribbon area.
    neg_area_pix : list
        Negative cumulative ribbon area.
    peak_pos : list
        Time of peak counts from 1600 Angstrom data in positive ribbon.
    peak_neg : list
        Time of peak counts from 1600 Angstrom data in negative ribbon.
    exp_ind : int
        The index where to stop the exponential fitting.

    Returns
    -------
    None.

    """
    # NOTE(review): this function evaluates a module-level `exponential`
    # model function (it is NOT a parameter here, unlike exp_curve_fit) —
    # confirm it is defined at module scope.
    # Cumulative
    pos_area = pos_area_pix
    neg_area = neg_area_pix
    # Rise-phase samples that the models were fitted on.
    rise_pos_area = pos_area[0:exp_ind]
    rise_neg_area = neg_area[0:exp_ind]
    # Plot just the ribbon areas, c=8
    fig, ax = plt.subplots(figsize=(10, 10))
    ax.scatter(dt1600, pos_area, c='red', label='+')
    ax.scatter(dt1600, neg_area, c='blue', label='-')
    rise_pos_time = dt1600[0:exp_ind]
    rise_neg_time = dt1600[0:exp_ind]
    ax.grid()
    ax.set_xlabel('Time', font='Times New Roman', fontsize=20)
    # Mark the 1600 Angstrom light-curve peaks of each ribbon.
    ax.axvline(peak_pos, c='red', linestyle=':')
    ax.axvline(peak_neg, c='blue', linestyle='-.')
    ax.plot(rise_pos_time, exponential(range(0, len(rise_pos_area)), *poptpos),
            'r-', label='Exponential Model, +')
    ax.plot(rise_neg_time, exponential(range(0, len(rise_neg_area)), *poptneg),
            'b-', label='Exponential Model, -')
    ax.set_ylabel('Ribbon Area [cm^2]', font='Times New Roman', fontsize=20)
    ax.set_title('Ribbon Area', font='Times New Roman', fontsize=25)
    # If end of modeling region is before end of impulsive phase
    ax.axvline(dt1600[exp_ind])
    ax.legend()
    fig.savefig(str(flnum) + '_ribarea_model.png')
    # Just impulsive region, with log-log
    fig2, [ax1, ax2] = plt.subplots(2, 1, figsize=(10, 20))
    ax1.scatter((dt1600), np.log(pos_area), c='red')
    ax2.scatter((dt1600), np.log(neg_area), c='blue')
    ax1.grid()
    ax2.grid()
    ax1.set_xlabel('Time', font='Times New Roman', fontsize=20)
    ax2.set_xlabel('Time', font='Times New Roman', fontsize=20)
    ax1.plot((rise_pos_time), np.log(exponential(range(0, len(rise_pos_area)),
                                                 *poptpos)), 'r-',
             label='Exponential Model, +')
    ax2.plot((rise_neg_time), np.log(exponential(range(0, len(rise_neg_area)),
                                                 *poptneg)), 'b-',
             label='Exponential Model, -')
    ax1.set_ylabel('Ribbon Area [cm^2]', font='Times New Roman',
                   fontsize=20)
    ax1.set_title('Ribbon Area, Impulsive Phase', font='Times New Roman',
                  fontsize=25)
    ax1.set_xlim(dt1600[0], dt1600[exp_ind])
    ax1.legend(fontsize=15)
    ax2.set_ylabel('Ribbon Area [cm^2]', font='Times New Roman', fontsize=20)
    ax2.set_title('Ribbon Area, Impulsive Phase', font='Times New Roman',
                  fontsize=25)
    ax2.set_xlim(dt1600[0], dt1600[exp_ind])
    ax2.legend(fontsize=15)
    fig2.savefig(str(flnum) + '_impphase_model.png')
    return None
def rec_rate(rec_flux_pos, rec_flux_neg, dn1600, dt1600, peak_pos, peak_neg,
             flnum):
    """
    Compute and plot reconnection rates from cumulative reconnection flux.

    Parameters
    ----------
    rec_flux_pos : list
        Cumulative reconnection flux, positive ribbon.
    rec_flux_neg : list
        Cumulative reconnection flux, negative ribbon.
    dn1600 : list
        Datenum values for 1600 Angstrom data.
    dt1600 : list
        Datetime values for 1600 Angstrom data.
    peak_pos : list
        Time of peak counts from 1600 Angstrom data in positive ribbon.
    peak_neg : list
        Time of peak counts from 1600 Angstrom data in negative ribbon.
    flnum : int
        Flare index from RibbonDB database.

    Returns
    -------
    rec_rate_pos : list
        Reconnection rates for positive ribbon.
    rec_rate_neg : list
        Reconnection rates for negative ribbon.
    """
    # Cadence in datenum days; dividing the per-step flux change by it and
    # then by 3600 and 24 converts day^-1 to s^-1 (Mx/s).
    cadence_days = dn1600[1] - dn1600[0]
    rec_rate_pos = np.diff(rec_flux_pos) / cadence_days / 3600 / 24  # Mx/s
    rec_rate_neg = np.diff(rec_flux_neg) / cadence_days / 3600 / 24  # Mx/s

    # np.diff drops one sample, so rates align with dt1600[1:].
    fig, ax = plt.subplots(figsize=(10, 10))
    for rate, col, lab in ((rec_rate_pos, 'red', '+'),
                           (rec_rate_neg, 'blue', '-')):
        ax.scatter(dt1600[1:], rate, c=col, label=lab)
    ax.grid()
    ax.set_xlabel('Time', font='Times New Roman', fontsize=20)
    # Mark the 1600 A count peaks of each ribbon.
    ax.axvline(peak_pos, c='red', linestyle=':')
    ax.axvline(peak_neg, c='blue', linestyle='-.')
    ax.set_ylabel('Reconnection Rate [Mx/s]', font='Times New Roman',
                  fontsize=20)
    ax.set_title('Reconnection Rate', font='Times New Roman', fontsize=25)
    fig.savefig(str(flnum) + '_recrate.png')
    return rec_rate_pos, rec_rate_neg
def shear_ribbon_isolation(aia8_neg, aia8_pos, med_x, med_y,
                           pt_range=[-2, -1, 1, 2], poscrit=6, negcrit=6,
                           negylow=400, negyhi=0, negxlow=300,
                           negxhi=400, posylow=0, posyhi=0,
                           posxlow=350, posxhi=0):
    """
    Isolate the flare ribbons for the shear algorithm: despeckle each
    polarity mask, then crop it to a window around the ribbon median.

    Parameters
    ----------
    aia8_neg : arr
        Negative polarity mask cube from AIA (time, y, x), values 0/1.
    aia8_pos : arr
        Positive polarity mask cube from AIA (time, y, x), values 0/1.
    med_x : int
        Median of x dimension.
    med_y : int
        Median of y dimension.
    pt_range : arr, optional
        Offsets searched around each pixel. The default is [-2, -1, 1, 2].
        NOTE(review): mutable default argument; harmless here because it is
        never mutated, but a tuple would be safer.
    poscrit : int, optional
        Minimum number of lit neighbours a positive pixel needs to be kept.
        The default is 6.
    negcrit : int, optional
        Minimum number of lit neighbours a negative pixel needs to be kept.
        The default is 6.
    negylow, negyhi, negxlow, negxhi : int, optional
        Crop window for the negative ribbon.
        NOTE(review): all eight window parameters are unconditionally
        overwritten below from med_x/med_y, so the passed values and the
        defaults are dead — confirm whether caller-supplied windows should
        be honoured.
    posylow, posyhi, posxlow, posxhi : int, optional
        Crop window for the positive ribbon (also overwritten below).

    Returns
    -------
    aia_neg_rem_shear : arr
        Despeckled, cropped negative-ribbon mask cube.
    aia_pos_rem_shear : arr
        Despeckled, cropped positive-ribbon mask cube.
    """
    # Working buffers; note pos buffers take aia8_pos's shape and neg buffers
    # aia8_neg's shape (the cross-pairing below presumably relies on both
    # cubes having identical shapes -- TODO confirm).
    neg_rem_shear = np.zeros(np.shape(aia8_pos))
    pos_rem_shear = np.zeros(np.shape(aia8_neg))
    aia_pos_rem_shear = np.zeros(np.shape(aia8_pos))
    aia_neg_rem_shear = np.zeros(np.shape(aia8_neg))
    # Re-derive all crop windows as +/-100 px around the ribbon medians,
    # discarding the parameter values above.
    negylow = int(round(med_y) - 100)
    negyhi = int(round(med_y) + 100)
    negxlow = int(round(med_x) - 100)
    # NOTE(review): med_y appears in the x upper bounds here and for posxhi
    # below; med_x looks intended -- verify before relying on the crop.
    negxhi = int(round(med_y) + 100)
    posylow = int(round(med_y) - 100)
    posyhi = int(round(med_y) + 100)
    posxlow = int(round(med_x) - 100)
    posxhi = int(round(med_y) + 100)
    # Search through negative ribbon and remove spur points: keep a lit pixel
    # only if more than negcrit of its pt_range x pt_range neighbours are lit.
    for i in range(len(neg_rem_shear)):
        for j in range(len(neg_rem_shear[0])-2):
            for k in range(len(neg_rem_shear[1])-2):
                n = 0
                if aia8_neg[i, j, k] == 1:
                    for h in pt_range:
                        for m in pt_range:
                            if aia8_neg[i, j+h, k+m] == 1:
                                n = n + 1
                    if (n > negcrit):
                        neg_rem_shear[i, j, k] = 1
                    else:
                        neg_rem_shear[i, j, k] = 0
                else:
                    neg_rem_shear[i, j, k] = 0
    # Search through positive ribbon and remove spur points (same criterion
    # with poscrit).
    for i in range(len(pos_rem_shear)):
        for j in range(len(pos_rem_shear[0])-2):
            for k in range(len(pos_rem_shear[1])-2):
                n = 0
                if aia8_pos[i, j, k] == 1:
                    for h in pt_range:
                        for m in pt_range:
                            if aia8_pos[i, j+h, k+m] == 1:
                                n = n + 1
                    if (n > poscrit):
                        pos_rem_shear[i, j, k] = 1
                    else:
                        pos_rem_shear[i, j, k] = 0
                else:
                    pos_rem_shear[i, j, k] = 0
    # Limit ribbons to within desired range for analysis: copy only pixels
    # inside the crop windows into the output cubes.
    for i in range(len(aia8_neg)):
        for j in range(negylow, negyhi):
            for k in range(negxlow, negxhi):
                if neg_rem_shear[i, j, k] > 0:
                    aia_neg_rem_shear[i, j, k] = 1
    for i in range(len(aia8_pos)):
        for j in range(posylow, posyhi):
            for k in range(posxlow, posxhi):
                if pos_rem_shear[i, j, k] > 0:
                    aia_pos_rem_shear[i, j, k] = 1
    return aia_neg_rem_shear, aia_pos_rem_shear
# find left and rightmost pixels
def leftrightshear(aia_pos_rem_shear, aia_neg_rem_shear):
    """
    Find the leftmost and rightmost lit pixels of each ribbon frame.

    Parameters
    ----------
    aia_pos_rem_shear : arr
        Isolated positive-ribbon mask cube (time, y, x).
    aia_neg_rem_shear : arr
        Isolated negative-ribbon mask cube (time, y, x).

    Returns
    -------
    lr_coord_neg_shear : arr
        Per-frame [left_x, left_y, right_x, right_y] for the negative ribbon.
    lr_coord_pos_shear : arr
        Per-frame [left_x, left_y, right_x, right_y] for the positive ribbon.
    """
    def _frame_extremes(cube):
        # One row per frame: [left_x, left_y, right_x, right_y].
        coords = np.zeros([len(cube), 4])
        n_rows = len(cube[0])
        n_cols = len(cube[1])  # scan bound taken from frame 1, as originally
        for i in range(len(coords)):
            lx = ly = rx = ry = 0
            # Sweep columns left-to-right; first column containing a lit
            # pixel (topmost in that column) gives the left extreme.
            for k in range(n_cols):
                for j in range(n_rows):
                    if cube[i, j, k] == 1:
                        lx, ly = k, j
                        break
                if lx != 0:
                    break
            # Sweep right-to-left (stopping before column 0) for the right
            # extreme.
            for k in range(n_cols - 1, 0, -1):
                for j in range(n_rows):
                    if cube[i, j, k] == 1:
                        rx, ry = k, j
                        break
                if rx != 0:
                    break
            coords[i, :] = [lx, ly, rx, ry]
        return coords

    lr_coord_pos_shear = _frame_extremes(aia_pos_rem_shear)
    lr_coord_neg_shear = _frame_extremes(aia_neg_rem_shear)
    return lr_coord_neg_shear, lr_coord_pos_shear
def sheardists(lr_coord_pos_shear, lr_coord_neg_shear, ivs_sort, dvs_sort):
    """
    Find the PIL sample nearest to each ribbon extreme.

    Parameters
    ----------
    lr_coord_pos_shear : arr
        Per-frame [left_x, left_y, right_x, right_y] for the positive ribbon.
    lr_coord_neg_shear : arr
        Per-frame [left_x, left_y, right_x, right_y] for the negative ribbon.
    ivs_sort : arr
        Independent variable indices for PIL.
    dvs_sort : arr
        Dependent variable indices for PIL.

    Returns
    -------
    pil_right_near_pos_shear : arr
        [iv, dv, index] of PIL point nearest positive ribbon, right extreme.
    pil_left_near_pos_shear : arr
        [iv, dv, index] of PIL point nearest positive ribbon, left extreme.
    pil_right_near_neg_shear : arr
        [iv, dv, index] of PIL point nearest negative ribbon, right extreme.
    pil_left_near_neg_shear : arr
        [iv, dv, index] of PIL point nearest negative ribbon, left extreme.
    """
    pil_x = np.asarray(ivs_sort, dtype=float)
    pil_y = np.asarray(dvs_sort, dtype=float)

    def _nearest_pil(xs, ys):
        # Per frame: Euclidean distance from (xs[i], ys[i]) to every PIL
        # sample; np.argmin returns the first minimum, matching the original
        # np.where(d == d.min()) tie-breaking.
        out = np.zeros([len(xs), 3])
        for i in range(len(xs)):
            d = np.sqrt((xs[i] - pil_x) ** 2 + (ys[i] - pil_y) ** 2)
            ind = int(np.argmin(d))
            out[i, :] = [ivs_sort[ind], dvs_sort[ind], ind]
        return out

    # Columns 0,1 hold the left extreme; columns 2,3 the right extreme.
    pil_left_near_pos_shear = _nearest_pil(lr_coord_pos_shear[:, 0],
                                           lr_coord_pos_shear[:, 1])
    pil_right_near_pos_shear = _nearest_pil(lr_coord_pos_shear[:, 2],
                                            lr_coord_pos_shear[:, 3])
    pil_left_near_neg_shear = _nearest_pil(lr_coord_neg_shear[:, 0],
                                           lr_coord_neg_shear[:, 1])
    pil_right_near_neg_shear = _nearest_pil(lr_coord_neg_shear[:, 2],
                                            lr_coord_neg_shear[:, 3])
    return pil_right_near_pos_shear, pil_left_near_pos_shear,\
        pil_right_near_neg_shear, pil_left_near_neg_shear
def guidefieldlen(pil_right_near_pos_shear, pil_left_near_pos_shear,
                  pil_right_near_neg_shear, pil_left_near_neg_shear,
                  sortedpil):
    """
    Measure the PIL arc length between the ribbon extremes, the proxy for the
    guide-field (PIL-parallel) component of the magnetic field.

    Parameters
    ----------
    pil_right_near_pos_shear : arr
        [iv, dv, index] of PIL point nearest positive ribbon, right extreme.
    pil_left_near_pos_shear : arr
        [iv, dv, index] of PIL point nearest positive ribbon, left extreme.
    pil_right_near_neg_shear : arr
        [iv, dv, index] of PIL point nearest negative ribbon, right extreme.
    pil_left_near_neg_shear : arr
        [iv, dv, index] of PIL point nearest negative ribbon, left extreme.
    sortedpil : arr
        PIL coordinates sorted along the PIL.

    Returns
    -------
    guide_right : list
        PIL segment lengths between the right-hand ribbon extremes.
    guide_left : list
        PIL segment lengths between the left-hand ribbon extremes.
    """
    def _segment_length(pos_entry, neg_entry):
        # Column 2 stores the PIL index nearest the ribbon extreme; take the
        # PIL slice between the two indices.  The sign flip when the positive
        # index is not larger is kept from the original; curve length is
        # unaffected by negation.
        posin = int(pos_entry[2])
        negin = int(neg_entry[2])
        if posin > negin:
            seg = sortedpil[negin:posin, :]
        else:
            seg = -sortedpil[posin:negin, :]
        return curve_length(seg)

    guide_left = [_segment_length(pil_left_near_pos_shear[i],
                                  pil_left_near_neg_shear[i])
                  for i in range(len(pil_left_near_pos_shear))]
    guide_right = [_segment_length(pil_right_near_pos_shear[i],
                                   pil_right_near_neg_shear[i])
                   for i in range(len(pil_right_near_pos_shear))]
    return guide_right, guide_left
def guidefieldlen_alt(pil_right_near_pos_shear, pil_left_near_pos_shear,
                      pil_right_near_neg_shear, pil_left_near_neg_shear,
                      sortedpil, flag='posright'):
    """
    Measure the PIL arc length between opposite ends of the two ribbons —
    the alternative guide-field proxy.

    Parameters
    ----------
    pil_right_near_pos_shear : arr
        [iv, dv, index] of PIL point nearest positive ribbon, right extreme.
    pil_left_near_pos_shear : arr
        [iv, dv, index] of PIL point nearest positive ribbon, left extreme.
    pil_right_near_neg_shear : arr
        [iv, dv, index] of PIL point nearest negative ribbon, right extreme.
    pil_left_near_neg_shear : arr
        [iv, dv, index] of PIL point nearest negative ribbon, left extreme.
    sortedpil : arr
        PIL coordinates sorted along the PIL.
    flag : str, optional
        'posleft' pairs the positive left extreme with the negative right
        extreme; 'posright' (default) pairs positive right with negative
        left.  Any other value yields an empty result.

    Returns
    -------
    guide : list
        PIL segment lengths per frame.
    """
    def _segment_length(posin, negin):
        # Slice the PIL between the two nearest-point indices; sign flip on
        # the reversed case kept from the original (length is unaffected).
        posin = int(posin)
        negin = int(negin)
        if posin > negin:
            seg = sortedpil[negin:posin, :]
        else:
            seg = -sortedpil[posin:negin, :]
        return curve_length(seg)

    guide = []
    if flag == 'posleft':
        pos_src, neg_src = pil_left_near_pos_shear, pil_right_near_neg_shear
    elif flag == 'posright':
        pos_src, neg_src = pil_right_near_pos_shear, pil_left_near_neg_shear
    else:
        # Unknown flag: original fell through and returned the empty list.
        return guide
    for i in range(len(pos_src)):
        guide.append(_segment_length(pos_src[i, 2], neg_src[i, 2]))
    return guide
def gfrcalc(guide_left, guide_right, distneg_med, distpos_med):
    """
    Determine the guide field ratio (GFR): the PIL-parallel field length
    divided by the PIL-perpendicular ribbon separation, a proxy for the
    magnetic shear.

    Parameters
    ----------
    guide_left : arr
        Guide field strength (PIL-parallel length), left edge of ribbons.
    guide_right : arr
        Guide field strength (PIL-parallel length), right edge of ribbons.
    distneg_med : arr
        Distance from negative ribbon to PIL for each time step; from code
        for separation.
    distpos_med : arr
        Distance from positive ribbon to PIL for each time step; from code
        for separation.

    Returns
    -------
    left_gfr : arr
        Guide field ratio, left edge of ribbons.
    right_gfr : arr
        Guide field ratio, right edge of ribbons.
    """
    # Total perpendicular separation is the sum of both ribbons' distances
    # to the PIL; both edges share it.
    separation = distneg_med + distpos_med
    left_gfr = guide_left / separation
    # BUG FIX: the right-edge denominator previously summed distneg_med with
    # itself (distneg_med + distneg_med) instead of using the same
    # neg + pos separation as the left edge.
    right_gfr = guide_right / separation
    return left_gfr, right_gfr
def gfrcalc_alt(guide, distneg_med, distpos_med):
    """
    Determine the guide field ratio from a single guide-field length
    (alternative form, tracking opposite ends of the ribbons): PIL-parallel
    length divided by the total PIL-perpendicular ribbon separation, a proxy
    for the magnetic shear.

    Parameters
    ----------
    guide : arr
        Guide field strength (PIL-parallel segment length).
    distneg_med : arr
        Distance from negative ribbon to PIL for each time step; from code
        for separation.
    distpos_med : arr
        Distance from positive ribbon to PIL for each time step; from code
        for separation.

    Returns
    -------
    gfr : arr
        Guide field ratio proxy.
    """
    separation = distneg_med + distpos_med
    return guide / separation
def plt_gfr(times, right_gfr, left_gfr, flnum, dt1600, flag = 0):
    """
    Plot the guide field ratio proxy for the right and left ribbon edges.

    Parameters
    ----------
    times : arr
        Times corresponding to AIA 1600 Angstrom images.
    right_gfr : arr
        Guide field ratio, right edge of ribbons (or the single GFR series
        when flag == 1).
    left_gfr : arr
        Guide field ratio, left edge of ribbons (unused when flag == 1).
    flnum : int
        Flare number from RibbonDB database.
    dt1600 : list
        Datetime values for 1600 Angstrom data; dt1600[0] labels the x-axis.
    flag : int, optional
        0 plots both edges; 1 plots right_gfr alone as 'GFR proxy'.

    Returns
    -------
    None.
    """
    # One x value per frame at a 24 s step (assumes 24 s AIA 1600 cadence --
    # TODO confirm against the data loader).
    timelab = range(0, 24*len(times), 24)
    # Start-time string; s[5:-7] trims the year and sub-minute tail for the
    # axis label.
    s = str(dt1600[0])
    fig, ax = plt.subplots(figsize=(13, 7))
    if flag == 0:
        ax.plot(timelab, right_gfr, c='red', marker='o',
                label='GFR proxy, right')
        ax.plot(timelab, left_gfr, c='blue', marker='o', label='GFR proxy, left')
    elif flag == 1:
        ax.plot(timelab, right_gfr, c='blue', marker='o', label='GFR proxy')
    ax.set_xlabel('Time [s since '+s[5:-7]+']', font='Times New Roman',
                  fontsize=18)
    ax.set_ylabel('GFR Proxy', font='Times New Roman', fontsize=18)
    ax.set_title('Guide Field Ratio', font='Times New Roman', fontsize=20)
    ax.grid()
    ax.legend(fontsize=15)
    fig.savefig(str(flnum) + '_gfr.png')
    return None
def process_fermi(day, month, year, instrument, dayint, moint, yearint, low=0,
                  high=800, ylo=1e-3, yhi=10):
    """
    Process Fermi data in the 25-300 keV band from a .sav file generated
    with the Fermi OSPEX database.

    Parameters
    ----------
    day : str
        Day of flare, numerical string format (DD).
    month : str
        Month of flare, numerical string format (MM).
    year : str
        Year of flare, numerical string format (YYYY).
    instrument : str
        Corresponding Fermi instrument (n5, typically).
    dayint : int
        Day of flare, integer format.
    moint : int
        Month of flare, integer format.
    yearint : int
        Year of flare, integer format.
    low : int, optional
        Lower limit of range to search for flare in curve. The default is 0.
    high : int, optional
        Upper limit of range to search for flare in curve. The default is
        800.
    ylo, yhi : float, optional
        Unused here; kept for interface compatibility.

    Returns
    -------
    raw_hxr_sum : arr
        Raw 25-300 keV energy from Fermi.
    cspec_hxr_sum : arr
        Background-subtracted 25-300 keV energy from Fermi.
    fermitimes : arr
        Timestamps from Fermi data file.
    """
    directory = '/Users/owner/Desktop/CU_Research/Fermi_April_2022/'\
        'Fermi_Events_sav/'
    filename_cspec = directory + 'fermi_' + instrument + '_cspec_bkgd_' + day \
        + month + year + '.sav'
    cspec_dat = readsav(filename_cspec, python_dict='True')
    bksub_cspec = cspec_dat['lc_bksub'][0][0]
    raw_cspec = cspec_dat['lc_raw'][0][0]
    times = cspec_dat['time']
    energies = cspec_dat['ct_energy']
    # BUG FIX: the original combined the two band limits with the Python
    # `and` operator -- `np.where(e < 300.) and np.where(e > 25.)` -- which
    # just returns the second operand (both tuples are truthy), so the
    # upper 300 keV bound was silently dropped.  Intersect the limits
    # elementwise instead; np.where(mask) still returns a 1-tuple, which
    # preserves the downstream fancy-indexing shape and the axis=2 sums.
    hxrinds = np.where((energies > 25.) & (energies < 300.))
    cspec_hxr = bksub_cspec[:, hxrinds]
    raw_hxr = raw_cspec[:, hxrinds]
    cspec_hxr_sum = np.sum(cspec_hxr, axis=2)
    raw_hxr_sum = np.sum(raw_hxr, axis=2)
    # The file's time axis is shifted by the 1970->1979 epoch difference;
    # apply it, then correct the residual intra-day offset against the known
    # flare date.
    a = datetime.datetime(1970, 1, 1, 0, 0, 0)
    b = datetime.datetime(1979, 1, 1, 0, 0, 0)
    err1 = (b-a).total_seconds()
    timesadj1 = times + err1
    curr = datetime.datetime.fromtimestamp(min(timesadj1))
    corr = datetime.datetime(yearint, moint, dayint, 0, 0, 0)
    err2 = (corr-curr).seconds
    totsec = (b-a).total_seconds() + err2
    timesadj = times + totsec
    strtimes = []
    for i in timesadj:
        strtimes.append(datetime.datetime.fromtimestamp(i))
    # (Dead code removed: a throwaway timepkg.ctime call and a loop that
    # computed an unused `flag` counter over cspec_hxr_sum[low:high].)
    fermitimes = strtimes
    return raw_hxr_sum, cspec_hxr_sum, fermitimes
def plt_fourpanel(times, right_gfr, left_gfr, flnum, dt1600, time304,
                  filter_304, lens_pos_Mm, lens_neg_Mm, distpos_Mm, distneg_Mm,
                  dt304, timelab, conv_f,
                  elonperiod_start_pos, elonperiod_end_pos,
                  elonperiod_start_neg, elonperiod_end_neg,
                  sepperiod_start_pos, sepperiod_end_pos,
                  sepperiod_start_neg, sepperiod_end_neg, exp_ind,
                  s304, e304, pos1600, neg1600, dn1600, indstrt_elon,
                  indstrt_sep, fermitimes, raw_hxr_sum, cspec_hxr_sum,
                  gfr_trans, low_hxr=0, high_hxr=800, period_flag=0,
                  flag = 0):
    """
    Four-panel flare summary figure: (1) normalized HXR / 1600 A / 304 A
    light curves, (2) ribbon separation, (3) ribbon elongation, (4) guide
    field ratio proxy (shear).  Peak times of the Fermi 25-300 keV, 304 A
    and 1600 A curves are marked in every panel; with period_flag == 1 the
    detected separation/elongation periods are shaded as well.

    Parameters
    ----------
    times, dt1600, dn1600, time304, dt304, timelab : arr/list
        Time axes: AIA frame times, 1600 A datetimes/datenums, 304 A
        datenums/datetimes, and pre-built tick labels.
    right_gfr, left_gfr : arr
        Guide field ratio per edge (with flag == 1, right_gfr carries the
        single alternative GFR series and left_gfr is ignored).
    flnum : int
        Flare number; used in the output filename.
    filter_304, s304, e304 : arr, int, int
        Filtered 304 A curve and the AIA-index window of the flare in it.
    lens_pos_Mm, lens_neg_Mm : list
        Ribbon elongation (PIL-parallel extent) per time step, Mm.
    distpos_Mm, distneg_Mm : list
        Ribbon separation (PIL-perpendicular distance) per time step, Mm.
    conv_f : float
        Pixel-to-Mm conversion factor (not used in this plot).
    elonperiod_start_pos, elonperiod_end_pos, elonperiod_start_neg,
    elonperiod_end_neg : list
        Start/end indices of sustained elongation periods per polarity.
    sepperiod_start_pos, sepperiod_end_pos, sepperiod_start_neg,
    sepperiod_end_neg : list
        Start/end indices of sustained separation periods per polarity.
    exp_ind : int
        End index of the exponential-fit region (not used in this plot).
    pos1600, neg1600 : list
        Summed 1600 A pixel counts per ribbon.
    indstrt_elon, indstrt_sep : int
        First indices plotted for the elongation and separation panels.
    fermitimes, raw_hxr_sum, cspec_hxr_sum : arr
        Fermi timestamps, raw and background-subtracted 25-300 keV sums.
    gfr_trans : int
        Index ending the GFR transient; panel 4 starts there.
    low_hxr, high_hxr : int, optional
        Index window for the flare in the HXR curve (defaults 0, 800).
    period_flag : int, optional
        1 to shade separation/elongation periods in panels 2-3. Default 0.
    flag : int, optional
        0 averages right/left GFR; 1 uses right_gfr alone. Default 0.

    Returns
    -------
    None.
    """
    # Extrema over the flare window, used to scale each curve to [0, 1].
    min304 = min(filter_304[s304: e304])
    max304 = max(filter_304[s304: e304])
    minpos1600 = min(pos1600)
    maxpos1600 = max(pos1600)
    minneg1600 = min(neg1600)
    maxneg1600 = max(neg1600)
    # Normalize for light curve comparison
    norm304 = (filter_304 - min304) / (max304 - min304)
    normpos1600 = (pos1600 - minpos1600) / (maxpos1600 - minpos1600)
    normneg1600 = (neg1600 - minneg1600) / (maxneg1600 - minneg1600)
    if flag == 0:
        GFR = np.mean([right_gfr, left_gfr], axis=0)
    elif flag == 1:
        # if the alterative version of GFR, take only right_gfr (the input
        # should just be gfr)
        GFR = right_gfr
    # Peak locations, each mapped onto the dt1600 axis so all panels share
    # the same vertical markers.
    hxrmax0 = np.argmax(cspec_hxr_sum[low_hxr:high_hxr])
    print(hxrmax0)
    hxrmaxt = fermitimes[hxrmax0]
    print(hxrmaxt)
    hxrmax = find_nearest_ind(dt1600, hxrmaxt)
    max304_0 = np.nanargmax(filter_304)
    max304t = dt304[max304_0]
    # NOTE: rebinds max304 (previously the 304 A curve maximum) to an index.
    max304 = find_nearest_ind(dt1600, max304t)
    max1600pos = np.argmax(normpos1600)
    max1600neg = np.argmax(normneg1600)
    fig, [ax1, ax2, ax3, ax4] = plt.subplots(4, 1, figsize=(20, 35))
    # --- Panel 1: normalized light curves plus HXR on a twin axis ---
    lns1 = ax1.plot(dt1600, normpos1600, linewidth=1, color='red', marker='.',
                    linestyle='dashed',
                    label=r'Norm. 1600 $\AA$ Light Curve, +')
    lns2 = ax1.plot(dt1600, normneg1600, linewidth=1, color='blue', marker='.',
                    linestyle='dashed',
                    label=r'Norm. 1600 $\AA$ Light Curve, -')
    lns3 = ax1.plot(dt304, norm304, color='black', linewidth=1, marker='.',
                    linestyle='dashed',
                    label=r'Norm. 304 $\AA$ Light Curve')
    ax1_0 = ax1.twinx()
    # Median filter (width 3) smooths single-bin noise before the log plot.
    lns4 = ax1_0.plot(fermitimes[low_hxr:high_hxr],
                      np.log10(scipy.signal.medfilt(
                          cspec_hxr_sum[low_hxr:high_hxr, 0], 3)),
                      marker='.', linestyle='dashed',
                      label='Fermi Bkgd. Sub. Cts.')
    ax1.grid()
    # Combined legend across both y-axes.
    lns = lns1+lns2+lns3+lns4
    labs = [k.get_label() for k in lns]
    font = font_manager.FontProperties(family='Times New Roman',
                                       style='normal', size=16)
    ax1.legend(lns, labs, prop=font, fontsize=20, loc='lower center')
    ax1.set_ylabel('EUV Normalized Light Curves',
                   font='Times New Roman', fontsize=25)
    ax1_0.set_ylabel(
        'HXR Flux [$cts* s^{-1}* cm^{-2}* keV^{-1}$]', font='Times New Roman',
        fontsize=25)
    ax1.set_title('Chromospheric and HXR Light Curves',
                  font='Times New Roman', fontsize=30)
    ax1.set_xlim([dt1600[0], dt1600[-1]])
    # --- Panel 2: ribbon separation ---
    ax2.plot(dt1600[indstrt_sep:-1], distpos_Mm[indstrt_sep:-1], '-o', c='red',
             markersize=6)
    ax2.plot(dt1600[indstrt_sep:-1], distneg_Mm[indstrt_sep:-1], '-o',
             c='blue', markersize=6)
    ax2.set_ylabel(
        'Perpendicular PIL Distance [Mm]', font='Times New Roman', fontsize=25)
    ax2.set_title('Ribbon Separation',
                  font='Times New Roman', fontsize=30)
    ax2.set_xlim([dt1600[0], dt1600[-1]])
    ax2.axvline(dt1600[hxrmax], label='Max. HXR')
    ax2.axvline(dt1600[max304], color='black',
                label=r'Max. 304 $\AA$', linestyle='dashdot')
    ax2.axvline(dt1600[max1600pos], color='red',
                label=r'Max. pos. 1600 $\AA$', linestyle='dashed')
    ax2.axvline(dt1600[max1600neg], color='blue',
                label=r'Max. neg. 1600 $\AA$', linestyle='dotted')
    ax2.grid()
    font = font_manager.FontProperties(family='Times New Roman',
                                       style='normal', size=20)
    ax2.legend(prop=font, fontsize=20)
    # regions of separation/elongation make a little busy?
    if period_flag == 1:
        for i, j in zip(sepperiod_start_pos, sepperiod_end_pos):
            ax2.axvline(dt1600[i], c='green')
            ax2.axvline(dt1600[j], c='red')
            ax2.axvspan(dt1600[i], dt1600[j], alpha=0.5, color='pink')
        # NOTE(review): this separation panel shades elonperiod_*_neg rather
        # than sepperiod_*_neg -- looks like a copy-paste slip; confirm.
        for k, l in zip(elonperiod_start_neg, elonperiod_end_neg):
            ax2.axvline(dt1600[k], c='green')
            ax2.axvline(dt1600[l], c='red')
            ax2.axvspan(dt1600[k], dt1600[l], alpha=0.5, color='cyan')
    # --- Panel 3: ribbon elongation ---
    ax3.plot(dt1600[indstrt_elon:-1], lens_pos_Mm[indstrt_elon:-1], '-o',
             c='red', markersize=6)
    ax3.plot(dt1600[indstrt_elon:-1], lens_neg_Mm[indstrt_elon:-1], '-o',
             c='blue', markersize=6)
    ax3.grid()
    ax3.set_ylabel(
        'Parallel PIL Distance [Mm]', font='Times New Roman', fontsize=25)
    ax3.set_title('Ribbon Elongation',
                  font='Times New Roman', fontsize=30)
    ax3.set_xlim([dt1600[0], dt1600[-1]])
    ax3.axvline(dt1600[hxrmax], label='Max. HXR')
    ax3.axvline(dt1600[max304], color='black',
                label=r'Max. 304 $\AA$', linestyle='dashdot')
    ax3.axvline(dt1600[max1600pos], color='red',
                label=r'Max. pos. 1600 $\AA$', linestyle='dashed')
    ax3.axvline(dt1600[max1600neg], color='blue',
                label=r'Max. neg. 1600 $\AA$', linestyle='dotted')
    font = font_manager.FontProperties(family='Times New Roman',
                                       style='normal', size=20)
    ax3.legend(prop=font, fontsize=20)
    # definitely optional...
    if period_flag == 1:
        for i, j in zip(elonperiod_start_pos, elonperiod_end_pos):
            ax3.axvline(dt1600[i], c='green')
            ax3.axvline(dt1600[j], c='red')
            ax3.axvspan(dt1600[i], dt1600[j], alpha=0.5, color='pink')
        for k, l in zip(elonperiod_start_neg, elonperiod_end_neg):
            ax3.axvline(dt1600[k], c='green')
            ax3.axvline(dt1600[l], c='red')
            ax3.axvspan(dt1600[k], dt1600[l], alpha=0.5, color='cyan')
    # --- Panel 4: guide field ratio proxy, plotted after the transient ---
    ax4.plot(dt1600[gfr_trans:], GFR[gfr_trans:], c='green', marker='o')
    ax4.set_xlabel('Time [DD HH:MM]', font='Times New Roman',
                   fontsize=25)
    ax4.set_ylabel('GFR Proxy', font='Times New Roman', fontsize=25)
    ax4.set_title('Magnetic Shear', font='Times New Roman', fontsize=30)
    ax4.grid()
    ax4.legend(fontsize=15)
    ax4.set_xlim([dt1600[0], dt1600[-1]])
    ax4.axvline(dt1600[hxrmax], label='Max. HXR')
    ax4.axvline(dt1600[max304], color='black',
                label=r'Max. 304 $\AA$', linestyle='dashdot')
    ax4.axvline(dt1600[max1600pos], color='red',
                label=r'Max. pos. 1600 $\AA$', linestyle='dashed')
    ax4.axvline(dt1600[max1600neg], color='blue',
                label=r'Max. neg. 1600 $\AA$', linestyle='dotted')
    font = font_manager.FontProperties(family='Times New Roman',
                                       style='normal', size=20)
    ax4.legend(prop=font, fontsize=20)
    fig.savefig(str(flnum) + '_summary.png')
    return None
| [
"astropy.convolution.Gaussian2DKernel",
"astropy.convolution.convolve",
"numpy.load",
"numpy.sum",
"numpy.abs",
"numpy.amin",
"scipy.io.loadmat",
"numpy.polyfit",
"numpy.empty",
"numpy.argmax",
"numpy.isnan",
"matplotlib.animation.FuncAnimation",
"numpy.shape",
"matplotlib.pyplot.figure",
... | [((3126, 3139), 'numpy.zeros', 'np.zeros', (['(800)'], {}), '(800)\n', (3134, 3139), True, 'import numpy as np\n'), ((3154, 3167), 'numpy.zeros', 'np.zeros', (['(800)'], {}), '(800)\n', (3162, 3167), True, 'import numpy as np\n'), ((3278, 3307), 'numpy.meshgrid', 'np.meshgrid', (['xarr_Mm', 'yarr_Mm'], {}), '(xarr_Mm, yarr_Mm)\n', (3289, 3307), True, 'import numpy as np\n'), ((5301, 5318), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (5311, 5318), True, 'import numpy as np\n'), ((5515, 5538), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5536, 5538), False, 'import datetime\n'), ((5981, 5998), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (5991, 5998), True, 'import numpy as np\n'), ((8083, 8109), 'scipy.io.loadmat', 'sio.loadmat', (['bestflarefile'], {}), '(bestflarefile)\n', (8094, 8109), True, 'import scipy.io as sio\n'), ((8802, 8824), 'scipy.io.readsav', 'readsav', (['sav_fname_aia'], {}), '(sav_fname_aia)\n', (8809, 8824), False, 'from scipy.io import readsav\n'), ((9171, 9189), 'scipy.io.readsav', 'readsav', (['sav_fname'], {}), '(sav_fname)\n', (9178, 9189), False, 'from scipy.io import readsav\n'), ((17281, 17304), 'astropy.convolution.Gaussian2DKernel', 'Gaussian2DKernel', (['sigma'], {}), '(sigma)\n', (17297, 17304), False, 'from astropy.convolution import convolve, Gaussian2DKernel\n'), ((17325, 17356), 'astropy.convolution.convolve', 'convolve', (['pos_rem', 'gauss_kernel'], {}), '(pos_rem, gauss_kernel)\n', (17333, 17356), False, 'from astropy.convolution import convolve, Gaussian2DKernel\n'), ((17377, 17408), 'astropy.convolution.convolve', 'convolve', (['neg_rem', 'gauss_kernel'], {}), '(neg_rem, gauss_kernel)\n', (17385, 17408), False, 'from astropy.convolution import convolve, Gaussian2DKernel\n'), ((18717, 18746), 'numpy.where', 'np.where', (['(pil_mask_c > thresh)'], {}), '(pil_mask_c > thresh)\n', (18725, 18746), True, 'import numpy as np\n'), ((18860, 18882), 'numpy.linspace', 
'np.linspace', (['(0)', 'lx', 'lx'], {}), '(0, lx, lx)\n', (18871, 18882), True, 'import numpy as np\n'), ((18891, 18913), 'numpy.linspace', 'np.linspace', (['(0)', 'ly', 'ly'], {}), '(0, ly, ly)\n', (18902, 18913), True, 'import numpy as np\n'), ((18927, 18959), 'numpy.polyfit', 'np.polyfit', (['y[yc]', 'x[xc]', 'polyor'], {}), '(y[yc], x[xc], polyor)\n', (18937, 18959), True, 'import numpy as np\n'), ((30542, 30566), 'numpy.ndindex', 'np.ndindex', (['aia8_a.shape'], {}), '(aia8_a.shape)\n', (30552, 30566), True, 'import numpy as np\n'), ((35112, 35126), 'numpy.median', 'np.median', (['ivs'], {}), '(ivs)\n', (35121, 35126), True, 'import numpy as np\n'), ((35139, 35153), 'numpy.median', 'np.median', (['dvs'], {}), '(dvs)\n', (35148, 35153), True, 'import numpy as np\n'), ((52687, 52713), 'numpy.linspace', 'np.linspace', (['tst', 'tend', 'nt'], {}), '(tst, tend, nt)\n', (52698, 52713), True, 'import numpy as np\n'), ((52723, 52748), 'numpy.linspace', 'np.linspace', (['xlo', 'xhi', 'nx'], {}), '(xlo, xhi, nx)\n', (52734, 52748), True, 'import numpy as np\n'), ((52757, 52782), 'numpy.linspace', 'np.linspace', (['ylo', 'yhi', 'ny'], {}), '(ylo, yhi, ny)\n', (52768, 52782), True, 'import numpy as np\n'), ((52794, 52811), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (52805, 52811), True, 'import numpy as np\n'), ((52829, 52866), 'numpy.empty', 'np.empty', (['nt'], {'dtype': 'datetime.datetime'}), '(nt, dtype=datetime.datetime)\n', (52837, 52866), True, 'import numpy as np\n'), ((52881, 52893), 'numpy.empty', 'np.empty', (['nt'], {}), '(nt)\n', (52889, 52893), True, 'import numpy as np\n'), ((52907, 52919), 'numpy.empty', 'np.empty', (['nt'], {}), '(nt)\n', (52915, 52919), True, 'import numpy as np\n'), ((55111, 55130), 'numpy.median', 'np.median', (['curve304'], {}), '(curve304)\n', (55120, 55130), True, 'import numpy as np\n'), ((55144, 55160), 'numpy.std', 'np.std', (['curve304'], {}), '(curve304)\n', (55150, 55160), True, 'import numpy as np\n'), 
((55412, 55424), 'numpy.empty', 'np.empty', (['nt'], {}), '(nt)\n', (55420, 55424), True, 'import numpy as np\n'), ((56906, 56918), 'numpy.empty', 'np.empty', (['nt'], {}), '(nt)\n', (56914, 56918), True, 'import numpy as np\n'), ((56933, 56945), 'numpy.empty', 'np.empty', (['nt'], {}), '(nt)\n', (56941, 56945), True, 'import numpy as np\n'), ((58405, 58438), 'numpy.load', 'np.load', (['flnum'], {'allow_pickle': 'pick'}), '(flnum, allow_pickle=pick)\n', (58412, 58438), True, 'import numpy as np\n'), ((70473, 70501), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(25, 12)'}), '(figsize=(25, 12))\n', (70483, 70501), True, 'import matplotlib.pyplot as plt\n'), ((78625, 78653), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (78637, 78653), True, 'import matplotlib.pyplot as plt\n'), ((79173, 79201), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (79185, 79201), True, 'import matplotlib.pyplot as plt\n'), ((80854, 80882), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (80866, 80882), True, 'import matplotlib.pyplot as plt\n'), ((81372, 81400), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (81384, 81400), True, 'import matplotlib.pyplot as plt\n'), ((81906, 81920), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (81918, 81920), True, 'import matplotlib.pyplot as plt\n'), ((83479, 83508), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7, 10)'}), '(figsize=(7, 10))\n', (83491, 83508), True, 'import matplotlib.pyplot as plt\n'), ((83706, 83783), 'matplotlib.pyplot.contour', 'plt.contour', (['X', 'Y', 'hmik'], {'levels': '[-3, -1.8, -0.6, 0.6, 1.8, 3]', 'cmap': '"""seismic"""'}), "(X, Y, hmik, levels=[-3, -1.8, -0.6, 0.6, 1.8, 3], cmap='seismic')\n", (83717, 83783), True, 'import matplotlib.pyplot as plt\n'), ((84036, 84074), 
'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'orientation': '"""horizontal"""'}), "(orientation='horizontal')\n", (84048, 84074), True, 'import matplotlib.pyplot as plt\n'), ((85265, 85301), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(13, 15)'}), '(2, 1, figsize=(13, 15))\n', (85277, 85301), True, 'import matplotlib.pyplot as plt\n'), ((87092, 87121), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(13, 7)'}), '(figsize=(13, 7))\n', (87104, 87121), True, 'import matplotlib.pyplot as plt\n'), ((89358, 89394), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(13, 20)'}), '(3, 1, figsize=(13, 20))\n', (89370, 89394), True, 'import matplotlib.pyplot as plt\n'), ((92561, 92597), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(13, 20)'}), '(3, 1, figsize=(13, 20))\n', (92573, 92597), True, 'import matplotlib.pyplot as plt\n'), ((96096, 96118), 'numpy.ndindex', 'np.ndindex', (['aia8.shape'], {}), '(aia8.shape)\n', (96106, 96118), True, 'import numpy as np\n'), ((96316, 96338), 'numpy.ndindex', 'np.ndindex', (['aia8.shape'], {}), '(aia8.shape)\n', (96326, 96338), True, 'import numpy as np\n'), ((98960, 98990), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (98972, 98990), True, 'import matplotlib.pyplot as plt\n'), ((101945, 101975), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (101957, 101975), True, 'import matplotlib.pyplot as plt\n'), ((106821, 106851), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (106833, 106851), True, 'import matplotlib.pyplot as plt\n'), ((107860, 107896), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(10, 20)'}), '(2, 1, figsize=(10, 20))\n', (107872, 107896), True, 'import matplotlib.pyplot as plt\n'), ((110292, 110322), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (110304, 110322), True, 'import matplotlib.pyplot as plt\n'), ((111317, 111353), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(10, 20)'}), '(2, 1, figsize=(10, 20))\n', (111329, 111353), True, 'import matplotlib.pyplot as plt\n'), ((113728, 113758), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (113740, 113758), True, 'import matplotlib.pyplot as plt\n'), ((132023, 132052), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(13, 7)'}), '(figsize=(13, 7))\n', (132035, 132052), True, 'import matplotlib.pyplot as plt\n'), ((134151, 134194), 'scipy.io.readsav', 'readsav', (['filename_cspec'], {'python_dict': '"""True"""'}), "(filename_cspec, python_dict='True')\n", (134158, 134194), False, 'from scipy.io import readsav\n'), ((134557, 134582), 'numpy.sum', 'np.sum', (['cspec_hxr'], {'axis': '(2)'}), '(cspec_hxr, axis=2)\n', (134563, 134582), True, 'import numpy as np\n'), ((134601, 134624), 'numpy.sum', 'np.sum', (['raw_hxr'], {'axis': '(2)'}), '(raw_hxr, axis=2)\n', (134607, 134624), True, 'import numpy as np\n'), ((134634, 134672), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(1970, 1, 1, 0, 0, 0)\n', (134651, 134672), False, 'import datetime\n'), ((134681, 134719), 'datetime.datetime', 'datetime.datetime', (['(1979)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(1979, 1, 1, 0, 0, 0)\n', (134698, 134719), False, 'import datetime\n'), ((134855, 134905), 'datetime.datetime', 'datetime.datetime', (['yearint', 'moint', 'dayint', '(0)', '(0)', '(0)'], {}), '(yearint, moint, dayint, 0, 0, 0)\n', (134872, 134905), False, 'import datetime\n'), ((140614, 140656), 'numpy.argmax', 'np.argmax', (['cspec_hxr_sum[low_hxr:high_hxr]'], {}), '(cspec_hxr_sum[low_hxr:high_hxr])\n', (140623, 140656), True, 'import numpy as np\n'), ((140792, 
140816), 'numpy.nanargmax', 'np.nanargmax', (['filter_304'], {}), '(filter_304)\n', (140804, 140816), True, 'import numpy as np\n'), ((140912, 140934), 'numpy.argmax', 'np.argmax', (['normpos1600'], {}), '(normpos1600)\n', (140921, 140934), True, 'import numpy as np\n'), ((140952, 140974), 'numpy.argmax', 'np.argmax', (['normneg1600'], {}), '(normneg1600)\n', (140961, 140974), True, 'import numpy as np\n'), ((141008, 141044), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'figsize': '(20, 35)'}), '(4, 1, figsize=(20, 35))\n', (141020, 141044), True, 'import matplotlib.pyplot as plt\n'), ((141982, 142060), 'matplotlib.font_manager.FontProperties', 'font_manager.FontProperties', ([], {'family': '"""Times New Roman"""', 'style': '"""normal"""', 'size': '(16)'}), "(family='Times New Roman', style='normal', size=16)\n", (142009, 142060), False, 'from matplotlib import font_manager\n'), ((143425, 143503), 'matplotlib.font_manager.FontProperties', 'font_manager.FontProperties', ([], {'family': '"""Times New Roman"""', 'style': '"""normal"""', 'size': '(20)'}), "(family='Times New Roman', style='normal', size=20)\n", (143452, 143503), False, 'from matplotlib import font_manager\n'), ((144998, 145076), 'matplotlib.font_manager.FontProperties', 'font_manager.FontProperties', ([], {'family': '"""Times New Roman"""', 'style': '"""normal"""', 'size': '(20)'}), "(family='Times New Roman', style='normal', size=20)\n", (145025, 145076), False, 'from matplotlib import font_manager\n'), ((146467, 146545), 'matplotlib.font_manager.FontProperties', 'font_manager.FontProperties', ([], {'family': '"""Times New Roman"""', 'style': '"""normal"""', 'size': '(20)'}), "(family='Times New Roman', style='normal', size=20)\n", (146494, 146545), False, 'from matplotlib import font_manager\n'), ((3722, 3735), 'numpy.exp', 'np.exp', (['(b * x)'], {}), '(b * x)\n', (3728, 3735), True, 'import numpy as np\n'), ((4123, 4136), 'numpy.exp', 'np.exp', (['(b * x)'], {}), '(b * x)\n', 
(4129, 4136), True, 'import numpy as np\n'), ((4673, 4701), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(366)'}), '(days=366)\n', (4691, 4701), False, 'import datetime\n'), ((6022, 6043), 'numpy.abs', 'np.abs', (['(array - value)'], {}), '(array - value)\n', (6028, 6043), True, 'import numpy as np\n'), ((7948, 7969), 'os.path.dirname', 'dirname', (['sio.__file__'], {}), '(sio.__file__)\n', (7955, 7969), False, 'from os.path import dirname, join as pjoin\n'), ((10573, 10593), 'numpy.shape', 'np.shape', (['aia_cumul8'], {}), '(aia_cumul8)\n', (10581, 10593), True, 'import numpy as np\n'), ((10626, 10646), 'numpy.shape', 'np.shape', (['aia_cumul8'], {}), '(aia_cumul8)\n', (10634, 10646), True, 'import numpy as np\n'), ((10751, 10782), 'numpy.squeeze', 'np.squeeze', (['aia_cumul8[i, :, :]'], {}), '(aia_cumul8[i, :, :])\n', (10761, 10782), True, 'import numpy as np\n'), ((11323, 11342), 'numpy.shape', 'np.shape', (['aia_step8'], {}), '(aia_step8)\n', (11331, 11342), True, 'import numpy as np\n'), ((11374, 11393), 'numpy.shape', 'np.shape', (['aia_step8'], {}), '(aia_step8)\n', (11382, 11393), True, 'import numpy as np\n'), ((11500, 11530), 'numpy.squeeze', 'np.squeeze', (['aia_step8[i, :, :]'], {}), '(aia_step8[i, :, :])\n', (11510, 11530), True, 'import numpy as np\n'), ((12122, 12139), 'numpy.shape', 'np.shape', (['hmi_dat'], {}), '(hmi_dat)\n', (12130, 12139), True, 'import numpy as np\n'), ((12171, 12188), 'numpy.shape', 'np.shape', (['hmi_dat'], {}), '(hmi_dat)\n', (12179, 12188), True, 'import numpy as np\n'), ((15114, 15138), 'numpy.shape', 'np.shape', (['hmi_neg_mask_c'], {}), '(hmi_neg_mask_c)\n', (15122, 15138), True, 'import numpy as np\n'), ((15163, 15187), 'numpy.shape', 'np.shape', (['hmi_pos_mask_c'], {}), '(hmi_pos_mask_c)\n', (15171, 15187), True, 'import numpy as np\n'), ((18636, 18655), 'numpy.amax', 'np.amax', (['pil_mask_c'], {}), '(pil_mask_c)\n', (18643, 18655), True, 'import numpy as np\n'), ((19689, 19703), 'numpy.shape', 
'np.shape', (['aia8'], {}), '(aia8)\n', (19697, 19703), True, 'import numpy as np\n'), ((19729, 19743), 'numpy.shape', 'np.shape', (['aia8'], {}), '(aia8)\n', (19737, 19743), True, 'import numpy as np\n'), ((22305, 22323), 'numpy.shape', 'np.shape', (['aia8_pos'], {}), '(aia8_pos)\n', (22313, 22323), True, 'import numpy as np\n'), ((22349, 22367), 'numpy.shape', 'np.shape', (['aia8_neg'], {}), '(aia8_neg)\n', (22357, 22367), True, 'import numpy as np\n'), ((25959, 25977), 'numpy.shape', 'np.shape', (['aia8_pos'], {}), '(aia8_pos)\n', (25967, 25977), True, 'import numpy as np\n'), ((26003, 26021), 'numpy.shape', 'np.shape', (['aia8_neg'], {}), '(aia8_neg)\n', (26011, 26021), True, 'import numpy as np\n'), ((28989, 29012), 'numpy.where', 'np.where', (['(posframe == 1)'], {}), '(posframe == 1)\n', (28997, 29012), True, 'import numpy as np\n'), ((29034, 29057), 'numpy.where', 'np.where', (['(negframe == 1)'], {}), '(negframe == 1)\n', (29042, 29057), True, 'import numpy as np\n'), ((30367, 30383), 'numpy.shape', 'np.shape', (['aia8_a'], {}), '(aia8_a)\n', (30375, 30383), True, 'import numpy as np\n'), ((30411, 30427), 'numpy.shape', 'np.shape', (['aia8_a'], {}), '(aia8_a)\n', (30419, 30427), True, 'import numpy as np\n'), ((32948, 32968), 'numpy.shape', 'np.shape', (['aia8_pos_2'], {}), '(aia8_pos_2)\n', (32956, 32968), True, 'import numpy as np\n'), ((32994, 33014), 'numpy.shape', 'np.shape', (['aia8_neg_2'], {}), '(aia8_neg_2)\n', (33002, 33014), True, 'import numpy as np\n'), ((36879, 36899), 'numpy.shape', 'np.shape', (['aia8_pos_2'], {}), '(aia8_pos_2)\n', (36887, 36899), True, 'import numpy as np\n'), ((36928, 36948), 'numpy.shape', 'np.shape', (['aia8_neg_2'], {}), '(aia8_neg_2)\n', (36936, 36948), True, 'import numpy as np\n'), ((37661, 37682), 'numpy.shape', 'np.shape', (['aia_pos_rem'], {}), '(aia_pos_rem)\n', (37669, 37682), True, 'import numpy as np\n'), ((37709, 37730), 'numpy.shape', 'np.shape', (['aia_pos_rem'], {}), '(aia_pos_rem)\n', (37717, 37730), 
True, 'import numpy as np\n'), ((38107, 38128), 'numpy.shape', 'np.shape', (['aia_neg_rem'], {}), '(aia_neg_rem)\n', (38115, 38128), True, 'import numpy as np\n'), ((38155, 38176), 'numpy.shape', 'np.shape', (['aia_neg_rem'], {}), '(aia_neg_rem)\n', (38163, 38176), True, 'import numpy as np\n'), ((41528, 41557), 'numpy.vstack', 'np.vstack', (['(ivs_lim, dvs_lim)'], {}), '((ivs_lim, dvs_lim))\n', (41537, 41557), True, 'import numpy as np\n'), ((48910, 48928), 'numpy.shape', 'np.shape', (['lens_pos'], {}), '(lens_pos)\n', (48918, 48928), True, 'import numpy as np\n'), ((48957, 48975), 'numpy.shape', 'np.shape', (['lens_neg'], {}), '(lens_neg)\n', (48965, 48975), True, 'import numpy as np\n'), ((49003, 49021), 'numpy.shape', 'np.shape', (['dist_pos'], {}), '(dist_pos)\n', (49011, 49021), True, 'import numpy as np\n'), ((49049, 49067), 'numpy.shape', 'np.shape', (['dist_neg'], {}), '(dist_neg)\n', (49057, 49067), True, 'import numpy as np\n'), ((49303, 49323), 'numpy.diff', 'np.diff', (['lens_neg_Mm'], {}), '(lens_neg_Mm)\n', (49310, 49323), True, 'import numpy as np\n'), ((49345, 49365), 'numpy.diff', 'np.diff', (['lens_pos_Mm'], {}), '(lens_pos_Mm)\n', (49352, 49365), True, 'import numpy as np\n'), ((49388, 49407), 'numpy.diff', 'np.diff', (['distneg_Mm'], {}), '(distneg_Mm)\n', (49395, 49407), True, 'import numpy as np\n'), ((49430, 49449), 'numpy.diff', 'np.diff', (['distpos_Mm'], {}), '(distpos_Mm)\n', (49437, 49449), True, 'import numpy as np\n'), ((53008, 53073), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['timechoi[2:21]', '"""20%y-%m-%dT%H:%M:%S"""'], {}), "(timechoi[2:21], '20%y-%m-%dT%H:%M:%S')\n", (53034, 53073), False, 'import datetime\n'), ((53378, 53399), 'scipy.io.loadmat', 'sio.loadmat', (['file1242'], {}), '(file1242)\n', (53389, 53399), True, 'import scipy.io as sio\n'), ((54757, 54822), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['timechoi[2:21]', '"""20%y-%m-%dT%H:%M:%S"""'], {}), "(timechoi[2:21], 
'20%y-%m-%dT%H:%M:%S')\n", (54783, 54822), False, 'import datetime\n'), ((56556, 56574), 'numpy.shape', 'np.shape', (['aia8_pos'], {}), '(aia8_pos)\n', (56564, 56574), True, 'import numpy as np\n'), ((56598, 56616), 'numpy.shape', 'np.shape', (['aia8_neg'], {}), '(aia8_neg)\n', (56606, 56616), True, 'import numpy as np\n'), ((65978, 65998), 'numpy.isnan', 'np.isnan', (['time304[i]'], {}), '(time304[i])\n', (65986, 65998), True, 'import numpy as np\n'), ((71366, 71391), 'numpy.log10', 'np.log10', (['aiadat[0, :, :]'], {}), '(aiadat[0, :, :])\n', (71374, 71391), True, 'import numpy as np\n'), ((74803, 74832), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (74823, 74832), True, 'import matplotlib.dates as mdates\n'), ((95862, 95876), 'numpy.shape', 'np.shape', (['aia8'], {}), '(aia8)\n', (95870, 95876), True, 'import numpy as np\n'), ((95902, 95916), 'numpy.shape', 'np.shape', (['aia8'], {}), '(aia8)\n', (95910, 95916), True, 'import numpy as np\n'), ((95947, 95966), 'numpy.shape', 'np.shape', (['aia8_inst'], {}), '(aia8_inst)\n', (95955, 95966), True, 'import numpy as np\n'), ((95997, 96016), 'numpy.shape', 'np.shape', (['aia8_inst'], {}), '(aia8_inst)\n', (96005, 96016), True, 'import numpy as np\n'), ((96559, 96577), 'numpy.argmax', 'np.argmax', (['pos1600'], {}), '(pos1600)\n', (96568, 96577), True, 'import numpy as np\n'), ((96601, 96619), 'numpy.argmax', 'np.argmax', (['neg1600'], {}), '(neg1600)\n', (96610, 96619), True, 'import numpy as np\n'), ((98560, 98581), 'numpy.sum', 'np.sum', (['pos_mask_inst'], {}), '(pos_mask_inst)\n', (98566, 98581), True, 'import numpy as np\n'), ((98613, 98634), 'numpy.sum', 'np.sum', (['neg_mask_inst'], {}), '(neg_mask_inst)\n', (98619, 98634), True, 'import numpy as np\n'), ((98761, 98781), 'numpy.sum', 'np.sum', (['hmi_pos_inst'], {}), '(hmi_pos_inst)\n', (98767, 98781), True, 'import numpy as np\n'), ((98808, 98828), 'numpy.sum', 'np.sum', (['hmi_neg_inst'], {}), 
'(hmi_neg_inst)\n', (98814, 98828), True, 'import numpy as np\n'), ((101620, 101636), 'numpy.sum', 'np.sum', (['pos_mask'], {}), '(pos_mask)\n', (101626, 101636), True, 'import numpy as np\n'), ((101663, 101679), 'numpy.sum', 'np.sum', (['neg_mask'], {}), '(neg_mask)\n', (101669, 101679), True, 'import numpy as np\n'), ((101781, 101796), 'numpy.sum', 'np.sum', (['hmi_pos'], {}), '(hmi_pos)\n', (101787, 101796), True, 'import numpy as np\n'), ((101818, 101833), 'numpy.sum', 'np.sum', (['hmi_neg'], {}), '(hmi_neg)\n', (101824, 101833), True, 'import numpy as np\n'), ((107923, 107943), 'numpy.log', 'np.log', (['rec_flux_pos'], {}), '(rec_flux_pos)\n', (107929, 107943), True, 'import numpy as np\n'), ((111380, 111396), 'numpy.log', 'np.log', (['pos_area'], {}), '(pos_area)\n', (111386, 111396), True, 'import numpy as np\n'), ((111433, 111449), 'numpy.log', 'np.log', (['neg_area'], {}), '(neg_area)\n', (111439, 111449), True, 'import numpy as np\n'), ((116627, 116645), 'numpy.shape', 'np.shape', (['aia8_pos'], {}), '(aia8_pos)\n', (116635, 116645), True, 'import numpy as np\n'), ((116676, 116694), 'numpy.shape', 'np.shape', (['aia8_neg'], {}), '(aia8_neg)\n', (116684, 116694), True, 'import numpy as np\n'), ((116729, 116747), 'numpy.shape', 'np.shape', (['aia8_pos'], {}), '(aia8_pos)\n', (116737, 116747), True, 'import numpy as np\n'), ((116782, 116800), 'numpy.shape', 'np.shape', (['aia8_neg'], {}), '(aia8_neg)\n', (116790, 116800), True, 'import numpy as np\n'), ((134367, 134407), 'numpy.where', 'np.where', (["(cspec_dat['ct_energy'] < 300.0)"], {}), "(cspec_dat['ct_energy'] < 300.0)\n", (134375, 134407), True, 'import numpy as np\n'), ((134421, 134460), 'numpy.where', 'np.where', (["(cspec_dat['ct_energy'] > 25.0)"], {}), "(cspec_dat['ct_energy'] > 25.0)\n", (134429, 134460), True, 'import numpy as np\n'), ((140410, 140448), 'numpy.mean', 'np.mean', (['[right_gfr, left_gfr]'], {'axis': '(0)'}), '([right_gfr, left_gfr], axis=0)\n', (140417, 140448), True, 'import 
numpy as np\n'), ((4252, 4297), 'numpy.sum', 'np.sum', (['((curve[:-1] - curve[1:]) ** 2)'], {'axis': '(1)'}), '((curve[:-1] - curve[1:]) ** 2, axis=1)\n', (4258, 4297), True, 'import numpy as np\n'), ((4641, 4670), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'days'}), '(days=days)\n', (4659, 4670), False, 'import datetime\n'), ((5330, 5351), 'numpy.abs', 'np.abs', (['(array - value)'], {}), '(array - value)\n', (5336, 5351), True, 'import numpy as np\n'), ((29258, 29277), 'scipy.spatial.distance.cdist', 'cdist', (['pos_ops', 'pil'], {}), '(pos_ops, pil)\n', (29263, 29277), False, 'from scipy.spatial.distance import cdist\n'), ((29354, 29377), 'numpy.amin', 'np.amin', (['allpos'], {'axis': '(1)'}), '(allpos, axis=1)\n', (29361, 29377), True, 'import numpy as np\n'), ((29450, 29471), 'numpy.median', 'np.median', (['allpos_min'], {}), '(allpos_min)\n', (29459, 29471), True, 'import numpy as np\n'), ((29502, 29521), 'numpy.mean', 'np.mean', (['allpos_min'], {}), '(allpos_min)\n', (29509, 29521), True, 'import numpy as np\n'), ((29613, 29632), 'scipy.spatial.distance.cdist', 'cdist', (['neg_ops', 'pil'], {}), '(neg_ops, pil)\n', (29618, 29632), False, 'from scipy.spatial.distance import cdist\n'), ((29658, 29681), 'numpy.amin', 'np.amin', (['allneg'], {'axis': '(1)'}), '(allneg, axis=1)\n', (29665, 29681), True, 'import numpy as np\n'), ((29711, 29732), 'numpy.median', 'np.median', (['allneg_min'], {}), '(allneg_min)\n', (29720, 29732), True, 'import numpy as np\n'), ((29763, 29782), 'numpy.mean', 'np.mean', (['allneg_min'], {}), '(allneg_min)\n', (29770, 29782), True, 'import numpy as np\n'), ((43849, 43915), 'numpy.sqrt', 'np.sqrt', (['((left_x - ivs_sort[j]) ** 2 + (left_y - dvs_sort[j]) ** 2)'], {}), '((left_x - ivs_sort[j]) ** 2 + (left_y - dvs_sort[j]) ** 2)\n', (43856, 43915), True, 'import numpy as np\n'), ((43969, 44037), 'numpy.sqrt', 'np.sqrt', (['((right_x - ivs_sort[j]) ** 2 + (right_y - dvs_sort[j]) ** 2)'], {}), '((right_x - ivs_sort[j]) ** 
2 + (right_y - dvs_sort[j]) ** 2)\n', (43976, 44037), True, 'import numpy as np\n'), ((44865, 44931), 'numpy.sqrt', 'np.sqrt', (['((left_x - ivs_sort[j]) ** 2 + (left_y - dvs_sort[j]) ** 2)'], {}), '((left_x - ivs_sort[j]) ** 2 + (left_y - dvs_sort[j]) ** 2)\n', (44872, 44931), True, 'import numpy as np\n'), ((45040, 45108), 'numpy.sqrt', 'np.sqrt', (['((right_x - ivs_sort[j]) ** 2 + (right_y - dvs_sort[j]) ** 2)'], {}), '((right_x - ivs_sort[j]) ** 2 + (right_y - dvs_sort[j]) ** 2)\n', (45047, 45108), True, 'import numpy as np\n'), ((54055, 54086), 'numpy.isclose', 'np.isclose', (['eventindices', 'flnum'], {}), '(eventindices, flnum)\n', (54065, 54086), True, 'import numpy as np\n'), ((72985, 73010), 'numpy.log10', 'np.log10', (['aiadat[t, :, :]'], {}), '(aiadat[t, :, :])\n', (72993, 73010), True, 'import numpy as np\n'), ((77730, 77803), 'matplotlib.animation.FuncAnimation', 'animat.FuncAnimation', (['fig', 'animate'], {'frames': '(5)', 'interval': '(20)', 'repeat_delay': '(0)'}), '(fig, animate, frames=5, interval=20, repeat_delay=0)\n', (77750, 77803), True, 'import matplotlib.animation as animat\n'), ((98860, 98880), 'numpy.sum', 'np.sum', (['hmi_pos_inst'], {}), '(hmi_pos_inst)\n', (98866, 98880), True, 'import numpy as np\n'), ((98918, 98938), 'numpy.sum', 'np.sum', (['hmi_neg_inst'], {}), '(hmi_neg_inst)\n', (98924, 98938), True, 'import numpy as np\n'), ((101860, 101875), 'numpy.sum', 'np.sum', (['hmi_pos'], {}), '(hmi_pos)\n', (101866, 101875), True, 'import numpy as np\n'), ((101908, 101923), 'numpy.sum', 'np.sum', (['hmi_neg'], {}), '(hmi_neg)\n', (101914, 101923), True, 'import numpy as np\n'), ((107981, 108002), 'numpy.log', 'np.log', (['(-rec_flux_neg)'], {}), '(-rec_flux_neg)\n', (107987, 108002), True, 'import numpy as np\n'), ((123465, 123531), 'numpy.sqrt', 'np.sqrt', (['((left_x - ivs_sort[j]) ** 2 + (left_y - dvs_sort[j]) ** 2)'], {}), '((left_x - ivs_sort[j]) ** 2 + (left_y - dvs_sort[j]) ** 2)\n', (123472, 123531), True, 'import numpy as 
np\n'), ((123625, 123693), 'numpy.sqrt', 'np.sqrt', (['((right_x - ivs_sort[j]) ** 2 + (right_y - dvs_sort[j]) ** 2)'], {}), '((right_x - ivs_sort[j]) ** 2 + (right_y - dvs_sort[j]) ** 2)\n', (123632, 123693), True, 'import numpy as np\n'), ((124757, 124823), 'numpy.sqrt', 'np.sqrt', (['((left_x - ivs_sort[j]) ** 2 + (left_y - dvs_sort[j]) ** 2)'], {}), '((left_x - ivs_sort[j]) ** 2 + (left_y - dvs_sort[j]) ** 2)\n', (124764, 124823), True, 'import numpy as np\n'), ((124917, 124985), 'numpy.sqrt', 'np.sqrt', (['((right_x - ivs_sort[j]) ** 2 + (right_y - dvs_sort[j]) ** 2)'], {}), '((right_x - ivs_sort[j]) ** 2 + (right_y - dvs_sort[j]) ** 2)\n', (124924, 124985), True, 'import numpy as np\n'), ((135111, 135145), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['i'], {}), '(i)\n', (135142, 135145), False, 'import datetime\n'), ((44255, 44283), 'numpy.min', 'np.min', (['left_pil_dist_pos[i]'], {}), '(left_pil_dist_pos[i])\n', (44261, 44283), True, 'import numpy as np\n'), ((44480, 44509), 'numpy.min', 'np.min', (['right_pil_dist_pos[j]'], {}), '(right_pil_dist_pos[j])\n', (44486, 44509), True, 'import numpy as np\n'), ((45253, 45281), 'numpy.min', 'np.min', (['left_pil_dist_neg[i]'], {}), '(left_pil_dist_neg[i])\n', (45259, 45281), True, 'import numpy as np\n'), ((45478, 45507), 'numpy.min', 'np.min', (['right_pil_dist_neg[j]'], {}), '(right_pil_dist_neg[j])\n', (45484, 45507), True, 'import numpy as np\n'), ((113537, 113558), 'numpy.diff', 'np.diff', (['rec_flux_pos'], {}), '(rec_flux_pos)\n', (113544, 113558), True, 'import numpy as np\n'), ((113635, 113656), 'numpy.diff', 'np.diff', (['rec_flux_neg'], {}), '(rec_flux_neg)\n', (113642, 113656), True, 'import numpy as np\n'), ((123988, 124022), 'numpy.min', 'np.min', (['left_pil_dist_pos_shear[i]'], {}), '(left_pil_dist_pos_shear[i])\n', (123994, 124022), True, 'import numpy as np\n'), ((124288, 124323), 'numpy.min', 'np.min', (['right_pil_dist_neg_shear[j]'], {}), 
'(right_pil_dist_neg_shear[j])\n', (124294, 124323), True, 'import numpy as np\n'), ((125280, 125314), 'numpy.min', 'np.min', (['left_pil_dist_neg_shear[i]'], {}), '(left_pil_dist_neg_shear[i])\n', (125286, 125314), True, 'import numpy as np\n'), ((125580, 125615), 'numpy.min', 'np.min', (['right_pil_dist_pos_shear[j]'], {}), '(right_pil_dist_pos_shear[j])\n', (125586, 125615), True, 'import numpy as np\n'), ((54104, 54117), 'numpy.where', 'np.where', (['ind'], {}), '(ind)\n', (54112, 54117), True, 'import numpy as np\n'), ((77611, 77627), 'numpy.shape', 'np.shape', (['aiadat'], {}), '(aiadat)\n', (77619, 77627), True, 'import numpy as np\n')] |
from __future__ import division
from pyomo.environ import *
import numpy as np
import pandas as pd
# Create a model
model = AbstractModel()
# Import sets
set_df = pd.read_csv('../data/interim/lp_data/input_data/Set_List.csv')
node_list = list(set_df['B'])[:95]
charger_list = list(set_df['K'])[:2]
time_list = list(set_df['T'])[:72]
line_list = list(set_df['L'])[:772]
# Create pyomo sets
model.B = Set(initialize=node_list)
model.K = Set(initialize=charger_list)
model.T = Set(initialize=time_list)
model.L = Set(initialize=line_list)
# Create Model Parameters
model.F = Param(model.B, model.K)
model.D = Param(model.B, model.K)
model.p = Param(model.B, model.L)
model.A = Param(model.B, model.T)
model.G = Param(model.T)
model.C = Param(model.B, model.K)
model.N = Param(model.K)
model.E = Param(model.B, model.K)
model.S = Param(model.B)
model.M = Param(initialize=100)
model.VW = Param(model.B, model.K, model.T)
model.P_H_U = Param(model.L, model.T)
# Load data into model parameters
data = DataPortal()
data.load(filename='../data/interim/lp_data/input_data/Fixed_Cost.csv', param=model.F, index=(model.B, model.K))
data.load(filename='../data/interim/lp_data/input_data/Demand_Charge.csv', param=model.D, index=(model.B, model.K))
data.load(filename='../data/interim/lp_data/input_data/Incidence_Matrix.tab', param=model.p, format='array')
data.load(filename='../data/interim/lp_data/input_data/Demand.csv', param=model.A, index=(model.B, model.T))
data.load(filename='../data/interim/lp_data/input_data/Charging_Efficiency.csv', param=model.G, index=(model.T))
data.load(filename='i../data/interim/lp_data/input_data/Plug_in_Limit.csv', param=model.C, index=(model.B, model.K))
data.load(filename='../data/interim/lp_data/input_data/Charger_Capacity.csv', param=model.N, index=(model.K))
data.load(filename='../data/interim/lp_data/input_data/Existing_Capacity.csv', param=model.E, index=(model.B, model.K))
data.load(filename='../data/interim/lp_data/input_data/Site_Develop_Cost.csv', param=model.S, index=(model.B))
data.load(filename='../data/interim/lp_data/input_data/V_Times_W.csv', param=model.VW, index=(model.B, model.K, model.T))
data.load(filename='../data/interim/lp_data/input_data/P_H_U.csv', param=model.P_H_U, index=(model.L, model.T))
# Create Decision Variables
model.x = Var(model.B, model.K, within=NonNegativeReals)
model.n = Var(model.B, model.K, within=NonNegativeIntegers)
model.y = Var(model.B, model.K, model.T, within=NonNegativeReals)
model.f = Var(model.L, model.T, within=NonNegativeReals)
model.v = Var(model.B, within=Binary)
# Objective Function
def obj_expression(model):
return summation(model.S, model.v) + \
summation(model.F, model.x) + \
summation(model.D, model.x) + \
summation(model.VW, model.y) + \
summation(model.P_H_U, model.f)
model.OBJ = Objective(rule=obj_expression, sense=minimize)
# Constraint One
def first_constraint_rule(model, b, t):
return (sum(model.y[b, k, t] for k in model.K) + sum(model.p[b, l] * model.f[l, t] for l in model.L)) \
>= (model.A[b, t])
model.FirstConstraint = Constraint(model.B, model.T, rule=first_constraint_rule)
# Constraint Two
def second_constraint_rule(model, b, k, t):
return (model.y[b, k, t] <= (model.x[b, k] + model.E[b, k]) * model.G[t])
model.SecondConstraint = Constraint(model.B, model.K, model.T, rule=second_constraint_rule)
# Create model instance
instance = model.create_instance(data)
# Solve the LP
solver = pyomo.opt.SolverFactory('glpk')
results = solver.solve(instance, tee=True, keepfiles=True)
# Write out results
ind_x = list(instance.x)
val_x = list(instance.x[:, :].value)
ind_v = list(instance.v)
val_v = list(instance.v[:].value)
ind_y = list(instance.y)
val_y = list(instance.y[:, :, :].value)
ind_f = list(instance.f)
val_f = list(instance.f[:, :].value)
result_x = [i + tuple([j]) for i, j in zip(ind_x, val_x)]
result_v = [i for i in zip(ind_v, val_v)]
result_y = [i + tuple([j]) for i, j in zip(ind_y, val_y)]
result_f = [i + tuple([j]) for i, j in zip(ind_f, val_f)]
pd.DataFrame(np.array(result_x)).to_csv('../data/interim/lp_data/output_data/x.csv', index=False)
pd.DataFrame(np.array(result_v)).to_csv('../data/interim/lp_data/output_data/v.csv', index=False)
pd.DataFrame(np.array(result_y)).to_csv('../data/interim/lp_data/output_data/y.csv', index=False)
pd.DataFrame(np.array(result_f)).to_csv('../data/interim/lp_data/output_data/f.csv', index=False) | [
"pandas.read_csv",
"numpy.array"
] | [((165, 227), 'pandas.read_csv', 'pd.read_csv', (['"""../data/interim/lp_data/input_data/Set_List.csv"""'], {}), "('../data/interim/lp_data/input_data/Set_List.csv')\n", (176, 227), True, 'import pandas as pd\n'), ((4099, 4117), 'numpy.array', 'np.array', (['result_x'], {}), '(result_x)\n', (4107, 4117), True, 'import numpy as np\n'), ((4197, 4215), 'numpy.array', 'np.array', (['result_v'], {}), '(result_v)\n', (4205, 4215), True, 'import numpy as np\n'), ((4295, 4313), 'numpy.array', 'np.array', (['result_y'], {}), '(result_y)\n', (4303, 4313), True, 'import numpy as np\n'), ((4393, 4411), 'numpy.array', 'np.array', (['result_f'], {}), '(result_f)\n', (4401, 4411), True, 'import numpy as np\n')] |
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pytest
import scipy.stats
from abex.emukit.moment_matching_qei import (
calculate_cumulative_min_moments,
correlation_from_covariance,
get_next_cumulative_min_moments,
)
@pytest.mark.parametrize("size,num_points", itertools.product(range(1, 5), range(1, 10)))
def test_calculate_cumulative_min_moments__output_has_right_shape(size, num_points):
    """Moment matching yields one (mean, std, alpha) triple per output dimension,
    with every returned array shaped (num_points,) and alphas[0] equal to None."""
    # Draw Gaussian inputs; scale=0 pins every mean at loc=1 (only shapes matter here).
    mu = np.random.normal(size=[num_points, size], scale=0, loc=1)
    covariances = np.stack([random_positive_definite_matrix(size) for _ in range(num_points)], axis=0)
    sigma = np.sqrt(np.einsum("ijj->ij", covariances))
    rho = correlation_from_covariance(covariances, sigma)
    # Run the moment-matching approximation.
    mm_means, mm_stds, mm_alphas = calculate_cumulative_min_moments(mu, sigma, rho)
    assert len(mm_means) == size
    assert len(mm_stds) == size
    assert len(mm_alphas) == size
    for idx, (mean_i, std_i, alpha_i) in enumerate(zip(mm_means, mm_stds, mm_alphas)):
        if idx == 0:
            assert alpha_i is None
        else:
            assert alpha_i.ndim == 1
            assert alpha_i.shape[0] == num_points
        assert mean_i.ndim == 1
        assert mean_i.shape[0] == num_points
        assert std_i.ndim == 1
        assert std_i.shape[0] == num_points
assert approx_std[i].ndim == 1
assert approx_std[i].shape[0] == num_points
@pytest.mark.parametrize("size", range(1, 50, 5))
def test_calculate_cumulative_min_moments__output_is_finite(size):
    """Every moment-matching output (means, stds, and alphas beyond index 0)
    must be finite across a range of problem sizes."""
    n_points = 20
    # Draw Gaussian inputs; scale=0 pins every mean at loc=1.
    mu = np.random.normal(size=[n_points, size], scale=0, loc=1)
    covariances = np.stack([random_positive_definite_matrix(size) for _ in range(n_points)], axis=0)
    sigma = np.sqrt(np.einsum("ijj->ij", covariances))
    rho = correlation_from_covariance(covariances, sigma)
    # Run the moment-matching approximation.
    mm_means, mm_stds, mm_alphas = calculate_cumulative_min_moments(mu, sigma, rho)
    # The first alpha is undefined by construction; all later ones must be finite.
    assert mm_alphas[0] is None
    for alpha_i in mm_alphas[1:]:
        assert np.isfinite(alpha_i).all()
    for mean_i, std_i in zip(mm_means, mm_stds):
        assert np.isfinite(mean_i).all()
        assert np.isfinite(std_i).all()
@pytest.mark.timeout(30)
@pytest.mark.parametrize("mean_spread_scale,means_mean", [(1, 0), (1, -100), (1e3, 0), (1e-3, 0)])
def test_calculate_cumulative_min_moments__mean_std_exact_for_dim2(mean_spread_scale, means_mean):
    """For size=2 the moment-matched min of Gaussians is exact, so the approximate
    mean/std must match Monte-Carlo estimates within tight confidence bounds.

    Parametrized over mean spread and location to cover widely separated,
    strongly negative, and near-degenerate mean configurations.
    """
    # Fix random seed so the MC confidence-bound comparison is reproducible.
    np.random.seed(0)
    # Moment calculation is exact for two Gaussian variables => let size = 2
    size = 2
    num_points = 100
    # Generate random Gaussian variables
    means = np.random.normal(size=[num_points, size], scale=mean_spread_scale, loc=means_mean)
    cov = np.stack([random_positive_definite_matrix(size) for _ in range(num_points)], axis=0)
    # Marginal stds are the square roots of the covariance diagonals.
    stds = np.sqrt(np.einsum("ijj->ij", cov))
    corr_matrix = correlation_from_covariance(cov, stds)
    # Calculate the results using moment matching
    approx_mean, approx_std, alphas = calculate_cumulative_min_moments(means, stds, corr_matrix)
    assert np.isfinite(approx_mean[-1]).all()
    assert np.isfinite(approx_std[-1]).all()
    # Find MC estimates: average over repeats, then build a confidence interval
    # from the spread across repeats at significance level error_prob.
    num_samples = 10000
    num_repeats = 20
    error_prob = 1e-4
    for i in range(num_points):
        means_repeats, stds_repeats = np.zeros([num_repeats]), np.zeros([num_repeats])
        for j in range(num_repeats):
            # Sample the joint Gaussian and take the per-sample minimum.
            theta_samples = np.random.multivariate_normal(mean=means[i], cov=cov[i], size=num_samples).min(axis=1)
            means_repeats[j] = theta_samples.mean()
            stds_repeats[j] = theta_samples.std()
        mc_mean = means_repeats.mean()
        mc_std = stds_repeats.mean()
        # Two-sided normal quantile times the empirical spread of the repeats.
        mc_mean_err = scipy.stats.norm.ppf(1 - (error_prob / 2)) * means_repeats.std()  # type: ignore # auto
        mc_std_err = scipy.stats.norm.ppf(1 - (error_prob / 2)) * stds_repeats.std()  # type: ignore # auto
        # Assert that estimates within confidence bound estimates
        assert pytest.approx(mc_mean, abs=mc_mean_err) == approx_mean[-1][i]
        assert pytest.approx(mc_std, abs=mc_std_err) == approx_std[-1][i]
@pytest.mark.timeout(20)
@pytest.mark.parametrize("mean_spread_scale,means_mean", [(1, 0), (1, -100), (1e3, 0), (1e-3, 0)])
def test_calculate_cumulative_min_moments__correlation_exact_for_dim3(mean_spread_scale, means_mean):
    """The correlation between the running min of the first two variables and the
    third variable, as computed by get_next_cumulative_min_moments, must agree
    with Monte-Carlo estimates within tight confidence bounds.
    """
    # Fix random seed so the MC confidence-bound comparison is reproducible.
    np.random.seed(0)
    # Moment matching is exact for the min of the first two variables, so with
    # size = 3 the correlation to the third variable can be validated against MC.
    size = 3
    num_points = 100
    # Generate random Gaussian variables
    means = np.random.normal(size=[num_points, size], scale=mean_spread_scale, loc=means_mean)
    cov = np.stack([random_positive_definite_matrix(size) for _ in range(num_points)], axis=0)
    # Marginal stds are the square roots of the covariance diagonals.
    stds = np.sqrt(np.einsum("ijj->ij", cov))
    corr_matrix = correlation_from_covariance(cov, stds)
    # Calculate the results using moment matching for the first 2 variables
    theta_means, theta_stds, alphas = calculate_cumulative_min_moments(
        means[:, : size - 1], stds[:, : size - 1], corr_matrix[:, : size - 1, : size - 1]
    )
    assert np.isfinite(theta_means[-1]).all()
    assert np.isfinite(theta_stds[-1]).all()
    # Calculate the correlations to the 3rd variable
    last_output_idx = size - 1
    last_mean, last_std, last_output_theta_corr, _ = get_next_cumulative_min_moments(
        next_output_idx=last_output_idx,
        mean=means[:, last_output_idx],
        std=stds[:, last_output_idx],
        prev_stds=stds[:, :last_output_idx],
        corr_to_next=corr_matrix[:, :, last_output_idx],
        theta_means=theta_means,
        theta_stds=theta_stds,
        alphas=alphas,
    )
    # Find MC estimates: average the sample correlation over repeats and build a
    # confidence interval from the spread across repeats at level error_prob.
    num_samples = 10000
    num_repeats = 20
    error_prob = 1e-4
    for i in range(num_points):
        corr_repeats = np.zeros([num_repeats])
        for j in range(num_repeats):
            y_samples = np.random.multivariate_normal(mean=means[i], cov=cov[i], size=num_samples)
            # Running min over the first two variables vs. the last variable.
            theta_prelast_samples = y_samples[:, :-1].min(axis=1)
            y_last_samples = y_samples[:, -1]
            corr_repeats[j] = np.corrcoef(y_last_samples, theta_prelast_samples)[0, 1]
        corr_mean = corr_repeats.mean()
        # Two-sided normal quantile times the empirical spread of the repeats.
        corr_err = scipy.stats.norm.ppf(1 - (error_prob / 2)) * corr_repeats.std()  # type: ignore # auto
        # Assert that estimates within confidence bound estimates
        assert pytest.approx(corr_mean, abs=corr_err) == last_output_theta_corr[-1][i]
    return
def random_positive_definite_matrix(size: int) -> np.ndarray:
factor = np.random.rand(size, size)
mat: np.ndarray = factor.T @ factor
return mat
def plot_mc_samples_vs_predicted_means_and_stds(
size: int = 2,
num_samples: int = 10000,
num_repeats: int = 40,
num_points: int = 100,
confidence_interval: float = 0.9,
):
means = np.random.normal(size=[num_points, size], scale=2.0, loc=0.0)
cov = np.stack([random_positive_definite_matrix(size) for _ in range(num_points)], axis=0)
stds = np.sqrt(np.einsum("ijj->ij", cov))
corr_matrix = correlation_from_covariance(cov, stds)
approx_mean, approx_std, alphas = calculate_cumulative_min_moments(means, stds, corr_matrix)
# Find MC estimates:
error_prob = 1.0 - confidence_interval
assert error_prob > 0
mc_means = np.zeros([num_points])
mc_stds = np.zeros_like(mc_means)
mc_means_err = np.zeros_like(mc_means)
mc_stds_err = np.zeros_like(mc_stds)
for i in range(num_points):
means_repeats, stds_repeats = np.zeros([num_repeats]), np.zeros([num_repeats])
for j in range(num_repeats):
theta_samples = np.random.multivariate_normal(mean=means[i], cov=cov[i], size=num_samples).min(axis=1)
means_repeats[j] = theta_samples.mean()
stds_repeats[j] = theta_samples.std()
mc_means[i] = means_repeats.mean()
mc_stds[i] = stds_repeats.mean()
mc_means_err[i] = scipy.stats.norm.ppf(1 - (error_prob / 2)) * means_repeats.std() # type: ignore # auto
mc_stds_err[i] = scipy.stats.norm.ppf(1 - (error_prob / 2)) * stds_repeats.std() # type: ignore # auto
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(10, 5))
xgrid = np.linspace(mc_means.min(), mc_means.max(), 100)
ax1.plot(xgrid, xgrid, alpha=0.5, color="red")
ax1.errorbar(
approx_mean[-1], mc_means, yerr=mc_means_err * 10, fmt="o", color="black", ecolor="black", elinewidth=0.4
)
ax1.set_xlabel("Predicted mean (Moment matching)")
ax1.set_ylabel("Monte Carlo estimate of mean")
stdgrid = np.linspace(0, mc_stds.max(), 100)
ax2.plot(stdgrid, stdgrid, alpha=0.5, color="blue")
ax2.errorbar(approx_std[-1], mc_stds, yerr=mc_stds_err * 10, fmt="o", color="black", ecolor="black", elinewidth=0.4)
ax1.set_xlabel("Predicted st. deviation (Moment matching)")
ax1.set_ylabel("Monte Carlo estimate of st. deviation")
return fig, (ax1, ax2)
def plot_mc_samples_vs_predicted_correlations(
size: int = 3,
num_samples: int = 10000,
num_repeats: int = 40,
num_points: int = 100,
confidence_interval: float = 0.9,
):
assert size >= 3
means = np.random.normal(size=[num_points, size], scale=2.0, loc=0.0)
cov = np.stack([random_positive_definite_matrix(size) for _ in range(num_points)], axis=0)
stds = np.sqrt(np.einsum("ijj->ij", cov))
corr_matrix = correlation_from_covariance(cov, stds)
# Calculate the results using moment matching for the first size-1 variables
approx_mean, approx_std, alphas = calculate_cumulative_min_moments(
means[:, : size - 1], stds[:, : size - 1], corr_matrix[:, : size - 1, : size - 1]
)
# Calculate the correlations to the 3rd variable
last_mean, last_std, last_output_theta_corr, _ = get_next_cumulative_min_moments(
next_output_idx=size - 1,
mean=means[:, size - 1],
std=stds[:, size - 1],
prev_stds=stds[:, : size - 1],
corr_to_next=corr_matrix[:, size - 1, :],
theta_means=approx_mean,
theta_stds=approx_std,
alphas=alphas,
)
# Find MC estimates:
error_prob = 1.0 - confidence_interval
assert error_prob > 0
mc_corrs = np.zeros([num_points])
mc_corrs_err = np.zeros_like(mc_corrs)
for i in range(num_points):
corr_repeats = np.zeros([num_repeats])
for j in range(num_repeats):
y_samples = np.random.multivariate_normal(mean=means[i], cov=cov[i], size=num_samples)
theta_prelast_samples = y_samples[:, :-1].min(axis=1)
y_last_samples = y_samples[:, -1]
corr_repeats[j] = np.corrcoef(y_last_samples, theta_prelast_samples)[0, 1]
mc_corrs[i] = corr_repeats.mean()
mc_corrs_err[i] = scipy.stats.norm.ppf(1 - (error_prob / 2)) * corr_repeats.std() # type: ignore # auto
fig, ax = plt.subplots(figsize=(6, 6))
xgrid = np.linspace(mc_corrs.min(), mc_corrs.max(), 100)
ax.plot(xgrid, xgrid, alpha=0.5, color="red")
ax.errorbar(
last_output_theta_corr[-1],
mc_corrs,
yerr=mc_corrs_err * 10,
fmt="o",
color="black",
ecolor="black",
elinewidth=0.4,
)
ax.set_xlabel("Predicted Correlation (Moment matching)")
ax.set_ylabel("Monte Carlo estimate of correlation")
return fig, ax
| [
"abex.emukit.moment_matching_qei.correlation_from_covariance",
"abex.emukit.moment_matching_qei.calculate_cumulative_min_moments",
"numpy.zeros_like",
"numpy.random.seed",
"abex.emukit.moment_matching_qei.get_next_cumulative_min_moments",
"numpy.random.rand",
"numpy.corrcoef",
"numpy.zeros",
"numpy.... | [((2546, 2569), 'pytest.mark.timeout', 'pytest.mark.timeout', (['(30)'], {}), '(30)\n', (2565, 2569), False, 'import pytest\n'), ((2571, 2676), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mean_spread_scale,means_mean"""', '[(1, 0), (1, -100), (1000.0, 0), (0.001, 0)]'], {}), "('mean_spread_scale,means_mean', [(1, 0), (1, -100),\n (1000.0, 0), (0.001, 0)])\n", (2594, 2676), False, 'import pytest\n'), ((4476, 4499), 'pytest.mark.timeout', 'pytest.mark.timeout', (['(20)'], {}), '(20)\n', (4495, 4499), False, 'import pytest\n'), ((4501, 4606), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mean_spread_scale,means_mean"""', '[(1, 0), (1, -100), (1000.0, 0), (0.001, 0)]'], {}), "('mean_spread_scale,means_mean', [(1, 0), (1, -100),\n (1000.0, 0), (0.001, 0)])\n", (4524, 4606), False, 'import pytest\n'), ((831, 888), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[num_points, size]', 'scale': '(0)', 'loc': '(1)'}), '(size=[num_points, size], scale=0, loc=1)\n', (847, 888), True, 'import numpy as np\n'), ((1048, 1086), 'abex.emukit.moment_matching_qei.correlation_from_covariance', 'correlation_from_covariance', (['cov', 'stds'], {}), '(cov, stds)\n', (1075, 1086), False, 'from abex.emukit.moment_matching_qei import calculate_cumulative_min_moments, correlation_from_covariance, get_next_cumulative_min_moments\n'), ((1175, 1233), 'abex.emukit.moment_matching_qei.calculate_cumulative_min_moments', 'calculate_cumulative_min_moments', (['means', 'stds', 'corr_matrix'], {}), '(means, stds, corr_matrix)\n', (1207, 1233), False, 'from abex.emukit.moment_matching_qei import calculate_cumulative_min_moments, correlation_from_covariance, get_next_cumulative_min_moments\n'), ((1899, 1956), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[num_points, size]', 'scale': '(0)', 'loc': '(1)'}), '(size=[num_points, size], scale=0, loc=1)\n', (1915, 1956), True, 'import numpy as np\n'), ((2116, 2154), 
'abex.emukit.moment_matching_qei.correlation_from_covariance', 'correlation_from_covariance', (['cov', 'stds'], {}), '(cov, stds)\n', (2143, 2154), False, 'from abex.emukit.moment_matching_qei import calculate_cumulative_min_moments, correlation_from_covariance, get_next_cumulative_min_moments\n'), ((2243, 2301), 'abex.emukit.moment_matching_qei.calculate_cumulative_min_moments', 'calculate_cumulative_min_moments', (['means', 'stds', 'corr_matrix'], {}), '(means, stds, corr_matrix)\n', (2275, 2301), False, 'from abex.emukit.moment_matching_qei import calculate_cumulative_min_moments, correlation_from_covariance, get_next_cumulative_min_moments\n'), ((2794, 2811), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2808, 2811), True, 'import numpy as np\n'), ((2976, 3063), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[num_points, size]', 'scale': 'mean_spread_scale', 'loc': 'means_mean'}), '(size=[num_points, size], scale=mean_spread_scale, loc=\n means_mean)\n', (2992, 3063), True, 'import numpy as np\n'), ((3218, 3256), 'abex.emukit.moment_matching_qei.correlation_from_covariance', 'correlation_from_covariance', (['cov', 'stds'], {}), '(cov, stds)\n', (3245, 3256), False, 'from abex.emukit.moment_matching_qei import calculate_cumulative_min_moments, correlation_from_covariance, get_next_cumulative_min_moments\n'), ((3345, 3403), 'abex.emukit.moment_matching_qei.calculate_cumulative_min_moments', 'calculate_cumulative_min_moments', (['means', 'stds', 'corr_matrix'], {}), '(means, stds, corr_matrix)\n', (3377, 3403), False, 'from abex.emukit.moment_matching_qei import calculate_cumulative_min_moments, correlation_from_covariance, get_next_cumulative_min_moments\n'), ((4727, 4744), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4741, 4744), True, 'import numpy as np\n'), ((4909, 4996), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[num_points, size]', 'scale': 'mean_spread_scale', 'loc': 'means_mean'}), 
'(size=[num_points, size], scale=mean_spread_scale, loc=\n means_mean)\n', (4925, 4996), True, 'import numpy as np\n'), ((5151, 5189), 'abex.emukit.moment_matching_qei.correlation_from_covariance', 'correlation_from_covariance', (['cov', 'stds'], {}), '(cov, stds)\n', (5178, 5189), False, 'from abex.emukit.moment_matching_qei import calculate_cumulative_min_moments, correlation_from_covariance, get_next_cumulative_min_moments\n'), ((5304, 5419), 'abex.emukit.moment_matching_qei.calculate_cumulative_min_moments', 'calculate_cumulative_min_moments', (['means[:, :size - 1]', 'stds[:, :size - 1]', 'corr_matrix[:, :size - 1, :size - 1]'], {}), '(means[:, :size - 1], stds[:, :size - 1],\n corr_matrix[:, :size - 1, :size - 1])\n', (5336, 5419), False, 'from abex.emukit.moment_matching_qei import calculate_cumulative_min_moments, correlation_from_covariance, get_next_cumulative_min_moments\n'), ((5662, 5951), 'abex.emukit.moment_matching_qei.get_next_cumulative_min_moments', 'get_next_cumulative_min_moments', ([], {'next_output_idx': 'last_output_idx', 'mean': 'means[:, last_output_idx]', 'std': 'stds[:, last_output_idx]', 'prev_stds': 'stds[:, :last_output_idx]', 'corr_to_next': 'corr_matrix[:, :, last_output_idx]', 'theta_means': 'theta_means', 'theta_stds': 'theta_stds', 'alphas': 'alphas'}), '(next_output_idx=last_output_idx, mean=means\n [:, last_output_idx], std=stds[:, last_output_idx], prev_stds=stds[:, :\n last_output_idx], corr_to_next=corr_matrix[:, :, last_output_idx],\n theta_means=theta_means, theta_stds=theta_stds, alphas=alphas)\n', (5693, 5951), False, 'from abex.emukit.moment_matching_qei import calculate_cumulative_min_moments, correlation_from_covariance, get_next_cumulative_min_moments\n'), ((6905, 6931), 'numpy.random.rand', 'np.random.rand', (['size', 'size'], {}), '(size, size)\n', (6919, 6931), True, 'import numpy as np\n'), ((7194, 7255), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[num_points, size]', 'scale': '(2.0)', 'loc': 
'(0.0)'}), '(size=[num_points, size], scale=2.0, loc=0.0)\n', (7210, 7255), True, 'import numpy as np\n'), ((7415, 7453), 'abex.emukit.moment_matching_qei.correlation_from_covariance', 'correlation_from_covariance', (['cov', 'stds'], {}), '(cov, stds)\n', (7442, 7453), False, 'from abex.emukit.moment_matching_qei import calculate_cumulative_min_moments, correlation_from_covariance, get_next_cumulative_min_moments\n'), ((7493, 7551), 'abex.emukit.moment_matching_qei.calculate_cumulative_min_moments', 'calculate_cumulative_min_moments', (['means', 'stds', 'corr_matrix'], {}), '(means, stds, corr_matrix)\n', (7525, 7551), False, 'from abex.emukit.moment_matching_qei import calculate_cumulative_min_moments, correlation_from_covariance, get_next_cumulative_min_moments\n'), ((7662, 7684), 'numpy.zeros', 'np.zeros', (['[num_points]'], {}), '([num_points])\n', (7670, 7684), True, 'import numpy as np\n'), ((7699, 7722), 'numpy.zeros_like', 'np.zeros_like', (['mc_means'], {}), '(mc_means)\n', (7712, 7722), True, 'import numpy as np\n'), ((7742, 7765), 'numpy.zeros_like', 'np.zeros_like', (['mc_means'], {}), '(mc_means)\n', (7755, 7765), True, 'import numpy as np\n'), ((7784, 7806), 'numpy.zeros_like', 'np.zeros_like', (['mc_stds'], {}), '(mc_stds)\n', (7797, 7806), True, 'import numpy as np\n'), ((8513, 8551), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'figsize': '(10, 5)'}), '(ncols=2, figsize=(10, 5))\n', (8525, 8551), True, 'import matplotlib.pyplot as plt\n'), ((9512, 9573), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[num_points, size]', 'scale': '(2.0)', 'loc': '(0.0)'}), '(size=[num_points, size], scale=2.0, loc=0.0)\n', (9528, 9573), True, 'import numpy as np\n'), ((9733, 9771), 'abex.emukit.moment_matching_qei.correlation_from_covariance', 'correlation_from_covariance', (['cov', 'stds'], {}), '(cov, stds)\n', (9760, 9771), False, 'from abex.emukit.moment_matching_qei import calculate_cumulative_min_moments, 
correlation_from_covariance, get_next_cumulative_min_moments\n'), ((9892, 10007), 'abex.emukit.moment_matching_qei.calculate_cumulative_min_moments', 'calculate_cumulative_min_moments', (['means[:, :size - 1]', 'stds[:, :size - 1]', 'corr_matrix[:, :size - 1, :size - 1]'], {}), '(means[:, :size - 1], stds[:, :size - 1],\n corr_matrix[:, :size - 1, :size - 1])\n', (9924, 10007), False, 'from abex.emukit.moment_matching_qei import calculate_cumulative_min_moments, correlation_from_covariance, get_next_cumulative_min_moments\n'), ((10128, 10381), 'abex.emukit.moment_matching_qei.get_next_cumulative_min_moments', 'get_next_cumulative_min_moments', ([], {'next_output_idx': '(size - 1)', 'mean': 'means[:, size - 1]', 'std': 'stds[:, size - 1]', 'prev_stds': 'stds[:, :size - 1]', 'corr_to_next': 'corr_matrix[:, size - 1, :]', 'theta_means': 'approx_mean', 'theta_stds': 'approx_std', 'alphas': 'alphas'}), '(next_output_idx=size - 1, mean=means[:, \n size - 1], std=stds[:, size - 1], prev_stds=stds[:, :size - 1],\n corr_to_next=corr_matrix[:, size - 1, :], theta_means=approx_mean,\n theta_stds=approx_std, alphas=alphas)\n', (10159, 10381), False, 'from abex.emukit.moment_matching_qei import calculate_cumulative_min_moments, correlation_from_covariance, get_next_cumulative_min_moments\n'), ((10551, 10573), 'numpy.zeros', 'np.zeros', (['[num_points]'], {}), '([num_points])\n', (10559, 10573), True, 'import numpy as np\n'), ((10593, 10616), 'numpy.zeros_like', 'np.zeros_like', (['mc_corrs'], {}), '(mc_corrs)\n', (10606, 10616), True, 'import numpy as np\n'), ((11201, 11229), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (11213, 11229), True, 'import matplotlib.pyplot as plt\n'), ((1003, 1028), 'numpy.einsum', 'np.einsum', (['"""ijj->ij"""', 'cov'], {}), "('ijj->ij', cov)\n", (1012, 1028), True, 'import numpy as np\n'), ((2071, 2096), 'numpy.einsum', 'np.einsum', (['"""ijj->ij"""', 'cov'], {}), "('ijj->ij', cov)\n", (2080, 
2096), True, 'import numpy as np\n'), ((3173, 3198), 'numpy.einsum', 'np.einsum', (['"""ijj->ij"""', 'cov'], {}), "('ijj->ij', cov)\n", (3182, 3198), True, 'import numpy as np\n'), ((5106, 5131), 'numpy.einsum', 'np.einsum', (['"""ijj->ij"""', 'cov'], {}), "('ijj->ij', cov)\n", (5115, 5131), True, 'import numpy as np\n'), ((6158, 6181), 'numpy.zeros', 'np.zeros', (['[num_repeats]'], {}), '([num_repeats])\n', (6166, 6181), True, 'import numpy as np\n'), ((7370, 7395), 'numpy.einsum', 'np.einsum', (['"""ijj->ij"""', 'cov'], {}), "('ijj->ij', cov)\n", (7379, 7395), True, 'import numpy as np\n'), ((9688, 9713), 'numpy.einsum', 'np.einsum', (['"""ijj->ij"""', 'cov'], {}), "('ijj->ij', cov)\n", (9697, 9713), True, 'import numpy as np\n'), ((10672, 10695), 'numpy.zeros', 'np.zeros', (['[num_repeats]'], {}), '([num_repeats])\n', (10680, 10695), True, 'import numpy as np\n'), ((3415, 3443), 'numpy.isfinite', 'np.isfinite', (['approx_mean[-1]'], {}), '(approx_mean[-1])\n', (3426, 3443), True, 'import numpy as np\n'), ((3461, 3488), 'numpy.isfinite', 'np.isfinite', (['approx_std[-1]'], {}), '(approx_std[-1])\n', (3472, 3488), True, 'import numpy as np\n'), ((3659, 3682), 'numpy.zeros', 'np.zeros', (['[num_repeats]'], {}), '([num_repeats])\n', (3667, 3682), True, 'import numpy as np\n'), ((3684, 3707), 'numpy.zeros', 'np.zeros', (['[num_repeats]'], {}), '([num_repeats])\n', (3692, 3707), True, 'import numpy as np\n'), ((4337, 4376), 'pytest.approx', 'pytest.approx', (['mc_mean'], {'abs': 'mc_mean_err'}), '(mc_mean, abs=mc_mean_err)\n', (4350, 4376), False, 'import pytest\n'), ((4414, 4451), 'pytest.approx', 'pytest.approx', (['mc_std'], {'abs': 'mc_std_err'}), '(mc_std, abs=mc_std_err)\n', (4427, 4451), False, 'import pytest\n'), ((5445, 5473), 'numpy.isfinite', 'np.isfinite', (['theta_means[-1]'], {}), '(theta_means[-1])\n', (5456, 5473), True, 'import numpy as np\n'), ((5491, 5518), 'numpy.isfinite', 'np.isfinite', (['theta_stds[-1]'], {}), '(theta_stds[-1])\n', (5502, 
5518), True, 'import numpy as np\n'), ((6243, 6317), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', ([], {'mean': 'means[i]', 'cov': 'cov[i]', 'size': 'num_samples'}), '(mean=means[i], cov=cov[i], size=num_samples)\n', (6272, 6317), True, 'import numpy as np\n'), ((6745, 6783), 'pytest.approx', 'pytest.approx', (['corr_mean'], {'abs': 'corr_err'}), '(corr_mean, abs=corr_err)\n', (6758, 6783), False, 'import pytest\n'), ((7877, 7900), 'numpy.zeros', 'np.zeros', (['[num_repeats]'], {}), '([num_repeats])\n', (7885, 7900), True, 'import numpy as np\n'), ((7902, 7925), 'numpy.zeros', 'np.zeros', (['[num_repeats]'], {}), '([num_repeats])\n', (7910, 7925), True, 'import numpy as np\n'), ((10757, 10831), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', ([], {'mean': 'means[i]', 'cov': 'cov[i]', 'size': 'num_samples'}), '(mean=means[i], cov=cov[i], size=num_samples)\n', (10786, 10831), True, 'import numpy as np\n'), ((2461, 2488), 'numpy.isfinite', 'np.isfinite', (['approx_mean[i]'], {}), '(approx_mean[i])\n', (2472, 2488), True, 'import numpy as np\n'), ((2510, 2536), 'numpy.isfinite', 'np.isfinite', (['approx_std[i]'], {}), '(approx_std[i])\n', (2521, 2536), True, 'import numpy as np\n'), ((6460, 6510), 'numpy.corrcoef', 'np.corrcoef', (['y_last_samples', 'theta_prelast_samples'], {}), '(y_last_samples, theta_prelast_samples)\n', (6471, 6510), True, 'import numpy as np\n'), ((10974, 11024), 'numpy.corrcoef', 'np.corrcoef', (['y_last_samples', 'theta_prelast_samples'], {}), '(y_last_samples, theta_prelast_samples)\n', (10985, 11024), True, 'import numpy as np\n'), ((2417, 2439), 'numpy.isfinite', 'np.isfinite', (['alphas[i]'], {}), '(alphas[i])\n', (2428, 2439), True, 'import numpy as np\n'), ((3773, 3847), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', ([], {'mean': 'means[i]', 'cov': 'cov[i]', 'size': 'num_samples'}), '(mean=means[i], cov=cov[i], size=num_samples)\n', (3802, 3847), True, 'import numpy as 
np\n'), ((7991, 8065), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', ([], {'mean': 'means[i]', 'cov': 'cov[i]', 'size': 'num_samples'}), '(mean=means[i], cov=cov[i], size=num_samples)\n', (8020, 8065), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#helping pages:
#https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249
import rospy
import numpy
import tf
import tf2_ros
import geometry_msgs.msg
#flag to start the first iteration
starter = True
##FOR NEW SPACE POSITION
def message_from_transform(T):
message = geometry_msgs.msg.Transform()
quad = tf.transformations.quaternion_from_matrix(T)
translationMove = tf.transformations.translation_from_matrix(T)
message.translation.x = translationMove[0]
message.translation.y = translationMove[1]
message.translation.z = translationMove[2]
message.rotation.x = quad[0]
message.rotation.y = quad[1]
message.rotation.z = quad[2]
message.rotation.w = quad[3]
return message
def one_magnitude_vector(vector):
#returnning unit vector of a vector
unitVector = vector / numpy.linalg.norm(vector)
return unitVector
def angle_calculation_btwn(vector1, vector2):
#this will return the angle in radians beteen two vectors
vector1_unit = one_magnitude_vector(vector1)
vector2_unit = one_magnitude_vector(vector2)
#getting the dot product between vector1 and vector2
dotProd = numpy.dot(vector1_unit, vector2_unit)
"""
clip(a, a_min, a_max, out=None)
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to the interval
edges. For example, if an interval of [0, 1] is specified, values smaller
than 0 become 0, and values larger than 1 become 1.
"""
clippedVector = numpy.clip(dotProd, -1.0, 1.0)
vectorized = numpy.arccos(clippedVector)
return vectorized
#matrix times vector
def matrixTimesVector(matrix, vector):
vector.append(1)
MV = [sum([vector[x]*matrix[n][x] for x in range(len(vector))]) for n in range(len(matrix))]
return MV
def publish_transforms():
#GLOBAL VARIABLES FOR ANGLE IN T3 CALCULATION
global angleT3
global xT3
global cameraT3pos
global robotT2pos
global starter
# ----------T1-------------------------------------
T1 = tf.transformations.concatenate_matrices(
tf.transformations.quaternion_matrix(
tf.transformations.quaternion_from_euler(0.79, 0.0, 0.79)
),
tf.transformations.translation_matrix((0.0, 1.0, 1.0))
)
object_transform = geometry_msgs.msg.TransformStamped()
object_transform.header.stamp = rospy.Time.now()
object_transform.header.frame_id = "base_frame"
object_transform.child_frame_id = "object_frame"
object_transform.transform = message_from_transform(T1)
br.sendTransform(object_transform)
#-------------T2------------------------------------
T2 = tf.transformations.concatenate_matrices(
tf.transformations.quaternion_matrix(
tf.transformations.quaternion_about_axis(1.5, (0, 0, 1))
),
tf.transformations.translation_matrix((0.0, -1.0, 0.0))
)
robot_transform = geometry_msgs.msg.TransformStamped()
robot_transform.header.stamp = rospy.Time.now()
robot_transform.header.frame_id = "base_frame"
robot_transform.child_frame_id = "robot_frame"
robot_transform.transform = message_from_transform(T2)
br.sendTransform(robot_transform)
#-------------T3------------------------------------
#Starter point
T3 = tf.transformations.concatenate_matrices(
tf.transformations.quaternion_matrix(
tf.transformations.quaternion_from_euler(0.0, 0.0, 0.0)
),
tf.transformations.translation_matrix((0.0, 0.1, 0.1))
)
if starter:
starter = False
#now the x axis
xT3 = [1, 0, 0]
#now lets calculate the origin coordinates
#usign object coordinate frame and camera frame
#using object frame
P2 = tf.transformations.translation_from_matrix(T1)
P2 = P2.tolist()
#inverse matrix of t2
T2_inverse = tf.transformations.inverse_matrix(T2)
#inverse matrix of t3
T3_inverse = tf.transformations.inverse_matrix(T3)
#position reference to robot--------------
robotT2pos = matrixTimesVector(T2_inverse, P2)
newRob = len(robotT2pos)-1
#robotT2pos - last
robotT2pos = robotT2pos[:newRob]
#position reference to camera frame------
cameraT3pos = matrixTimesVector(T3_inverse, robotT2pos)
newCam = len(cameraT3pos)-1
#popping the last element
cameraT3pos = cameraT3pos[:newCam]
#calculating angles between
angleT3 = angle_calculation_btwn(xT3, cameraT3pos)
#now when it is not in first time
laNormal = numpy.cross(xT3, cameraT3pos)
#applying those movements
T3 = tf.transformations.concatenate_matrices(
tf.transformations.quaternion_matrix(
tf.transformations.quaternion_about_axis(angleT3, laNormal)
),
tf.transformations.translation_matrix((0.0, 0.1, 0.1))
)
#STAMPING T3
camera_transform = geometry_msgs.msg.TransformStamped()
camera_transform.header.stamp = rospy.Time.now()
camera_transform.header.frame_id = "robot_frame"
camera_transform.child_frame_id = "camera_frame"
camera_transform.transform = message_from_transform(T3)
br.sendTransform(camera_transform)
if __name__ == '__main__':
rospy.init_node('project2_solution')
br = tf2_ros.TransformBroadcaster()
rospy.sleep(0.5)
while not rospy.is_shutdown():
publish_transforms()
rospy.sleep(0.05)
| [
"tf2_ros.TransformBroadcaster",
"tf.transformations.translation_matrix",
"tf.transformations.quaternion_about_axis",
"rospy.Time.now",
"numpy.cross",
"numpy.arccos",
"numpy.clip",
"rospy.sleep",
"rospy.is_shutdown",
"numpy.linalg.norm",
"tf.transformations.translation_from_matrix",
"rospy.init... | [((394, 438), 'tf.transformations.quaternion_from_matrix', 'tf.transformations.quaternion_from_matrix', (['T'], {}), '(T)\n', (435, 438), False, 'import tf\n'), ((461, 506), 'tf.transformations.translation_from_matrix', 'tf.transformations.translation_from_matrix', (['T'], {}), '(T)\n', (503, 506), False, 'import tf\n'), ((1228, 1265), 'numpy.dot', 'numpy.dot', (['vector1_unit', 'vector2_unit'], {}), '(vector1_unit, vector2_unit)\n', (1237, 1265), False, 'import numpy\n'), ((1595, 1625), 'numpy.clip', 'numpy.clip', (['dotProd', '(-1.0)', '(1.0)'], {}), '(dotProd, -1.0, 1.0)\n', (1605, 1625), False, 'import numpy\n'), ((1644, 1671), 'numpy.arccos', 'numpy.arccos', (['clippedVector'], {}), '(clippedVector)\n', (1656, 1671), False, 'import numpy\n'), ((2470, 2486), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (2484, 2486), False, 'import rospy\n'), ((3097, 3113), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (3111, 3113), False, 'import rospy\n'), ((4721, 4750), 'numpy.cross', 'numpy.cross', (['xT3', 'cameraT3pos'], {}), '(xT3, cameraT3pos)\n', (4732, 4750), False, 'import numpy\n'), ((5152, 5168), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (5166, 5168), False, 'import rospy\n'), ((5406, 5442), 'rospy.init_node', 'rospy.init_node', (['"""project2_solution"""'], {}), "('project2_solution')\n", (5421, 5442), False, 'import rospy\n'), ((5453, 5483), 'tf2_ros.TransformBroadcaster', 'tf2_ros.TransformBroadcaster', ([], {}), '()\n', (5481, 5483), False, 'import tf2_ros\n'), ((5488, 5504), 'rospy.sleep', 'rospy.sleep', (['(0.5)'], {}), '(0.5)\n', (5499, 5504), False, 'import rospy\n'), ((901, 926), 'numpy.linalg.norm', 'numpy.linalg.norm', (['vector'], {}), '(vector)\n', (918, 926), False, 'import numpy\n'), ((2309, 2363), 'tf.transformations.translation_matrix', 'tf.transformations.translation_matrix', (['(0.0, 1.0, 1.0)'], {}), '((0.0, 1.0, 1.0))\n', (2346, 2363), False, 'import tf\n'), ((2937, 2992), 
'tf.transformations.translation_matrix', 'tf.transformations.translation_matrix', (['(0.0, -1.0, 0.0)'], {}), '((0.0, -1.0, 0.0))\n', (2974, 2992), False, 'import tf\n'), ((3578, 3632), 'tf.transformations.translation_matrix', 'tf.transformations.translation_matrix', (['(0.0, 0.1, 0.1)'], {}), '((0.0, 0.1, 0.1))\n', (3615, 3632), False, 'import tf\n'), ((3881, 3927), 'tf.transformations.translation_from_matrix', 'tf.transformations.translation_from_matrix', (['T1'], {}), '(T1)\n', (3923, 3927), False, 'import tf\n'), ((4006, 4043), 'tf.transformations.inverse_matrix', 'tf.transformations.inverse_matrix', (['T2'], {}), '(T2)\n', (4039, 4043), False, 'import tf\n'), ((4095, 4132), 'tf.transformations.inverse_matrix', 'tf.transformations.inverse_matrix', (['T3'], {}), '(T3)\n', (4128, 4132), False, 'import tf\n'), ((4973, 5027), 'tf.transformations.translation_matrix', 'tf.transformations.translation_matrix', (['(0.0, 0.1, 0.1)'], {}), '((0.0, 0.1, 0.1))\n', (5010, 5027), False, 'import tf\n'), ((5520, 5539), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (5537, 5539), False, 'import rospy\n'), ((5578, 5595), 'rospy.sleep', 'rospy.sleep', (['(0.05)'], {}), '(0.05)\n', (5589, 5595), False, 'import rospy\n'), ((2228, 2285), 'tf.transformations.quaternion_from_euler', 'tf.transformations.quaternion_from_euler', (['(0.79)', '(0.0)', '(0.79)'], {}), '(0.79, 0.0, 0.79)\n', (2268, 2285), False, 'import tf\n'), ((2857, 2913), 'tf.transformations.quaternion_about_axis', 'tf.transformations.quaternion_about_axis', (['(1.5)', '(0, 0, 1)'], {}), '(1.5, (0, 0, 1))\n', (2897, 2913), False, 'import tf\n'), ((3499, 3554), 'tf.transformations.quaternion_from_euler', 'tf.transformations.quaternion_from_euler', (['(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0)\n', (3539, 3554), False, 'import tf\n'), ((4890, 4949), 'tf.transformations.quaternion_about_axis', 'tf.transformations.quaternion_about_axis', (['angleT3', 'laNormal'], {}), '(angleT3, laNormal)\n', (4930, 4949), 
False, 'import tf\n')] |
import numpy as np
class Plotter(object):
def __init__(self, backend=None):
if backend is not None:
self.use(backend)
# ----------
def points(self, points, **kwargs):
points = np.asarray(points, dtype='d')
points = np.atleast_2d(points)
shape = points.shape[:-1]
dim = points.shape[-1]
assert 1 <= dim <= 3
if dim < 3:
C = np.zeros(shape+(3,), dtype='d')
C[...,0:dim] = points
else:
C = points
x, y, z = C.T
#
options = dict(kwargs)
if 'mode' not in options:
options['mode'] = 'sphere'
#
pts = self.backend.points3d(x, y, z, **options)
return pts
def quiver(self, points, vectors, **kwargs):
#
points = np.asarray(points, dtype='d')
points = np.atleast_2d(points)
dim = points.shape[-1]
assert 1 <= dim <= 3
if dim < 3:
shape = points.shape[:-1]
C = np.zeros(shape+(3,), dtype='d')
C[...,0:dim] = points
else:
C = points
x, y, z = C.T
#
vectors = np.asarray(vectors, dtype='d')
vectors = np.atleast_2d(vectors)
dim = vectors.shape[-1]
assert 1 <= dim <= 3
if dim < 3:
shape = vectors.shape[:-1]
A = np.zeros(shape+(3,), dtype='d')
A[...,0:dim] = vectors
else:
A = vectors
u, v, w = A.T
#
options = dict(kwargs)
if 'mode' not in options:
options['mode'] = 'arrow'
#
vcs = self.backend.quiver3d(x, y, z, u, v, w, **options)
return vcs
# ----------
def cpoint(self, nurbs, **kwargs):
C = nurbs.points
x, y, z = C.T
#
options = dict(kwargs)
if 'color' not in options:
options['color'] = (1,0,0)
if 'mode' not in options:
options['mode'] = 'sphere'
#
pts = self.backend.points3d(x, y, z, **options)
if 'scale_factor' not in options:
try:
pts.glyph.glyph.scale_factor *= 0.125
except AttributeError:
pass
return pts
def cwire(self, nurbs, **kwargs):
C = nurbs.points
x, y, z = C.T
#
options = dict(kwargs)
if 'color' not in options:
options['color'] = (0,0,1)
mode = options.pop('mode', 'line')
assert mode in ('line', 'tube')
if mode == 'line':
options['representation'] = 'wireframe'
options['tube_radius'] = None
elif mode == 'tube':
options['representation'] = 'surface'
#
lines = [(x, y, z)]
grd = self.backend.line3d(lines=lines, **options)
return grd
def kpoint(self, nurbs, **kwargs):
uvw = nurbs.breaks()
C = nurbs(*uvw)
x, y, z = C.T
#
options = dict(kwargs)
if 'color' not in options:
options['color'] = (0,1,0)
if 'mode' not in options:
options['mode'] = 'cube'
#
pts = self.backend.points3d(x, y, z, **options)
if 'scale_factor' not in options:
try:
pts.glyph.glyph.scale_factor *= 0.1
except AttributeError:
pass
return pts
def kwire(self, nurbs, axes=None, **kwargs):
if axes is None:
axes = (0,1,2)[:nurbs.dim]
elif not isinstance(axes, (list, tuple)):
axes = (axes,)
#
uvw = nurbs.breaks()
lines = []
for axis in axes:
u = uvw[axis]
resolution = self.backend._resolution[1]
a = np.linspace(u[0], u[-1], resolution)
abc = list(uvw)
abc[axis] = a
C = nurbs(*abc)
C = np.rollaxis(C, axis, -1)
C = C.reshape((-1, a.size, 3))
lines.extend(C)
#
options = dict(kwargs)
if 'color' not in options:
options['color'] = (0,1,0)
mode = options.pop('mode', 'line')
assert mode in ('line', 'tube')
if mode == 'line':
options['representation'] = 'wireframe'
options['tube_radius'] = None
elif mode == 'tube':
options['representation'] = 'surface'
#
lines = [tuple(C.T) for C in lines]
wire = self.backend.line3d(lines=lines, **options)
return wire
def ksurf(self, nurbs, axes=None, **kwargs):
if axes is None:
axes = (0,1,2)[:nurbs.dim]
elif not isinstance(axes, (list, tuple)):
axes = (axes,)
#
surfs = []
for axis in axes:
for u in nurbs.breaks(axis):
nrb = nurbs.extract(axis, u)
resolution = self.backend._resolution[2]
uvw = [np.linspace(U[p], U[-p-1], resolution)
for (p, U) in zip(nrb.degree, nrb.knots)]
C = nrb(*uvw)
surfs.append(C)
#
options = dict(kwargs)
if 'color' not in options:
options['color'] = (0,1,0)
options['representation'] = 'surface'
#
surfs = [tuple(C.T) for C in surfs]
surf = self.backend.surf3d(surfs=surfs, **options)
return surf
# ----------
def curve(self, nurbs, **kwargs):
if nurbs.dim < 1: return None
if nurbs.dim > 1:
boundaries = []
for axis in range(nurbs.dim):
for side in range(2):
nrb = nurbs.boundary(axis, side)
boundaries.append(nrb)
return [self.curve(nrb, **kwargs)
for nrb in boundaries]
#
resolution = self.backend._resolution[1]
p, U = nurbs.degree[0], nurbs.knots[0]
u = np.linspace(U[p], U[-p-1], resolution)
C = nurbs(u)
x, y, z = C.T
#
options = dict(kwargs)
color = options.pop('color', (1,1,0))
if color is not None:
options['color'] = color
mode = options.pop('mode', 'tube')
assert mode in ('line', 'tube')
if mode == 'line':
options['representation'] = 'wireframe'
options['tube_radius'] = None
elif mode == 'tube':
options['representation'] = 'surface'
#
lines = [(x, y, z)]
crv = self.backend.line3d(lines=lines, **options)
return crv
def surface(self, nurbs, **kwargs):
if nurbs.dim < 2: return None
if nurbs.dim > 2:
surfaces = []
for axis in range(nurbs.dim):
for side in range(2):
nrb = nurbs.boundary(axis, side)
surfaces.append(nrb)
else:
surfaces = [nurbs]
#
surfs = []
for nrb in surfaces:
resolution = self.backend._resolution[2]
uvw = [np.linspace(U[p], U[-p-1], resolution)
for (p, U) in zip(nrb.degree, nrb.knots)]
C = nrb(*uvw)
surfs.append(C)
#
options = dict(kwargs)
color = options.pop('color', (1,1,0))
if color is not None:
options['color'] = color
options['representation'] = 'surface'
#
surfs = [tuple(C.T) for C in surfs]
srf = self.backend.surf3d(surfs=surfs, **options)
return srf
def volume(self, nurbs, **kwargs):
if nurbs.dim < 3: return None
#
options = dict(kwargs)
color = options.pop('color', (1,1,0))
if color is not None:
options['color'] = color
#
return self.surface(nurbs, **kwargs)
# ----------
def cplot(self, nurbs, **kwargs):
opts = dict(kwargs)
pts = self.cpoint(nurbs, **opts)
opts = dict(kwargs)
grd = self.cwire(nurbs, **opts)
return (pts, grd)
def kplot(self, nurbs, **kwargs):
opts = dict(kwargs)
pts = self.kpoint(nurbs, **opts)
opts = dict(kwargs)
grd = self.kwire(nurbs, **opts)
return (pts, grd)
def plot(self, nurbs, **kwargs):
if nurbs.dim == 1:
return self.curve(nurbs, **kwargs)
if nurbs.dim == 2:
return self.surface(nurbs, **kwargs)
if nurbs.dim == 3:
return self.volume(nurbs, **kwargs)
return None
# ----------
def __getattr__(self, attr):
return getattr(self.backend, attr)
_modules = {
'mpl': None,
'myv': None,
'nul': None,
}
_alias = {
'matplotlib' : 'mpl',
'mayavi' : 'myv',
'null' : 'nul',
'none' : 'nul',
}
_backend = None
def use(self, backend):
self.backend = backend
def set_backend(self, backend):
name = self._alias.get(backend, backend)
try:
module = self._modules[name]
except KeyError:
raise ValueError("unknown backend '%s'" % backend)
if module is None:
modname = 'igakit.plot_' + name
module = __import__(modname, fromlist=[None])
self._modules[name] = module
self._backend = module
def get_backend(self):
if self._backend is None:
try:
self.use('mayavi')
except ImportError:
self.use('matplotlib')
return self._backend
backend = property(get_backend, set_backend)
# ----------
# Module-level singleton plotter, with convenience aliases so callers
# can use the free functions directly (e.g. ``plot(nurbs)``).
plt = plotter = Plotter()
use = plotter.use
plot = plotter.plot
cplot = plotter.cplot
kplot = plotter.kplot
| [
"numpy.asarray",
"numpy.zeros",
"numpy.linspace",
"numpy.rollaxis",
"numpy.atleast_2d"
] | [((220, 249), 'numpy.asarray', 'np.asarray', (['points'], {'dtype': '"""d"""'}), "(points, dtype='d')\n", (230, 249), True, 'import numpy as np\n'), ((267, 288), 'numpy.atleast_2d', 'np.atleast_2d', (['points'], {}), '(points)\n', (280, 288), True, 'import numpy as np\n'), ((820, 849), 'numpy.asarray', 'np.asarray', (['points'], {'dtype': '"""d"""'}), "(points, dtype='d')\n", (830, 849), True, 'import numpy as np\n'), ((867, 888), 'numpy.atleast_2d', 'np.atleast_2d', (['points'], {}), '(points)\n', (880, 888), True, 'import numpy as np\n'), ((1176, 1206), 'numpy.asarray', 'np.asarray', (['vectors'], {'dtype': '"""d"""'}), "(vectors, dtype='d')\n", (1186, 1206), True, 'import numpy as np\n'), ((1225, 1247), 'numpy.atleast_2d', 'np.atleast_2d', (['vectors'], {}), '(vectors)\n', (1238, 1247), True, 'import numpy as np\n'), ((5953, 5993), 'numpy.linspace', 'np.linspace', (['U[p]', 'U[-p - 1]', 'resolution'], {}), '(U[p], U[-p - 1], resolution)\n', (5964, 5993), True, 'import numpy as np\n'), ((419, 452), 'numpy.zeros', 'np.zeros', (['(shape + (3,))'], {'dtype': '"""d"""'}), "(shape + (3,), dtype='d')\n", (427, 452), True, 'import numpy as np\n'), ((1023, 1056), 'numpy.zeros', 'np.zeros', (['(shape + (3,))'], {'dtype': '"""d"""'}), "(shape + (3,), dtype='d')\n", (1031, 1056), True, 'import numpy as np\n'), ((1384, 1417), 'numpy.zeros', 'np.zeros', (['(shape + (3,))'], {'dtype': '"""d"""'}), "(shape + (3,), dtype='d')\n", (1392, 1417), True, 'import numpy as np\n'), ((3787, 3823), 'numpy.linspace', 'np.linspace', (['u[0]', 'u[-1]', 'resolution'], {}), '(u[0], u[-1], resolution)\n', (3798, 3823), True, 'import numpy as np\n'), ((3922, 3946), 'numpy.rollaxis', 'np.rollaxis', (['C', 'axis', '(-1)'], {}), '(C, axis, -1)\n', (3933, 3946), True, 'import numpy as np\n'), ((7067, 7107), 'numpy.linspace', 'np.linspace', (['U[p]', 'U[-p - 1]', 'resolution'], {}), '(U[p], U[-p - 1], resolution)\n', (7078, 7107), True, 'import numpy as np\n'), ((4961, 5001), 'numpy.linspace', 
'np.linspace', (['U[p]', 'U[-p - 1]', 'resolution'], {}), '(U[p], U[-p - 1], resolution)\n', (4972, 5001), True, 'import numpy as np\n')] |
from collections.abc import Iterable
import re
import numpy as np
import quantum_keymap.config.default as default_conf
from quantum_keymap.util import list_concat
from quantum_keymap.util import load_config
class KeymapModel(object):
    """QUBO model of a keyboard-layout assignment problem.

    The state is a flat binary vector of length N*N indexed by
    (key position, character code); ``H_obj`` accumulates typing costs
    from sample text, while ``H_1hot`` / ``H_key_unique`` are penalty
    matrices enforcing a valid one-to-one assignment.
    """
    def __init__(self, config=None) -> None:
        # Start from the package defaults and overlay user overrides.
        self.config = load_config(default_conf)
        if config:
            self.config.update(config)
        # Map every key label (or each alias in an iterable of labels)
        # to its integer character code.
        self.key_to_code = {}
        for i, key in enumerate(self.config["KEY_LIST"]):
            if isinstance(key, Iterable):
                for item in key:
                    self.key_to_code[item] = i
            else:
                self.key_to_code[key] = i
        # Reverse lookup; for aliased keys the last alias wins.
        self.code_to_key = {v: k for k, v in self.key_to_code.items()}
        # N: number of key positions (one entry per element of HAND).
        self.N = np.array(self.config["HAND"]).size
        # Objective Hamiltonian, accumulated by update_weight().
        self.H_obj = np.zeros((self.N * self.N, self.N * self.N))
        self.H_1hot, self.const_1hot = self._create_H_1hot()
        self.H_key_unique, self.const_key_unique = self._create_H_key_unique()
        # Regex matching maximal runs of characters present on the keymap.
        chars = "".join([key.lower() for key in self.key_to_code.keys()])
        self.p = re.compile(f"[{chars}]+")
    def update_weight(self, text):
        """Accumulate typing costs for *text* into the objective matrix."""
        H_sub = np.zeros((self.N, self.N, self.N, self.N))
        text = text.lower()
        # Only score runs of characters that exist on the keymap.
        result = self.p.findall(text)
        position_cost = list_concat(self.config["POSITION_COST"])
        hand = list_concat(self.config["HAND"])
        finger = list_concat(self.config["FINGER"])
        consecutive_hand_cost = self.config["CONSECUTIVE_HAND_COST"]
        consecutive_finger_cost = self.config["CONSECUTIVE_FINGER_COST"]
        consecutive_key_cost = self.config["CONSECUTIVE_KEY_COST"]
        for string in result:
            # Per-character cost of the key position it is assigned to.
            for char_raw in string:
                char = self.key_to_code[char_raw]
                for key in range(self.N): # key position
                    H_sub[key, char][key, char] += position_cost[key]
            # Pairwise cost for consecutive characters.
            for pos in range(len(string) - 1):
                char1 = self.key_to_code[string[pos]]
                char2 = self.key_to_code[string[pos + 1]]
                for key1 in range(self.N):
                    for key2 in range(self.N):
                        # add hand cost (same hand used twice in a row)
                        if hand[key1] == hand[key2]:
                            H_sub[key1, char1][key2, char2] += consecutive_hand_cost
                        # add finger cost (same finger used twice in a row;
                        # same key repeated is priced separately)
                        if finger[key1] == finger[key2]:
                            if char1 == char2:
                                H_sub[key1, char1][
                                    key2, char2
                                ] += consecutive_key_cost
                            else:
                                H_sub[key1, char1][
                                    key2, char2
                                ] += consecutive_finger_cost
        H_sub = H_sub.reshape((self.N * self.N, self.N * self.N))
        self.H_obj += H_sub
    def _create_H_1hot(self):
        """Penalty matrix: each key position holds exactly one character.

        Returns (H, const) so that state @ H @ state + const == 0 iff
        every key row is one-hot.
        """
        H = np.zeros((self.N, self.N, self.N, self.N))
        for key in range(self.N):
            for char1 in range(self.N):
                H[key, char1][key, char1] = -1
                for char2 in range(char1 + 1, self.N):
                    H[key, char1][key, char2] = 2
        return H.reshape((self.N * self.N, self.N * self.N)), self.N
    def _create_H_key_unique(self):
        """Penalty matrix: each character appears on exactly one key.

        Symmetric to _create_H_1hot with the roles of key and character
        swapped.
        """
        H = np.zeros((self.N, self.N, self.N, self.N))
        for char in range(self.N):
            for key1 in range(self.N):
                H[key1, char][key1, char] = -1
                for key2 in range(key1 + 1, self.N):
                    H[key1, char][key2, char] = 2
        return H.reshape((self.N * self.N, self.N * self.N)), self.N
    def H(self, w_1hot, w_key_unique):
        """Return the total Hamiltonian and constant for the given penalty weights."""
        H = self.H_obj + w_1hot * self.H_1hot + w_key_unique * self.H_key_unique
        const = w_1hot * self.const_1hot + w_key_unique * self.const_key_unique
        return H, const
    def energy(self, state, w_1hot, w_key_unique):
        """Total energy (objective + weighted penalties) of a binary *state*."""
        H, const = self.H(w_1hot, w_key_unique)
        return state @ H @ state + const
    def cost(self, state):
        """Objective-only cost of *state* (no constraint penalties)."""
        return state @ self.H_obj @ state
    def _energy_1hot(self, state, weight):
        # Weighted one-hot constraint violation; zero iff satisfied.
        return weight * (state @ self.H_1hot @ state + self.const_1hot)
    def _energy_key_unique(self, state, weight):
        # Weighted key-uniqueness constraint violation; zero iff satisfied.
        return weight * (state @ self.H_key_unique @ state + self.const_key_unique)
    def validate(self, state):
        """True iff *state* satisfies both assignment constraints exactly."""
        return bool(self._energy_1hot(state, 1) == 0) and bool(
            self._energy_key_unique(state, 1) == 0
        )
    def qubo(self, w_1hot, w_key_unique):
        """Return the upper-triangular QUBO as a {(i, j): coeff} dict."""
        H, _ = self.H(w_1hot, w_key_unique)
        # Fold the symmetric matrix into upper-triangular form
        # (diagonal plus doubled off-diagonal terms).
        H = np.triu(H, k=1) + np.triu(H.T)
        qubo = {}
        size = len(H)
        for i in range(size):
            for j in range(i, size):
                if H[i][j] != 0:
                    qubo[(i, j)] = H[i][j]
        return qubo
    def keys_from_state(self, state):
        """Decode a one-hot solution *state* back into a key-label layout."""
        state_2d = state.reshape((-1, self.N))
        # Dot with [0..N) extracts the index of the hot bit per row.
        code = np.dot(state_2d, np.array(range(self.N))).astype(int)
        keys = np.array([self.code_to_key[c] for c in code])
        return keys.reshape(np.array(self.config["HAND"]).shape)
| [
"quantum_keymap.util.list_concat",
"numpy.triu",
"numpy.zeros",
"numpy.array",
"quantum_keymap.util.load_config",
"re.compile"
] | [((305, 330), 'quantum_keymap.util.load_config', 'load_config', (['default_conf'], {}), '(default_conf)\n', (316, 330), False, 'from quantum_keymap.util import load_config\n'), ((806, 850), 'numpy.zeros', 'np.zeros', (['(self.N * self.N, self.N * self.N)'], {}), '((self.N * self.N, self.N * self.N))\n', (814, 850), True, 'import numpy as np\n'), ((1083, 1108), 're.compile', 're.compile', (['f"""[{chars}]+"""'], {}), "(f'[{chars}]+')\n", (1093, 1108), False, 'import re\n'), ((1161, 1203), 'numpy.zeros', 'np.zeros', (['(self.N, self.N, self.N, self.N)'], {}), '((self.N, self.N, self.N, self.N))\n', (1169, 1203), True, 'import numpy as np\n'), ((1295, 1336), 'quantum_keymap.util.list_concat', 'list_concat', (["self.config['POSITION_COST']"], {}), "(self.config['POSITION_COST'])\n", (1306, 1336), False, 'from quantum_keymap.util import list_concat\n'), ((1352, 1384), 'quantum_keymap.util.list_concat', 'list_concat', (["self.config['HAND']"], {}), "(self.config['HAND'])\n", (1363, 1384), False, 'from quantum_keymap.util import list_concat\n'), ((1402, 1436), 'quantum_keymap.util.list_concat', 'list_concat', (["self.config['FINGER']"], {}), "(self.config['FINGER'])\n", (1413, 1436), False, 'from quantum_keymap.util import list_concat\n'), ((3059, 3101), 'numpy.zeros', 'np.zeros', (['(self.N, self.N, self.N, self.N)'], {}), '((self.N, self.N, self.N, self.N))\n', (3067, 3101), True, 'import numpy as np\n'), ((3447, 3489), 'numpy.zeros', 'np.zeros', (['(self.N, self.N, self.N, self.N)'], {}), '((self.N, self.N, self.N, self.N))\n', (3455, 3489), True, 'import numpy as np\n'), ((5131, 5176), 'numpy.array', 'np.array', (['[self.code_to_key[c] for c in code]'], {}), '([self.code_to_key[c] for c in code])\n', (5139, 5176), True, 'import numpy as np\n'), ((750, 779), 'numpy.array', 'np.array', (["self.config['HAND']"], {}), "(self.config['HAND'])\n", (758, 779), True, 'import numpy as np\n'), ((4726, 4741), 'numpy.triu', 'np.triu', (['H'], {'k': '(1)'}), '(H, k=1)\n', 
(4733, 4741), True, 'import numpy as np\n'), ((4744, 4756), 'numpy.triu', 'np.triu', (['H.T'], {}), '(H.T)\n', (4751, 4756), True, 'import numpy as np\n'), ((5205, 5234), 'numpy.array', 'np.array', (["self.config['HAND']"], {}), "(self.config['HAND'])\n", (5213, 5234), True, 'import numpy as np\n')] |
from typing import List
import numpy as np
import pytest
from numpy import float64
from modes.mode_solver_full import mode_solver_full
def group_index(
    wavelength: float = 1.55,
    wavelength_step: float = 0.01,
    overwrite: bool = False,
    n_modes: int = 1,
    **wg_kwargs,
) -> List[float64]:
    r"""
    Solve for the group index, :math:`n_g`, of a structure at a particular
    wavelength.

    :math:`n_g = n_\mathrm{eff} - \lambda \, dn_\mathrm{eff}/d\lambda`,
    with the derivative approximated by a central difference over
    ``wavelength_step``.

    Args:
        wavelength (float): Nominal wavelength. Default is 1.55.
        wavelength_step (float): The step to take below and
            above the nominal wavelength. This is used for
            approximating the gradient of :math:`n_\mathrm{eff}`
            at the nominal wavelength. Default is 0.01.
        overwrite (bool): Re-run the mode solver even if cached
            results exist.
        n_modes (int): Number of modes to solve for.
        wg_kwargs: Waveguide definition forwarded to
            ``mode_solver_full`` (x_step, y_step, thickness, width,
            slab_thickness, sub_thickness, sub_width, clad_thickness,
            n_sub, n_wg, n_clads, angle, ...).

    Returns:
        List of the group indices found for each mode.
    """
    # Three solves: at the nominal wavelength and one step either side.
    msc = mode_solver_full(
        wavelength=wavelength, overwrite=overwrite, n_modes=n_modes, **wg_kwargs
    )
    msf = mode_solver_full(
        wavelength=wavelength + wavelength_step,
        overwrite=overwrite,
        n_modes=n_modes,
        **wg_kwargs,
    )
    msb = mode_solver_full(
        wavelength=wavelength - wavelength_step,
        overwrite=overwrite,
        n_modes=n_modes,
        **wg_kwargs,
    )
    n_ctrs = np.real(msc.n_effs)
    n_bcks = np.real(msb.n_effs)
    n_frws = np.real(msf.n_effs)

    # Central difference: n_g = n_eff - lambda * d(n_eff)/d(lambda).
    # (The previous version initialized n_gs twice and appended in a loop.)
    n_gs = [
        n_ctr - wavelength * (n_frw - n_bck) / (2 * wavelength_step)
        for n_ctr, n_bck, n_frw in zip(n_ctrs, n_bcks, n_frws)
    ]

    filename = (
        msc._modes_directory / f"_ng_{msc.name}_{wavelength}_{wavelength_step}.dat"
    )
    with open(filename, "w") as fs:
        fs.write("# Mode idx, Group index\n")
        for idx, n_g in enumerate(n_gs):
            fs.write(f"{idx},{n_g.real:.3f}\n")
    return n_gs
@pytest.mark.parametrize("overwrite", [True, False])
def test_sweep(overwrite: bool) -> None:
ng = group_index(n_modes=2)
print(ng)
assert np.isclose(
ng, np.array([4.123932892727449, 3.9318152179618666]), atol=0.1
).all()
if __name__ == "__main__":
# import matplotlib.pylab as plt
print(group_index(g=2))
# test_sweep(overwrite=False)
# plt.show()
| [
"numpy.array",
"modes.mode_solver_full.mode_solver_full",
"numpy.real",
"pytest.mark.parametrize",
"numpy.round"
] | [((2173, 2224), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""overwrite"""', '[True, False]'], {}), "('overwrite', [True, False])\n", (2196, 2224), False, 'import pytest\n'), ((1178, 1273), 'modes.mode_solver_full.mode_solver_full', 'mode_solver_full', ([], {'wavelength': 'wavelength', 'overwrite': 'overwrite', 'n_modes': 'n_modes'}), '(wavelength=wavelength, overwrite=overwrite, n_modes=\n n_modes, **wg_kwargs)\n', (1194, 1273), False, 'from modes.mode_solver_full import mode_solver_full\n'), ((1293, 1406), 'modes.mode_solver_full.mode_solver_full', 'mode_solver_full', ([], {'wavelength': '(wavelength + wavelength_step)', 'overwrite': 'overwrite', 'n_modes': 'n_modes'}), '(wavelength=wavelength + wavelength_step, overwrite=\n overwrite, n_modes=n_modes, **wg_kwargs)\n', (1309, 1406), False, 'from modes.mode_solver_full import mode_solver_full\n'), ((1451, 1564), 'modes.mode_solver_full.mode_solver_full', 'mode_solver_full', ([], {'wavelength': '(wavelength - wavelength_step)', 'overwrite': 'overwrite', 'n_modes': 'n_modes'}), '(wavelength=wavelength - wavelength_step, overwrite=\n overwrite, n_modes=n_modes, **wg_kwargs)\n', (1467, 1564), False, 'from modes.mode_solver_full import mode_solver_full\n'), ((1613, 1632), 'numpy.real', 'np.real', (['msc.n_effs'], {}), '(msc.n_effs)\n', (1620, 1632), True, 'import numpy as np\n'), ((1646, 1665), 'numpy.real', 'np.real', (['msb.n_effs'], {}), '(msb.n_effs)\n', (1653, 1665), True, 'import numpy as np\n'), ((1679, 1698), 'numpy.real', 'np.real', (['msf.n_effs'], {}), '(msf.n_effs)\n', (1686, 1698), True, 'import numpy as np\n'), ((2347, 2396), 'numpy.array', 'np.array', (['[4.123932892727449, 3.9318152179618666]'], {}), '([4.123932892727449, 3.9318152179618666])\n', (2355, 2396), True, 'import numpy as np\n'), ((2129, 2150), 'numpy.round', 'np.round', (['n_g.real', '(3)'], {}), '(n_g.real, 3)\n', (2137, 2150), True, 'import numpy as np\n')] |
"""Utility functions for working with numpy arrays."""
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from numpy.typing import ArrayLike
def message_bit(
    message: str,
    delimiter: str,
    bits: int,
) -> tuple[ArrayLike, int]:
    """Return a message turned into bits in an array.

    The message plus delimiter is UTF-8 encoded, unpacked into bits,
    zero-padded and reshaped into rows of ``bits`` columns.

    :param message: Main message to encode
    :param delimiter: Message end identifier
    :param bits: Amount of bits per pixel
    """
    payload: bytes = (message + delimiter).encode("utf-8")
    bit_arr = np.unpackbits(np.frombuffer(payload, dtype=np.uint8))
    rows = -(-bit_arr.size // bits)  # ceil division
    # Zero-pad so the bit stream fills the last row completely.
    padded = np.zeros(rows * bits, dtype=np.uint8)
    padded[: bit_arr.size] = bit_arr
    return padded.reshape(rows, bits), rows
def edit_column(
    base: ArrayLike,
    new: ArrayLike,
    column_num: int,
    start_from_end: bool = False,
) -> ArrayLike:
    """Overwrite a band of columns of *base* with *new*, in place.

    :param base: Main array to edit
    :param new: New data to put into the main array
    :param column_num: Number of columns of the main base array to edit
    :param start_from_end: If indexing of cols should start from the last column
        rather than from first one, defaults to False
    """
    cols = slice(-column_num, None) if start_from_end else slice(None, column_num)
    base[:, cols] = new  # type: ignore
    return base
def pack_and_concatenate(
    unpacked: ArrayLike,
    base: ArrayLike,
    final_shape: tuple,
) -> ArrayLike:
    """Pack bits of the first array, prepend them to *base*, and reshape.

    :param unpacked: The unpacked bit array with the encoded data
    :param base: Main array with the pixel data
    :param final_shape: Shape of the original image array
    """
    head = np.packbits(unpacked)
    combined = np.concatenate((head, base))
    return combined.reshape(final_shape)
| [
"numpy.ceil",
"numpy.frombuffer",
"numpy.packbits",
"numpy.unpackbits",
"numpy.concatenate"
] | [((571, 610), 'numpy.frombuffer', 'np.frombuffer', (['byte_msg'], {'dtype': 'np.uint8'}), '(byte_msg, dtype=np.uint8)\n', (584, 610), True, 'import numpy as np\n'), ((628, 650), 'numpy.unpackbits', 'np.unpackbits', (['msg_arr'], {}), '(msg_arr)\n', (641, 650), True, 'import numpy as np\n'), ((1936, 1957), 'numpy.packbits', 'np.packbits', (['unpacked'], {}), '(unpacked)\n', (1947, 1957), True, 'import numpy as np\n'), ((1981, 2015), 'numpy.concatenate', 'np.concatenate', (['(packed_arr, base)'], {}), '((packed_arr, base))\n', (1995, 2015), True, 'import numpy as np\n'), ((739, 770), 'numpy.ceil', 'np.ceil', (['(lsbits_arr.size / bits)'], {}), '(lsbits_arr.size / bits)\n', (746, 770), True, 'import numpy as np\n')] |
from sys import platform
import numpy as np
import matplotlib.pyplot as plt
import random
'''
Authors:
<NAME>
<NAME>
<NAME>
Date: 5th February 2021
'''
class Agent_holonomic:
    '''
    Omnidirectional (holonomic) robot with three omni wheels spaced
    120 degrees apart.  Positions are (x, y, theta_degrees) triples.
    '''
    def __init__(self, radius_bot):
        # Distance from platform center to each wheel.
        self.radius_bot = radius_bot
        self.type = "Omnidirectional robot"
        # Angular offsets of wheels 2 and 3 from wheel 1 (radians).
        self.alpha_2 = np.deg2rad(120)
        self.alpha_3 = np.deg2rad(240)
        self.wheel_radius = 1
        self.num_wheels = 3
    def getFuturePositionAfter_dt(self, velocity, position_old, direction_vector, dt):
        '''
        Integrate the platform forward by one time step dt, moving at
        *velocity* along *direction_vector* with no rotation (omega = 0).
        Returns (new_position, wheel_velocities, platform_velocity,
        wheel_positions).
        '''
        v_x = velocity * direction_vector[0]
        v_y = velocity * direction_vector[1]
        x_new = v_x * dt + position_old[0]
        y_new = v_y * dt + position_old[1]
        # Heading is unchanged: holonomic motion without rotation.
        theta_new = position_old[2]
        omega = 0
        # Get outputs
        wheel_velocity_vector = self.getWheelVelocity(v_x, v_y, omega, position_old).squeeze()
        position_new = [x_new, y_new, theta_new]
        platform_velocity = np.array([v_x, v_y])
        wheel_position = self.getWheelPosition(position_new)
        return position_new, wheel_velocity_vector, platform_velocity, wheel_position
    def getWheelVelocity(self, v_x, v_y, omega, position_old):
        '''
        Map platform velocity (v_x, v_y, omega) to the three wheel speeds
        via the omnidirectional-drive inverse kinematics.
        '''
        theta_old = np.deg2rad(position_old[2])
        velocity_vector= np.array([[v_x], [v_y], [omega]])
        # NOTE(review): this "local to global" matrix only scales by
        # cos(theta) on the diagonal; a planar rotation would normally
        # also carry +/- sin(theta) off-diagonal terms -- confirm intended.
        local_T_global = np.array([[np.cos(theta_old), 0, 0],
                                   [ 0, np.cos(theta_old), 0],
                                   [ 0, 0, 1]])
        # Local to wheel transform: one row per wheel, oriented at
        # theta, theta + 120deg, theta + 240deg.
        wheel_T_local = np.array([[ -np.sin(theta_old), np.cos(theta_old), self.radius_bot],
                        [ -np.sin(theta_old + self.alpha_2), np.cos(theta_old + self.alpha_2), self.radius_bot],
                        [ -np.sin(theta_old + self.alpha_3), np.cos(theta_old + self.alpha_3), self.radius_bot]])
        # Compose the transforms to get the per-wheel velocities.
        wheel_velocity_vector = wheel_T_local @ local_T_global @ velocity_vector
        return wheel_velocity_vector
    def getWheelPosition(self, position):
        '''
        Return the world-frame pose of each wheel given the platform pose
        (x, y, theta_degrees), as a dict of (x, y, theta_degrees) tuples.
        '''
        theta = np.deg2rad(position[2])
        x_1 = position[0] + self.radius_bot * np.cos(theta)
        y_1 = position[1] + self.radius_bot * np.sin(theta)
        x_2 = position[0] + self.radius_bot * np.cos(theta + self.alpha_2)
        y_2 = position[1] + self.radius_bot * np.sin(theta + self.alpha_2)
        x_3 = position[0] + self.radius_bot * np.cos(theta + self.alpha_3)
        y_3 = position[1] + self.radius_bot * np.sin(theta + self.alpha_3)
        theta_wheel = np.rad2deg(theta)
        return {"wheel_1": (x_1, y_1, theta_wheel), "wheel_2": (x_2, y_2, theta_wheel + np.rad2deg(self.alpha_2)), "wheel_3": (x_3, y_3, theta_wheel + np.rad2deg(self.alpha_3))}
class Agent_non_holonomic:
    '''
    Differential drive (non-holonomic) robot with two wheels.
    Positions are (x, y, theta_degrees) triples; per-step turning is
    limited to +/- max_rotation degrees.
    '''
    def __init__(self, radius_bot, step_size = 1):
        # Distance from platform center to each wheel (half axle length).
        self.radius_bot = radius_bot
        self.step_size = step_size
        self.type = "Differential drive"
        # Maximum heading change (degrees) allowed per step.
        self.max_rotation = 15
        self.num_wheels = 2
    def getFuturePositionAfter_dt(self, velocity, position_old, direction_vector, dt):
        '''
        Integrate the platform forward by dt, steering toward
        *direction_vector* while respecting the per-step rotation limit.
        Returns (new_position, wheel_velocities, platform_velocity,
        wheel_positions).
        '''
        # Desired heading from the direction vector (radians).
        theta_pts = np.arccos(1 * direction_vector[0] / np.sqrt(direction_vector[0]**2 + direction_vector[1]**2))
        degree_error = (np.rad2deg(theta_pts) - position_old[2])
        # Constrain the robot's orientation change to max_rotation.
        if np.abs(degree_error) < self.max_rotation:
            omega = degree_error
        else:
            omega = np.sign(degree_error) * self.max_rotation
        v_x = velocity * np.cos(np.deg2rad(omega*dt + position_old[2]))
        v_y = velocity * np.sin(np.deg2rad(omega*dt + position_old[2]))
        # Positions are snapped to integer grid coordinates.
        x_new = np.round(v_x * dt + position_old[0]).astype(int)
        y_new = np.round(v_y * dt + position_old[1]).astype(int)
        theta_new = position_old[2] + omega*dt
        # Outputs
        wheel_velocity_vector = self.getWheelVelocity(velocity, v_x, v_y, omega, position_old).squeeze()
        platform_velocity = np.array([v_x, v_y])
        position_new = [x_new, y_new, theta_new]
        wheel_position = self.getWheelPosition(position_new)
        return position_new, wheel_velocity_vector, platform_velocity, wheel_position
    def getWheelVelocity(self, velocity, v_x, v_y, omega, position_old):
        '''
        Compute the left/right wheel speeds for the commanded linear and
        angular velocity of a differential drive.

        NOTE(review): with R = velocity/omega, the usual convention is
        v_outer = omega*(R + r) and v_inner = omega*(R - r); the labels
        v_l/v_r here depend on the sign convention for omega -- confirm.
        '''
        if omega != 0:
            # If angular velocity is not zero, ICC (Instantaneous Center of Curvature) is not infinity
            R = velocity / omega
            v_l = omega * (R + self.radius_bot)
            v_r = omega * (R - self.radius_bot)
        else:
            # Straight-line motion: both wheels at the platform speed.
            v_l = velocity
            v_r = velocity
        wheel_velocity_vector = np.array([[v_l], [v_r]])
        return wheel_velocity_vector
    def getWheelPosition(self, position):
        '''
        Return the world-frame pose of each wheel given the platform pose
        (x, y, theta_degrees), as a dict of (x, y, theta_degrees) tuples.
        '''
        theta = np.deg2rad(position[2])
        x_1 = position[0] + (self.radius_bot * np.sin(theta))
        y_1 = position[1] + (self.radius_bot * np.cos(theta))
        # Second wheel sits diametrically opposite the first.
        x_2 = position[0] + (self.radius_bot * np.sin(theta + np.pi))
        y_2 = position[1] + (self.radius_bot * np.cos(theta + np.pi))
        theta_wheel = np.rad2deg(theta)
        return {"wheel_1": (x_1, y_1, theta_wheel), "wheel_2": (x_2, y_2, theta_wheel)}
if __name__=="__main__":
agent = Agent_non_holonomic()
position_new = agent.getFuturePositionAfter_dt(2, (0, 0, -35), (1, 0), 1)
print(position_new)
| [
"numpy.abs",
"numpy.deg2rad",
"numpy.rad2deg",
"numpy.sin",
"numpy.array",
"numpy.cos",
"numpy.sign",
"numpy.round",
"numpy.sqrt"
] | [((376, 391), 'numpy.deg2rad', 'np.deg2rad', (['(120)'], {}), '(120)\n', (386, 391), True, 'import numpy as np\n'), ((409, 424), 'numpy.deg2rad', 'np.deg2rad', (['(240)'], {}), '(240)\n', (419, 424), True, 'import numpy as np\n'), ((992, 1012), 'numpy.array', 'np.array', (['[v_x, v_y]'], {}), '([v_x, v_y])\n', (1000, 1012), True, 'import numpy as np\n'), ((1300, 1327), 'numpy.deg2rad', 'np.deg2rad', (['position_old[2]'], {}), '(position_old[2])\n', (1310, 1327), True, 'import numpy as np\n'), ((1347, 1380), 'numpy.array', 'np.array', (['[[v_x], [v_y], [omega]]'], {}), '([[v_x], [v_y], [omega]])\n', (1355, 1380), True, 'import numpy as np\n'), ((2260, 2283), 'numpy.deg2rad', 'np.deg2rad', (['position[2]'], {}), '(position[2])\n', (2270, 2283), True, 'import numpy as np\n'), ((2693, 2710), 'numpy.rad2deg', 'np.rad2deg', (['theta'], {}), '(theta)\n', (2703, 2710), True, 'import numpy as np\n'), ((4120, 4140), 'numpy.array', 'np.array', (['[v_x, v_y]'], {}), '([v_x, v_y])\n', (4128, 4140), True, 'import numpy as np\n'), ((4745, 4769), 'numpy.array', 'np.array', (['[[v_l], [v_r]]'], {}), '([[v_l], [v_r]])\n', (4753, 4769), True, 'import numpy as np\n'), ((4912, 4935), 'numpy.deg2rad', 'np.deg2rad', (['position[2]'], {}), '(position[2])\n', (4922, 4935), True, 'import numpy as np\n'), ((5226, 5243), 'numpy.rad2deg', 'np.rad2deg', (['theta'], {}), '(theta)\n', (5236, 5243), True, 'import numpy as np\n'), ((3470, 3491), 'numpy.rad2deg', 'np.rad2deg', (['theta_pts'], {}), '(theta_pts)\n', (3480, 3491), True, 'import numpy as np\n'), ((3557, 3577), 'numpy.abs', 'np.abs', (['degree_error'], {}), '(degree_error)\n', (3563, 3577), True, 'import numpy as np\n'), ((2324, 2337), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2330, 2337), True, 'import numpy as np\n'), ((2378, 2391), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2384, 2391), True, 'import numpy as np\n'), ((2435, 2463), 'numpy.cos', 'np.cos', (['(theta + self.alpha_2)'], {}), '(theta + 
self.alpha_2)\n', (2441, 2463), True, 'import numpy as np\n'), ((2504, 2532), 'numpy.sin', 'np.sin', (['(theta + self.alpha_2)'], {}), '(theta + self.alpha_2)\n', (2510, 2532), True, 'import numpy as np\n'), ((2576, 2604), 'numpy.cos', 'np.cos', (['(theta + self.alpha_3)'], {}), '(theta + self.alpha_3)\n', (2582, 2604), True, 'import numpy as np\n'), ((2645, 2673), 'numpy.sin', 'np.sin', (['(theta + self.alpha_3)'], {}), '(theta + self.alpha_3)\n', (2651, 2673), True, 'import numpy as np\n'), ((3394, 3454), 'numpy.sqrt', 'np.sqrt', (['(direction_vector[0] ** 2 + direction_vector[1] ** 2)'], {}), '(direction_vector[0] ** 2 + direction_vector[1] ** 2)\n', (3401, 3454), True, 'import numpy as np\n'), ((3642, 3663), 'numpy.sign', 'np.sign', (['degree_error'], {}), '(degree_error)\n', (3649, 3663), True, 'import numpy as np\n'), ((3712, 3752), 'numpy.deg2rad', 'np.deg2rad', (['(omega * dt + position_old[2])'], {}), '(omega * dt + position_old[2])\n', (3722, 3752), True, 'import numpy as np\n'), ((3778, 3818), 'numpy.deg2rad', 'np.deg2rad', (['(omega * dt + position_old[2])'], {}), '(omega * dt + position_old[2])\n', (3788, 3818), True, 'import numpy as np\n'), ((3831, 3867), 'numpy.round', 'np.round', (['(v_x * dt + position_old[0])'], {}), '(v_x * dt + position_old[0])\n', (3839, 3867), True, 'import numpy as np\n'), ((3890, 3926), 'numpy.round', 'np.round', (['(v_y * dt + position_old[1])'], {}), '(v_y * dt + position_old[1])\n', (3898, 3926), True, 'import numpy as np\n'), ((4977, 4990), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4983, 4990), True, 'import numpy as np\n'), ((5033, 5046), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5039, 5046), True, 'import numpy as np\n'), ((5120, 5141), 'numpy.sin', 'np.sin', (['(theta + np.pi)'], {}), '(theta + np.pi)\n', (5126, 5141), True, 'import numpy as np\n'), ((5184, 5205), 'numpy.cos', 'np.cos', (['(theta + np.pi)'], {}), '(theta + np.pi)\n', (5190, 5205), True, 'import numpy as np\n'), ((1444, 1461), 
'numpy.cos', 'np.cos', (['theta_old'], {}), '(theta_old)\n', (1450, 1461), True, 'import numpy as np\n'), ((1519, 1536), 'numpy.cos', 'np.cos', (['theta_old'], {}), '(theta_old)\n', (1525, 1536), True, 'import numpy as np\n'), ((1701, 1718), 'numpy.cos', 'np.cos', (['theta_old'], {}), '(theta_old)\n', (1707, 1718), True, 'import numpy as np\n'), ((1785, 1817), 'numpy.cos', 'np.cos', (['(theta_old + self.alpha_2)'], {}), '(theta_old + self.alpha_2)\n', (1791, 1817), True, 'import numpy as np\n'), ((1884, 1916), 'numpy.cos', 'np.cos', (['(theta_old + self.alpha_3)'], {}), '(theta_old + self.alpha_3)\n', (1890, 1916), True, 'import numpy as np\n'), ((2793, 2817), 'numpy.rad2deg', 'np.rad2deg', (['self.alpha_2'], {}), '(self.alpha_2)\n', (2803, 2817), True, 'import numpy as np\n'), ((2856, 2880), 'numpy.rad2deg', 'np.rad2deg', (['self.alpha_3'], {}), '(self.alpha_3)\n', (2866, 2880), True, 'import numpy as np\n'), ((1676, 1693), 'numpy.sin', 'np.sin', (['theta_old'], {}), '(theta_old)\n', (1682, 1693), True, 'import numpy as np\n'), ((1751, 1783), 'numpy.sin', 'np.sin', (['(theta_old + self.alpha_2)'], {}), '(theta_old + self.alpha_2)\n', (1757, 1783), True, 'import numpy as np\n'), ((1850, 1882), 'numpy.sin', 'np.sin', (['(theta_old + self.alpha_3)'], {}), '(theta_old + self.alpha_3)\n', (1856, 1882), True, 'import numpy as np\n')] |
"""
Tests for ivp_4_ode.py
"""
from nma.ivp_4_ode import runge_kutta_o4
from numpy.testing import assert_almost_equal
def test_runge_kutta_o4():
    """
    This test is Example 3 in chapter 5 of the textbook.
    """
    # setup: y' = y - t^2 + 1 on [0, 2], y(0) = 0.5, 10 steps
    rhs = lambda t, y: y - t ** 2 + 1
    left, right = 0, 2
    steps = 10
    y_initial = 0.5
    # exercise
    approx, _ = runge_kutta_o4(rhs, left, right, steps, y_initial)
    expected = 5.305363000692653  # @ t = 2
    # verify
    assert_almost_equal(approx, expected, 6)
    # teardown - not needed
| [
"numpy.testing.assert_almost_equal",
"nma.ivp_4_ode.runge_kutta_o4"
] | [((352, 389), 'nma.ivp_4_ode.runge_kutta_o4', 'runge_kutta_o4', (['f', 'a', 'b', 'N', 'y_initial'], {}), '(f, a, b, N, y_initial)\n', (366, 389), False, 'from nma.ivp_4_ode import runge_kutta_o4\n'), ((450, 488), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result', 'actual', '(6)'], {}), '(result, actual, 6)\n', (469, 488), False, 'from numpy.testing import assert_almost_equal\n')] |
import emcee
import triangle
import scipy as sp
import numpy as np
from from_fits import create_uvdata_from_fits_file
from components import CGComponent
from model import Model, CCModel
from stats import LnPost
if __name__ == '__main__':
    # Self-calibrated uv-data and the CLEAN map of blazar 1633+382 (L-band).
    uv_fname = '1633+382.l22.2010_05_21.uvf'
    map_fname = '1633+382.l22.2010_05_21.icn.fits'
    uvdata = create_uvdata_from_fits_file(uv_fname)
    # Create several components
    # Stage 1: fit a single circular gaussian (flux, y, x, bmaj).
    cg1 = CGComponent(1.0, 0.0, 0.0, 1.)
    # Uniform priors: flux in [0, 3] Jy, major axis in [0, 10] mas.
    cg1.add_prior(flux=(sp.stats.uniform.logpdf, [0., 3.], dict(),),
                  bmaj=(sp.stats.uniform.logpdf, [0, 10.], dict(),))
    # Create model
    mdl1 = Model(stokes='I')
    # Add components to model
    mdl1.add_component(cg1)
    # Create posterior for data & model
    lnpost = LnPost(uvdata, mdl1)
    ndim = mdl1.size
    nwalkers = 50
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost)
    p_std1 = [0.1, 1., 1., 0.1]
    p0 = emcee.utils.sample_ball(mdl1.p, p_std1, size=nwalkers)
    # 100 burn-in steps, then 300 production steps.
    pos, prob, state = sampler.run_mcmc(p0, 100)
    sampler.reset()
    sampler.run_mcmc(pos, 300)
    # NOTE(review): per-parameter maximum of the thinned chain, not a true
    # MAP sample — confirm this is the intended point estimate.
    p_map = np.max(sampler.flatchain[::10, :], axis=0)
    # Overplot data and model
    mdl = Model(stokes='I')
    # cg = CGComponent(1.441, 0.76, 0.65, 3.725)
    cg = CGComponent(*p_map)
    mdl.add_component(cg)
    uvdata.uvplot(stokes='I')
    mdl.uvplot(uv=uvdata.uv)
    fig = triangle.corner(sampler.flatchain[::10, :4],
                          labels=["$flux$", "$y$", "$x$", "$maj$"])
    # Now fitting two components
    # Stage 2: previous best-fit component plus a second gaussian.
    cg1 = CGComponent(*p_map)
    cg2 = CGComponent(0.5, 0.0, 0.0, 2.0)
    cg1.add_prior(flux=(sp.stats.uniform.logpdf, [0., 3.], dict(),),
                  bmaj=(sp.stats.uniform.logpdf, [0, 10.], dict(),))
    cg2.add_prior(flux=(sp.stats.uniform.logpdf, [0., 1.5], dict(),),
                  bmaj=(sp.stats.uniform.logpdf, [0, 20.], dict(),))
    # Create model
    mdl2 = Model(stokes='I')
    # Add components to model
    mdl2.add_component(cg1)
    mdl2.add_component(cg2)
    # Create posterior for data & model
    lnpost = LnPost(uvdata, mdl2)
    ndim = mdl2.size
    nwalkers = 50
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost)
    p_std1 = [0.1, 0.3, 0.3, 0.1]
    p_std2 = [0.1, 1., 1., 0.1]
    p0 = emcee.utils.sample_ball(mdl2.p, p_std1 + p_std2, size=nwalkers)
    pos, prob, state = sampler.run_mcmc(p0, 100)
    sampler.reset()
    sampler.run_mcmc(pos, 300)
    # Overplot data and model
    p_map = np.max(sampler.flatchain[::10, :], axis=0)
    mdl = Model(stokes='I')
    # Hand-picked two-component fit used for plotting below.
    cg1 = CGComponent(1.4, -0.4, 0.1, 1.25)
    cg2 = CGComponent(0.33, 3.3, -0.3, 1.55)
    mdl.add_components(cg1, cg2)
    uvdata.uvplot(stokes='I')
    mdl.uvplot(uv=uvdata.uv)
    fig = triangle.corner(sampler.flatchain[::10, :4],
                          labels=["$flux$", "$y$", "$x$", "$maj$"])
    mdl_image = mdl.make_image(map_fname)
    mdl_image.plot(min_rel_level=1.)
    # Plot cc-map
    ccmodel = CCModel()
    ccmodel.add_cc_from_fits(map_fname)
    ccimage = ccmodel.make_image(map_fname)
    ccimage.plot(min_rel_level=0.25)
    # Add third component
    cg1 = CGComponent(1.4, -0.4, 0.1, 1.25)
    cg2 = CGComponent(0.33, 3.3, -0.3, 1.55)
    cg3 = CGComponent(0.2, -10.0, 0.0, 2.0)
    cg1.add_prior(flux=(sp.stats.uniform.logpdf, [0., 3.], dict(),),
                  bmaj=(sp.stats.uniform.logpdf, [0, 3.], dict(),))
    cg2.add_prior(flux=(sp.stats.uniform.logpdf, [0., 0.5], dict(),),
                  bmaj=(sp.stats.uniform.logpdf, [0, 5.], dict(),))
    cg3.add_prior(flux=(sp.stats.uniform.logpdf, [0., 0.5], dict(),),
                  bmaj=(sp.stats.uniform.logpdf, [0, 5.], dict(),))
    # Create model
    mdl3 = Model(stokes='I')
    # Add components to model
    mdl3.add_components(cg1, cg2, cg3)
    # Create posterior for data & model
    lnpost = LnPost(uvdata, mdl3)
    ndim = mdl3.size
    nwalkers = 50
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost)
    p_std1 = [0.1, 0.3, 0.3, 0.1]
    p_std2 = [0.1, 1., 1., 0.1]
    p_std3 = [0.1, 1., 1., 0.1]
    p0 = emcee.utils.sample_ball(mdl3.p, p_std1 + p_std2 + p_std3,
                                 size=nwalkers)
    pos, prob, state = sampler.run_mcmc(p0, 100)
    sampler.reset()
    sampler.run_mcmc(pos, 300)
    # Check results
    fig = triangle.corner(sampler.flatchain[::10, :4],
                          labels=["$flux$", "$y$", "$x$", "$maj$"])
    mdl = Model(stokes='I')
    cg1 = CGComponent(1.23, -0.51, 0.21, 0.88)
    cg2 = CGComponent(0.42, 2.9, -0.6, 1.55)
    cg3 = CGComponent(0.2, -8.0, -2.8, 1.98)
    mdl.add_components(cg1, cg2, cg3)
    uvdata.uvplot(stokes='I')
    mdl.uvplot(uv=uvdata.uv)
    mdl_image = mdl.make_image(map_fname)
    mdl_image.plot(min_rel_level=1.)
    # Plot cc-map
    ccmodel = CCModel()
    ccmodel.add_cc_from_fits(map_fname)
    ccimage = ccmodel.make_image(map_fname)
    ccimage.plot(min_rel_level=0.025)
    # Add fourth component
    cg1 = CGComponent(1.23, -0.51, 0.21, 0.88)
    cg2 = CGComponent(0.42, 2.9, -0.6, 1.55)
    cg3 = CGComponent(0.2, -8.0, -2.8, 1.98)
    cg4 = CGComponent(0.2, -15.0, 0.0, 2.0)
    cg1.add_prior(flux=(sp.stats.uniform.logpdf, [0., 3.], dict(),),
                  bmaj=(sp.stats.uniform.logpdf, [0, 3.], dict(),))
    cg2.add_prior(flux=(sp.stats.uniform.logpdf, [0., 0.5], dict(),),
                  bmaj=(sp.stats.uniform.logpdf, [0, 5.], dict(),))
    cg3.add_prior(flux=(sp.stats.uniform.logpdf, [0., 0.5], dict(),),
                  bmaj=(sp.stats.uniform.logpdf, [0, 5.], dict(),))
    cg4.add_prior(flux=(sp.stats.uniform.logpdf, [0., 0.5], dict(),),
                  bmaj=(sp.stats.uniform.logpdf, [0, 9.], dict(),))
    # Create model
    mdl4 = Model(stokes='I')
    # Add components to model
    mdl4.add_components(cg1, cg2, cg3, cg4)
    # Create posterior for data & model
    lnpost = LnPost(uvdata, mdl4)
    ndim = mdl4.size
    nwalkers = 50
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost)
    p_std1 = [0.1, 0.3, 0.3, 0.1]
    p_std2 = [0.1, 1., 1., 0.1]
    p_std3 = [0.1, 1., 1., 0.1]
    p_std4 = [0.1, 1., 1., 0.1]
    p0 = emcee.utils.sample_ball(mdl4.p, p_std1 + p_std2 + p_std3 + p_std4,
                                 size=nwalkers)
    pos, prob, state = sampler.run_mcmc(p0, 100)
    sampler.reset()
    sampler.run_mcmc(pos, 300)
    # Check results
    fig = triangle.corner(sampler.flatchain[::10, :4],
                          labels=["$flux$", "$y$", "$x$", "$maj$"])
    mdl = Model(stokes='I')
    cg1 = CGComponent(1.16, -0.61, 0.26, 0.72)
    cg2 = CGComponent(0.48, 2.9, -0.6, 1.25)
    cg3 = CGComponent(0.03, -7.4, -1.2, 1.95)
    cg4 = CGComponent(0.005, -7.5, -0, 2.15)
    mdl.add_components(cg1, cg2, cg3, cg4)
    uvdata.uvplot(stokes='I')
    mdl.uvplot(uv=uvdata.uv)
    mdl_image = mdl.make_image(map_fname)
    mdl_image.plot(min_rel_level=0.001)
    # Plot cc-map
    ccmodel = CCModel()
    ccmodel.add_cc_from_fits(map_fname)
    ccimage = ccmodel.make_image(map_fname)
    ccimage.plot(min_rel_level=0.025)
    # Add fifth component
    cg1 = CGComponent(1.0, -0.51, 0.21, 0.88)
    cg2 = CGComponent(0.75, 2.9, -0.6, 1.55)
    cg3 = CGComponent(0.2, -8.0, -2.8, 1.98)
    cg4 = CGComponent(0.05, -20.0, 0.0, 2.0)
    cg5 = CGComponent(0.01, -75.0, 100.0, 25.0)
    cg1.add_prior(flux=(sp.stats.uniform.logpdf, [0., 3.], dict(),),
                  bmaj=(sp.stats.uniform.logpdf, [0, 3.], dict(),))
    cg2.add_prior(flux=(sp.stats.uniform.logpdf, [0., 3.], dict(),),
                  bmaj=(sp.stats.uniform.logpdf, [0, 5.], dict(),))
    cg3.add_prior(flux=(sp.stats.uniform.logpdf, [0., 0.5], dict(),),
                  bmaj=(sp.stats.uniform.logpdf, [0, 5.], dict(),))
    cg4.add_prior(flux=(sp.stats.uniform.logpdf, [0., 0.2], dict(),),
                  bmaj=(sp.stats.uniform.logpdf, [0, 15.], dict(),))
    cg5.add_prior(flux=(sp.stats.uniform.logpdf, [0., 0.2], dict(),),
                  bmaj=(sp.stats.uniform.logpdf, [0, 90.], dict(),))
    # Create model
    mdl5 = Model(stokes='I')
    # Add components to model
    mdl5.add_components(cg1, cg2, cg3, cg4, cg5)
    mdl_image = mdl5.make_image(map_fname)
    mdl_image.plot(min_rel_level=0.001)
    # Plot cc-map
    ccmodel = CCModel()
    ccmodel.add_cc_from_fits(map_fname)
    ccimage = ccmodel.make_image(map_fname)
    ccimage.plot(min_rel_level=0.025)
    # Create posterior for data & model
    lnpost = LnPost(uvdata, mdl5)
    ndim = mdl5.size
    nwalkers = 50
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost)
    p_std1 = [0.1, 0.3, 0.3, 0.1]
    p_std2 = [0.05, 1., 1., 0.1]
    p_std3 = [0.01, 1., 1., 0.1]
    p_std4 = [0.01, 1., 1., 1]
    p_std5 = [0.01, 1., 1., 1]
    p0 = emcee.utils.sample_ball(mdl5.p, p_std1 + p_std2 + p_std3 + p_std4 +
                                 p_std5, size=nwalkers)
    pos, prob, state = sampler.run_mcmc(p0, 100)
    sampler.reset()
    sampler.run_mcmc(pos, 300)
    # Check results
    fig = triangle.corner(sampler.flatchain[::10, :4],
                          labels=["$flux$", "$y$", "$x$", "$maj$"])
    mdl = Model(stokes='I')
    cg1 = CGComponent(1.16, -0.61, 0.26, 0.72)
    cg2 = CGComponent(0.48, 2.9, -0.6, 1.25)
    cg3 = CGComponent(0.03, -7.4, -1.2, 1.95)
    cg4 = CGComponent(0.005, -7.5, -0, 2.15)
    cg5 = CGComponent(0.005, -7.5, -0, 2.15)
    mdl.add_components(cg1, cg2, cg3, cg4, cg5)
    uvdata.uvplot(stokes='I')
    mdl.uvplot(uv=uvdata.uv)
    mdl_image = mdl.make_image(map_fname)
    mdl_image.plot(min_rel_level=0.001)
    # Plot cc-map
    ccmodel = CCModel()
    ccmodel.add_cc_from_fits(map_fname)
    ccimage = ccmodel.make_image(map_fname)
    ccimage.plot(min_rel_level=0.025)
| [
"model.CCModel",
"emcee.utils.sample_ball",
"emcee.EnsembleSampler",
"triangle.corner",
"model.Model",
"components.CGComponent",
"numpy.max",
"stats.LnPost",
"from_fits.create_uvdata_from_fits_file"
] | [((349, 387), 'from_fits.create_uvdata_from_fits_file', 'create_uvdata_from_fits_file', (['uv_fname'], {}), '(uv_fname)\n', (377, 387), False, 'from from_fits import create_uvdata_from_fits_file\n'), ((430, 461), 'components.CGComponent', 'CGComponent', (['(1.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(1.0, 0.0, 0.0, 1.0)\n', (441, 461), False, 'from components import CGComponent\n'), ((629, 646), 'model.Model', 'Model', ([], {'stokes': '"""I"""'}), "(stokes='I')\n", (634, 646), False, 'from model import Model, CCModel\n'), ((758, 778), 'stats.LnPost', 'LnPost', (['uvdata', 'mdl1'], {}), '(uvdata, mdl1)\n', (764, 778), False, 'from stats import LnPost\n'), ((832, 877), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['nwalkers', 'ndim', 'lnpost'], {}), '(nwalkers, ndim, lnpost)\n', (853, 877), False, 'import emcee\n'), ((919, 973), 'emcee.utils.sample_ball', 'emcee.utils.sample_ball', (['mdl1.p', 'p_std1'], {'size': 'nwalkers'}), '(mdl1.p, p_std1, size=nwalkers)\n', (942, 973), False, 'import emcee\n'), ((1086, 1128), 'numpy.max', 'np.max', (['sampler.flatchain[::10, :]'], {'axis': '(0)'}), '(sampler.flatchain[::10, :], axis=0)\n', (1092, 1128), True, 'import numpy as np\n'), ((1170, 1187), 'model.Model', 'Model', ([], {'stokes': '"""I"""'}), "(stokes='I')\n", (1175, 1187), False, 'from model import Model, CCModel\n'), ((1246, 1265), 'components.CGComponent', 'CGComponent', (['*p_map'], {}), '(*p_map)\n', (1257, 1265), False, 'from components import CGComponent\n'), ((1362, 1452), 'triangle.corner', 'triangle.corner', (['sampler.flatchain[::10, :4]'], {'labels': "['$flux$', '$y$', '$x$', '$maj$']"}), "(sampler.flatchain[::10, :4], labels=['$flux$', '$y$', '$x$',\n '$maj$'])\n", (1377, 1452), False, 'import triangle\n'), ((1519, 1538), 'components.CGComponent', 'CGComponent', (['*p_map'], {}), '(*p_map)\n', (1530, 1538), False, 'from components import CGComponent\n'), ((1549, 1580), 'components.CGComponent', 'CGComponent', (['(0.5)', '(0.0)', '(0.0)', '(2.0)'], 
{}), '(0.5, 0.0, 0.0, 2.0)\n', (1560, 1580), False, 'from components import CGComponent\n'), ((1888, 1905), 'model.Model', 'Model', ([], {'stokes': '"""I"""'}), "(stokes='I')\n", (1893, 1905), False, 'from model import Model, CCModel\n'), ((2045, 2065), 'stats.LnPost', 'LnPost', (['uvdata', 'mdl2'], {}), '(uvdata, mdl2)\n', (2051, 2065), False, 'from stats import LnPost\n'), ((2119, 2164), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['nwalkers', 'ndim', 'lnpost'], {}), '(nwalkers, ndim, lnpost)\n', (2140, 2164), False, 'import emcee\n'), ((2240, 2303), 'emcee.utils.sample_ball', 'emcee.utils.sample_ball', (['mdl2.p', '(p_std1 + p_std2)'], {'size': 'nwalkers'}), '(mdl2.p, p_std1 + p_std2, size=nwalkers)\n', (2263, 2303), False, 'import emcee\n'), ((2447, 2489), 'numpy.max', 'np.max', (['sampler.flatchain[::10, :]'], {'axis': '(0)'}), '(sampler.flatchain[::10, :], axis=0)\n', (2453, 2489), True, 'import numpy as np\n'), ((2500, 2517), 'model.Model', 'Model', ([], {'stokes': '"""I"""'}), "(stokes='I')\n", (2505, 2517), False, 'from model import Model, CCModel\n'), ((2528, 2561), 'components.CGComponent', 'CGComponent', (['(1.4)', '(-0.4)', '(0.1)', '(1.25)'], {}), '(1.4, -0.4, 0.1, 1.25)\n', (2539, 2561), False, 'from components import CGComponent\n'), ((2572, 2606), 'components.CGComponent', 'CGComponent', (['(0.33)', '(3.3)', '(-0.3)', '(1.55)'], {}), '(0.33, 3.3, -0.3, 1.55)\n', (2583, 2606), False, 'from components import CGComponent\n'), ((2709, 2799), 'triangle.corner', 'triangle.corner', (['sampler.flatchain[::10, :4]'], {'labels': "['$flux$', '$y$', '$x$', '$maj$']"}), "(sampler.flatchain[::10, :4], labels=['$flux$', '$y$', '$x$',\n '$maj$'])\n", (2724, 2799), False, 'import triangle\n'), ((2934, 2943), 'model.CCModel', 'CCModel', ([], {}), '()\n', (2941, 2943), False, 'from model import Model, CCModel\n'), ((3102, 3135), 'components.CGComponent', 'CGComponent', (['(1.4)', '(-0.4)', '(0.1)', '(1.25)'], {}), '(1.4, -0.4, 0.1, 1.25)\n', (3113, 3135), 
False, 'from components import CGComponent\n'), ((3146, 3180), 'components.CGComponent', 'CGComponent', (['(0.33)', '(3.3)', '(-0.3)', '(1.55)'], {}), '(0.33, 3.3, -0.3, 1.55)\n', (3157, 3180), False, 'from components import CGComponent\n'), ((3191, 3224), 'components.CGComponent', 'CGComponent', (['(0.2)', '(-10.0)', '(0.0)', '(2.0)'], {}), '(0.2, -10.0, 0.0, 2.0)\n', (3202, 3224), False, 'from components import CGComponent\n'), ((3668, 3685), 'model.Model', 'Model', ([], {'stokes': '"""I"""'}), "(stokes='I')\n", (3673, 3685), False, 'from model import Model, CCModel\n'), ((3808, 3828), 'stats.LnPost', 'LnPost', (['uvdata', 'mdl3'], {}), '(uvdata, mdl3)\n', (3814, 3828), False, 'from stats import LnPost\n'), ((3882, 3927), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['nwalkers', 'ndim', 'lnpost'], {}), '(nwalkers, ndim, lnpost)\n', (3903, 3927), False, 'import emcee\n'), ((4035, 4107), 'emcee.utils.sample_ball', 'emcee.utils.sample_ball', (['mdl3.p', '(p_std1 + p_std2 + p_std3)'], {'size': 'nwalkers'}), '(mdl3.p, p_std1 + p_std2 + p_std3, size=nwalkers)\n', (4058, 4107), False, 'import emcee\n'), ((4272, 4362), 'triangle.corner', 'triangle.corner', (['sampler.flatchain[::10, :4]'], {'labels': "['$flux$', '$y$', '$x$', '$maj$']"}), "(sampler.flatchain[::10, :4], labels=['$flux$', '$y$', '$x$',\n '$maj$'])\n", (4287, 4362), False, 'import triangle\n'), ((4395, 4412), 'model.Model', 'Model', ([], {'stokes': '"""I"""'}), "(stokes='I')\n", (4400, 4412), False, 'from model import Model, CCModel\n'), ((4423, 4459), 'components.CGComponent', 'CGComponent', (['(1.23)', '(-0.51)', '(0.21)', '(0.88)'], {}), '(1.23, -0.51, 0.21, 0.88)\n', (4434, 4459), False, 'from components import CGComponent\n'), ((4470, 4504), 'components.CGComponent', 'CGComponent', (['(0.42)', '(2.9)', '(-0.6)', '(1.55)'], {}), '(0.42, 2.9, -0.6, 1.55)\n', (4481, 4504), False, 'from components import CGComponent\n'), ((4515, 4549), 'components.CGComponent', 'CGComponent', (['(0.2)', '(-8.0)', 
'(-2.8)', '(1.98)'], {}), '(0.2, -8.0, -2.8, 1.98)\n', (4526, 4549), False, 'from components import CGComponent\n'), ((4759, 4768), 'model.CCModel', 'CCModel', ([], {}), '()\n', (4766, 4768), False, 'from model import Model, CCModel\n'), ((4928, 4964), 'components.CGComponent', 'CGComponent', (['(1.23)', '(-0.51)', '(0.21)', '(0.88)'], {}), '(1.23, -0.51, 0.21, 0.88)\n', (4939, 4964), False, 'from components import CGComponent\n'), ((4975, 5009), 'components.CGComponent', 'CGComponent', (['(0.42)', '(2.9)', '(-0.6)', '(1.55)'], {}), '(0.42, 2.9, -0.6, 1.55)\n', (4986, 5009), False, 'from components import CGComponent\n'), ((5020, 5054), 'components.CGComponent', 'CGComponent', (['(0.2)', '(-8.0)', '(-2.8)', '(1.98)'], {}), '(0.2, -8.0, -2.8, 1.98)\n', (5031, 5054), False, 'from components import CGComponent\n'), ((5065, 5098), 'components.CGComponent', 'CGComponent', (['(0.2)', '(-15.0)', '(0.0)', '(2.0)'], {}), '(0.2, -15.0, 0.0, 2.0)\n', (5076, 5098), False, 'from components import CGComponent\n'), ((5680, 5697), 'model.Model', 'Model', ([], {'stokes': '"""I"""'}), "(stokes='I')\n", (5685, 5697), False, 'from model import Model, CCModel\n'), ((5825, 5845), 'stats.LnPost', 'LnPost', (['uvdata', 'mdl4'], {}), '(uvdata, mdl4)\n', (5831, 5845), False, 'from stats import LnPost\n'), ((5899, 5944), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['nwalkers', 'ndim', 'lnpost'], {}), '(nwalkers, ndim, lnpost)\n', (5920, 5944), False, 'import emcee\n'), ((6084, 6170), 'emcee.utils.sample_ball', 'emcee.utils.sample_ball', (['mdl4.p', '(p_std1 + p_std2 + p_std3 + p_std4)'], {'size': 'nwalkers'}), '(mdl4.p, p_std1 + p_std2 + p_std3 + p_std4, size=\n nwalkers)\n', (6107, 6170), False, 'import emcee\n'), ((6330, 6420), 'triangle.corner', 'triangle.corner', (['sampler.flatchain[::10, :4]'], {'labels': "['$flux$', '$y$', '$x$', '$maj$']"}), "(sampler.flatchain[::10, :4], labels=['$flux$', '$y$', '$x$',\n '$maj$'])\n", (6345, 6420), False, 'import triangle\n'), ((6453, 6470), 
'model.Model', 'Model', ([], {'stokes': '"""I"""'}), "(stokes='I')\n", (6458, 6470), False, 'from model import Model, CCModel\n'), ((6481, 6517), 'components.CGComponent', 'CGComponent', (['(1.16)', '(-0.61)', '(0.26)', '(0.72)'], {}), '(1.16, -0.61, 0.26, 0.72)\n', (6492, 6517), False, 'from components import CGComponent\n'), ((6528, 6562), 'components.CGComponent', 'CGComponent', (['(0.48)', '(2.9)', '(-0.6)', '(1.25)'], {}), '(0.48, 2.9, -0.6, 1.25)\n', (6539, 6562), False, 'from components import CGComponent\n'), ((6573, 6608), 'components.CGComponent', 'CGComponent', (['(0.03)', '(-7.4)', '(-1.2)', '(1.95)'], {}), '(0.03, -7.4, -1.2, 1.95)\n', (6584, 6608), False, 'from components import CGComponent\n'), ((6619, 6653), 'components.CGComponent', 'CGComponent', (['(0.005)', '(-7.5)', '(-0)', '(2.15)'], {}), '(0.005, -7.5, -0, 2.15)\n', (6630, 6653), False, 'from components import CGComponent\n'), ((6871, 6880), 'model.CCModel', 'CCModel', ([], {}), '()\n', (6878, 6880), False, 'from model import Model, CCModel\n'), ((7040, 7075), 'components.CGComponent', 'CGComponent', (['(1.0)', '(-0.51)', '(0.21)', '(0.88)'], {}), '(1.0, -0.51, 0.21, 0.88)\n', (7051, 7075), False, 'from components import CGComponent\n'), ((7086, 7120), 'components.CGComponent', 'CGComponent', (['(0.75)', '(2.9)', '(-0.6)', '(1.55)'], {}), '(0.75, 2.9, -0.6, 1.55)\n', (7097, 7120), False, 'from components import CGComponent\n'), ((7131, 7165), 'components.CGComponent', 'CGComponent', (['(0.2)', '(-8.0)', '(-2.8)', '(1.98)'], {}), '(0.2, -8.0, -2.8, 1.98)\n', (7142, 7165), False, 'from components import CGComponent\n'), ((7176, 7210), 'components.CGComponent', 'CGComponent', (['(0.05)', '(-20.0)', '(0.0)', '(2.0)'], {}), '(0.05, -20.0, 0.0, 2.0)\n', (7187, 7210), False, 'from components import CGComponent\n'), ((7221, 7258), 'components.CGComponent', 'CGComponent', (['(0.01)', '(-75.0)', '(100.0)', '(25.0)'], {}), '(0.01, -75.0, 100.0, 25.0)\n', (7232, 7258), False, 'from components import 
CGComponent\n'), ((7979, 7996), 'model.Model', 'Model', ([], {'stokes': '"""I"""'}), "(stokes='I')\n", (7984, 7996), False, 'from model import Model, CCModel\n'), ((8191, 8200), 'model.CCModel', 'CCModel', ([], {}), '()\n', (8198, 8200), False, 'from model import Model, CCModel\n'), ((8376, 8396), 'stats.LnPost', 'LnPost', (['uvdata', 'mdl5'], {}), '(uvdata, mdl5)\n', (8382, 8396), False, 'from stats import LnPost\n'), ((8450, 8495), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['nwalkers', 'ndim', 'lnpost'], {}), '(nwalkers, ndim, lnpost)\n', (8471, 8495), False, 'import emcee\n'), ((8667, 8761), 'emcee.utils.sample_ball', 'emcee.utils.sample_ball', (['mdl5.p', '(p_std1 + p_std2 + p_std3 + p_std4 + p_std5)'], {'size': 'nwalkers'}), '(mdl5.p, p_std1 + p_std2 + p_std3 + p_std4 + p_std5,\n size=nwalkers)\n', (8690, 8761), False, 'import emcee\n'), ((8922, 9012), 'triangle.corner', 'triangle.corner', (['sampler.flatchain[::10, :4]'], {'labels': "['$flux$', '$y$', '$x$', '$maj$']"}), "(sampler.flatchain[::10, :4], labels=['$flux$', '$y$', '$x$',\n '$maj$'])\n", (8937, 9012), False, 'import triangle\n'), ((9045, 9062), 'model.Model', 'Model', ([], {'stokes': '"""I"""'}), "(stokes='I')\n", (9050, 9062), False, 'from model import Model, CCModel\n'), ((9073, 9109), 'components.CGComponent', 'CGComponent', (['(1.16)', '(-0.61)', '(0.26)', '(0.72)'], {}), '(1.16, -0.61, 0.26, 0.72)\n', (9084, 9109), False, 'from components import CGComponent\n'), ((9120, 9154), 'components.CGComponent', 'CGComponent', (['(0.48)', '(2.9)', '(-0.6)', '(1.25)'], {}), '(0.48, 2.9, -0.6, 1.25)\n', (9131, 9154), False, 'from components import CGComponent\n'), ((9165, 9200), 'components.CGComponent', 'CGComponent', (['(0.03)', '(-7.4)', '(-1.2)', '(1.95)'], {}), '(0.03, -7.4, -1.2, 1.95)\n', (9176, 9200), False, 'from components import CGComponent\n'), ((9211, 9245), 'components.CGComponent', 'CGComponent', (['(0.005)', '(-7.5)', '(-0)', '(2.15)'], {}), '(0.005, -7.5, -0, 2.15)\n', (9222, 
9245), False, 'from components import CGComponent\n'), ((9256, 9290), 'components.CGComponent', 'CGComponent', (['(0.005)', '(-7.5)', '(-0)', '(2.15)'], {}), '(0.005, -7.5, -0, 2.15)\n', (9267, 9290), False, 'from components import CGComponent\n'), ((9513, 9522), 'model.CCModel', 'CCModel', ([], {}), '()\n', (9520, 9522), False, 'from model import Model, CCModel\n')] |
# convex unimodal optimization function
from numpy import arange
from matplotlib import pyplot
# objective function
def objective(x):
    """Convex unimodal objective: the square of the first coordinate of x."""
    first = x[0]
    return first ** 2.0
# define range for input
r_min, r_max = -5.0, 5.0
# sample input range uniformly at 0.1 increments (arange excludes r_max itself)
inputs = arange(r_min, r_max, 0.1)
# compute targets
results = [objective([x]) for x in inputs]
# create a line plot of input vs result
pyplot.plot(inputs, results)
# define optimal input value (minimum of x**2 is at x = 0)
x_optima = 0.0
# draw a dashed vertical line at the optimal input
pyplot.axvline(x=x_optima, ls='--', color='red')
# show the plot
pyplot.show()
| [
"matplotlib.pyplot.axvline",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
] | [((262, 287), 'numpy.arange', 'arange', (['r_min', 'r_max', '(0.1)'], {}), '(r_min, r_max, 0.1)\n', (268, 287), False, 'from numpy import arange\n'), ((389, 417), 'matplotlib.pyplot.plot', 'pyplot.plot', (['inputs', 'results'], {}), '(inputs, results)\n', (400, 417), False, 'from matplotlib import pyplot\n'), ((506, 554), 'matplotlib.pyplot.axvline', 'pyplot.axvline', ([], {'x': 'x_optima', 'ls': '"""--"""', 'color': '"""red"""'}), "(x=x_optima, ls='--', color='red')\n", (520, 554), False, 'from matplotlib import pyplot\n'), ((571, 584), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (582, 584), False, 'from matplotlib import pyplot\n')] |
# based on https://github.com/adamtornhill/maat-scripts/blob/master/miner/complexity_calculations.py
import re
import numpy
from tools.encoding import detect_encoding
# Precompiled patterns used by the indentation-based complexity measures.
leading_tabs_expr = re.compile(r'^(\t+)')  # one or more tabs at line start
leading_spaces_expr = re.compile(r'^( +)')  # one or more spaces at line start
empty_line_expr = re.compile(r'^\s*$')  # blank / whitespace-only line
def n_log_tabs(line):
    """Return the number of leading tabs in *line*, ignoring any spaces.

    Spaces are stripped first so mixed indentation such as " \t\t" still
    counts both tabs as leading.
    """
    # str.replace does exactly what re.sub(r' +', '', line) did, without
    # recompiling a constant regex on every call.
    wo_spaces = line.replace(' ', '')
    m = leading_tabs_expr.search(wo_spaces)
    if m:
        return len(m.group())
    return 0
def n_log_spaces(line):
    """Return the number of leading spaces in *line*, ignoring any tabs.

    Tabs are stripped first so mixed indentation still counts the spaces
    as leading.
    """
    # str.replace does exactly what re.sub(r'\t+', '', line) did, without
    # recompiling a constant regex on every call.
    wo_tabs = line.replace('\t', '')
    m = leading_spaces_expr.search(wo_tabs)
    if m:
        return len(m.group())
    return 0
def complexity_of(line):
    """Indentation-based complexity of one line: each tab counts 1, each space 1/4."""
    tab_part = n_log_tabs(line)
    space_part = n_log_spaces(line) / 4  # hardcoded indentation width of 4
    return tab_part + space_part
def calculate_complexity_in(source):
    """Return the mean indentation-based complexity over all lines of the file at *source*."""
    encoding = detect_encoding(source)
    # Read the whole file; undecodable bytes are ignored rather than raising.
    with open(source, "r", newline='', encoding=encoding, errors='ignore') as handle:
        text = handle.read()
        per_line = [complexity_of(current) for current in text.split("\n")]
        return numpy.mean(per_line)
"numpy.mean",
"re.sub",
"tools.encoding.detect_encoding",
"re.compile"
] | [((191, 212), 're.compile', 're.compile', (['"""^(\\\\t+)"""'], {}), "('^(\\\\t+)')\n", (201, 212), False, 'import re\n'), ((235, 254), 're.compile', 're.compile', (['"""^( +)"""'], {}), "('^( +)')\n", (245, 254), False, 'import re\n'), ((274, 294), 're.compile', 're.compile', (['"""^\\\\s*$"""'], {}), "('^\\\\s*$')\n", (284, 294), False, 'import re\n'), ((333, 349), 're.compile', 're.compile', (['""" +"""'], {}), "(' +')\n", (343, 349), False, 'import re\n'), ((367, 392), 're.sub', 're.sub', (['pattern', '""""""', 'line'], {}), "(pattern, '', line)\n", (373, 392), False, 'import re\n'), ((550, 568), 're.compile', 're.compile', (['"""\\\\t+"""'], {}), "('\\\\t+')\n", (560, 568), False, 'import re\n'), ((583, 608), 're.sub', 're.sub', (['pattern', '""""""', 'line'], {}), "(pattern, '', line)\n", (589, 608), False, 'import re\n'), ((890, 913), 'tools.encoding.detect_encoding', 'detect_encoding', (['source'], {}), '(source)\n', (905, 913), False, 'from tools.encoding import detect_encoding\n'), ((1122, 1150), 'numpy.mean', 'numpy.mean', (['lines_complexity'], {}), '(lines_complexity)\n', (1132, 1150), False, 'import numpy\n')] |
import os
import numpy as np
import pandas as pd
import math
import time
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression, Ridge
import statsmodels.api as sm
def SplitBasedSelectionForm(data, k, model, list_neigh, split_point1, split_point2, nb_classes, limit) :
    """Greedily split the instance space into at most `k` subgroups.

    Starting from the single subgroup containing every instance, the loop
    evaluates axis-aligned binary splits (one attribute, one threshold per
    candidate), keeps the split that minimises the total surrogate loss
    (see `calc_loss`), and stops when `k` subgroups are reached or no split
    improves the loss.

    Attributes with index in [split_point1, split_point2) are treated as
    binary (split at 0); the others as numerical (split at the midpoint
    produced by pd.cut).  `limit` restricts the features the surrogate is
    fit on.  NOTE(review): `nb_classes` is not referenced in this body.

    Returns (L_S, L_Patterns): snapshots of the subgroup set and of the
    describing patterns after every accepted split.
    """
    start_time = time.time()
    n = np.size(data,0) # number of instances
    p = np.size(data,1) # number of features
    Subgroups = set() # S is a set of the subgroups
    Subgroups.add(tuple(np.arange(n))) # first S is simply all the objects O i.e S = {0}
    W = dict ()
    data_neigh_O, target_neigh_O_proba = sampling_sb(data,np.arange(n),list_neigh,model)
    patterns = dict()
    # patterns = {attribute : a, value : v, operator : '>' or '<='}
    patterns[tuple(np.arange(n))] = (None,None,None)
    L_Patterns = []
    L_S = []
    improv = True
    splits = set ()
    newSubgroups = set()
    newSubgroups.add(tuple(np.arange(n))) # newSubgroups = {O}
    loss_subgroups = dict () # for the losses of the subgroups without spliting
    loss_subgroups [tuple(np.arange(n))] = calc_loss(data_neigh_O,target_neigh_O_proba, limit)
    #print('loss_all = ',loss_subgroups [tuple(np.arange(n))])
    #print("%s" % (time.time() - start_time))
    iteration = 0
    while len(Subgroups) < k and improv :
        print('trace ---', iteration)
        # Only subgroups created by the previous split need re-evaluation.
        for s in newSubgroups : # s is tuple
            if len(s) > 1 and loss_subgroups[s] > 0 :
                list_loss_attributes = []
                for a in range(0,p) :
                    to_continue = False
                    list_loss_values = []
                    min_v = np.min(data[s,a]) # attribute range over the subgroup
                    max_v = np.max(data[s,a]) # used to skip constant attributes
                    if (a < split_point1) or (a >= split_point2) : # numerical / boolean
                        if min_v != max_v :
                            steps = (pd.cut(data[s,a],2, retbins=True,include_lowest=True))[1][1:-1]
                            to_continue = True
                    else :
                        if min_v == 0 and max_v > 0 :
                            steps = np.array([0])
                            to_continue = True
                    if to_continue :
                        len_steps = np.size(steps)
                        j = 0
                        while j < len_steps :
                            value = steps[j]
                            # subgroup1 that satisfies the condition s [a > v]
                            subgrp1 = tuple(np.asarray(s)[data[s,:][:,a] > value])
                            # generating the new dataset of neighbors of the subgroup_1 elements
                            data_neigh_sb1, target_neigh_sb1_proba = sampling_sb(data,subgrp1,list_neigh,model)
                            # subgroup2 that satisfies the condition s [a <= v]
                            subgrp2 = tuple(np.asarray(s)[data[s,:][:,a] <= value])
                            # generating the new dataset of neighbors of the subgroup_1 elements
                            data_neigh_sb2, target_neigh_sb2_proba = sampling_sb(data,subgrp2,list_neigh,model)
                            #compute the loss and update the loss_subgroups dictionnary
                            loss_subgroups[subgrp1] = calc_loss(data_neigh_sb1, target_neigh_sb1_proba, limit)
                            loss_subgroups[subgrp2] = calc_loss(data_neigh_sb2, target_neigh_sb2_proba, limit)
                            loss = loss_subgroups[subgrp1] + loss_subgroups[subgrp2]
                            #print("loss des 2 sbgrps =",loss)
                            # store the losses
                            list_loss_values.append((loss,value))
                            #iterate over the j
                            j += 1
                # select the minimum loss and value that minimize the loss for each attribute a
                    if list_loss_values :
                        loss_opt_att = min(list_loss_values)
                        # store the optimal loss for the attribute
                        list_loss_attributes.append(loss_opt_att)
                    else :
                        list_loss_attributes.append((math.inf,None))
                # select the minimum loss and value that minimize the loss for the subgroup s
                loss_opt_s, value_opt = min(list_loss_attributes)
                attribute_opt = list_loss_attributes.index(min(list_loss_attributes))
                # add the best split for the subgroup (s) to the splits set
                splits.add((s,attribute_opt,value_opt,loss_opt_s))
        # Choose the subgroup split that leads to the minimum loss:
        best_split = splits.pop()
        tmp_split = best_split # to add it after
        s, a, v, loss_sb = best_split
        Subgroups.remove(s)
        best_loss_s = loss_set(Subgroups,loss_subgroups) + loss_sb
        Subgroups.add(s)
        # Compare every pending split by the total loss it would leave.
        for split in splits :
            s_, a_, v_, loss_sb_ = split
            Subgroups.remove(s_)
            if loss_set(Subgroups,loss_subgroups) + loss_sb_ < best_loss_s :
                best_loss_s = loss_set(Subgroups,loss_subgroups) + loss_sb_
                best_split = split
            Subgroups.add(s_)
        splits.add(tmp_split)
        s_best, a_best, v_best, loss_sb_min = best_split
        # Accept the split only if it strictly reduces the subgroup's loss.
        if loss_sb_min < loss_subgroups[s_best] :
            Subgroups.remove(s_best)
            sb1 = tuple(np.asarray(s_best)[data[s_best,:][:,a_best] > v_best])
            sb2 = tuple(np.asarray(s_best)[data[s_best,:][:,a_best] <= v_best])
            Subgroups.add(sb1)
            Subgroups.add(sb2)
            if iteration == 0 :
                del patterns[s_best]
                patterns[sb1] = (a_best,'>',v_best)
                patterns[sb2] = (a_best,'<=',v_best)
            else :
                # Children inherit the parent's pattern plus the new condition.
                patterns[sb1] = patterns[s_best] + (a_best,'>',v_best)
                patterns[sb2] = patterns[s_best] + (a_best,'<=',v_best)
                del patterns[s_best]
            newSubgroups = {sb1, sb2}
            splits.remove(best_split)
        else :
            improv = False
        iteration = iteration + 1
        #print('{:.2e}'.format(loss_set(Subgroups,loss_subgroups)))
        #print("%s" % (time.time() - start_time))
        # Snapshot the current partition and patterns after this iteration.
        S_copy = set ()
        S_copy = Subgroups.copy()
        patterns_copie = dict ()
        patterns_copie = patterns.copy()
        L_Patterns.append(patterns_copie)
        L_S.append(S_copy)
    return(L_S, L_Patterns)
def lin_models_for_sim(S, data_test, list_neigh, model, limit):
    """Fit one ridge-regression surrogate per subgroup.

    For every subgroup `s` in `S`, the neighbourhood samples of its members
    are stacked with `sampling_sb` and a Ridge(alpha=1) model is fit on the
    first `limit` features against the predicted class probabilities.

    Returns a dict mapping each subgroup (tuple) to its fitted model.
    """
    W_ = dict()
    for s in S:
        data_neigh_s, target_neigh_s_proba = sampling_sb(data_test, s, list_neigh, model)
        # Fit on the first `limit` (interpretable) features only; the
        # redundant `del` statements of the original were dropped — the
        # loop rebinds the locals anyway.
        W_[s] = Ridge(alpha=1).fit(data_neigh_s[:, :limit], target_neigh_s_proba)
    return W_
def sampling_sb(dataset, subgroup, list_neigh, model) :
    """Stack the precomputed neighbourhoods of every instance in `subgroup`.

    `list_neigh[i]` is a pair (neigh_data, neigh_targets) for instance i,
    where neigh_data has `dataset.shape[1]` columns.  `model` is accepted
    for interface compatibility but unused here.

    Returns (all_data, all_target): the vertically stacked neighbourhood
    features and targets for the subgroup, in subgroup order.
    """
    n_neighs = list_neigh[0][0].shape[0]
    # Infer the target width from the stored neighbourhoods instead of
    # hard-coding it (the original fixed it to 19 classes).
    n_targets = list_neigh[0][1].shape[1]
    subgroup = np.asarray(subgroup)
    size = subgroup.size
    all_data = np.zeros((size * n_neighs, dataset.shape[1]))
    all_target = np.zeros((size * n_neighs, n_targets))
    for i in range(size):
        all_data[i * n_neighs : (i + 1) * n_neighs, :] = list_neigh[subgroup[i]][0]
        all_target[i * n_neighs : (i + 1) * n_neighs, :] = list_neigh[subgroup[i]][1]
    return (all_data, all_target)
def calc_loss (data, target_proba, limit):
    """Sum of squared residuals of a Ridge(alpha=1) fit on the first `limit` features."""
    ridge = Ridge(alpha=1)
    fitted = ridge.fit(data[:, :limit], target_proba)
    predictions = fitted.predict(data[:, :limit])
    residuals = np.square(target_proba - predictions)
    return sum(sum(residuals))
def loss_set(Subgroups, loss_subgroups):
    """Total loss of a set of subgroups.

    Looks up every subgroup of `Subgroups` in the `loss_subgroups` mapping
    and returns the sum; an empty set contributes 0.
    """
    # `if bool(S) == False` replaced by the idiomatic truthiness test and
    # the manual accumulator by a generator expression fed to sum().
    if not Subgroups:
        return 0
    return sum(loss_subgroups[s] for s in Subgroups)
| [
"numpy.size",
"numpy.asarray",
"numpy.square",
"numpy.zeros",
"time.time",
"pandas.cut",
"numpy.min",
"numpy.max",
"numpy.arange",
"numpy.array",
"sklearn.linear_model.Ridge"
] | [((345, 356), 'time.time', 'time.time', ([], {}), '()\n', (354, 356), False, 'import time\n'), ((365, 381), 'numpy.size', 'np.size', (['data', '(0)'], {}), '(data, 0)\n', (372, 381), True, 'import numpy as np\n'), ((411, 427), 'numpy.size', 'np.size', (['data', '(1)'], {}), '(data, 1)\n', (418, 427), True, 'import numpy as np\n'), ((7439, 7459), 'numpy.asarray', 'np.asarray', (['subgroup'], {}), '(subgroup)\n', (7449, 7459), True, 'import numpy as np\n'), ((7500, 7545), 'numpy.zeros', 'np.zeros', (['(size * n_neighs, dataset.shape[1])'], {}), '((size * n_neighs, dataset.shape[1]))\n', (7508, 7545), True, 'import numpy as np\n'), ((7561, 7592), 'numpy.zeros', 'np.zeros', (['(size * n_neighs, 19)'], {}), '((size * n_neighs, 19))\n', (7569, 7592), True, 'import numpy as np\n'), ((7898, 7912), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': '(1)'}), '(alpha=1)\n', (7903, 7912), False, 'from sklearn.linear_model import LinearRegression, Ridge\n'), ((671, 683), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (680, 683), True, 'import numpy as np\n'), ((7147, 7161), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': '(1)'}), '(alpha=1)\n', (7152, 7161), False, 'from sklearn.linear_model import LinearRegression, Ridge\n'), ((527, 539), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (536, 539), True, 'import numpy as np\n'), ((816, 828), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (825, 828), True, 'import numpy as np\n'), ((979, 991), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (988, 991), True, 'import numpy as np\n'), ((1126, 1138), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1135, 1138), True, 'import numpy as np\n'), ((8034, 8069), 'numpy.square', 'np.square', (['(target_proba - target_lr)'], {}), '(target_proba - target_lr)\n', (8043, 8069), True, 'import numpy as np\n'), ((1709, 1727), 'numpy.min', 'np.min', (['data[s, a]'], {}), '(data[s, a])\n', (1715, 1727), True, 'import numpy as np\n'), ((1759, 1777), 'numpy.max', 
'np.max', (['data[s, a]'], {}), '(data[s, a])\n', (1765, 1777), True, 'import numpy as np\n'), ((5757, 5775), 'numpy.asarray', 'np.asarray', (['s_best'], {}), '(s_best)\n', (5767, 5775), True, 'import numpy as np\n'), ((5847, 5865), 'numpy.asarray', 'np.asarray', (['s_best'], {}), '(s_best)\n', (5857, 5865), True, 'import numpy as np\n'), ((2364, 2378), 'numpy.size', 'np.size', (['steps'], {}), '(steps)\n', (2371, 2378), True, 'import numpy as np\n'), ((2201, 2214), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2209, 2214), True, 'import numpy as np\n'), ((1951, 2007), 'pandas.cut', 'pd.cut', (['data[s, a]', '(2)'], {'retbins': '(True)', 'include_lowest': '(True)'}), '(data[s, a], 2, retbins=True, include_lowest=True)\n', (1957, 2007), True, 'import pandas as pd\n'), ((2626, 2639), 'numpy.asarray', 'np.asarray', (['s'], {}), '(s)\n', (2636, 2639), True, 'import numpy as np\n'), ((3001, 3014), 'numpy.asarray', 'np.asarray', (['s'], {}), '(s)\n', (3011, 3014), True, 'import numpy as np\n')] |
import argparse
import glob
import json
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from fever_utils import make_sentence_id
def calculate_scores(args):
    """Compute label accuracy, F1, label distribution and FEVER score.

    Gold evidence sets are read from ``args.dataset_file`` (FEVER jsonl).
    For each scores file matching ``args.scores_files_prefix``, per-sentence
    label scores are aggregated into one predicted label per claim according
    to ``args.strategy`` and the metrics are printed.
    """
    # claim id -> list of gold evidence sets (each a list of sentence ids);
    # NOT ENOUGH INFO claims get an empty list.
    evidences = {}
    with open(args.dataset_file, 'r', encoding='utf-8') as f:
        for line in f:
            line_json = json.loads(line.strip())
            evidence_sets = []
            if line_json['label'] != 'NOT ENOUGH INFO':
                for annotator in line_json['evidence']:
                    evidence_set = [make_sentence_id(evidence[2], evidence[3]) for evidence in annotator]
                    evidence_sets.append(evidence_set)
            evidences[line_json['id']] = evidence_sets
    def aggregate(scores):
        # Reduce an (n_sentences, n_classes) score array to one class index.
        # NOTE(review): scores appear to be log-probabilities (np.exp is used
        # before summing/maxing) — confirm against the scores-file format.
        if args.num_classes == 4:
            # filter out samples predicted weak and remove weak scores
            scores = scores[np.argmax(scores, axis=1) != 3][:, :3]
            if len(scores) == 0:
                return 1
        if args.strategy == 'first':
            return np.argmax(scores[0])
        elif args.strategy == 'sum':
            return np.argmax(np.sum(np.exp(scores), axis=0))
        elif args.strategy == 'nei_default':
            # Default to NEI (1) unless the sentence votes agree on one side.
            maxes = np.argmax(scores, axis=1)
            if (0 in maxes and 2 in maxes) or (0 not in maxes and 2 not in maxes):
                return 1
            elif 0 in maxes:
                return 0
            elif 2 in maxes:
                return 2
            return -1
        elif args.strategy == 'max':
            return np.argmax(np.max(np.exp(scores), axis=0))
        return -1
    for scores_file in sorted(glob.glob(f'{args.scores_files_prefix}*')):
        labels = []
        pred_labels = []
        fever_scores = []
        with open(args.id_file, 'r', encoding='utf-8') as f_id, open(scores_file, 'r', encoding='utf-8') as f_scores:
            # The id file and scores file are parallel: one line per
            # (query, sentence) pair, grouped by query id.
            curr_query = None
            curr_label = None # actual label for current query
            curr_scores = []
            curr_evidences = []
            for id_line, scores_line in zip(f_id, f_scores):
                query_id, sent_ids, label_str = id_line.strip().split('\t')
                query_id = int(query_id)
                if query_id != curr_query:
                    if curr_query is not None:
                        # aggregate to get predicted label
                        pred_label = aggregate(np.array(curr_scores))
                        pred_labels.append(pred_label)
                        # calculate FEVER score
                        fever_scores.append(int(pred_label == curr_label and (pred_label == 1 or \
                            any([set(ev_set).issubset(set(curr_evidences)) for ev_set in evidences[curr_query]]))))
                    curr_query = query_id
                    curr_scores.clear()
                    curr_evidences.clear()
                    # save actual label
                    if label_str == 'false':
                        curr_label = 0
                    elif label_str == 'weak':
                        curr_label = 1
                    elif label_str == 'true':
                        curr_label = 2
                    labels.append(curr_label)
                # save predicted evidence(s) and scores
                if args.num_classes == 3:
                    _, false_score, nei_score, true_score = scores_line.strip().split('\t')
                    scores = [float(false_score), float(nei_score), float(true_score)]
                elif args.num_classes == 4:
                    _, false_score, ignore_score, true_score, nei_score = scores_line.strip().split('\t')
                    scores = [float(false_score), float(nei_score), float(true_score), float(ignore_score)]
                curr_scores.append(scores)
                curr_evidences.extend(sent_ids.strip().split(' '))
            # handle last query
            pred_label = aggregate(np.array(curr_scores))
            pred_labels.append(pred_label)
            fever_scores.append(int(pred_label == curr_label and (pred_label == 1 or \
                any([set(ev_set).issubset(set(curr_evidences)) for ev_set in evidences[curr_query]]))))
        print(scores_file)
        print(f'Label Accuracy: {accuracy_score(labels, pred_labels)}')
        print(f'Predicted Label F1 Scores: {f1_score(labels, pred_labels, average=None)}')
        print(f'Predicted Label Distribution: {[pred_labels.count(i) for i in range(args.num_classes)]}')
        print(f'FEVER Score: {sum(fever_scores) / len(fever_scores)}')
if __name__ == '__main__':
    # Command-line entry point: parse the evaluation options and run.
    parser = argparse.ArgumentParser(description='Calculates various metrics of label prediction output files.')
    parser.add_argument('--id_file', required=True, help='Input query-doc pair ids file.')
    parser.add_argument('--scores_files_prefix', required=True, help='Prefix of all T5 label prediction scores files.')
    parser.add_argument('--dataset_file', help='FEVER dataset file.')
    parser.add_argument('--num_classes', type=int, default=3, help='Number of label prediction classes.')
    parser.add_argument('--strategy', help='Format of scores file and method of aggregation if applicable.')
    args = parser.parse_args()
    calculate_scores(args)
| [
"argparse.ArgumentParser",
"numpy.argmax",
"sklearn.metrics.accuracy_score",
"fever_utils.make_sentence_id",
"sklearn.metrics.f1_score",
"numpy.array",
"numpy.exp",
"glob.glob"
] | [((4584, 4688), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Calculates various metrics of label prediction output files."""'}), "(description=\n 'Calculates various metrics of label prediction output files.')\n", (4607, 4688), False, 'import argparse\n'), ((1607, 1648), 'glob.glob', 'glob.glob', (['f"""{args.scores_files_prefix}*"""'], {}), "(f'{args.scores_files_prefix}*')\n", (1616, 1648), False, 'import glob\n'), ((1012, 1032), 'numpy.argmax', 'np.argmax', (['scores[0]'], {}), '(scores[0])\n', (1021, 1032), True, 'import numpy as np\n'), ((3894, 3915), 'numpy.array', 'np.array', (['curr_scores'], {}), '(curr_scores)\n', (3902, 3915), True, 'import numpy as np\n'), ((1196, 1221), 'numpy.argmax', 'np.argmax', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (1205, 1221), True, 'import numpy as np\n'), ((517, 559), 'fever_utils.make_sentence_id', 'make_sentence_id', (['evidence[2]', 'evidence[3]'], {}), '(evidence[2], evidence[3])\n', (533, 559), False, 'from fever_utils import make_sentence_id\n'), ((858, 883), 'numpy.argmax', 'np.argmax', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (867, 883), True, 'import numpy as np\n'), ((1106, 1120), 'numpy.exp', 'np.exp', (['scores'], {}), '(scores)\n', (1112, 1120), True, 'import numpy as np\n'), ((4224, 4259), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['labels', 'pred_labels'], {}), '(labels, pred_labels)\n', (4238, 4259), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((4311, 4354), 'sklearn.metrics.f1_score', 'f1_score', (['labels', 'pred_labels'], {'average': 'None'}), '(labels, pred_labels, average=None)\n', (4319, 4354), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((2370, 2391), 'numpy.array', 'np.array', (['curr_scores'], {}), '(curr_scores)\n', (2378, 2391), True, 'import numpy as np\n'), ((1533, 1547), 'numpy.exp', 'np.exp', (['scores'], {}), '(scores)\n', (1539, 1547), True, 'import numpy as np\n')] |
import time
from numpy import random
from pandas import DataFrame
from openpyxl import Workbook
from openpyxl.styles import Font, Border, Side, Alignment
from openpyxl.styles.cell_style import StyleArray
from openpyxl.styles.named_styles import NamedStyle
from openpyxl.utils.dataframe import dataframe_to_rows
# Shared openpyxl style pieces: bold, centered text inside a thin black border.
ft = Font(bold=True)
al = Alignment(horizontal="center")
side = Side(style="thin", color="000000")
border = Border(left=side, right=side, top=side, bottom=side)
# NOTE(review): this NamedStyle is created but never registered on a workbook,
# and the functions below apply a style called 'Pandas' (not "Pandas Title") —
# confirm the intended style name/registration.
highlight = NamedStyle(name="Pandas Title", font=ft, alignment=al, border=border)
def openpyxl_in_memory(df):
    """Build a workbook in memory from *df* and save it as pandas_openpyxl.xlsx.

    The dataframe is appended row by row (index and header included), then the
    named 'Pandas' style is applied to the index column and the header row.
    """
    workbook = Workbook()
    sheet = workbook.active
    for record in dataframe_to_rows(df, index=True, header=True):
        sheet.append(record)
    header_cells = sheet['A'] + sheet['1']
    for header_cell in header_cells:
        header_cell.style = 'Pandas'
    workbook.save("pandas_openpyxl.xlsx")
from openpyxl.cell import WriteOnlyCell
def openpyxl_stream(df):
    """
    Write a dataframe straight to disk
    """
    # Write-only mode streams rows to disk instead of holding the whole
    # worksheet in memory.
    wb = Workbook(write_only=True)
    ws = wb.create_sheet()
    # In write-only mode styles must travel with each cell, so one styled
    # WriteOnlyCell is reused as a "stamp" for header/index values.
    cell = WriteOnlyCell(ws)
    cell.style = 'Pandas'
    def format_first_row(row, cell):
        # Yield each header value wrapped in the shared styled cell.
        for c in row:
            cell.value = c
            yield cell
    rows = dataframe_to_rows(df)
    first_row = format_first_row(next(rows), cell)
    ws.append(first_row)
    for row in rows:
        row = list(row)
        # Style the index value in the first column of every data row.
        cell.value = row[0]
        row[0] = cell
        ws.append(row)
    wb.save("openpyxl_stream.xlsx")
def read_write(df1):
    """
    Create a worksheet from a Pandas dataframe and read it back into another one
    """
    from itertools import islice
    wb = Workbook()
    ws = wb.active
    for r in dataframe_to_rows(df1, index=True, header=True):
        ws.append(r)
    # Rebuild a DataFrame from the worksheet: the first row becomes the
    # header, and the first column of every remaining row becomes the index.
    data = ws.values
    cols = next(data)[1:]
    data = list(data)
    idx = [r[0] for r in data]
    data = (islice(r, 1, None) for r in data)
    df2 = DataFrame(data, index=idx, columns=cols)
    # Round-trip: append the reconstructed frame onto a second sheet.
    ws = wb.create_sheet()
    for r in dataframe_to_rows(df2, index=True, header=True):
        ws.append(r)
    wb.save("read-write.xlsx")
def using_pandas(df):
    # Benchmark baseline: pandas' own Excel writer via the openpyxl engine.
    df.to_excel('pandas.xlsx', sheet_name='Sheet1', engine='openpyxl')
def using_xlsxwriter(df):
    # Same write through the xlsxwriter engine.
    # NOTE(review): writes the same 'pandas.xlsx' filename as using_pandas, so
    # running both overwrites the first output — confirm that is intended.
    df.to_excel('pandas.xlsx', sheet_name='Sheet1', engine='xlsxwriter')
if __name__ == "__main__":
    # Benchmark harness comparing openpyxl write strategies on random data.
    #df = DataFrame(random.rand(500000, 100))
    df = DataFrame(random.rand(1000, 100))
    #start = time.perf_counter()
    #using_pandas(df)
    #print("pandas openpyxl {0:0.2f}s".format(time.perf_counter()-start))
    #start = time.perf_counter()
    #using_xlsxwriter(df)
    #print("pandas xlsxwriter {0:0.2f}s".format(time.perf_counter()-start))
    # BUG FIX: time.clock() was removed in Python 3.8; time.perf_counter()
    # is the recommended high-resolution timer for benchmarking.
    start = time.perf_counter()
    openpyxl_in_memory(df)
    print("openpyxl in memory {0:0.2f}s".format(time.perf_counter()-start))
    start = time.perf_counter()
    openpyxl_stream(df)
    print("openpyxl streaming {0:0.2f}s".format(time.perf_counter()-start))
| [
"pandas.DataFrame",
"openpyxl.utils.dataframe.dataframe_to_rows",
"openpyxl.Workbook",
"openpyxl.styles.Font",
"openpyxl.cell.WriteOnlyCell",
"time.clock",
"openpyxl.styles.Alignment",
"openpyxl.styles.named_styles.NamedStyle",
"itertools.islice",
"numpy.random.rand",
"openpyxl.styles.Border",
... | [((319, 334), 'openpyxl.styles.Font', 'Font', ([], {'bold': '(True)'}), '(bold=True)\n', (323, 334), False, 'from openpyxl.styles import Font, Border, Side, Alignment\n'), ((340, 370), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""center"""'}), "(horizontal='center')\n", (349, 370), False, 'from openpyxl.styles import Font, Border, Side, Alignment\n'), ((378, 412), 'openpyxl.styles.Side', 'Side', ([], {'style': '"""thin"""', 'color': '"""000000"""'}), "(style='thin', color='000000')\n", (382, 412), False, 'from openpyxl.styles import Font, Border, Side, Alignment\n'), ((422, 474), 'openpyxl.styles.Border', 'Border', ([], {'left': 'side', 'right': 'side', 'top': 'side', 'bottom': 'side'}), '(left=side, right=side, top=side, bottom=side)\n', (428, 474), False, 'from openpyxl.styles import Font, Border, Side, Alignment\n'), ((487, 556), 'openpyxl.styles.named_styles.NamedStyle', 'NamedStyle', ([], {'name': '"""Pandas Title"""', 'font': 'ft', 'alignment': 'al', 'border': 'border'}), "(name='Pandas Title', font=ft, alignment=al, border=border)\n", (497, 556), False, 'from openpyxl.styles.named_styles import NamedStyle\n'), ((650, 660), 'openpyxl.Workbook', 'Workbook', ([], {}), '()\n', (658, 660), False, 'from openpyxl import Workbook\n'), ((694, 740), 'openpyxl.utils.dataframe.dataframe_to_rows', 'dataframe_to_rows', (['df'], {'index': '(True)', 'header': '(True)'}), '(df, index=True, header=True)\n', (711, 740), False, 'from openpyxl.utils.dataframe import dataframe_to_rows\n'), ((993, 1018), 'openpyxl.Workbook', 'Workbook', ([], {'write_only': '(True)'}), '(write_only=True)\n', (1001, 1018), False, 'from openpyxl import Workbook\n'), ((1058, 1075), 'openpyxl.cell.WriteOnlyCell', 'WriteOnlyCell', (['ws'], {}), '(ws)\n', (1071, 1075), False, 'from openpyxl.cell import WriteOnlyCell\n'), ((1225, 1246), 'openpyxl.utils.dataframe.dataframe_to_rows', 'dataframe_to_rows', (['df'], {}), '(df)\n', (1242, 1246), False, 'from openpyxl.utils.dataframe 
import dataframe_to_rows\n'), ((1642, 1652), 'openpyxl.Workbook', 'Workbook', ([], {}), '()\n', (1650, 1652), False, 'from openpyxl import Workbook\n'), ((1686, 1733), 'openpyxl.utils.dataframe.dataframe_to_rows', 'dataframe_to_rows', (['df1'], {'index': '(True)', 'header': '(True)'}), '(df1, index=True, header=True)\n', (1703, 1733), False, 'from openpyxl.utils.dataframe import dataframe_to_rows\n'), ((1915, 1955), 'pandas.DataFrame', 'DataFrame', (['data'], {'index': 'idx', 'columns': 'cols'}), '(data, index=idx, columns=cols)\n', (1924, 1955), False, 'from pandas import DataFrame\n'), ((1997, 2044), 'openpyxl.utils.dataframe.dataframe_to_rows', 'dataframe_to_rows', (['df2'], {'index': '(True)', 'header': '(True)'}), '(df2, index=True, header=True)\n', (2014, 2044), False, 'from openpyxl.utils.dataframe import dataframe_to_rows\n'), ((2666, 2678), 'time.clock', 'time.clock', ([], {}), '()\n', (2676, 2678), False, 'import time\n'), ((2788, 2800), 'time.clock', 'time.clock', ([], {}), '()\n', (2798, 2800), False, 'import time\n'), ((1870, 1888), 'itertools.islice', 'islice', (['r', '(1)', 'None'], {}), '(r, 1, None)\n', (1876, 1888), False, 'from itertools import islice\n'), ((2390, 2412), 'numpy.random.rand', 'random.rand', (['(1000)', '(100)'], {}), '(1000, 100)\n', (2401, 2412), False, 'from numpy import random\n'), ((2754, 2766), 'time.clock', 'time.clock', ([], {}), '()\n', (2764, 2766), False, 'import time\n'), ((2873, 2885), 'time.clock', 'time.clock', ([], {}), '()\n', (2883, 2885), False, 'import time\n')] |
import os
import re
import codecs
import numpy as np
# Default locations for saved models and the CoNLL evaluation script.
models_path = "./models"
eval_path = "./evaluation"
eval_temp = os.path.join(eval_path, "temp")
eval_script = os.path.join(eval_path, "conlleval")
def create_dico(item_list):
    """
    Create a dictionary of items from a list of list of items.
    """
    assert type(item_list) is list
    dico = {}
    for sentence in item_list:
        for token in sentence:
            # Count occurrences of each item across all inner lists.
            dico[token] = dico.get(token, 0) + 1
    return dico
def create_mapping(dico):
    """
    Create a mapping (item to ID / ID to item) from a dictionary.
    Items are ordered by decreasing frequency.
    """
    # Sort by descending count, breaking ties alphabetically; only items
    # seen more than twice receive an ID.
    ranked = sorted(dico.items(), key=lambda kv: (-kv[1], kv[0]))
    id_to_item = {rank: item for rank, (item, freq) in enumerate(ranked) if freq > 2}
    item_to_id = {item: rank for rank, item in id_to_item.items()}
    return item_to_id, id_to_item
def zero_digits(s):
    """
    Replace every digit in a string by a zero.
    """
    # NOTE(review): '\d' in a non-raw string relies on Python passing unknown
    # escapes through unchanged; the conventional spelling is r'\d'.
    return re.sub('\d', '0', s)
def get_embedding_dict(filename):
    """Load word embeddings from a space-separated text file.

    Each line is expected to look like ``word v1 v2 ... vN `` — fields
    separated by single spaces, with a trailing separator before the newline
    (the last split field is discarded, matching the original parser).

    Args:
        filename: path to the embedding file.

    Returns:
        dict mapping each word to a (1, N) numpy array of floats.
    """
    word_to_embd = {}
    # BUG FIX: the file handle was opened but never closed; 'with' guarantees
    # it is released. Iterating the handle also avoids readlines() loading
    # the entire file into memory. (Unused count/placeholder locals removed.)
    with open(filename, "r") as f:
        for line in f:
            fields = line.split(' ')
            # Drop the word (first field) and the final field, which is the
            # trailing-separator/newline artifact of the file format.
            emb = np.asarray(fields[1:len(fields) - 1])
            emb = np.reshape(emb, (1, len(emb)))
            word_to_embd[fields[0]] = emb.astype(float)
    return word_to_embd
| [
"numpy.zeros",
"os.path.join",
"re.sub"
] | [((119, 150), 'os.path.join', 'os.path.join', (['eval_path', '"""temp"""'], {}), "(eval_path, 'temp')\n", (131, 150), False, 'import os\n'), ((165, 201), 'os.path.join', 'os.path.join', (['eval_path', '"""conlleval"""'], {}), "(eval_path, 'conlleval')\n", (177, 201), False, 'import os\n'), ((1032, 1053), 're.sub', 're.sub', (['"""\\\\d"""', '"""0"""', 's'], {}), "('\\\\d', '0', s)\n", (1038, 1053), False, 'import re\n'), ((1156, 1173), 'numpy.zeros', 'np.zeros', (['[1, 20]'], {}), '([1, 20])\n', (1164, 1173), True, 'import numpy as np\n')] |
import os
import fnmatch
import shutil
import csv
import pandas as pd
import numpy as np
import glob
import datetime
# Log which copy of this script is executing (useful when multiple versions exist).
print(os.path.realpath(__file__))
def FindResults(TaskList, VisitFolder, PartID):
    """Mark each task whose results CSV exists in VisitFolder.

    For every task name, files matching '<PartID>_<task>*.csv' are searched;
    when at least one exists, the last match is stored under 'DataFile' and
    the task is flagged 'Completed'. Returns the (mutated) TaskList.
    """
    for task_name, info in TaskList.items():
        pattern = os.path.join(VisitFolder, PartID + '_' + task_name + '*.csv')
        matches = glob.glob(pattern)
        # Like the original, simply take the last hit; ideally the newest
        # file would be chosen explicitly.
        if matches:
            info['DataFile'] = matches[-1]
            info['Completed'] = True
    return TaskList
def ListOfExpectedResults():
    """Return the dictionary of all expected tasks, each initially incomplete.

    Keys are task names; each value starts as {'Completed': False} and is
    later filled in (e.g. by FindResults) with a 'DataFile' entry.
    """
    task_names = [
        'Stroop_Color',
        'Stroop_Word',
        'Stroop_ColorWord',
        'WCST',
        'DigitSpan_Forward',
        'DigitSpan_Backward',
        'Matrices_Main',
        'DMS_Stair',
        'DMS_Block',
        'VSTM_Stair',
        'VSTM_Block',
        'Speed_PatternComp',
        'Vocab_Antonyms',
    ]
    return {name: {'Completed': False} for name in task_names}
def ReadFile(VisitFolder, subid, TaskTag):
    """Locate and read the results CSV for one task in a visit folder.

    Looks for files named '<subid>_<TaskTag>*.csv'. When several match, the
    user is asked interactively which one to keep and the unselected files
    are renamed with an 'XXX_' prefix so later runs see a single candidate.

    Returns a pandas DataFrame, or an empty list when no file is found or
    the user skips the prompt.
    """
    Data = []
    # List all files in the visit folder
    ll = os.listdir(VisitFolder)
    # create the string you are looking for which is a combo of the subid and the task name
    SearchString = subid + '_' + TaskTag
    matching = fnmatch.filter(ll, SearchString + '*.csv')
    count = 1
    if len(matching) > 1:
        # Multiple candidates: ask the user to pick one.
        print('There are multiple files found for %s in folder: %s'%(SearchString, VisitFolder))
        for i in matching:
            # BUG FIX: the size shown was previously always that of the first
            # match (matching[0]); stat the file actually being listed.
            # NOTE(review): the divisor 1048 looks like it was meant to be 1024.
            SizeOfFile = np.round(os.stat(os.path.join(VisitFolder, i)).st_size/1048)
            print('\t%d) %s, size = %0.0f kB'%(count, i, SizeOfFile))
            count += 1
        sel = input('Which one should be kept? (Press return to skip)')
        if len(sel) > 0:
            SelectedFile = matching[int(sel)-1]
            # Rename the unselected files so they will hopefully not be selected the next time!
            count = 1
            for i in matching:
                if not count == int(sel):
                    OutName = 'XXX_' + i
                    print(OutName)
                    shutil.move(os.path.join(VisitFolder, i), os.path.join(VisitFolder, OutName))
                count += 1
        else:
            SelectedFile = False
    elif len(matching) == 1:
        SelectedFile = matching[0]
    else:
        SelectedFile = False
        print('Did not find any files!!!')
    if SelectedFile != False:
        # Read the whole file into a dataframe. All columns must have
        # headings or pandas raises an error.
        InputFile = os.path.join(VisitFolder, SelectedFile)
        Data = pd.read_csv(InputFile)
    return Data
def ProcessVSTMBlock(Data):
    """Summarize VSTM block data by memory load.

    Returns a dict of pivot tables — 'NResp' (responses per load), 'RT'
    (mean response time) and 'Acc' (mean accuracy) — each indexed by Load.
    An empty input yields -9999 placeholders instead.
    """
    if len(Data) == 0:
        return {'NResp': -9999, 'Acc': -9999, 'RT': -9999}
    # -99 marks a missing value in the raw files; convert to NaN so the
    # aggregations ignore it.
    cleaned = Data.replace(-99, np.nan)
    return {
        'NResp': pd.pivot_table(cleaned, values='Corr', index='Load', aggfunc='count'),
        'RT': pd.pivot_table(cleaned, values='RT', index='Load', aggfunc=np.mean),
        'Acc': pd.pivot_table(cleaned, values='Corr', index='Load', aggfunc=np.mean),
    }
def ProcessDMSBlock(Data):
    """Summarize DMS block data by memory load.

    Returns a dict of pivot tables — 'NResp' (trials responded to), 'RT'
    (mean response time) and 'Acc' (mean accuracy) — each indexed by Load.
    An empty input yields -9999 placeholders instead.
    """
    if len(Data) == 0:
        return {'NResp': -9999, 'Acc': -9999, 'RT': -9999}
    return {
        'NResp': pd.pivot_table(Data, values='resp.corr', index='Load', aggfunc='count'),
        'RT': pd.pivot_table(Data, values='resp.rt', index='Load', aggfunc=np.mean),
        'Acc': pd.pivot_table(Data, values='resp.corr', index='Load', aggfunc=np.mean),
    }
def ProcessDMSBlockv2(Data):
    """Score DMS block data by load, keyed by both relative and absolute load.

    For each unique load level, mean accuracy, mean RT and response count are
    stored twice: under 'RelLoadXX_*' (1-based rank of the load) and
    'AbsLoadXX_*' (the load value itself). Empty input yields -9999
    placeholders for loads 1-5.
    """
    Out = {}
    if len(Data) > 0:
        #cycle over load levels and save as relative load and absolute load
        UniqueLoad = Data['Load'].unique()
        # Drop NaN loads (rows where the load could not be determined).
        UniqueLoad = UniqueLoad[~np.isnan(UniqueLoad)]
        UniqueLoad.sort()
        count = 1
        for i in UniqueLoad:
            temp = Data[Data['Load']==i]
            # find acc
            Acc = (temp['resp.corr'].mean())
            RT = (temp['resp.rt'].mean())
            NResp = (temp['resp.corr'].count())
            Tag1 = 'RelLoad%02d'%(count)
            Tag2 = 'AbsLoad%02d'%(i)
            Out[Tag1+'_Acc'] = Acc
            Out[Tag2+'_Acc'] = Acc
            Out[Tag1+'_RT'] = RT
            Out[Tag2+'_RT'] = RT
            Out[Tag1+'_NResp'] = NResp
            Out[Tag2+'_NResp'] = NResp
            count += 1
    else:
        # No data: emit placeholders for five load levels.
        for i in range(1,6):
            Tag1 = 'RelLoad%02d'%(i)
            Tag2 = 'AbsLoad%02d'%(i)
            Out[Tag1+'_Acc'] = -9999
            Out[Tag2+'_Acc'] = -9999
            Out[Tag1+'_RT'] = -9999
            Out[Tag2+'_RT'] = -9999
            Out[Tag1+'_NResp'] = -9999
            Out[Tag2+'_NResp'] = -9999
    return Out
def CalculateDMSLoad(OneLineOfData):
    # calculate load from CSV results file
    # The nine stimulus position columns (Top/Center/Bottom x Left/Middle/Right)
    # are concatenated; load = 9 minus the number of '*' (empty) positions.
    # NOTE(review): this assumes the position columns hold strings with '*'
    # marking empty slots — confirm against the raw CSV format.
    Stim = OneLineOfData['TL']+OneLineOfData['TM']+OneLineOfData['TR']
    Stim = Stim + OneLineOfData['CL']+OneLineOfData['CM']+OneLineOfData['CR']
    Stim = Stim + OneLineOfData['BL']+OneLineOfData['BM']+OneLineOfData['BR']
    if not OneLineOfData.isnull()['TL']:
        Load = 9 - Stim.count('*')
    else:
        # No stimulus information on this row: load is undefined.
        Load = np.nan
    #OneLineOfData['Load'] = Load
    return Load
def CheckDMSDataFrameForLoad(Data):
    """Ensure a DMS results DataFrame has a 'Load' column.

    Some versions of the DMS output files lack the Load column; for those,
    derive it row by row from the stimulus columns via CalculateDMSLoad.
    Returns Data (with a 'Load' column added in place when it was missing).
    """
    if len(Data) > 0:
        # BUG FIX: the original tested "'Load' in Data.index", which checks
        # row labels rather than columns, so the recompute branch ran even
        # when the Load column already existed. Test the columns instead.
        if 'Load' not in Data.columns:
            Load = []
            for index, row in Data.iterrows():
                Load.append(CalculateDMSLoad(row))
            Data['Load'] = Load
    return Data
def ProcessPattComp(Data):
    """Score the pattern-comparison speed task by difficulty level.

    Practice rows (those without a 'Run.thisN' value) are excluded; for each
    difficulty level the mean accuracy, mean RT and response count are stored
    under 'LoadXX_*' keys. Fewer than 11 rows, or any processing error,
    yields -9999 placeholders for difficulty levels 1-3.
    """
    if len(Data) > 10:
        try:
            # First remove the practice rows from the data file
            Data_Run = Data[Data['Run.thisN'].notnull()]
            Out = {}
            LevelsOfDiff = Data_Run['Difficulty'].unique()
            LevelsOfDiff.sort()
            for i in LevelsOfDiff:
                temp = Data_Run[Data_Run['Difficulty'] == i]
                Tag = 'Load%02d'%(i)
                Out[Tag + '_Acc'] = temp['resp.corr'].mean()
                Out[Tag + '_RT'] = temp['resp.rt'].mean()
                Out[Tag + '_NResp'] = temp['resp.corr'].count()
        # NOTE(review): the bare except silently swallows all failures
        # (including typos/KeyErrors) and reports placeholders instead.
        except:
            Out = {}
            for i in range(1,4):
                Tag = 'Load%02d'%(i)
                Out[Tag + '_Acc'] = -9999
                Out[Tag + '_RT'] = -9999
                Out[Tag + '_NResp'] = -9999
    else:
        Out = {}
        for i in range(1,4):
            Tag = 'Load%02d'%(i)
            Out[Tag + '_Acc'] = -9999
            Out[Tag + '_RT'] = -9999
            Out[Tag + '_NResp'] = -9999
    return Out
def ProcessAntonym(Data):
    """Score the antonym vocabulary task.

    Practice rows (those without a 'trials.thisN' value) are excluded.
    Returns response count, mean accuracy and mean RT; -9999 placeholders
    when fewer than 11 rows of data are present.
    """
    if len(Data) <= 10:
        return {'NResp': -9999, 'Acc': -9999, 'RT': -9999}
    main_trials = Data[Data['trials.thisN'].notnull()]
    return {
        'NResp': main_trials['resp.corr'].count(),
        'Acc': main_trials['resp.corr'].mean(),
        'RT': main_trials['resp.rt'].mean(),
    }
def CheckWCSTErrors(CurrentRow, CurrentRule, PreviousRule):
    """Check one WCST trial for an error and a perseverative error.

    Returns (Error, PersError, Sel, Probe) where Sel is the selected card's
    feature under the current rule, Probe is the probe card's feature, and
    PersError is True when an erroneous response matches what the previously
    active rule would have required.
    """
    RuleList = ['Color', 'Shape', 'Count']
    card_column = 'Card%02d%s' % (int(CurrentRow['Card']), RuleList[CurrentRule])
    Sel = CurrentRow[card_column]
    Probe = CurrentRow['Probe%s' % (RuleList[CurrentRule])]
    PersError = False
    Error = Sel != Probe
    if Error:
        # Would this response have been correct under the previous rule?
        PreviousProbe = CurrentRow['Probe%s' % (RuleList[PreviousRule])]
        if Sel == PreviousProbe:
            PersError = True
    return Error, PersError, Sel, Probe
def ProcessWCST(Data):
    """Score the Wisconsin Card Sorting Task.

    Counts total trials, errors, and perseverative errors (responses that
    would have been correct under the previously active rule). The raw file
    contains a practice section followed by a repeated header row; only rows
    after that header are scored.

    Returns a dict with 'NTrials', 'NErrors' and 'NPersErrors' (-9999
    placeholders when 10 or fewer rows are present).
    """
    Out = {}
    if len(Data) > 10:
        # BUG FIX: the counters used to be initialized inside the try block,
        # so a failure before they were set (e.g. while locating the embedded
        # header row) raised a NameError in the exception handler. They are
        # now initialized up front, and the handler reports whatever was
        # counted before the failure.
        NumTrials = 0
        NumErrors = 0
        NumPersErrors = 0
        try:
            # The real task starts after the second 'TrialNum' header row.
            FindTask = Data[Data['TrialNum'].str.match('TrialNum')].index[0]
            Data_Run = Data.iloc[FindTask+1:]
            PreviousRule = -1
            for i, CurrentRow in Data_Run.iterrows():
                NumTrials += 1
                CurrentRule = int(CurrentRow['Rule'])
                # If the rule changed since the last trial, remember the old one.
                # NOTE(review): because PreviousRule starts at -1 and is only
                # updated when it is already != -1, it can never leave -1, so
                # perseveration is always judged against RuleList[-1] ('Count').
                # Preserved as-is; confirm the intended guard (perhaps it
                # should test LastTrialRule).
                if (PreviousRule != -1) and (CurrentRule != LastTrialRule):
                    PreviousRule = LastTrialRule
                (Error, PersError, Sel, Probe) = CheckWCSTErrors(CurrentRow, CurrentRule, PreviousRule)
                if Error:
                    NumErrors += 1
                    if PersError:
                        NumPersErrors += 1
                LastTrialRule = CurrentRule
        except Exception:
            # Best effort: fall through with the counts accumulated so far.
            pass
        Out['NTrials'] = NumTrials
        Out['NErrors'] = NumErrors
        Out['NPersErrors'] = NumPersErrors
    else:
        Out['NTrials'] = -9999
        Out['NErrors'] = -9999
        Out['NPersErrors'] = -9999
    return Out
def ProcessMatrices(Data):
    """Score the matrix-reasoning task from the 'key_resp_2.corr' column.

    Returns the number of answered trials, the number answered correctly and
    the proportion correct; -9999 placeholders for an empty DataFrame.
    """
    if len(Data) == 0:
        return {'Acc': -9999, 'NTrials': -9999, 'NCorr': -9999}
    responses = Data['key_resp_2.corr']
    return {
        'Acc': responses.mean(),
        'NTrials': responses.count(),
        'NCorr': responses.sum(),
    }
def ProcessStroopColor(Data):
    """Score the Stroop color-naming condition.

    Response-key mapping: Red->v, Green->b, Yellow->n, Blue->m. Practice
    rows (those without a 'trials.thisN' value) are excluded before scoring.
    Returns accuracy, trial/correct counts and mean RT; -9999 placeholders
    when Data is empty.
    """
    if len(Data) == 0:
        return {'Acc': -9999, 'NTrials': -9999, 'NCorr': -9999, 'RT': -9999}
    main_trials = Data[Data['trials.thisN'].notnull()]
    return {
        'Acc': main_trials['resp.corr'].mean(),
        'NTrials': main_trials['resp.corr'].count(),
        'NCorr': main_trials['resp.corr'].sum(),
        'RT': main_trials['resp.rt'].mean(),
    }
def ProcessStroopWord(Data):
    """Score the Stroop word-reading condition.

    Response-key mapping: Red->v, Green->b, Yellow->n, Blue->m. Rows lacking
    a 'trials.thisN' value are practice trials and are excluded. Returns
    Acc, NTrials, NCorr and mean RT, or -9999 placeholders for empty input.
    """
    Out = {}
    if len(Data) > 0:
        scored = Data[Data['trials.thisN'].notnull()]
        corr = scored['resp.corr']
        Out['Acc'] = corr.mean()
        Out['NTrials'] = corr.count()
        Out['NCorr'] = corr.sum()
        Out['RT'] = scored['resp.rt'].mean()
    else:
        for key in ('Acc', 'NTrials', 'NCorr', 'RT'):
            Out[key] = -9999
    return Out
def ProcessStroopColorWord(Data):
    """Score the Stroop color-word interference condition.

    Returns accuracy, counts and mean RT for all scored trials ('All_*') and
    separately for congruent ('Con_*') and incongruent ('Incon_*') trials;
    -9999 placeholders when Data is empty.
    """
    # Stroop color uses the shape color to determine the test colors which is the
    # same as the TEXT color
    # Mapping is
    # Red -- v
    # Green -- b
    # Yellow - n
    # Blue - m
    if len(Data) > 0:
        # First remove the practice rows from the data file
        Data_Run = Data[Data['trials.thisN'].notnull()]
        # NOTE(review): the congruency subsets are taken from Data, not
        # Data_Run, so practice rows are included in the Con/Incon metrics
        # but excluded from the All_* metrics — confirm this is intended.
        Data_Run_Con = Data[Data['Congruency']=='Con']
        Data_Run_Incon = Data[Data['Congruency']=='Incon']
        Out = {}
        Out['All_Acc'] = Data_Run['resp.corr'].mean()
        Out['All_NTrials'] = Data_Run['resp.corr'].count()
        Out['All_NCorr'] = Data_Run['resp.corr'].sum()
        Out['All_RT'] = Data_Run['resp.rt'].mean()
        Out['Con_Acc'] = Data_Run_Con['resp.corr'].mean()
        Out['Con_NTrials'] = Data_Run_Con['resp.corr'].count()
        Out['Con_NCorr'] = Data_Run_Con['resp.corr'].sum()
        Out['Con_RT'] = Data_Run_Con['resp.rt'].mean()
        Out['Incon_Acc'] = Data_Run_Incon['resp.corr'].mean()
        Out['Incon_NTrials'] = Data_Run_Incon['resp.corr'].count()
        Out['Incon_NCorr'] = Data_Run_Incon['resp.corr'].sum()
        Out['Incon_RT'] = Data_Run_Incon['resp.rt'].mean()
#
#        Out['Acc'] = pd.pivot_table(Data_Run, values = 'resp.corr', index = 'Congruency', aggfunc = np.mean)
#        Out['NCorr'] = pd.pivot_table(Data_Run, values = 'resp.corr', index = 'Congruency', aggfunc = np.sum)
#        Out['NTrials'] = pd.pivot_table(Data_Run, values = 'resp.corr', index = 'Congruency', aggfunc = 'count')
#        Out['RT'] = pd.pivot_table(Data_Run, values = 'resp.rt', index = 'Congruency', aggfunc = np.mean)
    else:
        Out = {}
        Out['Acc'] = -9999
        Out['NTrials'] = -9999
        Out['NCorr'] = -9999
        Out['RT'] = -9999
    return Out
def ProcessDigitSpan(Data, Dir):
    """Score the digit-span staircase task.

    Each row is scored by ProcessDigitSpanOneRow (Dir selects the forward or
    backward condition); the sequence of trial loads is then summarized by
    CalculateCapacity. Returns Capacity, NReversals, NTrials and NCorrect;
    -9999 placeholders when Data is empty.
    """
    StairLoad = []
    Correct = []
    if len(Data) > 0:
        # cycle over each row
        for i, CurrentRow in Data.iterrows():
            match, Load = ProcessDigitSpanOneRow(CurrentRow, Dir)
            StairLoad.append(Load)
            # NOTE(review): debug print left in from development.
            print(match)
            if match:
                Correct.append(1)
            else:
                Correct.append(0)
        Capacity, NReversals = CalculateCapacity(StairLoad)
        NTrials = len(Data)
        Out = {}
        Out['Capacity'] = Capacity
        Out['NReversals'] = NReversals
        Out['NTrials'] = NTrials
        Out['NCorrect'] = sum(Correct)
    else:
        Out = {}
        Out['Capacity'] = -9999
        Out['NReversals'] = -9999
        Out['NTrials'] = -9999
        Out['NCorrect'] = -9999
    # NOTE(review): debug print left in from development.
    print(Correct)
    return Out
def ProcessDigitSpanOneRow(Row, Dir):
    """Score a single digit-span trial.

    Digits are extracted from Row['Digits'] (the target sequence) and
    Row['resp.keys'] (the participant's keypresses); non-digit characters
    are ignored. For the backward condition the target is reversed before
    comparison.

    Returns (match, Load): whether the response reproduced the sequence,
    and the sequence length.
    """
    target = [int(ch) for ch in Row['Digits'] if ch.isdigit()]
    response = [int(ch) for ch in Row['resp.keys'] if ch.isdigit()]
    if Dir == 'Backward':
        # Participants must recall the sequence in reverse order.
        target.reverse()
    match = target == response
    Load = len(target)
    return match, Load
def CalculateCapacity(StairLoad):
    """Estimate span capacity from a staircase of load levels.

    Each trial is labeled +1 (load increased) or -1 (load decreased),
    carrying the previous label forward when the load is unchanged. A
    reversal is any change of label; capacity is the mean load at the
    reversal points.

    Returns (Capacity, NReversals).
    """
    directions = []
    previous_load = 0
    for load in StairLoad:
        if load > previous_load:
            directions.append(1)
        elif load < previous_load:
            directions.append(-1)
        else:
            # Unchanged load keeps the current direction.
            directions.append(directions[-1])
        previous_load = load
    # A nonzero first difference marks a change of direction (a reversal).
    reversal_idx = np.nonzero(np.diff(directions))[0]
    reversal_loads = np.array(StairLoad)[reversal_idx]
    NReversals = len(reversal_loads)
    Capacity = reversal_loads.mean()
    return Capacity, NReversals
def PutDataIntoOutputFile():
    """Unfinished sketch for writing collected results to the master file.

    NOTE(review): 'FlatResults' is not defined in this scope, so calling this
    function raises NameError; the body below is design notes plus a stub.
    """
    # There will be a single output resultsvfile
    # it will have these columns:
    # partID
    # visitID, which will often be 1,2,3
    # data checked, this cannot be changed by the program but only by a human
    # data completeness flag
    #
    #
    # First, the part id and visit id are read from the folder names.
    # Then the output data is checked to find this person. If found the data checked flag is TRUE
    # if yes, check to see if data is complete in out file.
    # if not, then load all data and see if the missing data is now available
    df2 = pd.DataFrame.from_dict(FlatResults, orient='index')
    pass
def FlattenDict(Results):
    """Flatten a {task: {measure: value}} dict into {'task_measure': value}."""
    flat = {}
    for task, measures in Results.items():
        for measure, value in measures.items():
            flat['%s_%s' % (task, measure)] = value
    return flat
def CycleOverDataFolders(AllOutDataFolder):
    """Scan the data tree and build one flattened results row per visit.

    Subject folders are all-digit directory names; inside each, visit folder
    names are matched and LoadRawData/FlattenDict are run on every hit.
    Returns a DataFrame with one row per (subject, visit).
    """
    #cycle over folders
    df = pd.DataFrame()
    ListOfDict = []
    # get all sub dirs
    subdirs = glob.glob(os.path.join(AllOutDataFolder,'*/'))
    for subdir in subdirs:
        # check subdir based on some criteria
        CurDir = os.path.split(subdir)[0]
        CurDir = os.path.split(CurDir)[-1]
        if CurDir.isdigit():
            #enter the directory and find visit folders
            VisitFolders = glob.glob(os.path.join(subdir,'*/'))
            for visFold in VisitFolders:
                CurVis = os.path.split(visFold)[0]
                CurVis = os.path.split(CurVis)[-1]
                # NOTE(review): this slice test assumes a specific visit
                # folder naming pattern (e.g. '..V0..') — confirm against the
                # actual folder names.
                if CurVis[-4:-2] == 'V0':
                    subid = CurDir
                    Visid = CurVis
                    print('%s, %s'%(subid, Visid))
                    Results = LoadRawData(os.path.join(AllOutDataFolder, subid, Visid),subid)
                    FlatResults = FlattenDict(Results)
                    # add subid and visitid
                    FlatResults['AAsubid'] = subid
                    FlatResults['AAVisid'] = Visid
                    # New rows start unreviewed; a human flips AAChecked to 1.
                    FlatResults['AAChecked'] = 0
                    ListOfDict.append(FlatResults)
    df = pd.DataFrame(ListOfDict)
    return df
def LoadOutDataFile(OutDataFilePathName):
    """Read the master NP output CSV into a pandas DataFrame."""
    return pd.read_csv(OutDataFilePathName)
def IsVisitInOutDataFile(DF, subid, Visid):
    """Check whether (subid, Visid) already has a row in the output DataFrame.

    Returns (Flag, index): Flag is True only when the first row matching the
    subject id is also the first row matching the visit id; index is that
    row's label (or -1 when no such row exists).
    """
    sub_rows = DF.index[DF['AAsubid'] == int(subid)]
    vis_rows = DF.index[DF['AAVisid'] == Visid]
    if len(sub_rows) > 0 and len(vis_rows) > 0 and sub_rows[0] == vis_rows[0]:
        return True, sub_rows[0]
    return False, -1
def IsDataChecked(DF, index):
    """Return True when a human has marked row *index* of DF as checked.

    The 'AAChecked' column is set to 1 once a user has verified that row's
    data; this flag is never changed by the program itself.
    """
    # BUG FIX: the original referenced the undefined name 'DD' instead of
    # the 'DF' parameter, so every call raised NameError.
    Flag = False
    if DF.loc[index]['AAChecked'] == 1:
        Flag = True
    return Flag
def WriteOneSubjToOutDataFile(OneSubData, OutFile):
    # TODO: not yet implemented — intended to append one subject's flattened
    # results to the master output file.
    pass
def WriteHeaderToOutDataFile(OneSubData, OutFileFID):
    # TODO: not yet implemented — intended to write the column header row to
    # a freshly created master output file.
    pass
def LocateOutDataFile():
    """Find the most recent master NP output CSV, or propose a new name.

    Returns the path of the last existing 'NCM_Master_NP*.csv' file, or a
    new timestamped filename when none exists yet.
    """
    # NOTE(review): hard-coded to one user's home directory; parameterize if
    # this ever needs to run elsewhere.
    BaseDir = '/Users/jasonsteffener'
    OutDataFolder = os.path.join(BaseDir, 'Dropbox/steffenercolumbia/Projects/MyProjects/NeuralCognitiveMapping/NeuroPsychData')
    BaseFileName = 'NCM_Master_NP'
    # What files exist with this name?
    Files = glob.glob(os.path.join(OutDataFolder, BaseFileName + '*.csv'))
    now = datetime.datetime.now()
    NowString = now.strftime("_updated_%b-%d-%Y_%H-%M.csv")
    NewOutFileName = BaseFileName + NowString
    if len(Files) == 0:
        # No existing file: return a new timestamped name to create.
        FileName = os.path.join(OutDataFolder,NewOutFileName)
    else:
        # this will open an existing file
        FileName = Files[-1]
    return FileName
def LoadRawData(VisitFolder, subid):
print('working on %s'%(subid))
Results = {}
# Stroop
Data = ReadFile(VisitFolder, subid, 'Stroop_Color_')
Results['StrpC'] = ProcessStroopColor(Data)
Data = ReadFile(VisitFolder, subid, 'Stroop_Word_')
Results['StrpW'] = ProcessStroopWord(Data)
Data = ReadFile(VisitFolder, subid, 'Stroop_ColorWord')
Results['StrpCW'] = ProcessStroopColorWord(Data)
# Wisconsin Card Sort
Data = ReadFile(VisitFolder, subid, 'WCST')
Results['WCST'] = ProcessWCST(Data)
# Antonyms
Data = ReadFile(VisitFolder, subid, 'Vocab_Antonyms')
Results['Ant'] = ProcessAntonym(Data)
# Digit Span
# Forward
Data = ReadFile(VisitFolder, subid, 'DigitSpan_Forward')
Dir = 'Forward'
Results['DSFor'] = ProcessDigitSpan(Data, Dir)
# Backward
Data = ReadFile(VisitFolder, subid, 'DigitSpan_Backward')
Dir = 'Backward'
Results['DSBack'] = ProcessDigitSpan(Data, Dir)
# Pattern Comparison
Data = ReadFile(VisitFolder, subid, 'Speed_PatternComp')
Results['PComp'] = ProcessPattComp(Data)
# Matrics
Data = ReadFile(VisitFolder, subid, 'Matrices_Main')
Results['Matr'] = ProcessMatrices(Data)
Data = ReadFile(VisitFolder, subid, 'DMS_Block_BehRun1')
Data = CheckDMSDataFrameForLoad(Data)
Results['DMSBeh1'] = ProcessDMSBlockv2(Data)
Data = ReadFile(VisitFolder, subid, 'VSTM_Block_BehRun1')
Results['VSTMBeh1'] = ProcessVSTMBlock(Data)
# Data = ReadFile(VisitFolder, subid, 'DMS_Block_MRIRun1')
# Data = CheckDMSDataFrameForLoad(Data)
# Results['DMSMRI1'] = ProcessDMSBlockv2(Data)
#
# Data = ReadFile(VisitFolder, subid, 'DMS_Block_MRIRun2')
# Data = CheckDMSDataFrameForLoad(Data)
# Results['DMSMRI2'] = ProcessDMSBlockv2(Data)
#
# Data = ReadFile(VisitFolder, subid, 'DMS_Block_BehRun1')
# Data = CheckDMSDataFrameForLoad(Data)
# Results['DMSBeh1'] = ProcessDMSBlockv2(Data)
#
return Results
| [
"pandas.DataFrame",
"fnmatch.filter",
"pandas.DataFrame.from_dict",
"pandas.pivot_table",
"pandas.read_csv",
"os.path.realpath",
"datetime.datetime.now",
"numpy.isnan",
"numpy.nonzero",
"numpy.diff",
"numpy.array",
"os.path.split",
"os.path.join",
"os.listdir"
] | [((125, 151), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (141, 151), False, 'import os\n'), ((2463, 2486), 'os.listdir', 'os.listdir', (['VisitFolder'], {}), '(VisitFolder)\n', (2473, 2486), False, 'import os\n'), ((2635, 2677), 'fnmatch.filter', 'fnmatch.filter', (['ll', "(SearchString + '*.csv')"], {}), "(ll, SearchString + '*.csv')\n", (2649, 2677), False, 'import fnmatch\n'), ((18347, 18359), 'numpy.diff', 'np.diff', (['Rev'], {}), '(Rev)\n', (18354, 18359), True, 'import numpy as np\n'), ((19139, 19190), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['FlatResults'], {'orient': '"""index"""'}), "(FlatResults, orient='index')\n", (19161, 19190), True, 'import pandas as pd\n'), ((19548, 19562), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (19560, 19562), True, 'import pandas as pd\n'), ((20684, 20708), 'pandas.DataFrame', 'pd.DataFrame', (['ListOfDict'], {}), '(ListOfDict)\n', (20696, 20708), True, 'import pandas as pd\n'), ((20825, 20857), 'pandas.read_csv', 'pd.read_csv', (['OutDataFilePathName'], {}), '(OutDataFilePathName)\n', (20836, 20857), True, 'import pandas as pd\n'), ((21624, 21741), 'os.path.join', 'os.path.join', (['BaseDir', '"""Dropbox/steffenercolumbia/Projects/MyProjects/NeuralCognitiveMapping/NeuroPsychData"""'], {}), "(BaseDir,\n 'Dropbox/steffenercolumbia/Projects/MyProjects/NeuralCognitiveMapping/NeuroPsychData'\n )\n", (21636, 21741), False, 'import os\n'), ((21892, 21915), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (21913, 21915), False, 'import datetime\n'), ((4077, 4116), 'os.path.join', 'os.path.join', (['VisitFolder', 'SelectedFile'], {}), '(VisitFolder, SelectedFile)\n', (4089, 4116), False, 'import os\n'), ((4310, 4332), 'pandas.read_csv', 'pd.read_csv', (['InputFile'], {}), '(InputFile)\n', (4321, 4332), True, 'import pandas as pd\n'), ((4746, 4812), 'pandas.pivot_table', 'pd.pivot_table', (['Data'], {'values': '"""Corr"""', 'index': '"""Load"""', 
'aggfunc': '"""count"""'}), "(Data, values='Corr', index='Load', aggfunc='count')\n", (4760, 4812), True, 'import pandas as pd\n'), ((4835, 4899), 'pandas.pivot_table', 'pd.pivot_table', (['Data'], {'values': '"""RT"""', 'index': '"""Load"""', 'aggfunc': 'np.mean'}), "(Data, values='RT', index='Load', aggfunc=np.mean)\n", (4849, 4899), True, 'import pandas as pd\n'), ((4927, 4993), 'pandas.pivot_table', 'pd.pivot_table', (['Data'], {'values': '"""Corr"""', 'index': '"""Load"""', 'aggfunc': 'np.mean'}), "(Data, values='Corr', index='Load', aggfunc=np.mean)\n", (4941, 4993), True, 'import pandas as pd\n'), ((5376, 5447), 'pandas.pivot_table', 'pd.pivot_table', (['Data'], {'values': '"""resp.corr"""', 'index': '"""Load"""', 'aggfunc': '"""count"""'}), "(Data, values='resp.corr', index='Load', aggfunc='count')\n", (5390, 5447), True, 'import pandas as pd\n'), ((5518, 5587), 'pandas.pivot_table', 'pd.pivot_table', (['Data'], {'values': '"""resp.rt"""', 'index': '"""Load"""', 'aggfunc': 'np.mean'}), "(Data, values='resp.rt', index='Load', aggfunc=np.mean)\n", (5532, 5587), True, 'import pandas as pd\n'), ((5654, 5725), 'pandas.pivot_table', 'pd.pivot_table', (['Data'], {'values': '"""resp.corr"""', 'index': '"""Load"""', 'aggfunc': 'np.mean'}), "(Data, values='resp.corr', index='Load', aggfunc=np.mean)\n", (5668, 5725), True, 'import pandas as pd\n'), ((18370, 18385), 'numpy.nonzero', 'np.nonzero', (['Rev'], {}), '(Rev)\n', (18380, 18385), True, 'import numpy as np\n'), ((18404, 18423), 'numpy.array', 'np.array', (['StairLoad'], {}), '(StairLoad)\n', (18412, 18423), True, 'import numpy as np\n'), ((19630, 19666), 'os.path.join', 'os.path.join', (['AllOutDataFolder', '"""*/"""'], {}), "(AllOutDataFolder, '*/')\n", (19642, 19666), False, 'import os\n'), ((21829, 21880), 'os.path.join', 'os.path.join', (['OutDataFolder', "(BaseFileName + '*.csv')"], {}), "(OutDataFolder, BaseFileName + '*.csv')\n", (21841, 21880), False, 'import os\n'), ((22070, 22113), 'os.path.join', 
'os.path.join', (['OutDataFolder', 'NewOutFileName'], {}), '(OutDataFolder, NewOutFileName)\n', (22082, 22113), False, 'import os\n'), ((254, 307), 'os.path.join', 'os.path.join', (['VisitFolder', "(PartID + '_' + j + '*.csv')"], {}), "(VisitFolder, PartID + '_' + j + '*.csv')\n", (266, 307), False, 'import os\n'), ((19757, 19778), 'os.path.split', 'os.path.split', (['subdir'], {}), '(subdir)\n', (19770, 19778), False, 'import os\n'), ((19799, 19820), 'os.path.split', 'os.path.split', (['CurDir'], {}), '(CurDir)\n', (19812, 19820), False, 'import os\n'), ((6169, 6189), 'numpy.isnan', 'np.isnan', (['UniqueLoad'], {}), '(UniqueLoad)\n', (6177, 6189), True, 'import numpy as np\n'), ((19947, 19973), 'os.path.join', 'os.path.join', (['subdir', '"""*/"""'], {}), "(subdir, '*/')\n", (19959, 19973), False, 'import os\n'), ((20040, 20062), 'os.path.split', 'os.path.split', (['visFold'], {}), '(visFold)\n', (20053, 20062), False, 'import os\n'), ((20091, 20112), 'os.path.split', 'os.path.split', (['CurVis'], {}), '(CurVis)\n', (20104, 20112), False, 'import os\n'), ((3711, 3739), 'os.path.join', 'os.path.join', (['VisitFolder', 'i'], {}), '(VisitFolder, i)\n', (3723, 3739), False, 'import os\n'), ((3740, 3774), 'os.path.join', 'os.path.join', (['VisitFolder', 'OutName'], {}), '(VisitFolder, OutName)\n', (3752, 3774), False, 'import os\n'), ((20322, 20366), 'os.path.join', 'os.path.join', (['AllOutDataFolder', 'subid', 'Visid'], {}), '(AllOutDataFolder, subid, Visid)\n', (20334, 20366), False, 'import os\n'), ((3121, 3159), 'os.path.join', 'os.path.join', (['VisitFolder', 'matching[0]'], {}), '(VisitFolder, matching[0])\n', (3133, 3159), False, 'import os\n')] |
#coding:utf-8
###################################################
# File Name: dataloader.py
# Author: <NAME>
# mail: @
# Created Time: Wed 21 Mar 2018 07:04:35 PM CST
#=============================================================
import os
import sys
import time
import datetime
import gensim
import codecs
import numpy as np
import tensorflow as tf
sys.path.append('../')
from tensorflow.contrib import learn
from nltk.util import ngrams
from setting import *
from common.strutil import stringhandler
special_words = set(['<num>', '<phone>'])
def generate_subword(word_uni, max_num):
'''
@params: word_uni, unicode
max_num, the maximum number of subword
@return suwords, utf8
'''
cur_num = 0
subwords = []
for i in xrange(2, len(word_uni)):
subword_iter = ngrams(word_uni, i)
for subword in subword_iter:
if cur_num >= max_num:
break
subword = ''.join(subword).encode('utf-8')
subwords.append(subword)
cur_num += 1
return subwords, cur_num
def trans_input_expand_subword(x_text, vocab_processor, seq_len, max_num=8):
vocab_dict = vocab_processor.vocabulary_._mapping
x = []
all_nums = []
for text in x_text:
text_indices = [] #当前问句的每个word的subwords, list
cur_nums = [] #当前问句的每个word的subword个数
words = text.split(' ')
print(text, 'len:', len(words))
for word_uni in words:
subwords, subword_num = generate_subword(word_uni, max_num)
subwords_str = ' '.join(subwords)
print(subwords_str)
word_subword_indices = [vocab_dict[word_uni] if word_uni in vocab_dict else 0]
subword_indices = [vocab_dict[i.decode('utf-8')] if i.decode('utf-8') in vocab_dict else 0 for i in subwords]
word_subword_indices.extend(subword_indices)
word_subword_indices += [0] * (max_num - subword_num) #subword padding
text_indices.append(word_subword_indices)
cur_nums.append(subword_num)
text_indices = text_indices + [[0] * (max_num)] * (seq_len - len(words)) #word padding
x.append(text_indices)
all_nums.append(cur_nums)
return x, all_nums
def get_vocab_idx2word(vocab_dict):
vocab_idx2word = {}
for word in vocab_dict:
idx = vocab_dict[word]
vocab_idx2word[idx] = word
return vocab_idx2word
def trans_to_padded_text(x, vocab_dict):
vocab_idx2word = get_vocab_idx2word(vocab_dict)
padded_x_text = []
for text_indices in x:
padded_text = [vocab_idx2word[idx] for idx in text_indices]
padded_x_text.append(' '.join(padded_text))
return padded_x_text
def trans_input_to_sparse(x_text, vocab_processor, seq_len, max_num=8):
'''
@breif: prepare data for SparseTensor which has params 'indice', 'values' and 'shape'
there is:
sparse_x = tf.sparse_placeholder(tf.int32)
shape = [seq_len, max_subword_num]
emb = tf.nn.embedding_lookup_sparse(embedding, sparse_x, None, combiner='mean')
...
feed_dict = {x:(indices, values, shape)}
...
'''
vocab_dict = vocab_processor.vocabulary_._mapping # vocab_dict 必须fit的是unicode编码
sparse_values = []
all_nums = []
sparse_indices = []
left_start = 0
for text in x_text:
text_IDs = [] #当前问句的每个word的subwords在词表中的id, list, 对应sparse_values
cur_nums = [] #当前问句的每个word的subword个数
cur_indices = [] #当前每个word(subword)的稀疏索引, 对应sparse_indices
#word's sparse_indices and sparse_values
words = text.split(' ')
for word_bias, word_uni in enumerate(words):
#get word_subwords_vocab_indices
subwords, subword_num = generate_subword(word_uni, max_num)
subwords_str = ' '.join(subwords)
word_subword_indices = [vocab_dict[word_uni] if word_uni in vocab_dict else 0]
subword_indices = [vocab_dict[i.decode('utf-8')] if i.decode('utf-8') in vocab_dict else 0 for i in subwords]
word_subword_indices.extend(subword_indices)
#word_subword sparse indices
for right_start in xrange(len(word_subword_indices)):
cur_indices.append([left_start + word_bias, right_start])
cur_nums.append(subword_num + 1)
text_IDs.extend(word_subword_indices)
#padding words' sparse_indices and sparse_values
for padding_word_bias in xrange(len(words), seq_len):
text_IDs.append(0)
cur_bias = left_start + padding_word_bias
cur_indices.append([cur_bias, 0])
cur_nums.append(1)
left_start += seq_len
sparse_indices.extend(cur_indices)
sparse_values.extend(text_IDs)
all_nums.extend(cur_nums)
return sparse_indices, sparse_values, all_nums
def recomb_sent_with_subword(words, max_num=8):
new_words = []
for word_uni in words:
word_len = len(word_uni)
new_words.append(word_uni)
if word_uni in special_words:
break
grams, _ = generate_subword(word_uni, max_num)
grams_uni = [i.decode('utf-8') for i in grams]
new_words.extend(grams_uni)
return new_words
def expand_batch_sents_with_subword(texts, batch_size, max_num=8):
new_texts = []
for text_str in texts:
text = text_str.split(' ')
new_text = recomb_sent_with_subword(text, max_num)
num_iter = int(len(new_text) - 1) / batch_size + 1
for batch_num in range(num_iter):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, len(new_text))
cur_batch = new_text[start_index:end_index]
cur_batch_str = ' '.join(cur_batch)
new_texts.append(cur_batch_str)
return new_texts
def get_stopwords_set(stop_words_file):
'''
@breif: read stop_words file
'''
stop_set = set()
with codecs.open(stop_words_file, 'r', 'utf8') as fr:
for word in fr:
word = word.strip()
stop_set.add(word)
return stop_set
def one_hot_encode(list):
array = np.array(list)
max_class = array.max() + 1
return np.eye(max_class)[array]
def load_bin_vec(file_name, vocab, ksize=100):
time_str = datetime.datetime.now().isoformat()
print('{}:开始筛选数据词汇...'.format(time_str))
word_vecs = {}
#model = gensim.models.Word2Vec.load_word2vec_format(file_name, binary=True)
model = gensim.models.KeyedVectors.load_word2vec_format(file_name, binary=True)
#model = gensim.models.KeyedVectors.load_word2vec_format(file_name, binary=False)
for word in vocab:
try:
word_vecs[word] = model[word]
except:
word_vecs[word] = np.random.uniform(-1.0, 1.0, ksize).astype(np.float32)
return word_vecs
def get_word_vecs(word_vecs_path, vocab, vocab_idx_map, k, is_random=False):
word_vecs = load_bin_vec(word_vecs_path, vocab, k)
time_str = datetime.datetime.now().isoformat()
print('{}:生成嵌入层参数W...'.format(time_str))
vocab_size = len(word_vecs)
W = np.random.uniform(-1.0, 1.0, size=[vocab_size, k]).astype(np.float32)
if not is_random:
print('非随机初始化')
for i, word in enumerate(word_vecs):
idx = vocab_idx_map[word]
W[idx] = word_vecs[word]
time_str = datetime.datetime.now().isoformat()
print("{}:生成嵌入层参数W完毕".format(time_str))
return W
def write_label_file(label2idx, output_file):
dir_name = os.path.dirname(output_file)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
with open(output_file, 'w') as fw:
labels = sorted(label2idx.items(), key=lambda x:x[1])
for label, value in labels:
fw.write(label + '\t' + str(value) + '\n')
def read_labels_file(label_file):
#get real label
idx2label = {}
label2idx = {}
with codecs.open(label_file, 'r', 'utf8') as fr:
for line in fr:
line = line.strip().lower()
line_info = line.split('\t')
label = line_info[0]
label_idx = line_info[1]
idx2label[int(label_idx)] = label
label2idx[label] = int(label_idx)
return label2idx, idx2label
def read_code_file(code_file):
#get real label
label2code = {}
with open(code_file, 'r') as fr:
for line in fr:
line = line.strip().decode('utf8')
line_info = line.split('\t')
label = line_info[0]
code_value = line_info[1]
label2code[label] = code_value
return label2code
def process_sentence(text, stop_set, label2idx):
uni_sents = []
sent_segs = []
sent, word_segs_str = stringhandler.split_word_and_seg(text, stop_set)
uni_sents.append(sent.decode('utf-8'))
sent_segs.append(word_segs_str)
return uni_sents, sent_segs
def load_test_data(data_file):
uni_sents = []
sent_segs = []
labels = []
stop_set = get_stop_words_set(STOP_WORDS_FILE)
with open(data_file, 'r') as lines:
for line in lines:
line = line.strip().lower()
line_info = line.split('\t')
trunks_str = line_info[0]
sent, word_segs_str = stringhandler.split_word_and_seg(trunks_str, stop_set)
uni_sents.append(sent.decode('utf-8'))
sent_segs.append(word_segs_str)
label = line_info[1]
labels.append(label)
label2idx, _ = read_labels_file(LABEL_FILE)
label_indices = [label2idx[label] if label in label2idx else 0 for label in labels]
one_hot_labels = one_hot_encode(label_indices)
return [uni_sents, sent_segs, one_hot_labels]
def load_data_and_labels(data_file):
stop_set = get_stop_words_set(STOP_WORDS_FILE)
uni_sents = []
sent_segs = []
labels = []
enum_index = 0
label2idx = {}
with open(data_file, 'r') as lines:
for line in lines:
line = line.strip().lower()
line_info = line.split('\t')
if len(line_info) < 2:
continue
trunks_str = line_info[0]
sent, word_segs_str = stringhandler.split_word_and_seg(trunks_str, stop_set)
uni_sents.append(sent.decode('utf-8'))
sent_segs.append(word_segs_str)
label = line_info[1]
labels.append(label)
if label not in label2idx:
label2idx[label] = enum_index
enum_index += 1
label_indices = [label2idx[label] for label in labels]
one_hot_labels = one_hot_encode(label_indices)
#print('one hot labels:', one_hot_labels)
write_label_file(label2idx, LABEL_FILE)
return [uni_sents, sent_segs, one_hot_labels]
def batch_iter(data, batch_size, num_epochs, shuffle=True):
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1
for epoch in range(num_epochs):
if shuffle:
shuffle_indice = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indice]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
def batch_iter2(data, batch_size, num_epochs, shuffle=True):
data = np.array(data)
data_size = len(data)
data = one_hot_encode(data)
rs = []
for epoch in range(num_epochs):
if shuffle:
shuffle_indice = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indice]
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
rs.append(shuffled_data[start_index:end_index])
return rs
if __name__ == '__main__':
print(embed)
#batch_embed = tf.split(embed, 2)
batch_embed = tf.reshape(embed, [-1, 5, 10])
#batch_embed = tf.expand_dims(batch_embed, -1)
#batch_embed = tf.concat(batch_embed, -1)
rs = batch_iter2(batch_embed)
print(rs)
| [
"sys.path.append",
"numpy.random.uniform",
"common.strutil.stringhandler.split_word_and_seg",
"nltk.util.ngrams",
"codecs.open",
"os.makedirs",
"os.path.dirname",
"tensorflow.reshape",
"os.path.exists",
"numpy.array",
"numpy.arange",
"gensim.models.KeyedVectors.load_word2vec_format",
"numpy.... | [((351, 373), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (366, 373), False, 'import sys\n'), ((6242, 6256), 'numpy.array', 'np.array', (['list'], {}), '(list)\n', (6250, 6256), True, 'import numpy as np\n'), ((6582, 6653), 'gensim.models.KeyedVectors.load_word2vec_format', 'gensim.models.KeyedVectors.load_word2vec_format', (['file_name'], {'binary': '(True)'}), '(file_name, binary=True)\n', (6629, 6653), False, 'import gensim\n'), ((7623, 7651), 'os.path.dirname', 'os.path.dirname', (['output_file'], {}), '(output_file)\n', (7638, 7651), False, 'import os\n'), ((8834, 8882), 'common.strutil.stringhandler.split_word_and_seg', 'stringhandler.split_word_and_seg', (['text', 'stop_set'], {}), '(text, stop_set)\n', (8866, 8882), False, 'from common.strutil import stringhandler\n'), ((10926, 10940), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (10934, 10940), True, 'import numpy as np\n'), ((11559, 11573), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (11567, 11573), True, 'import numpy as np\n'), ((12170, 12200), 'tensorflow.reshape', 'tf.reshape', (['embed', '[-1, 5, 10]'], {}), '(embed, [-1, 5, 10])\n', (12180, 12200), True, 'import tensorflow as tf\n'), ((811, 830), 'nltk.util.ngrams', 'ngrams', (['word_uni', 'i'], {}), '(word_uni, i)\n', (817, 830), False, 'from nltk.util import ngrams\n'), ((6046, 6087), 'codecs.open', 'codecs.open', (['stop_words_file', '"""r"""', '"""utf8"""'], {}), "(stop_words_file, 'r', 'utf8')\n", (6057, 6087), False, 'import codecs\n'), ((6300, 6317), 'numpy.eye', 'np.eye', (['max_class'], {}), '(max_class)\n', (6306, 6317), True, 'import numpy as np\n'), ((7663, 7687), 'os.path.exists', 'os.path.exists', (['dir_name'], {}), '(dir_name)\n', (7677, 7687), False, 'import os\n'), ((7697, 7718), 'os.makedirs', 'os.makedirs', (['dir_name'], {}), '(dir_name)\n', (7708, 7718), False, 'import os\n'), ((8017, 8053), 'codecs.open', 'codecs.open', (['label_file', '"""r"""', '"""utf8"""'], {}), 
"(label_file, 'r', 'utf8')\n", (8028, 8053), False, 'import codecs\n'), ((6388, 6411), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6409, 6411), False, 'import datetime\n'), ((7090, 7113), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7111, 7113), False, 'import datetime\n'), ((7216, 7266), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)'], {'size': '[vocab_size, k]'}), '(-1.0, 1.0, size=[vocab_size, k])\n', (7233, 7266), True, 'import numpy as np\n'), ((7467, 7490), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7488, 7490), False, 'import datetime\n'), ((9353, 9407), 'common.strutil.stringhandler.split_word_and_seg', 'stringhandler.split_word_and_seg', (['trunks_str', 'stop_set'], {}), '(trunks_str, stop_set)\n', (9385, 9407), False, 'from common.strutil import stringhandler\n'), ((10269, 10323), 'common.strutil.stringhandler.split_word_and_seg', 'stringhandler.split_word_and_seg', (['trunks_str', 'stop_set'], {}), '(trunks_str, stop_set)\n', (10301, 10323), False, 'from common.strutil import stringhandler\n'), ((11140, 11160), 'numpy.arange', 'np.arange', (['data_size'], {}), '(data_size)\n', (11149, 11160), True, 'import numpy as np\n'), ((11751, 11771), 'numpy.arange', 'np.arange', (['data_size'], {}), '(data_size)\n', (11760, 11771), True, 'import numpy as np\n'), ((6864, 6899), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', 'ksize'], {}), '(-1.0, 1.0, ksize)\n', (6881, 6899), True, 'import numpy as np\n')] |
# --- third-party imports ---------------------------------------------------
from sklearn.preprocessing import LabelEncoder
from imutils.face_utils import FaceAligner
from sklearn.svm import SVC
from imutils import paths
import tensorflow as tf
from tensorflow import logging
import os
# Silence TensorFlow C++ log spam (3 = errors only); must be set before
# the TF runtime initializes.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow.python.util import deprecation
# Suppress TF Python-side deprecation warnings as well.
deprecation._PRINT_DEPRECATION_WARNINGS = False
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import face_recognition
import numpy as np
import imutils
import random
import pickle
import heapq
import dlib
import cv2
"""
ONE BIG CLASS OF FACE RECOGNITION
Created By: merkaba/pradhonoaji
Created on: 30-Aug-2019
"""
class FaceOrchestrator(object):
    """Pipeline object for face detection, alignment and recognition.

    Typical flow (as used by the methods below):
    ``set_image_to_be_processed`` -> ``detect_faces`` -> ``align_faces``
    -> ``recognize_faces``.
    """
    # default path for encodings file pickle (face-embedding database)
    file_encodings = './models/encodings.pickle'
    # path for SVM model and label
    file_svm_model = './models/svm_model.pickle'
    file_svm_label = './models/svm_label.pickle'
    # path for face detector model (frozen MobileNet-SSD tensorflow graph)
    file_face_detector = './models/frozen_inference_graph_face.pb'
    # path for face landmark used in face aligner
    file_face_landmark = './models/shape_predictor_68_face_landmarks.dat'
    # Haar cascade fallback detector
    file_haar = './models/haarcascade_frontalface_alt2.xml'
    # final face width (=height ~square rectangle) for recognition in pixel
    # ==> face ratio
    target_face_width = 256
    target_face_height = 286
    # percentage of zooming, calculated based on the ratio
    # of distance between left eye to the border of image and iage width,
    # Zoom In < target_percent_face < Zoom out
    # target_face_percent = 0.3
    # filename separator between name and random string generated from UUID
    name_separator = '__'
    # image array (BGR)
    image = []
    # boxes coordinate of faces (left, right, top, bottom)
    boxes = []
    # detected faces (cropped BGR images, parallel to `boxes`)
    faces_detected = []
    # normalized faces (aligned crops, parallel to `boxes`)
    faces_aligned = []
    # database: expected to hold keys 'encodings' and 'names' once loaded
    data = {}
    # SVM model
    svm_model = None
    svm_label = None
    # face detection method: AI or HAAR or HOG
    fd_method = 'AI'
    fd_ai_min_score = 0.6
    # face recognition method: ML or DELTA
    fr_method = 'DELTA'
    fr_max_delta = 0.6 # delta max
    fr_min_probability = 0.5 # SVM probability
    def __init__(self):
        """Load every model the pipeline needs (detector graph, landmark
        predictor, encodings database, SVM classifier, Haar cascade)."""
        # initiate tensorflow object for face detection
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            # Deserialize the frozen MobileNet-SSD graph into this graph.
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.file_face_detector, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        with self.detection_graph.as_default():
            # Grow GPU memory on demand instead of grabbing it all upfront.
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            self.sess = tf.Session(graph=self.detection_graph, config=config)
            self.windowNotSet = True
        # initiate face aligner (dlib 68-point landmark predictor)
        predictor = dlib.shape_predictor(self.file_face_landmark)
        # percentage_face = (self.target_face_percent, self.target_face_percent)
        # construct face aligner
        self.faceAligner = FaceAligner(predictor,
            desiredFaceWidth=self.target_face_width,
            desiredFaceHeight=self.target_face_height,
            desiredLeftEye=(0.3, 0.3))
        # load encodings (helper defined elsewhere in this class; presumably
        # reads `file_encodings` into self.data -- confirm against full file)
        self.load_encodings()
        # load svm models
        self.load_svm_model()
        # cascade haar
        self.face_cascade = cv2.CascadeClassifier(self.file_haar)
def run(self):
"""image: bgr image
return (boxes, scores, classes, num_detections)
"""
# image_np = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
image_np = self.rgb_image
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
# start_time = time.time()
(boxes, scores, classes, num_detections) = self.sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# elapsed_time = time.time() - start_time
# print('[Info] inference time cost: {}'.format(elapsed_time))
return (boxes, scores, classes, num_detections)
def set_image_to_be_processed(self, image):
"""
image = array of image (frame)
"""
self.image = image
self.rgb_image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
def detect_faces(self):
"""
DETECT FACES USING MOBILENET SSD MODEL (TENSORFLOW) OR HAAR CASCADE
@output-boxes: a list of rectangle face (left, right, top, bottom)
"""
newboxes = []
faces_detected = []
if self.fd_method == 'AI':
# detect Face using Deep Learning: architecture MobileNet, method SSD
(boxes, scores, __, __) = self.run()
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
max_boxes_to_draw = 20
for i in range(min(max_boxes_to_draw, boxes.shape[0])):
# check teh score
if scores[i] > self.fd_ai_min_score:
box = tuple(boxes[i].tolist())
ymin, xmin, ymax, xmax = box
im_height, im_width, __ = self.image.shape
# convert tensorflow normalized coordinate to absolute coordinate
(left, right, top, bottom) = (int(xmin*im_width), int(xmax*im_width), int(ymin*im_height), int(ymax*im_height))
face = self.image[top:bottom, left:right]
newboxes.append((left, right, top, bottom))
faces_detected.append(face)
self.boxes = newboxes
self.faces_detected = faces_detected
elif self.fd_method == 'HOG':
# detect Face using HOG
# self.rgb_image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
boxes = face_recognition.face_locations(self.rgb_image, model='hog')
for box in boxes:
top, right, bottom, left = box
face = self.image[top:bottom, left:right]
newboxes.append((left, right, top, bottom))
faces_detected.append(face)
self.boxes = newboxes
self.faces_detected = faces_detected
elif self.fd_method == 'HAAR':
gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
faces = self.face_cascade.detectMultiScale(
gray,
scaleFactor=1.05,
minNeighbors=9,
minSize=(30,30),
flags = cv2.CASCADE_SCALE_IMAGE
)
for (x, y, w, h) in faces:
newboxes.append((x, y+w, y, y+h))
face = self.image[y:y+h, x:x+w]
faces_detected.append(face)
self.boxes = newboxes
self.faces_detected = faces_detected
def align_faces(self, target_face_percent=0.3):
"""
ALIGN FACES BASED ON FACE LANDMARK USING DLIB
Note: align face before generate face encoding
@param-image: image which contain face (or faces)
@param-boxes: list of rectangle of face (left, right, top, bottom)
@output-faces_aligned: list of aligned face image
"""
self.__set_target_face_percentage(target_face_percent)
faces_aligned = []
gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
# for left, right, top, bottom in boxes:
for box in self.boxes:
left, right, top, bottom = box
rect = dlib.rectangle(left=left, right=right, top=top, bottom=bottom)
facealigned = self.faceAligner.align(self.image, gray, rect)
faces_aligned.append(facealigned)
self.faces_aligned = faces_aligned
def recognize_faces(self):
if self.fr_method == 'DELTA':
return self.recognize_faces_and_draw_knn_2(self.fr_max_delta)
else:
return self.recognize_faces_and_draw_svm(self.fr_min_probability)
def recognize_faces_and_draw_knn(self, minDistance=0.5):
"""
RECOGNIZE ALL FACES DETECTED IN A FRAME
minDistance: less value more strict
Method Boolean
"""
index = 0
new_image = self.image.copy()
# loop all faces
name = None
for left, right, top, bottom in self.boxes:
cv2.rectangle(new_image,(left,top),(right,bottom),(0,255,0),2)
if self.faces_aligned == [] or self.faces_aligned is None:
face = self.faces_detected[index]
else:
face = self.faces_aligned[index]
# generate encoding for the face
encoding = self.generate_encoding(face)
# match a face with all faces in database
# for encoding in encodings:
matches = face_recognition.compare_faces(self.data["encodings"], encoding, minDistance)
name = "Unknown"
if True in matches:
matchedIdxs = [i for (i, b) in enumerate(matches) if b]
counts = {}
for i in matchedIdxs:
name = self.data["names"][i]
counts[name] = counts.get(name, 0) + 1
name = max(counts, key=counts.get)
y = top - 15 if top - 15 > 15 else top + 15
cv2.putText(new_image, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0), 4)
index = index + 1
return new_image, name
    def recognize_faces_and_draw_knn_2(self, minDistance=0.5):
        """
        Recognize every detected face in the frame by sorting Euclidean
        distances to the encoding database; draw the up-to-3 best candidates
        next to each face box.

        :param minDistance: distance threshold; smaller value is stricter.
        :return: (annotated image copy, list with the best-match name for
                  each face that had at least one match)
        """
        index = 0
        new_image = self.image.copy()
        # loop all faces
        top_names = []
        name = None
        for left, right, top, bottom in self.boxes:
            # draw rectangle around the face in frame
            cv2.rectangle(new_image,(left,top),(right,bottom),(0,255,0),2)
            # prefer the aligned crop when align_faces() has been run
            if self.faces_aligned == [] or self.faces_aligned is None:
                face = self.faces_detected[index]
            else:
                face = self.faces_aligned[index]
            # generate encoding for the face
            encoding = self.generate_encoding(face)
            # get distance value against every database encoding
            dict_index = -1
            # match_index = []
            match_value = []
            match_name = []
            for db_encoding in self.data['encodings']:
                dict_index = dict_index + 1
                # compare face (Euclidean distance between 128-d encodings)
                dist = np.linalg.norm(db_encoding - encoding)
                if dist < minDistance:
                    # match_index.append(dict_index)
                    match_value.append(dist)
                    match_name.append(self.data['names'][dict_index])
            if match_value != []:
                # sort by 3-smallest distance value (ascending)
                sorted_index = np.argsort(match_value)
                # best candidate = smallest distance
                name = match_name[sorted_index[0]]
                top_names.append(name)
                # write on the displayed window, for each value for this face
                yy = 0
                for k in sorted_index:
                    yy = yy + 1
                    y = top + (yy * 25)
                    # show similarity as a percentage: (1 - distance) * 100
                    xname = "{}: {:.2f}%".format(match_name[k], (1-match_value[k]) * 100)
                    cv2.putText(new_image, xname, (right+20, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
                    # only display the 3 closest candidates
                    if yy == 3:
                        break
            else:
                # no database entry within the threshold for this face
                y = top - 15 if top - 15 > 15 else top + 15
                cv2.putText(new_image, 'Unknown', (right+20, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
            index = index + 1
        return new_image, top_names
    def recognize_faces_and_draw_svm(self, minProba=0.5):
        """
        Recognize all faces detected in a frame using the trained SVM model.

        For each face the classifier's top-3 class probabilities are drawn
        beside the face box when the best probability exceeds ``minProba``;
        otherwise the face is labelled 'Unknown'.

        @param minProba: minimum softmax/SVM probability to accept a match
                         (higher value = stricter)
        @return: (annotated image, list of names — NOTE: three names are
                  appended per recognized face, one per top-3 candidate)
        """
        index = 0
        alpha = 0.6
        new_image = self.image.copy()
        text1 = None
        text2 = None
        text3 = None
        top_names = []
        # loop all faces
        for left, right, top, bottom in self.boxes:
            cv2.rectangle(new_image,(left,top),(right,bottom),(0,255,0),2)
            # prefer the aligned crop when available
            if self.faces_aligned == [] or self.faces_aligned is None:
                face = self.faces_detected[index]
            else:
                face = self.faces_aligned[index]
            # generate encoding for current face
            encoding = self.generate_encoding(face)
            # predict_proba expects a 2-D array of samples
            encoding = encoding.reshape(1, -1)
            # match a face with all faces in database
            # perform classification to recognize the face
            preds = self.svm_model.predict_proba(encoding)[0]
            # get the biggest probability
            j = np.argmax(preds)
            proba = preds[j]
            # overlay = new_image.copy()
            # text is drawn to the right of the face box
            left_rect = right + 10
            top_rect = top
            # if the biggest probability is bigger than allowed, then sort 3 biggest probabilities
            if proba > minProba:
                # get 3 biggest probability SVM
                big3_index = heapq.nlargest(3, range(len(preds)), preds.take)
                name = self.svm_label.classes_[big3_index[0]]
                prob = preds[big3_index[0]]
                text1 = "{}: {:.2f}%".format(name, prob * 100)
                top_names.append(name)
                name = self.svm_label.classes_[big3_index[1]]
                prob = preds[big3_index[1]]
                text2 = "{}: {:.2f}%".format(name, prob * 100)
                top_names.append(name)
                name = self.svm_label.classes_[big3_index[2]]
                prob = preds[big3_index[2]]
                text3 = "{}: {:.2f}%".format(name, prob * 100)
                top_names.append(name)
                right_rect = left_rect + 150
                bottom_rect = top + 75
                # cv2.rectangle(overlay, (left_rect, top_rect), (right_rect, bottom_rect), (0, 0, 0), -1)
                # new_image = cv2.addWeighted(overlay, alpha, new_image, 1 - alpha, 0)
                # font size/thickness tuned per camera resolution:
                # *** 2MPX
                cv2.putText(new_image, text1, (left_rect+10, top_rect+20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                cv2.putText(new_image, text2, (left_rect+10, top_rect+40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                cv2.putText(new_image, text3, (left_rect+10, top_rect+60), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                # *** 8MPX
                # cv2.putText(new_image, text1, (left_rect+10, top_rect+20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
                # cv2.putText(new_image, text2, (left_rect+10, top_rect+40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
                # cv2.putText(new_image, text3, (left_rect+10, top_rect+60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
                # *** 12MPX
                # cv2.putText(new_image, text1, (left_rect+10, top_rect+20), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 4)
                # cv2.putText(new_image, text2, (left_rect+10, top_rect+100), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 4)
                # cv2.putText(new_image, text3, (left_rect+10, top_rect+180), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 4)
                # print(text1)
                # print(text2)
                # print(text3)
            else:
                # best probability below threshold -> unknown face
                # name = 'Unknown'
                text1 = 'Unknown'
                # print(text1)
                right_rect = left_rect + 100
                bottom_rect = top + 35
                # cv2.rectangle(overlay, (left_rect, top_rect), (right_rect, bottom_rect), (0, 0, 0), -1)
                # new_image = cv2.addWeighted(overlay, alpha, new_image, 1 - alpha, 0)
                # *** 2MPX
                cv2.putText(new_image, text1, (left_rect+10, top_rect+20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                # cv2.putText(new_image, text1, (left_rect+10, top_rect+20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
                top_names.append('Unknown')
            # text_title = "threshold {:.2f}%".format(minProba * 100)
            # cv2.putText(new_image, text_title, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 0), 1)
            index = index + 1
        return new_image, top_names
def generate_encoding(self, faceImg):
"""
GENERATE LIST OF 128D VECTOR FROM A FACE IMAGE ARRAY
Note: to increase accuracy, shape and landmark must be consistent !
Please align face image before call this function !
@param-faceImg: a BGR face image (aligned, normalized)
@output-encodings: 128D face vector
"""
boxes = []
h, w, __ = faceImg.shape
# top, right, bottom, left
box = (0, w-1, h-1, 0)
boxes.append(box)
rgb = cv2.cvtColor(faceImg, cv2.COLOR_BGR2RGB) # switch to RGB for DLIB
encodings = face_recognition.face_encodings(rgb, boxes)
return encodings[0]
def set_target_face_width(self, target_face_width):
""" width of final aligned face image to be recognized, in pixel """
self.target_face_width = target_face_width
self.faceAligner.desiredFaceWidth = target_face_width
    def set_target_face_height(self, target_face_height):
        """ height of final aligned face image to be recognized, in pixel """
        self.target_face_height = target_face_height
        self.faceAligner.desiredFaceHeight = target_face_height
def __set_target_face_percentage(self, target_face_percent):
""" Zoom In < target_percent_face < Zoom out """
# self.target_face_percent = target_face_percent
# percentage_face = (self.target_face_percent, self.target_face_percent)
self.faceAligner.desiredLeftEye = (target_face_percent, target_face_percent)
def insert_encoding(self, encoding, name):
print('[Info] Loading ' + self.file_encodings + '...')
# loading from pickel file encoding
if self.load_encodings() == True:
# extract names and encodings
knownEncodings = self.data['encodings']
knownNames = self.data['names']
# appned the new encoding
knownEncodings.append(encoding)
knownNames.append(name)
# dump back to file
self.data['encodings'] = knownEncodings
self.data['names'] = knownNames
print('[Info] Serializing encodings...')
data = {'encodings': knownEncodings, 'names': knownNames}
f = open(self.file_encodings, 'wb')
f.write(pickle.dumps(data))
f.close()
print('[Info] Encoding saved on disk.')
else:
print('[Error] Failed to save encoding to disk.')
def encode_images_and_save(self, faceImgPath='./database/normalized'):
"""
@path: imagePaths of normalized face images need to generate and save the encodings
Note that all face images should have been normalized before calling this method
"""
knownEncodings = []
knownNames = []
imagePaths = list(paths.list_images(faceImgPath))
total = 0
for (i, imagePath) in enumerate(imagePaths):
print('[Info] processing image {}/{}'.format(i + 1, len(imagePaths)))
head_tail = os.path.split(imagePath)
filename = head_tail[1]
name, d = filename.split(self.name_separator)
image = cv2.imread(imagePath) # read image
encoding = self.generate_encoding(image)
if encoding is not None or encoding != []:
knownEncodings.append(encoding)
knownNames.append(name)
total += 1
print('[Info] serializing {} encodings...'.format(total))
data = {'encodings': knownEncodings, 'names': knownNames}
f = open(self.file_encodings, 'wb')
f.write(pickle.dumps(data))
f.close()
print('[Info] done.')
def load_encodings(self):
"""
Load encoding from pickle file to class member data
"""
print('[Info] Loading ' + self.file_encodings + '...')
try:
self.data = pickle.loads(open(self.file_encodings, 'rb').read())
except Exception as ex:
print('[Error] load encoding: ' + str(ex))
return False
return True
def save_encodings(self, path='./database/'):
"""
Save encoding to pickle file, from class member data
"""
total = len(self.data['encodings'])
print('[Info] serializing {} encodings...'.format(total))
f = open(self.file_encodings, 'wb')
f.write(pickle.dumps(self.data))
f.close()
print('[Info] done.')
def load_svm_model(self):
print('[Info] Loading ' + self.file_svm_model + '...')
try:
self.svm_model = pickle.loads(open(self.file_svm_model, 'rb').read())
self.svm_label = pickle.loads(open(self.file_svm_label, 'rb').read())
except Exception as ex:
# print('[Error] No such file ' + self.file_svm_model + ' or ' + self.file_svm_label + '...')
print('[Error] ' + str(ex))
def save_faces_detected(self, filename_suffix, outPath='./database/detected/'):
"""
Write detected faces (stored in this class) into disk
filename_suffix: suffix of filename generated, example:
filename_suffix = 'AJI', then filename will be AJI__1, AJI__2, etc.
"""
if self.faces_detected == [] or self.faces_detected is None:
print('[Error] No detected faces stored in this class')
return
import uuid
for (i, face) in enumerate(self.faces_detected):
print('[Info] writing face image {}/{}'.format(i + 1, len(self.faces_detected)))
filename = outPath + filename_suffix + self.name_separator + str(uuid.uuid4()) + '.jpg'
cv2.imwrite(filename, face)
def save_faces_normalized(self, filename_suffix, outPath='./database/normalized/'):
"""
Write normalized (aligned) faces (stored in this class) into disk
filename_suffix: suffix of filename generated, example:
filename_suffix = 'AJI', then filename will be AJI__<some guid random1>, AJI__<some guid random1>, etc.
"""
if self.faces_aligned == [] or self.faces_aligned is None:
print('[Error] No aligned faces stored in this class')
return
import uuid
for (i, face) in enumerate(self.faces_aligned):
print('[Info] writing face image {}/{}'.format(i + 1, len(self.faces_detected)))
filename = outPath + filename_suffix + self.name_separator + str(uuid.uuid4()) + '.jpg'
cv2.imwrite(filename, face)
#@@ UTILITIES @@#
def mass_align_images(self, srcPath='./database/raw/', detectedPath='./database/detected/', alignedPath='./database/normalized/'):
"""
CAUTION: 1 file only contains 1 face need to be detected !!!
The algorithm will only pick the biggest face if multiple faces appear in the picture
Detect faces for all images in srcPath
Write all detected faces into detectedPath
Write all normalized faces into alignedPath
"""
# self.set_target_face_percentage(0.33)
imagePaths = list(paths.list_images(srcPath))
for (i, imagePath) in enumerate(imagePaths):
head_tail = os.path.split(imagePath)
# split path and file
tail = head_tail[1]
# split filename and extension
splits = tail.split('.')
# get filename only without extension
suffix = splits[0]
#
print('[Info] processing image {}/{} ({})'.format(i + 1, len(imagePaths), tail))
img = cv2.imread(imagePath)
self.set_image_to_be_processed(img)
self.fd_method = 'AI'
self.fd_ai_min_score = 0.6
self.detect_faces()
self.align_faces(target_face_percent=0.28) # tight cropped, FaceNet papper
h0 = 0
w0 = 0
face_a = []
face_d = []
# only pick biggest face detected
for (j, detectedImg) in enumerate(self.faces_detected):
h,w,__ = detectedImg.shape
if(h*w > h0*w0):
h0 = h
w0 = w
face_d = detectedImg
face_a = self.faces_aligned[j]
filename_d = detectedPath + suffix + '_detect.jpg'
filename_a = alignedPath + suffix + '_align.jpg'
if face_d != []:
cv2.imwrite(filename_d, face_d)
cv2.imwrite(filename_a, face_a)
    def create_database_from_video(self, videopath, num_sample=3):
        """
        Sample random frames from a video and write the aligned & detected
        face crops into the database folders.

        Up to ``num_sample`` frames containing a face are processed; frames
        are picked at random positions throughout the video.

        @param videopath: path to the input video file
        @param num_sample: maximum number of face-bearing frames to save
        """
        from imutils.video import count_frames
        imsizewidth = 300
        # number of random frame positions to try
        random_number = 20
        max_faces = num_sample
        nframe = count_frames(videopath)
        random_frame = []
        for x in range(random_number):
            random_frame.append(random.randint(1,nframe))
        dface = 0
        cap = cv2.VideoCapture(videopath)
        for frame_no in random_frame:
            # NOTE(review): this read advances the stream before seeking;
            # the frame it returns is discarded by the second read below
            _, frame = cap.read()
            # _, frame = cap.read()
            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_no)
            print('Get face(s) on frame: ', int(cap.get(cv2.CAP_PROP_POS_FRAMES)))
            __, frame = cap.read()
            self.set_image_to_be_processed(frame)
            self.detect_faces()
            self.align_faces(target_face_percent=0.28)
            # only save when at least one face was aligned in this frame
            if self.faces_aligned != []:
                self.save_faces_detected(filename_suffix='FROM_VIDEO')
                self.save_faces_normalized(filename_suffix='FROM_VIDEO')
                dface = dface + 1
            if dface >= max_faces:
                break
        cap.release()
        print('Done')
    def insert_database_encode_from_video(self, name, videopath, num_sample=3):
        """
        Sample random frames from a video; for each frame with a face,
        write the detected/aligned crops into the database folders and
        insert the face encoding into encodings.pickle under ``name``.

        @param name: label stored with each inserted encoding
        @param videopath: path to the input video file
        @param num_sample: maximum number of face-bearing frames to process
        """
        from imutils.video import count_frames
        imsizewidth = 300
        # number of random frame positions to try
        random_number = 20
        max_faces = num_sample
        nframe = count_frames(videopath)
        random_frame = []
        for x in range(random_number):
            random_frame.append(random.randint(1,nframe))
        dface = 0
        cap = cv2.VideoCapture(videopath)
        for frame_no in random_frame:
            # NOTE(review): this read advances the stream before seeking;
            # the frame it returns is discarded by the second read below
            _, frame = cap.read()
            # _, frame = cap.read()
            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_no)
            print('Get face on frame: ', int(cap.get(cv2.CAP_PROP_POS_FRAMES)))
            __, frame = cap.read()
            self.set_image_to_be_processed(frame)
            self.detect_faces()
            self.align_faces(target_face_percent=0.25)
            if self.faces_aligned != []:
                # encode only the first aligned face of the frame
                e = self.generate_encoding(self.faces_aligned[0])
                self.insert_encoding(e, name)
                self.save_faces_detected(filename_suffix=name)
                self.save_faces_normalized(filename_suffix=name)
                dface = dface + 1
            if dface >= max_faces:
                break
        cap.release()
        print('Done')
def remove_encoding(self, pos=-1, name=''):
"""
Remove specific encoding from encodings pickle file
"""
if(self.load_encodings()):
encodings = self.data['encodings']
names = self.data['names']
if pos > -1:
del encodings[pos]
del names[pos]
else:
# remove all occurences of name in names
names = [e for e in names if e not in (name)]
data = {'encodings' : encodings, 'names' : names}
self.data = data
self.save_encodings()
@staticmethod
def rename_files(filename_suffix, name_separator, srcPath):
import uuid
for filename in os.listdir(srcPath):
dst = srcPath + filename_suffix + name_separator + str(uuid.uuid4()) + '.jpg'
src = srcPath + filename
os.rename(src, dst)
@staticmethod
def train_svm_model(file_encodings='./models/encodings.pickle', outpath='./models'):
"""
@file_encodings: pickle file contains encodings where SVM need to be trained to
@outpath: directory for SVM model to be saved (name is svm_model.pickle)
"""
# load the face embeddings
print('[Info] loading face encodings...')
data = pickle.loads(open(file_encodings, 'rb').read())
# encode the labels
print('[Info] encoding labels...')
le = LabelEncoder()
labels = le.fit_transform(data['names'])
# train the model used to accept the 128-d embeddings of the face and
# then produce the actual face recognition
print('[Info] training SVM model...')
recognizer = SVC(C=1.0, kernel='linear', probability=True)
recognizer.fit(data['encodings'], labels)
# write the actual face recognition model to disk
print('[Info] serializing SVM model and label...')
model_filename = outpath + '/svm_model.pickle'
f = open(model_filename, 'wb')
f.write(pickle.dumps(recognizer))
f.close()
# write the label encoder to disk
label_filename = outpath + '/svm_label.pickle'
f = open(label_filename, 'wb')
f.write(pickle.dumps(le))
f.close()
print('[Info] done.')
@staticmethod
def resize(img, scale=0.3):
__, w, __ = img.shape
return imutils.resize(img, width=int(scale*w))
@staticmethod
def draw_rectangle(img, rectangles, linewidth=2):
"""
@img = input image to draw the rectangles
@rectangles = bounding box with coord (left, right, top, bottom)
"""
for left, right, top, bottom in rectangles:
cv2.rectangle(img,(left,top),(right,bottom),(0,255,0),linewidth)
| [
"face_recognition.compare_faces",
"numpy.argmax",
"numpy.argsort",
"tensorflow.ConfigProto",
"numpy.linalg.norm",
"sklearn.svm.SVC",
"cv2.rectangle",
"cv2.CascadeClassifier",
"dlib.rectangle",
"dlib.shape_predictor",
"imutils.face_utils.FaceAligner",
"imutils.paths.list_images",
"random.rand... | [((361, 423), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (395, 423), True, 'import tensorflow as tf\n'), ((2380, 2390), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2388, 2390), True, 'import tensorflow as tf\n'), ((3036, 3081), 'dlib.shape_predictor', 'dlib.shape_predictor', (['self.file_face_landmark'], {}), '(self.file_face_landmark)\n', (3056, 3081), False, 'import dlib\n'), ((3226, 3363), 'imutils.face_utils.FaceAligner', 'FaceAligner', (['predictor'], {'desiredFaceWidth': 'self.target_face_width', 'desiredFaceHeight': 'self.target_face_height', 'desiredLeftEye': '(0.3, 0.3)'}), '(predictor, desiredFaceWidth=self.target_face_width,\n desiredFaceHeight=self.target_face_height, desiredLeftEye=(0.3, 0.3))\n', (3237, 3363), False, 'from imutils.face_utils import FaceAligner\n'), ((3586, 3623), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['self.file_haar'], {}), '(self.file_haar)\n', (3607, 3623), False, 'import cv2\n'), ((4120, 4152), 'numpy.expand_dims', 'np.expand_dims', (['image_np'], {'axis': '(0)'}), '(image_np, axis=0)\n', (4134, 4152), True, 'import numpy as np\n'), ((5413, 5456), 'cv2.cvtColor', 'cv2.cvtColor', (['self.image', 'cv2.COLOR_BGR2RGB'], {}), '(self.image, cv2.COLOR_BGR2RGB)\n', (5425, 5456), False, 'import cv2\n'), ((8572, 8616), 'cv2.cvtColor', 'cv2.cvtColor', (['self.image', 'cv2.COLOR_BGR2GRAY'], {}), '(self.image, cv2.COLOR_BGR2GRAY)\n', (8584, 8616), False, 'import cv2\n'), ((18544, 18584), 'cv2.cvtColor', 'cv2.cvtColor', (['faceImg', 'cv2.COLOR_BGR2RGB'], {}), '(faceImg, cv2.COLOR_BGR2RGB)\n', (18556, 18584), False, 'import cv2\n'), ((18631, 18674), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['rgb', 'boxes'], {}), '(rgb, boxes)\n', (18662, 18674), False, 'import face_recognition\n'), ((27132, 27155), 'imutils.video.count_frames', 'count_frames', (['videopath'], {}), 
'(videopath)\n', (27144, 27155), False, 'from imutils.video import count_frames\n'), ((27326, 27353), 'cv2.VideoCapture', 'cv2.VideoCapture', (['videopath'], {}), '(videopath)\n', (27342, 27353), False, 'import cv2\n'), ((28527, 28550), 'imutils.video.count_frames', 'count_frames', (['videopath'], {}), '(videopath)\n', (28539, 28550), False, 'from imutils.video import count_frames\n'), ((28721, 28748), 'cv2.VideoCapture', 'cv2.VideoCapture', (['videopath'], {}), '(videopath)\n', (28737, 28748), False, 'import cv2\n'), ((30380, 30399), 'os.listdir', 'os.listdir', (['srcPath'], {}), '(srcPath)\n', (30390, 30399), False, 'import os\n'), ((31113, 31127), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (31125, 31127), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((31378, 31423), 'sklearn.svm.SVC', 'SVC', ([], {'C': '(1.0)', 'kernel': '"""linear"""', 'probability': '(True)'}), "(C=1.0, kernel='linear', probability=True)\n", (31381, 31423), False, 'from sklearn.svm import SVC\n'), ((2468, 2481), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (2479, 2481), True, 'import tensorflow as tf\n'), ((2796, 2812), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2810, 2812), True, 'import tensorflow as tf\n'), ((2890, 2943), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.detection_graph', 'config': 'config'}), '(graph=self.detection_graph, config=config)\n', (2900, 2943), True, 'import tensorflow as tf\n'), ((5923, 5940), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (5933, 5940), True, 'import numpy as np\n'), ((5963, 5981), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (5973, 5981), True, 'import numpy as np\n'), ((8763, 8825), 'dlib.rectangle', 'dlib.rectangle', ([], {'left': 'left', 'right': 'right', 'top': 'top', 'bottom': 'bottom'}), '(left=left, right=right, top=top, bottom=bottom)\n', (8777, 8825), False, 'import dlib\n'), ((9625, 9695), 'cv2.rectangle', 'cv2.rectangle', 
(['new_image', '(left, top)', '(right, bottom)', '(0, 255, 0)', '(2)'], {}), '(new_image, (left, top), (right, bottom), (0, 255, 0), 2)\n', (9638, 9695), False, 'import cv2\n'), ((10099, 10176), 'face_recognition.compare_faces', 'face_recognition.compare_faces', (["self.data['encodings']", 'encoding', 'minDistance'], {}), "(self.data['encodings'], encoding, minDistance)\n", (10129, 10176), False, 'import face_recognition\n'), ((10613, 10702), 'cv2.putText', 'cv2.putText', (['new_image', 'name', '(left, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(2)', '(255, 0, 0)', '(4)'], {}), '(new_image, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, \n 0, 0), 4)\n', (10624, 10702), False, 'import cv2\n'), ((11280, 11350), 'cv2.rectangle', 'cv2.rectangle', (['new_image', '(left, top)', '(right, bottom)', '(0, 255, 0)', '(2)'], {}), '(new_image, (left, top), (right, bottom), (0, 255, 0), 2)\n', (11293, 11350), False, 'import cv2\n'), ((13660, 13730), 'cv2.rectangle', 'cv2.rectangle', (['new_image', '(left, top)', '(right, bottom)', '(0, 255, 0)', '(2)'], {}), '(new_image, (left, top), (right, bottom), (0, 255, 0), 2)\n', (13673, 13730), False, 'import cv2\n'), ((14304, 14320), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (14313, 14320), True, 'import numpy as np\n'), ((20922, 20952), 'imutils.paths.list_images', 'paths.list_images', (['faceImgPath'], {}), '(faceImgPath)\n', (20939, 20952), False, 'from imutils import paths\n'), ((21135, 21159), 'os.path.split', 'os.path.split', (['imagePath'], {}), '(imagePath)\n', (21148, 21159), False, 'import os\n'), ((21277, 21298), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (21287, 21298), False, 'import cv2\n'), ((21748, 21766), 'pickle.dumps', 'pickle.dumps', (['data'], {}), '(data)\n', (21760, 21766), False, 'import pickle\n'), ((22554, 22577), 'pickle.dumps', 'pickle.dumps', (['self.data'], {}), '(self.data)\n', (22566, 22577), False, 'import pickle\n'), ((23862, 23889), 'cv2.imwrite', 'cv2.imwrite', 
(['filename', 'face'], {}), '(filename, face)\n', (23873, 23889), False, 'import cv2\n'), ((24717, 24744), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'face'], {}), '(filename, face)\n', (24728, 24744), False, 'import cv2\n'), ((25347, 25373), 'imutils.paths.list_images', 'paths.list_images', (['srcPath'], {}), '(srcPath)\n', (25364, 25373), False, 'from imutils import paths\n'), ((25454, 25478), 'os.path.split', 'os.path.split', (['imagePath'], {}), '(imagePath)\n', (25467, 25478), False, 'import os\n'), ((25840, 25861), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (25850, 25861), False, 'import cv2\n'), ((30543, 30562), 'os.rename', 'os.rename', (['src', 'dst'], {}), '(src, dst)\n', (30552, 30562), False, 'import os\n'), ((31707, 31731), 'pickle.dumps', 'pickle.dumps', (['recognizer'], {}), '(recognizer)\n', (31719, 31731), False, 'import pickle\n'), ((31908, 31924), 'pickle.dumps', 'pickle.dumps', (['le'], {}), '(le)\n', (31920, 31924), False, 'import pickle\n'), ((32425, 32497), 'cv2.rectangle', 'cv2.rectangle', (['img', '(left, top)', '(right, bottom)', '(0, 255, 0)', 'linewidth'], {}), '(img, (left, top), (right, bottom), (0, 255, 0), linewidth)\n', (32438, 32497), False, 'import cv2\n'), ((2500, 2545), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['self.file_face_detector', '"""rb"""'], {}), "(self.file_face_detector, 'rb')\n", (2514, 2545), True, 'import tensorflow as tf\n'), ((2682, 2724), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (2701, 2724), True, 'import tensorflow as tf\n'), ((7013, 7073), 'face_recognition.face_locations', 'face_recognition.face_locations', (['self.rgb_image'], {'model': '"""hog"""'}), "(self.rgb_image, model='hog')\n", (7044, 7073), False, 'import face_recognition\n'), ((11947, 11985), 'numpy.linalg.norm', 'np.linalg.norm', (['(db_encoding - encoding)'], {}), '(db_encoding - encoding)\n', (11961, 11985), True, 'import numpy as 
np\n'), ((12329, 12352), 'numpy.argsort', 'np.argsort', (['match_value'], {}), '(match_value)\n', (12339, 12352), True, 'import numpy as np\n'), ((13027, 13126), 'cv2.putText', 'cv2.putText', (['new_image', '"""Unknown"""', '(right + 20, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(255, 0, 0)', '(2)'], {}), "(new_image, 'Unknown', (right + 20, y), cv2.FONT_HERSHEY_SIMPLEX,\n 1, (255, 0, 0), 2)\n", (13038, 13126), False, 'import cv2\n'), ((15728, 15840), 'cv2.putText', 'cv2.putText', (['new_image', 'text1', '(left_rect + 10, top_rect + 20)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(2)'], {}), '(new_image, text1, (left_rect + 10, top_rect + 20), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n', (15739, 15840), False, 'import cv2\n'), ((15849, 15961), 'cv2.putText', 'cv2.putText', (['new_image', 'text2', '(left_rect + 10, top_rect + 40)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(2)'], {}), '(new_image, text2, (left_rect + 10, top_rect + 40), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n', (15860, 15961), False, 'import cv2\n'), ((15970, 16082), 'cv2.putText', 'cv2.putText', (['new_image', 'text3', '(left_rect + 10, top_rect + 60)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(2)'], {}), '(new_image, text3, (left_rect + 10, top_rect + 60), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n', (15981, 16082), False, 'import cv2\n'), ((17448, 17560), 'cv2.putText', 'cv2.putText', (['new_image', 'text1', '(left_rect + 10, top_rect + 20)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(2)'], {}), '(new_image, text1, (left_rect + 10, top_rect + 20), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n', (17459, 17560), False, 'import cv2\n'), ((20379, 20397), 'pickle.dumps', 'pickle.dumps', (['data'], {}), '(data)\n', (20391, 20397), False, 'import pickle\n'), ((26716, 26747), 'cv2.imwrite', 'cv2.imwrite', (['filename_d', 'face_d'], {}), '(filename_d, face_d)\n', (26727, 26747), False, 'import cv2\n'), ((26766, 26797), 'cv2.imwrite', 
'cv2.imwrite', (['filename_a', 'face_a'], {}), '(filename_a, face_a)\n', (26777, 26797), False, 'import cv2\n'), ((27256, 27281), 'random.randint', 'random.randint', (['(1)', 'nframe'], {}), '(1, nframe)\n', (27270, 27281), False, 'import random\n'), ((28651, 28676), 'random.randint', 'random.randint', (['(1)', 'nframe'], {}), '(1, nframe)\n', (28665, 28676), False, 'import random\n'), ((7463, 7507), 'cv2.cvtColor', 'cv2.cvtColor', (['self.image', 'cv2.COLOR_BGR2GRAY'], {}), '(self.image, cv2.COLOR_BGR2GRAY)\n', (7475, 7507), False, 'import cv2\n'), ((12775, 12870), 'cv2.putText', 'cv2.putText', (['new_image', 'xname', '(right + 20, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(255, 0, 0)', '(2)'], {}), '(new_image, xname, (right + 20, y), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 0, 0), 2)\n', (12786, 12870), False, 'import cv2\n'), ((23826, 23838), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (23836, 23838), False, 'import uuid\n'), ((24681, 24693), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (24691, 24693), False, 'import uuid\n'), ((30469, 30481), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (30479, 30481), False, 'import uuid\n')] |
"""Basic functionality"""
import numpy as np
import pennylane as qml
from tqdm.notebook import tqdm
def vqe(circuit, H, dev, optimizer, steps, params, sparse=False, bar=True, diff_method="adjoint"):
    """Run the Variational Quantum Eigensolver on a given circuit and Hamiltonian.

    Minimizes C(theta) = < psi(theta) | H | psi(theta) > with the supplied
    gradient-based optimizer, stopping early once the gradient vanishes.

    Args
        circuit (function): quantum function applying the parametrized gates
        H (qml.Hamiltonian, qml.SparseHamiltonian): Hamiltonian to optimize
        dev (qml.Device): device on which to perform VQE
        optimizer (qml.GradientDescentOptimizer): optimizer used during VQE
        steps (int): maximum number of optimization steps
        params (Iterable): initial parameters for VQE optimization
    Kwargs
        sparse (bool): simulate using sparse methods
        bar (bool): display a progress bar during optimization
        diff_method (str): differentiation method (ignored when sparse=True)
    Returns
        (Optimized energy, optimized parameters): (float, Iterable)
    """
    # sparse simulation only supports parameter-shift differentiation
    method = "parameter-shift" if sparse else diff_method

    @qml.qnode(dev, diff_method=method)
    def energy_fn(theta):
        circuit(theta)
        return qml.expval(H)

    iterator = tqdm(range(steps)) if bar else range(steps)
    for _ in iterator:
        params, energy, grad = optimizer.step_and_cost_and_grad(energy_fn, params)
        # converged: gradient is (numerically) zero
        if np.allclose(grad, 0.0):
            break
        if bar:
            iterator.set_description("Energy = {}".format(energy))
    return energy, params
def adapt_vqe(H, dev, operator_pool, hf_state, optimizer, max_steps, vqe_steps, bar=False):
    """Performs the original ADAPT-VQE procedure using the sparse VQE method.
    See [arXiv:1812.11173v2] for more details.
    Args
        H (qml.Hamiltonian): A Hamiltonian used to perform VQE
        dev (qml.Device): A device on which to perform the simulations
        operator_pool (Iterable[function]): A collection of parametrized quantum gates which will make up the operator pool
            Each element is of type (float or array) -> (qml.Operation)
        hf_state (array): The Hartree-Fock state
        optimizer (qml.GradientDescentOptimizer): The optimizer used for VQE
        max_steps (float): The maximum number of times the adaptive loop should be executed
        vqe_steps (float): The number of steps that VQE should take, for each adaptive loop
    Kwargs
        bar (bool): Specifies whether to show a progress bar
    Returns
        (Iterable[function]): The sequence of quantum operations yielded from ADAPT-VQE
        (Iterable[float]): The optimized parameters of the circuit consisting of the outputted quantum operations
    """
    optimal_params = []
    seq = []
    termination = False
    counter = 0
    # adaptive loop: each pass appends one more operator to the ansatz
    while not termination and counter < max_steps:
        grads = []
        for op in operator_pool:
            # Constructs the candidate circuit: current ansatz + this pool
            # operator appended with parameter 0
            @qml.qnode(dev, diff_method='parameter-shift')
            def cost_fn(param):
                qml.BasisState(hf_state, wires=dev.wires)
                for operation, p in zip(seq, optimal_params):
                    operation(p)
                op(param)
                return qml.expval(H)
            # Computes the energy gradient of the candidate at param = 0
            grad_fn = qml.grad(cost_fn)(0.0)
            grads.append(grad_fn)
        abs_ops = [abs(x) for x in grads]
        # all gradients vanished: ADAPT-VQE has converged
        if np.allclose(abs_ops, 0.0):
            termination = True
            break
        # greedily pick the pool operator with the largest gradient magnitude
        chosen_op = operator_pool[abs_ops.index(max(abs_ops))]
        def vqe_circuit(params):
            qml.BasisState(hf_state, wires=dev.wires)
            for operation, p in zip(seq, params[:len(params) - 1]):
                operation(p)
            chosen_op(params[len(params) - 1])
        # re-optimize all parameters, warm-started with the previous optimum
        # and 0.0 for the newly appended operator
        energy, optimal_params = vqe(vqe_circuit, H, dev, optimizer, vqe_steps, optimal_params + [0.0], sparse=True, bar=bar)
        seq.append(chosen_op)
        counter += 1
    return seq, optimal_params
def gate_pool(active_electrons, active_orbitals):
    """Build an operator pool of single and double excitation gates.

    Returns a list of callables; each maps a parameter to a
    SingleExcitation or DoubleExcitation acting on fixed wires.
    """
    singles, doubles = qml.qchem.excitations(electrons=active_electrons, orbitals=2 * active_orbitals)
    # bind each wire set as a default argument so the lambdas capture the
    # current value rather than the loop variable
    pool = [lambda p, w=s: qml.SingleExcitation(p, wires=w) for s in singles]
    pool += [lambda p, w=d: qml.DoubleExcitation(p, wires=w) for d in doubles]
    return pool
def compute_state(circuit, dev, optimal_params):
    """Return the statevector yielded from a parametrized circuit.

    Args
        circuit (func): A quantum function representing a circuit
        dev (qml.device): The device on which to execute the circuit
        optimal_params (Iterable): The parameters to be fed into the circuit
    Returns
        numpy.array
    """
    @qml.qnode(dev)
    def state_fn(params):
        circuit(params)
        return qml.state()
    return state_fn(optimal_params)
"pennylane.qchem.excitations",
"numpy.allclose",
"pennylane.expval",
"pennylane.BasisState",
"pennylane.DoubleExcitation",
"pennylane.qnode",
"pennylane.grad",
"pennylane.SingleExcitation",
"pennylane.state"
] | [((1283, 1322), 'pennylane.qnode', 'qml.qnode', (['dev'], {'diff_method': 'diff_method'}), '(dev, diff_method=diff_method)\n', (1292, 1322), True, 'import pennylane as qml\n'), ((4310, 4389), 'pennylane.qchem.excitations', 'qml.qchem.excitations', ([], {'electrons': 'active_electrons', 'orbitals': '(2 * active_orbitals)'}), '(electrons=active_electrons, orbitals=2 * active_orbitals)\n', (4331, 4389), True, 'import pennylane as qml\n'), ((4989, 5003), 'pennylane.qnode', 'qml.qnode', (['dev'], {}), '(dev)\n', (4998, 5003), True, 'import pennylane as qml\n'), ((1387, 1400), 'pennylane.expval', 'qml.expval', (['H'], {}), '(H)\n', (1397, 1400), True, 'import pennylane as qml\n'), ((1569, 1591), 'numpy.allclose', 'np.allclose', (['grad', '(0.0)'], {}), '(grad, 0.0)\n', (1580, 1591), True, 'import numpy as np\n'), ((3579, 3604), 'numpy.allclose', 'np.allclose', (['abs_ops', '(0.0)'], {}), '(abs_ops, 0.0)\n', (3590, 3604), True, 'import numpy as np\n'), ((5067, 5078), 'pennylane.state', 'qml.state', ([], {}), '()\n', (5076, 5078), True, 'import pennylane as qml\n'), ((3100, 3145), 'pennylane.qnode', 'qml.qnode', (['dev'], {'diff_method': '"""parameter-shift"""'}), "(dev, diff_method='parameter-shift')\n", (3109, 3145), True, 'import pennylane as qml\n'), ((3764, 3805), 'pennylane.BasisState', 'qml.BasisState', (['hf_state'], {'wires': 'dev.wires'}), '(hf_state, wires=dev.wires)\n', (3778, 3805), True, 'import pennylane as qml\n'), ((3194, 3235), 'pennylane.BasisState', 'qml.BasisState', (['hf_state'], {'wires': 'dev.wires'}), '(hf_state, wires=dev.wires)\n', (3208, 3235), True, 'import pennylane as qml\n'), ((3380, 3393), 'pennylane.expval', 'qml.expval', (['H'], {}), '(H)\n', (3390, 3393), True, 'import pennylane as qml\n'), ((3468, 3485), 'pennylane.grad', 'qml.grad', (['cost_fn'], {}), '(cost_fn)\n', (3476, 3485), True, 'import pennylane as qml\n'), ((4462, 4494), 'pennylane.SingleExcitation', 'qml.SingleExcitation', (['p'], {'wires': 'w'}), '(p, wires=w)\n', (4482, 
4494), True, 'import pennylane as qml\n'), ((4553, 4585), 'pennylane.DoubleExcitation', 'qml.DoubleExcitation', (['p'], {'wires': 'w'}), '(p, wires=w)\n', (4573, 4585), True, 'import pennylane as qml\n')] |
#!/usr/bin/env python
import os.path as osp
import sys
sys.path.append(osp.join(osp.dirname(__file__), 'tools'))
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
import caffe
import cv2
import numpy as np
import time, os, sys
import rospy
import cv2
from cv_bridge import CvBridge
#from hector_worldmodel_msgs.msg import ImagePercept
from sensor_msgs.msg import Image, CameraInfo
import object
from object import Object, ObjectDetection
class TopicDetect:
    """A subscription target: an image topic name plus a desired polling rate.

    Note: only the topic name is currently stored; ``rate`` is accepted for
    interface compatibility but not yet used.
    TODO: Move detection image publishing in here, and make the new topic have
    a similar name to the old one.
    """

    def __init__(self, name, rate):
        # rate intentionally unused for now (rospy.Rate wiring is disabled).
        self.name = name
class ObjectDetector:
    '''
    This is a ROS node that keeps a faster rcnn model alive and ready to work on the GPU.
    It accepts images from specified topics, and publishes subsequent object detections.
    '''
    detection_threshold = .5    # Minimum softmax score for a detection to be kept
    pubImagePercept = None      # Publisher for object detection ImagePercept output
    pubObjectDetector = None    # Publisher for object detection Image output
    imageSubChannels = []       # ROS topics we subscribe to for images to classify
    imageMsg = None             # Most recent image message from Image subscription topic
    CvBridge = None             # ROS CVBridge object
    objectDefinitions = None    # List of Objects
    camInfoMsg = None           # Temporary place to hold camera info
    # Overlay text rendering parameters (pixels)
    textThickness = 1
    textHeight = 15
    textLeftPad = 2

    def __init__(self, gpu_id = 0, cfg_file = "experiments/cfgs/msu.yml",
            prototxt = "models/msupool/ZF/faster_rcnn_alt_opt/faster_rcnn_test.pt",
            caffemodel = "output/faster_rcnn_alt_opt/msupool/ZF_faster_rcnn_final.caffemodel",
            imageSubChannels = [TopicDetect("sensors/camF/", 60)]):
        '''
        @param gpu_id: The ID of the GPU to use for caffe model
        @param cfg_file: Path to the config file used for the caffe model
        @param prototxt: Path to network structure definition for caffe model
        @param caffemodel: Path to caffemodel containing trained network weights
        @param imageSubChannels: ROS topics we subscribe to for images to classify

        Note: this constructor blocks in a ROS spin loop until shutdown.
        '''
        global cfg
        # Initialize faster r-cnn
        cfg_from_file(self.fixPath(cfg_file))
        self.cfg = cfg
        self.cfg.GPU_ID = gpu_id
        caffe.set_mode_gpu()
        caffe.set_device(self.cfg.GPU_ID)
        self.net = caffe.Net(self.fixPath(prototxt), self.fixPath(caffemodel), caffe.TEST)
        self.net.name = os.path.splitext(os.path.basename(caffemodel))[0]
        # Initialize ROS
        rospy.init_node("object_detector")
        #self.pubImagePercept = rospy.Publisher('worldmodel/image_percept', ImagePercept, queue_size=10)
        self.pubObjectDetector = rospy.Publisher('object_detector', Image, queue_size=10)
        self.imageSubChannels = imageSubChannels
        for sub in self.imageSubChannels:
            rospy.Subscriber(sub.name + "image_raw", Image, self.subImageCB)
            rospy.Subscriber(sub.name + "camera_info", CameraInfo, self.camInfoCB)
        self.CvBridge = CvBridge()
        # Main loop: classify the most recently received frame at up to 10 Hz
        rate = rospy.Rate(10)
        while not rospy.is_shutdown():
            if self.imageMsg is not None:
                image = self.CvBridge.imgmsg_to_cv2(self.imageMsg, "bgr8")
                objects = self.detect(image)
                self.publishDetections(objects)
                self.imageMsg = None  # mark the frame consumed
            rate.sleep()

    def camInfoCB(self, camInfo):
        # Cache the latest camera intrinsics for use in percept messages
        self.camInfoMsg = camInfo

    def fixPath(self, path):
        '''Resolve a path relative to the directory containing this script.'''
        return os.path.join(os.path.dirname(os.path.realpath(__file__)), path)

    def subImageCB(self, image):
        #TODO: multiple image topics
        # Only store a new frame once the previous one has been processed
        if self.imageMsg is None:
            self.imageMsg = image

    def publishDetections(self, objects):
        '''
        Draw each detection onto the current frame and publish the annotated
        image on the object_detector topic.
        @param objects: list of ObjectDetection returned by detect()
        '''
        '''
        for o in objects:
            #ImagePercept
            msgImagePercept = ImagePercept()
            msgImagePercept.header = self.imageMsg.header
            msgImagePercept.camera_info = self.camInfoMsg
            msgImagePercept.info.class_id = str(o.classID)
            msgImagePercept.info.object_id = str(o.obj.objectID)
            msgImagePercept.info.name = o.obj.name()
            msgImagePercept.x = (o.xMin + o.xMax) / 2 #Center point of object
            msgImagePercept.y = (o.yMin + o.yMax) / 2
            msgImagePercept.width = (o.xMax - o.xMin) / msgImagePercept.camera_info.width
            msgImagePercept.height = (o.yMax - o.yMin) / msgImagePercept.camera_info.height
            msgImagePercept.distance = o.distance()
            self.pubImagePercept.publish(msgImagePercept)
        '''
        #Image
        image = self.CvBridge.imgmsg_to_cv2(self.imageMsg, "rgb8")
        for o in objects:
            # Detection coordinates arrive as floats from the network;
            # OpenCV drawing primitives require integer pixel coordinates.
            xMin, yMin = int(o.xMin), int(o.yMin)
            xMax, yMax = int(o.xMax), int(o.yMax)
            cv2.rectangle(image, (xMin, yMin), (xMax, yMax), (0, 255, 0))
            cv2.putText(image, o.obj.name(), (xMin + self.textLeftPad, yMin + self.textHeight),
                cv2.FONT_HERSHEY_SIMPLEX, .5, (0,255,0), self.textThickness)
            cv2.putText(image, "{:.1f}%".format(o.confidence*100), (xMin + self.textLeftPad, yMin + self.textHeight*2),
                cv2.FONT_HERSHEY_SIMPLEX, .5, (0,255,0), self.textThickness)
            cv2.putText(image, "{:.1f}m".format(o.distance()), (xMin + self.textLeftPad, yMin + self.textHeight*3),
                cv2.FONT_HERSHEY_SIMPLEX, .5, (0,255,0), self.textThickness)
        self.pubObjectDetector.publish(self.CvBridge.cv2_to_imgmsg(image, "rgb8"))

    # Wrapper for faster-rcnn detections
    # Excluded max_per_image - look into it if it becomes a problem
    def detect(self, image):
        '''
        Run the network on one frame and return the surviving detections.
        @param image: cv2 image to detect objects in
        @return: list of ObjectDetection, one per kept bounding box
        '''
        scores, boxes = im_detect(self.net, image)
        objects = []
        # Class 0 is background, so start from 1 (range works on Py2 and Py3;
        # the original used Python-2-only xrange)
        for j in range(1, Object.num_classes):
            inds = np.where(scores[:, j] > self.detection_threshold)[0]
            cls_scores = scores[inds, j]
            cls_boxes = boxes[inds, j*4:(j+1)*4]
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32, copy=False)
            # Non-maximum suppression to drop overlapping boxes of one class
            keep = nms(cls_dets, cfg.TEST.NMS)
            cls_dets = cls_dets[keep, :]
            # Each detection from frcnn is an array containing [xMin, yMin, xMax, yMax, confidence]
            for det in cls_dets:
                avgColor = self.avgColor(image, det[0], det[1], det[2]-det[0], det[3]-det[1])
                objects.append(ObjectDetection(j, det[0], det[1], det[2], det[3], det[4], avgColor))
            #objects.append(ObjectDetection(2, 200, 100, 500, 440, .99, (10, 10, 10)))
        return objects

    def avgColor(self, image, x, y, width, height):
        '''
        Get the average color of the middle crop of an object's bounding box.
        @param image: cv2/numpy image, indexed [row (y), column (x)]
        @param x, y: top-left corner of the bounding box
        @param width, height: bounding box dimensions
        @return: per-channel mean color of the central 50% of the box
        '''
        crop = .5
        # Margin trimmed from each side so the central `crop` fraction remains
        xSub = int((width * crop) / 2)
        ySub = int((height * crop) / 2)
        # BUG FIX: the original sliced image[x+xSub:y+ySub, width-xSub:height-ySub],
        # mixing the x/y axes and corner/size coordinates, and used float indices.
        return cv2.mean(image[int(y) + ySub:int(y + height) - ySub,
                              int(x) + xSub:int(x + width) - xSub])
if __name__ == '__main__':
    # Construct the node; ObjectDetector.__init__ enters the ROS spin loop
    # and only returns on shutdown.
    detector = ObjectDetector()
| [
"cv_bridge.CvBridge",
"caffe.set_mode_gpu",
"rospy.Subscriber",
"os.path.basename",
"os.path.dirname",
"os.path.realpath",
"rospy.Publisher",
"rospy.Rate",
"numpy.hstack",
"caffe.set_device",
"rospy.is_shutdown",
"numpy.where",
"rospy.init_node",
"cv2.rectangle",
"fast_rcnn.nms_wrapper.n... | [((81, 102), 'os.path.dirname', 'osp.dirname', (['__file__'], {}), '(__file__)\n', (92, 102), True, 'import os.path as osp\n'), ((2390, 2410), 'caffe.set_mode_gpu', 'caffe.set_mode_gpu', ([], {}), '()\n', (2408, 2410), False, 'import caffe\n'), ((2413, 2446), 'caffe.set_device', 'caffe.set_device', (['self.cfg.GPU_ID'], {}), '(self.cfg.GPU_ID)\n', (2429, 2446), False, 'import caffe\n'), ((2621, 2655), 'rospy.init_node', 'rospy.init_node', (['"""object_detector"""'], {}), "('object_detector')\n", (2636, 2655), False, 'import rospy\n'), ((2782, 2838), 'rospy.Publisher', 'rospy.Publisher', (['"""object_detector"""', 'Image'], {'queue_size': '(10)'}), "('object_detector', Image, queue_size=10)\n", (2797, 2838), False, 'import rospy\n'), ((3079, 3089), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (3087, 3089), False, 'from cv_bridge import CvBridge\n'), ((3100, 3114), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (3110, 3114), False, 'import rospy\n'), ((5385, 5411), 'fast_rcnn.test.im_detect', 'im_detect', (['self.net', 'image'], {}), '(self.net, image)\n', (5394, 5411), False, 'from fast_rcnn.test import im_detect\n'), ((6390, 6452), 'cv2.mean', 'cv2.mean', (['image[x + xSub:y + ySub, width - xSub:height - ySub]'], {}), '(image[x + xSub:y + ySub, width - xSub:height - ySub])\n', (6398, 6452), False, 'import cv2\n'), ((2921, 2985), 'rospy.Subscriber', 'rospy.Subscriber', (["(sub.name + 'image_raw')", 'Image', 'self.subImageCB'], {}), "(sub.name + 'image_raw', Image, self.subImageCB)\n", (2937, 2985), False, 'import rospy\n'), ((2989, 3059), 'rospy.Subscriber', 'rospy.Subscriber', (["(sub.name + 'camera_info')", 'CameraInfo', 'self.camInfoCB'], {}), "(sub.name + 'camera_info', CameraInfo, self.camInfoCB)\n", (3005, 3059), False, 'import rospy\n'), ((3127, 3146), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (3144, 3146), False, 'import rospy\n'), ((4514, 4583), 'cv2.rectangle', 'cv2.rectangle', (['image', 
'(o.xMin, o.yMin)', '(o.xMax, o.yMax)', '(0, 255, 0)'], {}), '(image, (o.xMin, o.yMin), (o.xMax, o.yMax), (0, 255, 0))\n', (4527, 4583), False, 'import cv2\n'), ((5711, 5738), 'fast_rcnn.nms_wrapper.nms', 'nms', (['cls_dets', 'cfg.TEST.NMS'], {}), '(cls_dets, cfg.TEST.NMS)\n', (5714, 5738), False, 'from fast_rcnn.nms_wrapper import nms\n'), ((2567, 2595), 'os.path.basename', 'os.path.basename', (['caffemodel'], {}), '(caffemodel)\n', (2583, 2595), False, 'import time, os, sys\n'), ((3479, 3505), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3495, 3505), False, 'import time, os, sys\n'), ((5481, 5530), 'numpy.where', 'np.where', (['(scores[:, j] > self.detection_threshold)'], {}), '(scores[:, j] > self.detection_threshold)\n', (5489, 5530), True, 'import numpy as np\n'), ((5620, 5669), 'numpy.hstack', 'np.hstack', (['(cls_boxes, cls_scores[:, np.newaxis])'], {}), '((cls_boxes, cls_scores[:, np.newaxis]))\n', (5629, 5669), True, 'import numpy as np\n'), ((6041, 6109), 'object.ObjectDetection', 'ObjectDetection', (['j', 'det[0]', 'det[1]', 'det[2]', 'det[3]', 'det[4]', 'avgColor'], {}), '(j, det[0], det[1], det[2], det[3], det[4], avgColor)\n', (6056, 6109), False, 'from object import Object, ObjectDetection\n')] |
from collections.abc import MutableSequence
import warnings
import io
import copy
import numpy as np
import pandas as pd
from . import endf
import openmc.checkvalue as cv
from .resonance import Resonances
def _add_file2_contributions(file32params, file2params):
"""Function for aiding in adding resonance parameters from File 2 that are
not always present in File 32. Uses already imported resonance data.
Paramaters
----------
file32params : pandas.Dataframe
Incomplete set of resonance parameters contained in File 32.
file2params : pandas.Dataframe
Resonance parameters from File 2. Ordered by energy.
Returns
-------
parameters : pandas.Dataframe
Complete set of parameters ordered by L-values and then energy
"""
# Use l-values and competitiveWidth from File 2 data
# Re-sort File 2 by energy to match File 32
file2params = file2params.sort_values(by=['energy'])
file2params.reset_index(drop=True, inplace=True)
# Sort File 32 parameters by energy as well (maintaining index)
file32params.sort_values(by=['energy'], inplace=True)
# Add in values (.values converts to array first to ignore index)
file32params['L'] = file2params['L'].values
if 'competitiveWidth' in file2params.columns:
file32params['competitiveWidth'] = file2params['competitiveWidth'].values
# Resort to File 32 order (by L then by E) for use with covariance
file32params.sort_index(inplace=True)
return file32params
class ResonanceCovariances(Resonances):
    """Resolved resonance covariance data

    Parameters
    ----------
    ranges : list of openmc.data.ResonanceCovarianceRange
        Distinct energy ranges for resonance data

    Attributes
    ----------
    ranges : list of openmc.data.ResonanceCovarianceRange
        Distinct energy ranges for resonance data
    """

    @property
    def ranges(self):
        return self._ranges

    @ranges.setter
    def ranges(self, ranges):
        # Validate the container type and each element's type up front
        cv.check_type('resonance ranges', ranges, MutableSequence)
        self._ranges = cv.CheckedList(ResonanceCovarianceRange,
                                      'resonance range', ranges)

    @classmethod
    def from_endf(cls, ev, resonances):
        """Generate resonance covariance data from an ENDF evaluation.

        Parameters
        ----------
        ev : openmc.data.endf.Evaluation
            ENDF evaluation
        resonances : openmc.data.Resonance object
            openmc.data.Resonanance object generated from the same evaluation
            used to import values not contained in File 32

        Returns
        -------
        openmc.data.ResonanceCovariances
            Resonance covariance data
        """
        # File 32 (MT=151) holds the resonance-parameter covariances
        file_obj = io.StringIO(ev.section[32, 151])

        # Determine whether discrete or continuous representation
        items = endf.get_head_record(file_obj)
        n_isotope = items[4]  # Number of isotopes

        ranges = []
        for iso in range(n_isotope):
            items = endf.get_cont_record(file_obj)
            abundance = items[1]
            fission_widths = (items[3] == 1)  # Flag for fission widths
            n_ranges = items[4]  # Number of resonance energy ranges

            for j in range(n_ranges):
                items = endf.get_cont_record(file_obj)
                # Unresolved flags - 0: only scattering radius given
                #                    1: resolved parameters given
                #                    2: unresolved parameters given
                unresolved_flag = items[2]
                formalism = items[3]  # resonance formalism

                # Throw error for unsupported formalisms
                if formalism in [0, 7]:
                    error = 'LRF='+str(formalism)+' covariance not supported '\
                            'for this formalism'
                    raise NotImplementedError(error)

                if unresolved_flag in (0, 1):
                    # Resolved resonance region: delegate parsing to the
                    # formalism-specific class (see _FORMALISMS mapping)
                    resonance = resonances.ranges[j]
                    erange = _FORMALISMS[formalism].from_endf(ev, file_obj,
                                                              items, resonance)
                    ranges.append(erange)

                elif unresolved_flag == 2:
                    warn = 'Unresolved resonance not supported. Covariance '\
                           'values for the unresolved region not imported.'
                    warnings.warn(warn)

        return cls(ranges)
class ResonanceCovarianceRange:
    """Resonance covariance range. Base class for different formalisms.

    Parameters
    ----------
    energy_min : float
        Minimum energy of the resolved resonance range in eV
    energy_max : float
        Maximum energy of the resolved resonance range in eV

    Attributes
    ----------
    energy_min : float
        Minimum energy of the resolved resonance range in eV
    energy_max : float
        Maximum energy of the resolved resonance range in eV
    parameters : pandas.DataFrame
        Resonance parameters
    covariance : numpy.array
        The covariance matrix contained within the ENDF evaluation
    lcomp : int
        Flag indicating format of the covariance matrix within the ENDF file
    file2res : openmc.data.ResonanceRange object
        Corresponding resonance range with File 2 data.
    mpar : int
        Number of parameters in covariance matrix for each individual resonance
    formalism : str
        String descriptor of formalism
    """
    def __init__(self, energy_min, energy_max):
        self.energy_min = energy_min
        self.energy_max = energy_max

    def subset(self, parameter_str, bounds):
        """Produce a subset of resonance parameters and the corresponding
        covariance matrix to an IncidentNeutron object.

        Parameters
        ----------
        parameter_str : str
            parameter to be discriminated
            (i.e. 'energy', 'captureWidth', 'fissionWidthA'...)
        bounds : np.array
            [low numerical bound, high numerical bound]

        Returns
        -------
        res_cov_range : openmc.data.ResonanceCovarianceRange
            ResonanceCovarianceRange object that contains a subset of the
            covariance matrix (upper triangular) as well as a subset parameters
            within self.file2params
        """
        # Copy range and prevent change of original
        res_cov_range = copy.deepcopy(self)

        parameters = self.file2res.parameters
        cov = res_cov_range.covariance
        mpar = res_cov_range.mpar

        # Keep only resonances whose `parameter_str` value lies within bounds
        mask1 = parameters[parameter_str] >= bounds[0]
        mask2 = parameters[parameter_str] <= bounds[1]
        mask = mask1 & mask2
        res_cov_range.parameters = parameters[mask]
        indices = res_cov_range.parameters.index.values

        # Walk the retained rows/columns of the full matrix and collect the
        # upper-triangular entries in the row-major order that
        # np.triu_indices expects below.
        sub_cov_dim = len(indices)*mpar
        cov_subset_vals = []
        for index1 in indices:
            for i in range(mpar):
                for index2 in indices:
                    for j in range(mpar):
                        if index2*mpar+j >= index1*mpar+i:
                            cov_subset_vals.append(cov[index1*mpar+i,
                                                   index2*mpar+j])
        cov_subset = np.zeros([sub_cov_dim, sub_cov_dim])
        tri_indices = np.triu_indices(sub_cov_dim)
        cov_subset[tri_indices] = cov_subset_vals

        res_cov_range.file2res.parameters = parameters[mask]
        res_cov_range.covariance = cov_subset
        return res_cov_range

    def sample(self, n_samples):
        """Sample resonance parameters based on the covariances provided
        within an ENDF evaluation.

        Parameters
        ----------
        n_samples : int
            The number of samples to produce

        Returns
        -------
        samples : list of openmc.data.ResonanceCovarianceRange objects
            List of samples size `n_samples`
        """
        warn_str = 'Sampling routine does not guarantee positive values for '\
                   'parameters. This can lead to undefined behavior in the '\
                   'reconstruction routine.'
        warnings.warn(warn_str)
        parameters = self.parameters
        cov = self.covariance

        # Symmetrizing covariance matrix (only the upper triangle is stored)
        cov = cov + cov.T - np.diag(cov.diagonal())
        formalism = self.formalism
        mpar = self.mpar
        samples = []

        # Handling MLBW/SLBW sampling
        if formalism == 'mlbw' or formalism == 'slbw':
            params = ['energy', 'neutronWidth', 'captureWidth', 'fissionWidth',
                      'competitiveWidth']
            param_list = params[:mpar]
            mean_array = parameters[param_list].values
            mean = mean_array.flatten()
            par_samples = np.random.multivariate_normal(mean, cov,
                                                        size=n_samples)
            spin = parameters['J'].values
            l_value = parameters['L'].values
            for sample in par_samples:
                energy = sample[0::mpar]
                gn = sample[1::mpar]
                gg = sample[2::mpar]
                # Widths absent from the covariance matrix keep their
                # File 2 values.
                gf = sample[3::mpar] if mpar > 3 else parameters['fissionWidth'].values
                gx = sample[4::mpar] if mpar > 4 else parameters['competitiveWidth'].values
                gt = gn + gg + gf + gx

                records = []
                for j, E in enumerate(energy):
                    records.append([energy[j], l_value[j], spin[j], gt[j],
                                    gn[j], gg[j], gf[j], gx[j]])

                columns = ['energy', 'L', 'J', 'totalWidth', 'neutronWidth',
                           'captureWidth', 'fissionWidth', 'competitiveWidth']
                sample_params = pd.DataFrame.from_records(records,
                                                          columns=columns)
                # Copy ResonanceRange object
                res_range = copy.copy(self.file2res)
                res_range.parameters = sample_params
                samples.append(res_range)

        # Handling RM sampling
        elif formalism == 'rm':
            params = ['energy', 'neutronWidth', 'captureWidth',
                      'fissionWidthA', 'fissionWidthB']
            param_list = params[:mpar]
            mean_array = parameters[param_list].values
            mean = mean_array.flatten()
            par_samples = np.random.multivariate_normal(mean, cov,
                                                        size=n_samples)
            spin = parameters['J'].values
            l_value = parameters['L'].values
            for sample in par_samples:
                energy = sample[0::mpar]
                gn = sample[1::mpar]
                gg = sample[2::mpar]
                gfa = sample[3::mpar] if mpar > 3 else parameters['fissionWidthA'].values
                # BUG FIX: fissionWidthB is the 5th sampled parameter, so it
                # is only present in the sample when mpar > 4.  The original
                # `mpar > 3` test made sample[4::mpar] read into the next
                # resonance's parameters when mpar == 4.
                gfb = sample[4::mpar] if mpar > 4 else parameters['fissionWidthB'].values

                records = []
                for j, E in enumerate(energy):
                    records.append([energy[j], l_value[j], spin[j], gn[j],
                                    gg[j], gfa[j], gfb[j]])

                columns = ['energy', 'L', 'J', 'neutronWidth',
                           'captureWidth', 'fissionWidthA', 'fissionWidthB']
                sample_params = pd.DataFrame.from_records(records,
                                                          columns=columns)
                # Copy ResonanceRange object
                res_range = copy.copy(self.file2res)
                res_range.parameters = sample_params
                samples.append(res_range)

        return samples
class MultiLevelBreitWignerCovariance(ResonanceCovarianceRange):
    """Multi-level Breit-Wigner resolved resonance formalism covariance data.

    Parameters
    ----------
    energy_min : float
        Minimum energy of the resolved resonance range in eV
    energy_max : float
        Maximum energy of the resolved resonance range in eV

    Attributes
    ----------
    energy_min : float
        Minimum energy of the resolved resonance range in eV
    energy_max : float
        Maximum energy of the resolved resonance range in eV
    parameters : pandas.DataFrame
        Resonance parameters
    covariance : numpy.array
        The covariance matrix contained within the ENDF evaluation
    mpar : int
        Number of parameters in covariance matrix for each individual resonance
    lcomp : int
        Flag indicating format of the covariance matrix within the ENDF file
    file2res : openmc.data.ResonanceRange object
        Corresponding resonance range with File 2 data.
    formalism : str
        String descriptor of formalism
    """

    def __init__(self, energy_min, energy_max, parameters, covariance, mpar,
                 lcomp, file2res):
        super().__init__(energy_min, energy_max)
        self.parameters = parameters
        self.covariance = covariance
        self.mpar = mpar
        self.lcomp = lcomp
        # Shallow copy so later reassignment of .parameters on this object
        # does not clobber the caller's File 2 range
        self.file2res = copy.copy(file2res)
        self.formalism = 'mlbw'

    @classmethod
    def from_endf(cls, ev, file_obj, items, resonance):
        """Create MLBW covariance data from an ENDF evaluation.

        Parameters
        ----------
        ev : openmc.data.endf.Evaluation
            ENDF evaluation
        file_obj : file-like object
            ENDF file positioned at the second record of a resonance range
            subsection in MF=32, MT=151
        items : list
            Items from the CONT record at the start of the resonance range
            subsection
        resonance : openmc.data.ResonanceRange object
            Corresponding resonance range with File 2 data.

        Returns
        -------
        openmc.data.MultiLevelBreitWignerCovariance
            Multi-level Breit-Wigner resonance covariance parameters
        """
        # Read energy-dependent scattering radius if present
        energy_min, energy_max = items[0:2]
        nro, naps = items[4:6]
        if nro != 0:
            params, ape = endf.get_tab1_record(file_obj)

        # Other scatter radius parameters
        items = endf.get_cont_record(file_obj)
        target_spin = items[0]
        lcomp = items[3]  # Flag for compatibility 0, 1, 2 - 2 is compact form
        nls = items[4]  # number of l-values

        # Build covariance matrix for General Resolved Resonance Formats
        if lcomp == 1:
            items = endf.get_cont_record(file_obj)
            # Number of short range type resonance covariances
            num_short_range = items[4]
            # Number of long range type resonance covariances
            num_long_range = items[5]

            # Read resonance widths, J values, etc
            records = []
            for i in range(num_short_range):
                items, values = endf.get_list_record(file_obj)
                mpar = items[2]
                num_res = items[5]
                num_par_vals = num_res*6
                # The first 6*num_res entries are the parameters themselves;
                # the remainder is the flattened upper-triangular covariance
                res_values = values[:num_par_vals]
                cov_values = values[num_par_vals:]

                energy = res_values[0::6]
                spin = res_values[1::6]
                gt = res_values[2::6]
                gn = res_values[3::6]
                gg = res_values[4::6]
                gf = res_values[5::6]

                for i, E in enumerate(energy):
                    records.append([energy[i], spin[i], gt[i], gn[i],
                                    gg[i], gf[i]])

                # Build the upper-triangular covariance matrix
                cov_dim = mpar*num_res
                cov = np.zeros([cov_dim, cov_dim])
                indices = np.triu_indices(cov_dim)
                cov[indices] = cov_values

        # Compact format - Resonances and individual uncertainties followed by
        # compact correlations
        elif lcomp == 2:
            items, values = endf.get_list_record(file_obj)
            mean = items
            num_res = items[5]
            # Each resonance occupies 12 values: 6 parameters + 6 uncertainties
            energy = values[0::12]
            spin = values[1::12]
            gt = values[2::12]
            gn = values[3::12]
            gg = values[4::12]
            gf = values[5::12]
            par_unc = []
            for i in range(num_res):
                res_unc = values[i*12+6 : i*12+12]
                # Delete 0 values (not provided, no fission width)
                # DAJ/DGT always zero, DGF sometimes nonzero [1, 2, 5]
                res_unc_nonzero = []
                for j in range(6):
                    if j in [1, 2, 5] and res_unc[j] != 0.0:
                        res_unc_nonzero.append(res_unc[j])
                    elif j in [0, 3, 4]:
                        res_unc_nonzero.append(res_unc[j])
                par_unc.extend(res_unc_nonzero)

            records = []
            for i, E in enumerate(energy):
                records.append([energy[i], spin[i], gt[i], gn[i],
                                gg[i], gf[i]])

            # cov = D * corr * D where D is the diagonal of uncertainties
            corr = endf.get_intg_record(file_obj)
            cov = np.diag(par_unc).dot(corr).dot(np.diag(par_unc))

        # Compatible resolved resonance format
        elif lcomp == 0:
            cov = np.zeros([4, 4])
            records = []
            cov_index = 0
            for i in range(nls):
                items, values = endf.get_list_record(file_obj)
                num_res = items[5]
                for j in range(num_res):
                    # 18 values per resonance: 6 parameters + 12 covariances
                    one_res = values[18*j:18*(j+1)]
                    res_values = one_res[:6]
                    cov_values = one_res[6:]
                    records.append(list(res_values))

                    # Populate the covariance matrix for this resonance
                    # There are no covariances between resonances in lcomp=0
                    cov[cov_index, cov_index] = cov_values[0]
                    cov[cov_index+1, cov_index+1 : cov_index+2] = cov_values[1:2]
                    cov[cov_index+1, cov_index+3] = cov_values[4]
                    cov[cov_index+2, cov_index+2] = cov_values[3]
                    cov[cov_index+2, cov_index+3] = cov_values[5]
                    cov[cov_index+3, cov_index+3] = cov_values[6]
                    cov_index += 4
                    if j < num_res-1:  # Pad matrix for additional values
                        cov = np.pad(cov, ((0, 4), (0, 4)), 'constant',
                                     constant_values=0)

        # Create pandas DataFrame with resonance data, currently
        # redundant with data.IncidentNeutron.resonance
        columns = ['energy', 'J', 'totalWidth', 'neutronWidth',
                   'captureWidth', 'fissionWidth']
        parameters = pd.DataFrame.from_records(records, columns=columns)

        # Determine mpar (number of parameters for each resonance in
        # covariance matrix)
        nparams, params = parameters.shape
        covsize = cov.shape[0]
        mpar = int(covsize/nparams)

        # Add parameters from File 2
        parameters = _add_file2_contributions(parameters,
                                              resonance.parameters)
        # Create instance of class
        mlbw = cls(energy_min, energy_max, parameters, cov, mpar, lcomp,
                   resonance)
        return mlbw
class SingleLevelBreitWignerCovariance(MultiLevelBreitWignerCovariance):
    """Single-level Breit-Wigner resolved resonance covariance data.

    Single-level Breit-Wigner resolved resonance data is identified by LRF=1
    in the ENDF-6 format.  The covariance layout matches the multi-level
    case, so all storage and parsing behavior is inherited from
    MultiLevelBreitWignerCovariance; only the formalism tag differs.

    Parameters
    ----------
    energy_min : float
        Minimum energy of the resolved resonance range in eV
    energy_max : float
        Maximum energy of the resolved resonance range in eV

    Attributes
    ----------
    parameters : pandas.DataFrame
        Resonance parameters
    covariance : numpy.array
        The covariance matrix contained within the ENDF evaluation
    mpar : int
        Number of parameters in covariance matrix for each individual resonance
    formalism : str
        String descriptor of formalism
    lcomp : int
        Flag indicating format of the covariance matrix within the ENDF file
    file2res : openmc.data.ResonanceRange object
        Corresponding resonance range with File 2 data.
    """

    def __init__(self, energy_min, energy_max, parameters, covariance, mpar,
                 lcomp, file2res):
        # Let the MLBW base class store everything first — it sets
        # formalism to 'mlbw', which is then overridden here.
        super().__init__(energy_min, energy_max, parameters, covariance, mpar,
                         lcomp, file2res)
        self.formalism = 'slbw'
class ReichMooreCovariance(ResonanceCovarianceRange):
    """Reich-Moore resolved resonance formalism covariance data.

    Reich-Moore resolved resonance data is identified by LRF=3 in the ENDF-6
    format.

    Parameters
    ----------
    energy_min : float
        Minimum energy of the resolved resonance range in eV
    energy_max : float
        Maximum energy of the resolved resonance range in eV

    Attributes
    ----------
    energy_min : float
        Minimum energy of the resolved resonance range in eV
    energy_max : float
        Maximum energy of the resolved resonance range in eV
    parameters : pandas.DataFrame
        Resonance parameters
    covariance : numpy.array
        The covariance matrix contained within the ENDF evaluation
    lcomp : int
        Flag indicating format of the covariance matrix within the ENDF file
    mpar : int
        Number of parameters in covariance matrix for each individual resonance
    file2res : openmc.data.ResonanceRange object
        Corresponding resonance range with File 2 data.
    formalism : str
        String descriptor of formalism
    """

    def __init__(self, energy_min, energy_max, parameters, covariance, mpar,
                 lcomp, file2res):
        super().__init__(energy_min, energy_max)
        self.parameters = parameters
        self.covariance = covariance
        self.mpar = mpar
        self.lcomp = lcomp
        # Shallow copy so later reassignment of .parameters on this object
        # does not clobber the caller's File 2 range
        self.file2res = copy.copy(file2res)
        self.formalism = 'rm'

    @classmethod
    def from_endf(cls, ev, file_obj, items, resonance):
        """Create Reich-Moore resonance covariance data from an ENDF
        evaluation. Includes the resonance parameters contained separately in
        File 32.

        Parameters
        ----------
        ev : openmc.data.endf.Evaluation
            ENDF evaluation
        file_obj : file-like object
            ENDF file positioned at the second record of a resonance range
            subsection in MF=2, MT=151
        items : list
            Items from the CONT record at the start of the resonance range
            subsection
        resonance : openmc.data.Resonance object
            openmc.data.Resonanance object generated from the same evaluation
            used to import values not contained in File 32

        Returns
        -------
        openmc.data.ReichMooreCovariance
            Reich-Moore resonance covariance parameters
        """
        # Read energy-dependent scattering radius if present
        energy_min, energy_max = items[0:2]
        nro, naps = items[4:6]
        if nro != 0:
            params, ape = endf.get_tab1_record(file_obj)

        # Other scatter radius parameters
        items = endf.get_cont_record(file_obj)
        target_spin = items[0]
        lcomp = items[3]  # Flag for compatibility 0, 1, 2 - 2 is compact form
        nls = items[4]  # Number of l-values

        # Build covariance matrix for General Resolved Resonance Formats
        if lcomp == 1:
            items = endf.get_cont_record(file_obj)
            # Number of short range type resonance covariances
            num_short_range = items[4]
            # Number of long range type resonance covariances
            num_long_range = items[5]
            # Read resonance widths, J values, etc
            channel_radius = {}
            scattering_radius = {}
            records = []
            for i in range(num_short_range):
                items, values = endf.get_list_record(file_obj)
                mpar = items[2]
                num_res = items[5]
                num_par_vals = num_res*6
                # The first 6*num_res entries are the parameters themselves;
                # the remainder is the flattened upper-triangular covariance
                res_values = values[:num_par_vals]
                cov_values = values[num_par_vals:]

                energy = res_values[0::6]
                spin = res_values[1::6]
                gn = res_values[2::6]
                gg = res_values[3::6]
                gfa = res_values[4::6]
                gfb = res_values[5::6]

                for i, E in enumerate(energy):
                    records.append([energy[i], spin[i], gn[i], gg[i],
                                    gfa[i], gfb[i]])

                # Build the upper-triangular covariance matrix
                cov_dim = mpar*num_res
                cov = np.zeros([cov_dim, cov_dim])
                indices = np.triu_indices(cov_dim)
                cov[indices] = cov_values

        # Compact format - Resonances and individual uncertainties followed by
        # compact correlations
        elif lcomp == 2:
            items, values = endf.get_list_record(file_obj)
            num_res = items[5]
            # Each resonance occupies 12 values: 6 parameters + 6 uncertainties
            energy = values[0::12]
            spin = values[1::12]
            gn = values[2::12]
            gg = values[3::12]
            gfa = values[4::12]
            gfb = values[5::12]
            par_unc = []
            for i in range(num_res):
                res_unc = values[i*12+6 : i*12+12]
                # Delete 0 values (not provided in evaluation)
                res_unc = [x for x in res_unc if x != 0.0]
                par_unc.extend(res_unc)

            records = []
            for i, E in enumerate(energy):
                records.append([energy[i], spin[i], gn[i], gg[i],
                                gfa[i], gfb[i]])

            # cov = D * corr * D where D is the diagonal of uncertainties
            corr = endf.get_intg_record(file_obj)
            cov = np.diag(par_unc).dot(corr).dot(np.diag(par_unc))

        # Create pandas DataFrame with resonacne data
        columns = ['energy', 'J', 'neutronWidth', 'captureWidth',
                   'fissionWidthA', 'fissionWidthB']
        parameters = pd.DataFrame.from_records(records, columns=columns)

        # Determine mpar (number of parameters for each resonance in
        # covariance matrix)
        nparams, params = parameters.shape
        covsize = cov.shape[0]
        mpar = int(covsize/nparams)

        # Add parameters from File 2
        parameters = _add_file2_contributions(parameters,
                                              resonance.parameters)
        # Create instance of ReichMooreCovariance
        rmc = cls(energy_min, energy_max, parameters, cov, mpar, lcomp,
                  resonance)
        return rmc
# Maps the ENDF LRF flag (resonance formalism number) to the covariance
# class used to parse that formalism's File 32 data.
_FORMALISMS = {
    0: ResonanceCovarianceRange,
    1: SingleLevelBreitWignerCovariance,
    2: MultiLevelBreitWignerCovariance,
    3: ReichMooreCovariance
    # 7: RMatrixLimitedCovariance
}
| [
"numpy.pad",
"copy.deepcopy",
"io.StringIO",
"openmc.checkvalue.check_type",
"numpy.zeros",
"numpy.triu_indices",
"copy.copy",
"openmc.checkvalue.CheckedList",
"numpy.random.multivariate_normal",
"pandas.DataFrame.from_records",
"warnings.warn",
"numpy.diag"
] | [((2011, 2069), 'openmc.checkvalue.check_type', 'cv.check_type', (['"""resonance ranges"""', 'ranges', 'MutableSequence'], {}), "('resonance ranges', ranges, MutableSequence)\n", (2024, 2069), True, 'import openmc.checkvalue as cv\n'), ((2093, 2160), 'openmc.checkvalue.CheckedList', 'cv.CheckedList', (['ResonanceCovarianceRange', '"""resonance range"""', 'ranges'], {}), "(ResonanceCovarianceRange, 'resonance range', ranges)\n", (2107, 2160), True, 'import openmc.checkvalue as cv\n'), ((2767, 2799), 'io.StringIO', 'io.StringIO', (['ev.section[32, 151]'], {}), '(ev.section[32, 151])\n', (2778, 2799), False, 'import io\n'), ((6488, 6507), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (6501, 6507), False, 'import copy\n'), ((7367, 7403), 'numpy.zeros', 'np.zeros', (['[sub_cov_dim, sub_cov_dim]'], {}), '([sub_cov_dim, sub_cov_dim])\n', (7375, 7403), True, 'import numpy as np\n'), ((7426, 7454), 'numpy.triu_indices', 'np.triu_indices', (['sub_cov_dim'], {}), '(sub_cov_dim)\n', (7441, 7454), True, 'import numpy as np\n'), ((8264, 8287), 'warnings.warn', 'warnings.warn', (['warn_str'], {}), '(warn_str)\n', (8277, 8287), False, 'import warnings\n'), ((13150, 13169), 'copy.copy', 'copy.copy', (['file2res'], {}), '(file2res)\n', (13159, 13169), False, 'import copy\n'), ((18770, 18821), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['records'], {'columns': 'columns'}), '(records, columns=columns)\n', (18795, 18821), True, 'import pandas as pd\n'), ((22234, 22253), 'copy.copy', 'copy.copy', (['file2res'], {}), '(file2res)\n', (22243, 22253), False, 'import copy\n'), ((26345, 26396), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['records'], {'columns': 'columns'}), '(records, columns=columns)\n', (26370, 26396), True, 'import pandas as pd\n'), ((8906, 8962), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov'], {'size': 'n_samples'}), '(mean, cov, size=n_samples)\n', (8935, 8962), True, 
'import numpy as np\n'), ((9884, 9935), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['records'], {'columns': 'columns'}), '(records, columns=columns)\n', (9909, 9935), True, 'import pandas as pd\n'), ((10067, 10091), 'copy.copy', 'copy.copy', (['self.file2res'], {}), '(self.file2res)\n', (10076, 10091), False, 'import copy\n'), ((10531, 10587), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov'], {'size': 'n_samples'}), '(mean, cov, size=n_samples)\n', (10560, 10587), True, 'import numpy as np\n'), ((15733, 15761), 'numpy.zeros', 'np.zeros', (['[cov_dim, cov_dim]'], {}), '([cov_dim, cov_dim])\n', (15741, 15761), True, 'import numpy as np\n'), ((15788, 15812), 'numpy.triu_indices', 'np.triu_indices', (['cov_dim'], {}), '(cov_dim)\n', (15803, 15812), True, 'import numpy as np\n'), ((25031, 25059), 'numpy.zeros', 'np.zeros', (['[cov_dim, cov_dim]'], {}), '([cov_dim, cov_dim])\n', (25039, 25059), True, 'import numpy as np\n'), ((25086, 25110), 'numpy.triu_indices', 'np.triu_indices', (['cov_dim'], {}), '(cov_dim)\n', (25101, 25110), True, 'import numpy as np\n'), ((11449, 11500), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['records'], {'columns': 'columns'}), '(records, columns=columns)\n', (11474, 11500), True, 'import pandas as pd\n'), ((11632, 11656), 'copy.copy', 'copy.copy', (['self.file2res'], {}), '(self.file2res)\n', (11641, 11656), False, 'import copy\n'), ((17171, 17187), 'numpy.diag', 'np.diag', (['par_unc'], {}), '(par_unc)\n', (17178, 17187), True, 'import numpy as np\n'), ((17280, 17296), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (17288, 17296), True, 'import numpy as np\n'), ((26132, 26148), 'numpy.diag', 'np.diag', (['par_unc'], {}), '(par_unc)\n', (26139, 26148), True, 'import numpy as np\n'), ((4492, 4511), 'warnings.warn', 'warnings.warn', (['warn'], {}), '(warn)\n', (4505, 4511), False, 'import warnings\n'), ((17140, 17156), 'numpy.diag', 'np.diag', 
(['par_unc'], {}), '(par_unc)\n', (17147, 17156), True, 'import numpy as np\n'), ((18414, 18474), 'numpy.pad', 'np.pad', (['cov', '((0, 4), (0, 4))', '"""constant"""'], {'constant_values': '(0)'}), "(cov, ((0, 4), (0, 4)), 'constant', constant_values=0)\n", (18420, 18474), True, 'import numpy as np\n'), ((26101, 26117), 'numpy.diag', 'np.diag', (['par_unc'], {}), '(par_unc)\n', (26108, 26117), True, 'import numpy as np\n')] |
"""
"""
import os
import sys
#import tensorflow as tf
from keras.models import Model, Sequential, load_model
from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge
from keras.utils.vis_utils import plot_model
from keras.layers.merge import concatenate
from keras.optimizers import Adam
from keras import metrics
from keras.regularizers import l2
import matplotlib
if not "DISPLAY" in os.environ:
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
import ipdb
import data_server
import trace
trace.trace_start("trace.html")
#def Nvidia
def nvidia_model(in_shape):
#model = Sequential()
# https://stackoverflow.com/questions/41925765/keras-cropping2d-changes-color-channel
# https://stackoverflow.com/questions/34716454/where-do-i-call-the-batchnormalization-function-in-keras
# https://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf
#input_img = Input(shape=in_shape)
model = Sequential()
model.add(Cropping2D(cropping=((70, 25), (0,0)), input_shape=in_shape))
model.add(Lambda(lambda x: x / 255.0 - 0.5))
#model.add(Conv2D(3, (5, 5), activation='relu'))
model.add(Conv2D(24, (5, 5)))
model.add(BatchNormalization())
model.add(Activation(activation='relu'))
model.add(Conv2D(36, (5, 5)))
model.add(BatchNormalization())
model.add(Activation(activation='relu'))
model.add(Conv2D(48, (3, 3)))
model.add(BatchNormalization())
model.add(Activation(activation='relu'))
model.add(Conv2D(64, (3, 3)))
model.add(BatchNormalization())
model.add(Activation(activation='relu'))
model.add(Conv2D(64, (3, 3)))
model.add(BatchNormalization())
model.add(Activation(activation='relu'))
model.add(Flatten())
#model.add(Dense(1164, activation='relu', W_regularizer=l2(1e-3)))
model.add(Dense(100, activation='relu', kernel_regularizer=l2(1e-3)))
model.add(Dense(50, activation='relu', kernel_regularizer=l2(1e-3)))
model.add(Dense(10, activation='relu', kernel_regularizer=l2(1e-3)))
model.add(Dense(1))
print(model.summary())
model.compile(loss="mse", optimizer="adam")
params = {}
params["EPOCHS"] = 10
params["BATCH_SIZE"] = 64
return model, params
def work_model(in_shape, show=False):
    """Build the custom "work" network.

    Pipeline: per-pixel normalization, a learned 1x1 colorspace
    transform, an ROI crop, three small VGG-style conv blocks, then a
    fully-connected regression head with one linear output.

    Args:
        in_shape: input image shape, e.g. (160, 320, 3).
        show: unused; kept for interface compatibility.

    Returns:
        Tuple of (compiled Keras model, dict of training params).
    """
    model = Sequential()
    model.add(Lambda(lambda x: x / 255 - 1.0, input_shape=in_shape, name="Normalization"))
    model.add(Conv2D(3, (1, 1), activation='relu', name="A_parametric_Colorspace_transformation"))
    model.add(Cropping2D(cropping=(data_server.PARAMS['crop'], (0, 0)), input_shape=in_shape, name="ROI_crop"))

    def add_conv_block(idx):
        """Append one conv block: two 3x3 conv+BN+ReLU stages, a 1x1 conv, and 2x2 max-pooling."""
        for stage in (1, 2):
            model.add(Conv2D(8, (3, 3), name='block_{}_3x3conv2D_{}'.format(idx, stage)))
            model.add(BatchNormalization(name='block_{}_BN_{}'.format(idx, stage)))
            model.add(Activation(activation='relu', name='block_{}_ReLU_{}'.format(idx, stage)))
        model.add(Conv2D(8, (1, 1), activation='relu', name='block_{}_Conv2D_ReLU'.format(idx)))
        model.add(MaxPooling2D(pool_size=(2, 2), name='block_{}_maxpool2D'.format(idx)))

    for idx in range(3):
        add_conv_block(idx)

    model.add(Flatten(name='FC_flatten'))
    model.add(Dense(64, activation='relu', kernel_regularizer=l2(1e-3), name='FC_Dense_1'))
    # NOTE(review): the 'FC_Desne_2' typo is preserved verbatim -- renaming
    # this layer would break load_weights(..., by_name=True) against
    # previously saved weight files.
    model.add(Dense(32, activation='relu', kernel_regularizer=l2(1e-3), name='FC_Desne_2'))
    model.add(Dense(16, activation='relu', kernel_regularizer=l2(1e-3), name='FC_Dense_3'))
    model.add(Dense(1, name='FC_output'))
    print(model.summary())
    model.compile(loss='mean_squared_error', optimizer=Adam(lr=1e-3), metrics=[metrics.mean_squared_error])
    params = {"EPOCHS": 10, "BATCH_SIZE": 128}
    return model, params
def test_model(in_shape, show=False):
    """Build a small unnamed-layer network for quick experiments.

    Pipeline: normalization, ROI crop, learned 1x1 colorspace
    transform, two conv blocks (each two 3x3 conv+BN+ReLU stages plus
    2x2 max-pooling), then a dense head with one linear output.

    Args:
        in_shape: input image shape, e.g. (160, 320, 3).
        show: unused; kept for interface compatibility.

    Returns:
        Tuple of (compiled Keras model, dict of training params).
    """
    model = Sequential()
    model.add(Lambda(lambda img: img / 255 - 1.0, input_shape=in_shape))
    model.add(Cropping2D(cropping=(data_server.PARAMS['crop'], (0, 0)), input_shape=in_shape))
    model.add(Conv2D(3, (1, 1), activation='relu'))
    for _ in range(2):
        for _ in range(2):
            model.add(Conv2D(8, (3, 3)))
            model.add(BatchNormalization())
            model.add(Activation(activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    for units in (64, 32, 16):
        model.add(Dense(units, activation='relu', kernel_regularizer=l2(1e-3)))
    model.add(Dense(1))
    print(model.summary())
    model.compile(loss='mean_squared_error', optimizer=Adam(lr=1e-3),
                  metrics=[metrics.mean_squared_error])
    params = {"EPOCHS": 10, "BATCH_SIZE": 128}
    return model, params
class DenseNet(Model):
    """DenseNet-style model built with the Keras functional API.

    Two dense blocks (4 conv blocks each, with channel-wise
    concatenation of every block's output) separated by bottleneck /
    pooling transitions, followed by a dense regression head with a
    single linear output.

    Reference: https://towardsdatascience.com/densenet-2810936aeebb
    """
    # https://towardsdatascience.com/densenet-2810936aeebb
    def __init__(self, in_shape=None, show=False, inputs=None, outputs=None, name=None):
        """Build the graph, or wrap an existing (inputs, outputs) pair.

        Dual-mode constructor: when ``inputs``, ``outputs`` and ``name``
        are all supplied, delegate straight to ``Model.__init__`` — this
        path exists because Keras internally re-instantiates Model
        subclasses from existing tensors (e.g. when cloning/serializing).
        Otherwise build the full DenseNet graph from ``in_shape``.

        Args:
            in_shape: input image shape used when building from scratch.
            show: unused.
            inputs/outputs/name: pre-built graph endpoints (wrap mode).
        """
        #super().__init__(inputs=inputs, outputs=x)
        if inputs and outputs and name:
            super().__init__(inputs=inputs, outputs=outputs, name=name)
            return
        channels = 8
        # Monotonic counter used to give every generated layer a unique name.
        self.stage = 0
        self.set_params()
        #uodel = Sequential()
        inputs = Input(shape=in_shape, name="input")
        x = Lambda(lambda x: x/255 - 1.0)(inputs)
        #model.add(Cropping2D(cropping=(data_server.PARAMS['crop'], (0,0)), input_shape=in_shape))
        # dense block: each conv block's output is concatenated onto the
        # running feature map, so channel count grows with every iteration
        for i in range(4):
            x_block = self.conv_block(x, channels)
            x = concatenate([x, x_block])
        x = self.bottleneck_block(x, channels)
        # dense block
        for i in range(4):
            x_block = self.conv_block(x, channels)
            x = concatenate([x, x_block])
        x = self.bottleneck_block(x, channels)
        # output
        x = Flatten()(x)
        x = Dense(64, activation='relu', kernel_regularizer=l2(1e-3), name="FC_stage_{}".format(self.stage))(x)
        x = Dense(32, activation='relu', kernel_regularizer=l2(1e-3))(x)
        x = Dense(16, activation='relu', kernel_regularizer=l2(1e-3))(x)
        x = Dense(1)(x)
        super().__init__(inputs=inputs, outputs=x, name=name)
        print(self.summary())
        self.compile(loss='mean_squared_error', optimizer=Adam(lr=1e-3), metrics=[metrics.mean_squared_error])
    def set_params(self):
        """Populate ``self.params`` with the training hyperparameters."""
        self.params = {}
        self.params["EPOCHS"] = 12
        self.params["BATCH_SIZE"] = 64
    def bottleneck_block(self, x, channels):
        """Transition layer: 1x1 conv then 3x3/stride-2 max-pool (halves spatial size).

        Increments ``self.stage`` so subsequent layer names stay unique.
        """
        method = 'bottleneck_block'
        x = Conv2D(channels, (1, 1), padding='same', name="{}_conv_stage_{}".format(method, self.stage))(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), name='{}_pool_stage_{}'.format(method,self.stage))(x)
        self.stage += 1
        return x
    def conv_block(self, x, channels):
        """Composite conv block: 1x1 conv+BN+ReLU followed by 3x3 conv+BN+ReLU.

        Increments ``self.stage`` so subsequent layer names stay unique.
        """
        method = 'conv_block'
        x = Conv2D(channels, (1, 1), padding='same', name="{}_conv_1x1_stage_{}".format(method, self.stage))(x)
        x = BatchNormalization(axis=3, name="{}_BN_1x1_stage_{}".format(method, self.stage))(x)
        x = Activation(activation='relu', name="{}_relu_1x1_stage_{}".format(method, self.stage))(x)
        x = Conv2D(channels, (3, 3), padding='same', name="{}_conv_3x3_stage_{}".format(method, self.stage))(x)
        x = BatchNormalization(axis=3, name="{}_BN_3x3_stage_{}".format(method, self.stage))(x)
        x = Activation(activation='relu', name="{}_relu_3x3_stage_{}".format(method, self.stage))(x)
        self.stage += 1
        return x
    #def dense_block(self):
    #    #https://github.com/flyyufelix/DenseNet-Keras/blob/master/densenet121.py
#def dense_block(self):
# #https://github.com/flyyufelix/DenseNet-Keras/blob/master/densenet121.py
def inception_model(in_shape, show=False):
    """Placeholder for an Inception-style architecture.

    NOTE(review): despite the name, this function was a byte-for-byte
    duplicate of ``test_model`` — it contained no inception modules.
    It now delegates to ``test_model`` so the two copies cannot drift
    apart; the returned (model, params) pair is unchanged. Replace the
    delegation once a real inception architecture is implemented.

    Args:
        in_shape: input image shape, e.g. (160, 320, 3).
        show: unused; kept for interface compatibility.

    Returns:
        Tuple of (compiled Keras model, dict of training params).
    """
    return test_model(in_shape, show)
def generate_model(model_name, in_shape):
    """Build the model selected by name.

    Args:
        model_name: one of 'work', 'test', 'nvidia', 'densenet'
            (case-insensitive; previously only the DenseNet comparison
            was lowercased — matching is now consistent for all names).
        in_shape: input image shape passed to the model builder.

    Returns:
        Tuple of (compiled Keras model, dict of training params).

    Raises:
        Exception: if ``model_name`` is not one of the supported names.
    """
    print(model_name)
    key = model_name.lower()
    if key == "work":
        model, params = work_model(in_shape)
    elif key == "test":
        model, params = test_model(in_shape)
    elif key == "nvidia":
        model, params = nvidia_model(in_shape)
    elif key == "densenet":
        model = DenseNet(in_shape)
        params = model.params
    else:
        # BUG FIX: the old message ("select test/nvidia models") omitted
        # two of the four supported names.
        raise Exception("select work/test/nvidia/densenet models")
    return model, params
def main():
    """Entry point: build the selected model, train it unless saved
    artifacts already exist, and save a loss-history plot.

    The model name is taken from the last command-line argument
    (default: 'work').
    """
    model_name = "work"
    if len(sys.argv) > 1:
        model_name = sys.argv[-1]
    model, params = generate_model(model_name=model_name, in_shape=(160, 320, 3))
    plot_model(model, to_file='output_images/model_plot.png', show_shapes=True, show_layer_names=True)
    if os.path.exists("model.h5") and os.path.exists('model_weights.h5'):
        # Both artifacts already exist: nothing to do.
        return
    elif not os.path.exists("model.h5") and os.path.exists('model_weights.h5'):
        # Rebuild the full HDF5 model file from the saved weights.
        model.load_weights('model_weights.h5', by_name=True)
        model.save('model.h5')
        return
    EPOCHS = params["EPOCHS"]
    BATCH_SIZE = params["BATCH_SIZE"]
    train_generator = data_server.DataGenerator("train", batch_size=BATCH_SIZE, shuffle=True)
    valid_generator = data_server.DataGenerator("valid", batch_size=BATCH_SIZE, shuffle=True)
    validation_steps = data_server.Process().total_samples("valid") // BATCH_SIZE
    # BUG FIX: was total_samples("terain") -- the typo meant the train step
    # count was queried for a nonexistent split.
    train_steps = data_server.Process().total_samples("train") // BATCH_SIZE
    # TODO(review): train_steps/validation_steps are computed but never passed
    # to fit_generator (steps_per_epoch / validation_steps) -- confirm intent.
    if not os.path.exists("model.h5") or not os.path.exists("model_weights.h5"):
        history_object = model.fit_generator(
            use_multiprocessing=True,
            workers=3,
            generator=train_generator,
            verbose=1,
            validation_data=valid_generator,
            epochs=EPOCHS)
        model.save('model.h5')  # creates an HDF5 file 'model.h5'
        model.save_weights('model_weights.h5')
        fig = plt.figure()
        plt.plot(history_object.history['loss'])
        plt.plot(history_object.history['val_loss'])
        plt.title('model mean squared error loss')
        plt.ylabel('mean squared error loss')
        plt.xlabel('epoch')
        plt.legend(['training set', 'validation set'], loc='upper right')
        # BUG FIX: the old call was
        #   plt.savefig("output_images/loss_history.png".format(np.random.randint(1000)))
        # but the format string had no placeholder, so the randint was dead
        # code and the filename was always the same; the call is dropped.
        plt.savefig("output_images/loss_history.png")
        plt.close(fig)
#data_server.Process().load_metadata()
#metadata = data_server.Process().metadata
if __name__ == "__main__":
main()
| [
"matplotlib.pyplot.title",
"keras.regularizers.l2",
"keras.layers.Cropping2D",
"keras.layers.merge.concatenate",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"keras.layers.Input",
"trace.trace_start",
"matplotlib.pyplot.close",
"keras.utils.vis_utils.plot_model",
"keras.layers.Flatten",
... | [((612, 643), 'trace.trace_start', 'trace.trace_start', (['"""trace.html"""'], {}), "('trace.html')\n", (629, 643), False, 'import trace\n'), ((489, 510), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (503, 510), False, 'import matplotlib\n'), ((1051, 1063), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1061, 1063), False, 'from keras.models import Model, Sequential, load_model\n'), ((2300, 2312), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2310, 2312), False, 'from keras.models import Model, Sequential, load_model\n'), ((3848, 3860), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3858, 3860), False, 'from keras.models import Model, Sequential, load_model\n'), ((7432, 7444), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7442, 7444), False, 'from keras.models import Model, Sequential, load_model\n'), ((9006, 9108), 'keras.utils.vis_utils.plot_model', 'plot_model', (['model'], {'to_file': '"""output_images/model_plot.png"""', 'show_shapes': '(True)', 'show_layer_names': '(True)'}), "(model, to_file='output_images/model_plot.png', show_shapes=True,\n show_layer_names=True)\n", (9016, 9108), False, 'from keras.utils.vis_utils import plot_model\n'), ((9891, 9962), 'data_server.DataGenerator', 'data_server.DataGenerator', (['"""train"""'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)'}), "('train', batch_size=BATCH_SIZE, shuffle=True)\n", (9916, 9962), False, 'import data_server\n'), ((9982, 10053), 'data_server.DataGenerator', 'data_server.DataGenerator', (['"""valid"""'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)'}), "('valid', batch_size=BATCH_SIZE, shuffle=True)\n", (10007, 10053), False, 'import data_server\n'), ((1076, 1137), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 25), (0, 0))', 'input_shape': 'in_shape'}), '(cropping=((70, 25), (0, 0)), input_shape=in_shape)\n', (1086, 1137), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, 
Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((1149, 1182), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {}), '(lambda x: x / 255.0 - 0.5)\n', (1155, 1182), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((1247, 1265), 'keras.layers.Conv2D', 'Conv2D', (['(24)', '(5, 5)'], {}), '(24, (5, 5))\n', (1253, 1265), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((1278, 1298), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1296, 1298), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((1311, 1340), 'keras.layers.Activation', 'Activation', ([], {'activation': '"""relu"""'}), "(activation='relu')\n", (1321, 1340), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((1354, 1372), 'keras.layers.Conv2D', 'Conv2D', (['(36)', '(5, 5)'], {}), '(36, (5, 5))\n', (1360, 1372), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((1385, 1405), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1403, 1405), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((1418, 1447), 'keras.layers.Activation', 'Activation', ([], {'activation': '"""relu"""'}), "(activation='relu')\n", (1428, 1447), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((1461, 
1479), 'keras.layers.Conv2D', 'Conv2D', (['(48)', '(3, 3)'], {}), '(48, (3, 3))\n', (1467, 1479), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((1492, 1512), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1510, 1512), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((1525, 1554), 'keras.layers.Activation', 'Activation', ([], {'activation': '"""relu"""'}), "(activation='relu')\n", (1535, 1554), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((1568, 1586), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {}), '(64, (3, 3))\n', (1574, 1586), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((1599, 1619), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1617, 1619), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((1632, 1661), 'keras.layers.Activation', 'Activation', ([], {'activation': '"""relu"""'}), "(activation='relu')\n", (1642, 1661), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((1675, 1693), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {}), '(64, (3, 3))\n', (1681, 1693), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((1706, 1726), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1724, 1726), False, 'from keras.layers 
import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((1739, 1768), 'keras.layers.Activation', 'Activation', ([], {'activation': '"""relu"""'}), "(activation='relu')\n", (1749, 1768), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((1782, 1791), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1789, 1791), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((2083, 2091), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (2088, 2091), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((2324, 2399), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255 - 1.0)'], {'input_shape': 'in_shape', 'name': '"""Normalization"""'}), "(lambda x: x / 255 - 1.0, input_shape=in_shape, name='Normalization')\n", (2330, 2399), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((2410, 2498), 'keras.layers.Conv2D', 'Conv2D', (['(3)', '(1, 1)'], {'activation': '"""relu"""', 'name': '"""A_parametric_Colorspace_transformation"""'}), "(3, (1, 1), activation='relu', name=\n 'A_parametric_Colorspace_transformation')\n", (2416, 2498), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((2506, 2607), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': "(data_server.PARAMS['crop'], (0, 0))", 'input_shape': 'in_shape', 'name': '"""ROI_crop"""'}), "(cropping=(data_server.PARAMS['crop'], (0, 0)), input_shape=\n in_shape, name='ROI_crop')\n", (2516, 2607), False, 'from keras.layers import 
Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((3247, 3273), 'keras.layers.Flatten', 'Flatten', ([], {'name': '"""FC_flatten"""'}), "(name='FC_flatten')\n", (3254, 3273), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((3553, 3579), 'keras.layers.Dense', 'Dense', (['(1)'], {'name': '"""FC_output"""'}), "(1, name='FC_output')\n", (3558, 3579), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((3872, 3925), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255 - 1.0)'], {'input_shape': 'in_shape'}), '(lambda x: x / 255 - 1.0, input_shape=in_shape)\n', (3878, 3925), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((3936, 4015), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': "(data_server.PARAMS['crop'], (0, 0))", 'input_shape': 'in_shape'}), "(cropping=(data_server.PARAMS['crop'], (0, 0)), input_shape=in_shape)\n", (3946, 4015), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((4028, 4064), 'keras.layers.Conv2D', 'Conv2D', (['(3)', '(1, 1)'], {'activation': '"""relu"""'}), "(3, (1, 1), activation='relu')\n", (4034, 4064), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((4361, 4370), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4368, 4370), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((4593, 4601), 'keras.layers.Dense', 'Dense', (['(1)'], 
{}), '(1)\n', (4598, 4601), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((5233, 5268), 'keras.layers.Input', 'Input', ([], {'shape': 'in_shape', 'name': '"""input"""'}), "(shape=in_shape, name='input')\n", (5238, 5268), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((7456, 7509), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255 - 1.0)'], {'input_shape': 'in_shape'}), '(lambda x: x / 255 - 1.0, input_shape=in_shape)\n', (7462, 7509), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((7520, 7599), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': "(data_server.PARAMS['crop'], (0, 0))", 'input_shape': 'in_shape'}), "(cropping=(data_server.PARAMS['crop'], (0, 0)), input_shape=in_shape)\n", (7530, 7599), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((7612, 7648), 'keras.layers.Conv2D', 'Conv2D', (['(3)', '(1, 1)'], {'activation': '"""relu"""'}), "(3, (1, 1), activation='relu')\n", (7618, 7648), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((7945, 7954), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (7952, 7954), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((8177, 8185), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (8182, 8185), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((9109, 9135), 
'os.path.exists', 'os.path.exists', (['"""model.h5"""'], {}), "('model.h5')\n", (9123, 9135), False, 'import os\n'), ((9140, 9174), 'os.path.exists', 'os.path.exists', (['"""model_weights.h5"""'], {}), "('model_weights.h5')\n", (9154, 9174), False, 'import os\n'), ((10580, 10592), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10590, 10592), True, 'from matplotlib import pyplot as plt\n'), ((10595, 10635), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['loss']"], {}), "(history_object.history['loss'])\n", (10603, 10635), True, 'from matplotlib import pyplot as plt\n'), ((10638, 10682), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['val_loss']"], {}), "(history_object.history['val_loss'])\n", (10646, 10682), True, 'from matplotlib import pyplot as plt\n'), ((10685, 10727), 'matplotlib.pyplot.title', 'plt.title', (['"""model mean squared error loss"""'], {}), "('model mean squared error loss')\n", (10694, 10727), True, 'from matplotlib import pyplot as plt\n'), ((10730, 10767), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mean squared error loss"""'], {}), "('mean squared error loss')\n", (10740, 10767), True, 'from matplotlib import pyplot as plt\n'), ((10770, 10789), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (10780, 10789), True, 'from matplotlib import pyplot as plt\n'), ((10792, 10857), 'matplotlib.pyplot.legend', 'plt.legend', (["['training set', 'validation set']"], {'loc': '"""upper right"""'}), "(['training set', 'validation set'], loc='upper right')\n", (10802, 10857), True, 'from matplotlib import pyplot as plt\n'), ((10940, 10954), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (10949, 10954), True, 'from matplotlib import pyplot as plt\n'), ((3658, 3672), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (3662, 3672), False, 'from keras.optimizers import Adam\n'), ((4099, 4116), 'keras.layers.Conv2D', 'Conv2D', (['(8)', '(3, 
3)'], {}), '(8, (3, 3))\n', (4105, 4116), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((4130, 4150), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4148, 4150), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((4164, 4193), 'keras.layers.Activation', 'Activation', ([], {'activation': '"""relu"""'}), "(activation='relu')\n", (4174, 4193), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((4208, 4225), 'keras.layers.Conv2D', 'Conv2D', (['(8)', '(3, 3)'], {}), '(8, (3, 3))\n', (4214, 4225), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((4239, 4259), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4257, 4259), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((4273, 4302), 'keras.layers.Activation', 'Activation', ([], {'activation': '"""relu"""'}), "(activation='relu')\n", (4283, 4302), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((4317, 4347), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (4329, 4347), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((4681, 4695), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (4685, 4695), False, 'from keras.optimizers import Adam\n'), ((5275, 5306), 
'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255 - 1.0)'], {}), '(lambda x: x / 255 - 1.0)\n', (5281, 5306), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((5493, 5518), 'keras.layers.merge.concatenate', 'concatenate', (['[x, x_block]'], {}), '([x, x_block])\n', (5504, 5518), False, 'from keras.layers.merge import concatenate\n'), ((5649, 5674), 'keras.layers.merge.concatenate', 'concatenate', (['[x, x_block]'], {}), '([x, x_block])\n', (5660, 5674), False, 'from keras.layers.merge import concatenate\n'), ((5736, 5745), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5743, 5745), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((5995, 6003), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (6000, 6003), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((7683, 7700), 'keras.layers.Conv2D', 'Conv2D', (['(8)', '(3, 3)'], {}), '(8, (3, 3))\n', (7689, 7700), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((7714, 7734), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7732, 7734), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((7748, 7777), 'keras.layers.Activation', 'Activation', ([], {'activation': '"""relu"""'}), "(activation='relu')\n", (7758, 7777), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((7792, 7809), 'keras.layers.Conv2D', 'Conv2D', (['(8)', '(3, 3)'], {}), '(8, (3, 3))\n', 
(7798, 7809), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((7823, 7843), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7841, 7843), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((7857, 7886), 'keras.layers.Activation', 'Activation', ([], {'activation': '"""relu"""'}), "(activation='relu')\n", (7867, 7886), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((7901, 7931), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (7913, 7931), False, 'from keras.layers import Input, Dense, Cropping2D, Lambda, Conv2D, Flatten, BatchNormalization, Activation, ELU, Dropout, MaxPooling2D, merge\n'), ((8265, 8279), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (8269, 8279), False, 'from keras.optimizers import Adam\n'), ((9226, 9260), 'os.path.exists', 'os.path.exists', (['"""model_weights.h5"""'], {}), "('model_weights.h5')\n", (9240, 9260), False, 'import os\n'), ((10217, 10243), 'os.path.exists', 'os.path.exists', (['"""model.h5"""'], {}), "('model.h5')\n", (10231, 10243), False, 'import os\n'), ((10251, 10285), 'os.path.exists', 'os.path.exists', (['"""model_weights.h5"""'], {}), "('model_weights.h5')\n", (10265, 10285), False, 'import os\n'), ((1921, 1930), 'keras.regularizers.l2', 'l2', (['(0.001)'], {}), '(0.001)\n', (1923, 1930), False, 'from keras.regularizers import l2\n'), ((1991, 2000), 'keras.regularizers.l2', 'l2', (['(0.001)'], {}), '(0.001)\n', (1993, 2000), False, 'from keras.regularizers import l2\n'), ((2061, 2070), 'keras.regularizers.l2', 'l2', (['(0.001)'], {}), '(0.001)\n', (2063, 2070), False, 'from keras.regularizers import 
l2\n'), ((3334, 3343), 'keras.regularizers.l2', 'l2', (['(0.001)'], {}), '(0.001)\n', (3336, 3343), False, 'from keras.regularizers import l2\n'), ((3423, 3432), 'keras.regularizers.l2', 'l2', (['(0.001)'], {}), '(0.001)\n', (3425, 3432), False, 'from keras.regularizers import l2\n'), ((3512, 3521), 'keras.regularizers.l2', 'l2', (['(0.001)'], {}), '(0.001)\n', (3514, 3521), False, 'from keras.regularizers import l2\n'), ((4431, 4440), 'keras.regularizers.l2', 'l2', (['(0.001)'], {}), '(0.001)\n', (4433, 4440), False, 'from keras.regularizers import l2\n'), ((4501, 4510), 'keras.regularizers.l2', 'l2', (['(0.001)'], {}), '(0.001)\n', (4503, 4510), False, 'from keras.regularizers import l2\n'), ((4571, 4580), 'keras.regularizers.l2', 'l2', (['(0.001)'], {}), '(0.001)\n', (4573, 4580), False, 'from keras.regularizers import l2\n'), ((6140, 6154), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (6144, 6154), False, 'from keras.optimizers import Adam\n'), ((8015, 8024), 'keras.regularizers.l2', 'l2', (['(0.001)'], {}), '(0.001)\n', (8017, 8024), False, 'from keras.regularizers import l2\n'), ((8085, 8094), 'keras.regularizers.l2', 'l2', (['(0.001)'], {}), '(0.001)\n', (8087, 8094), False, 'from keras.regularizers import l2\n'), ((8155, 8164), 'keras.regularizers.l2', 'l2', (['(0.001)'], {}), '(0.001)\n', (8157, 8164), False, 'from keras.regularizers import l2\n'), ((9195, 9221), 'os.path.exists', 'os.path.exists', (['"""model.h5"""'], {}), "('model.h5')\n", (9209, 9221), False, 'import os\n'), ((10075, 10096), 'data_server.Process', 'data_server.Process', ([], {}), '()\n', (10094, 10096), False, 'import data_server\n'), ((10149, 10170), 'data_server.Process', 'data_server.Process', ([], {}), '()\n', (10168, 10170), False, 'import data_server\n'), ((10912, 10935), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {}), '(1000)\n', (10929, 10935), True, 'import numpy as np\n'), ((5803, 5812), 'keras.regularizers.l2', 'l2', (['(0.001)'], 
{}), '(0.001)\n', (5805, 5812), False, 'from keras.regularizers import l2\n'), ((5909, 5918), 'keras.regularizers.l2', 'l2', (['(0.001)'], {}), '(0.001)\n', (5911, 5918), False, 'from keras.regularizers import l2\n'), ((5976, 5985), 'keras.regularizers.l2', 'l2', (['(0.001)'], {}), '(0.001)\n', (5978, 5985), False, 'from keras.regularizers import l2\n')] |
"""
Project: RadarBook
File: rain.py
Created by: <NAME>
On: 3/18/2018
Created with: PyCharm
Copyright (C) 2019 Artech House (<EMAIL>)
This file is part of Introduction to Radar Using Python and MATLAB
and cannot be copied and/or distributed without the express permission of Artech House.
"""
from numpy import log10, exp, array, cos
def attenuation(frequency, rain_rate, elevation_angle, polarization_tilt_angle):
    """
    Calculate the attenuation due to rain.
    :param frequency: The operating frequency (GHz).
    :param rain_rate: The rain rate (mm/hr).
    :param elevation_angle: The elevation angle (radians).
    :param polarization_tilt_angle: The polarization tilt angle (radians).
    :return: The specific attenuation due to rain (dB/km).
    """
    log_f = log10(frequency)

    def regression(a_coeff, b_coeff, c_coeff, d_coeff, e_coeff):
        # Each coefficient (k or alpha) is a linear baseline in log-frequency
        # plus a sum of Gaussian bumps centered at b with widths c.
        total = d_coeff * log_f + e_coeff
        for a, b, c in zip(a_coeff, b_coeff, c_coeff):
            total += a * exp(-((log_f - b) / c) ** 2)
        return total

    # k for horizontal polarization (Table 2.3 coefficients)
    k_h = 10 ** regression(array([-5.33980, -0.35351, -0.23789, -0.94158]),
                           array([-0.1008, 1.26970, 0.86036, 0.64552]),
                           array([1.13098, 0.45400, 0.15354, 0.16817]),
                           -0.18961, 0.71147)

    # k for vertical polarization (Table 2.4 coefficients)
    k_v = 10 ** regression(array([-3.80595, -3.44965, -0.39902, 0.50167]),
                           array([0.56934, -0.22911, 0.73042, 1.07319]),
                           array([0.81061, 0.51059, 0.11899, 0.27195]),
                           -0.16398, 0.63297)

    # alpha for horizontal polarization (Table 2.5 coefficients)
    alpha_h = regression(array([-0.14318, 0.29591, 0.32177, -5.37610, 16.1721]),
                         array([1.82442, 0.77564, 0.63773, -0.96230, -3.29980]),
                         array([-0.55187, 0.19822, 0.13164, 1.47828, 3.43990]),
                         0.67849, -1.95537)

    # alpha for vertical polarization (Table 2.6 coefficients)
    alpha_v = regression(array([-0.07771, 0.56727, -0.20238, -48.2991, 48.5833]),
                         array([2.33840, 0.95545, 1.14520, 0.791669, 0.791459]),
                         array([-0.76284, 0.54039, 0.26809, 0.116226, 0.116479]),
                         -0.053739, 0.83433)

    # Combine the two polarizations according to path elevation and tilt.
    cos2_elevation = cos(elevation_angle) ** 2
    cos_2tau = cos(2. * polarization_tilt_angle)

    k = 0.5 * (k_h + k_v + (k_h - k_v) * cos2_elevation * cos_2tau)

    alpha = 0.5 * (k_h * alpha_h + k_v * alpha_v + (k_h * alpha_h - k_v * alpha_v) * cos2_elevation *
                   cos_2tau) / k

    # Power-law attenuation: gamma = k * R^alpha (dB/km).
    return k * rain_rate**alpha
| [
"numpy.log10",
"numpy.array",
"numpy.cos"
] | [((830, 876), 'numpy.array', 'array', (['[-5.3398, -0.35351, -0.23789, -0.94158]'], {}), '([-5.3398, -0.35351, -0.23789, -0.94158])\n', (835, 876), False, 'from numpy import log10, exp, array, cos\n'), ((889, 931), 'numpy.array', 'array', (['[-0.1008, 1.2697, 0.86036, 0.64552]'], {}), '([-0.1008, 1.2697, 0.86036, 0.64552])\n', (894, 931), False, 'from numpy import log10, exp, array, cos\n'), ((944, 985), 'numpy.array', 'array', (['[1.13098, 0.454, 0.15354, 0.16817]'], {}), '([1.13098, 0.454, 0.15354, 0.16817])\n', (949, 985), False, 'from numpy import log10, exp, array, cos\n'), ((1088, 1134), 'numpy.array', 'array', (['[-3.80595, -3.44965, -0.39902, 0.50167]'], {}), '([-3.80595, -3.44965, -0.39902, 0.50167])\n', (1093, 1134), False, 'from numpy import log10, exp, array, cos\n'), ((1146, 1190), 'numpy.array', 'array', (['[0.56934, -0.22911, 0.73042, 1.07319]'], {}), '([0.56934, -0.22911, 0.73042, 1.07319])\n', (1151, 1190), False, 'from numpy import log10, exp, array, cos\n'), ((1202, 1245), 'numpy.array', 'array', (['[0.81061, 0.51059, 0.11899, 0.27195]'], {}), '([0.81061, 0.51059, 0.11899, 0.27195])\n', (1207, 1245), False, 'from numpy import log10, exp, array, cos\n'), ((1350, 1403), 'numpy.array', 'array', (['[-0.14318, 0.29591, 0.32177, -5.3761, 16.1721]'], {}), '([-0.14318, 0.29591, 0.32177, -5.3761, 16.1721])\n', (1355, 1403), False, 'from numpy import log10, exp, array, cos\n'), ((1416, 1468), 'numpy.array', 'array', (['[1.82442, 0.77564, 0.63773, -0.9623, -3.2998]'], {}), '([1.82442, 0.77564, 0.63773, -0.9623, -3.2998])\n', (1421, 1468), False, 'from numpy import log10, exp, array, cos\n'), ((1482, 1534), 'numpy.array', 'array', (['[-0.55187, 0.19822, 0.13164, 1.47828, 3.4399]'], {}), '([-0.55187, 0.19822, 0.13164, 1.47828, 3.4399])\n', (1487, 1534), False, 'from numpy import log10, exp, array, cos\n'), ((1640, 1695), 'numpy.array', 'array', (['[-0.07771, 0.56727, -0.20238, -48.2991, 48.5833]'], {}), '([-0.07771, 0.56727, -0.20238, -48.2991, 
48.5833])\n', (1645, 1695), False, 'from numpy import log10, exp, array, cos\n'), ((1707, 1759), 'numpy.array', 'array', (['[2.3384, 0.95545, 1.1452, 0.791669, 0.791459]'], {}), '([2.3384, 0.95545, 1.1452, 0.791669, 0.791459])\n', (1712, 1759), False, 'from numpy import log10, exp, array, cos\n'), ((1773, 1828), 'numpy.array', 'array', (['[-0.76284, 0.54039, 0.26809, 0.116226, 0.116479]'], {}), '([-0.76284, 0.54039, 0.26809, 0.116226, 0.116479])\n', (1778, 1828), False, 'from numpy import log10, exp, array, cos\n'), ((1907, 1923), 'numpy.log10', 'log10', (['frequency'], {}), '(frequency)\n', (1912, 1923), False, 'from numpy import log10, exp, array, cos\n'), ((2089, 2105), 'numpy.log10', 'log10', (['frequency'], {}), '(frequency)\n', (2094, 2105), False, 'from numpy import log10, exp, array, cos\n'), ((2279, 2295), 'numpy.log10', 'log10', (['frequency'], {}), '(frequency)\n', (2284, 2295), False, 'from numpy import log10, exp, array, cos\n'), ((2454, 2470), 'numpy.log10', 'log10', (['frequency'], {}), '(frequency)\n', (2459, 2470), False, 'from numpy import log10, exp, array, cos\n'), ((2721, 2755), 'numpy.cos', 'cos', (['(2.0 * polarization_tilt_angle)'], {}), '(2.0 * polarization_tilt_angle)\n', (2724, 2755), False, 'from numpy import log10, exp, array, cos\n'), ((2886, 2920), 'numpy.cos', 'cos', (['(2.0 * polarization_tilt_angle)'], {}), '(2.0 * polarization_tilt_angle)\n', (2889, 2920), False, 'from numpy import log10, exp, array, cos\n'), ((2695, 2715), 'numpy.cos', 'cos', (['elevation_angle'], {}), '(elevation_angle)\n', (2698, 2715), False, 'from numpy import log10, exp, array, cos\n'), ((2841, 2861), 'numpy.cos', 'cos', (['elevation_angle'], {}), '(elevation_angle)\n', (2844, 2861), False, 'from numpy import log10, exp, array, cos\n'), ((1999, 2015), 'numpy.log10', 'log10', (['frequency'], {}), '(frequency)\n', (2004, 2015), False, 'from numpy import log10, exp, array, cos\n'), ((2181, 2197), 'numpy.log10', 'log10', (['frequency'], {}), '(frequency)\n', 
(2186, 2197), False, 'from numpy import log10, exp, array, cos\n'), ((2375, 2391), 'numpy.log10', 'log10', (['frequency'], {}), '(frequency)\n', (2380, 2391), False, 'from numpy import log10, exp, array, cos\n'), ((2550, 2566), 'numpy.log10', 'log10', (['frequency'], {}), '(frequency)\n', (2555, 2566), False, 'from numpy import log10, exp, array, cos\n')] |
import torch
from torch import nn
import numpy as np
class SineLayer(nn.Module):
    """Linear layer followed by a sine activation (SIREN building block)."""

    def __init__(self, in_dims, out_dims, bias=True, is_first=False, omega_0=30):
        super().__init__()
        self.omega_0 = omega_0
        self.in_dims = in_dims
        # If is_first=True, omega_0 acts purely as a frequency multiplier on
        # the pre-activations; different signals may need a different omega_0
        # in the first layer (it is a hyperparameter).
        # If is_first=False, the weight init range is divided by omega_0 so
        # activation magnitudes stay constant while gradients to the weight
        # matrix are boosted (see the SIREN supplement, Sec. 1.5).
        self.is_first = is_first
        self.linear = nn.Linear(in_dims, out_dims, bias=bias)
        self.init_weights()

    def init_weights(self):
        """Apply the SIREN uniform weight initialisation in-place."""
        with torch.no_grad():
            if self.is_first:
                bound = 1 / self.in_dims
            else:
                bound = np.sqrt(6 / self.in_dims) / self.omega_0
            self.linear.weight.uniform_(-bound, bound)

    def forward(self, x):
        # sin(omega_0 * (W x + b))
        return torch.sin(self.omega_0 * self.linear(x))
class Siren(nn.Module):
    """Stack of SineLayer blocks, optionally ending in a plain linear head."""

    def __init__(self, in_dims, hidden_dims, hidden_layers, out_dims, outermost_linear=False,
                 first_omega_0=30, hidden_omega_0=30.):
        super().__init__()
        layers = [SineLayer(in_dims, hidden_dims,
                            is_first=True, omega_0=first_omega_0)]
        for _ in range(hidden_layers):
            layers.append(SineLayer(hidden_dims, hidden_dims,
                                    is_first=False, omega_0=hidden_omega_0))

        if outermost_linear:
            # The final linear layer is initialised with the same uniform
            # range a hidden sine layer would use, so its inputs stay scaled.
            final_linear = nn.Linear(hidden_dims, out_dims)
            with torch.no_grad():
                bound = np.sqrt(6 / hidden_dims) / hidden_omega_0
                final_linear.weight.uniform_(-bound, bound)
            layers.append(final_linear)
        else:
            layers.append(SineLayer(hidden_dims, out_dims,
                                    is_first=False, omega_0=hidden_omega_0))

        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
| [
"torch.nn.Sequential",
"torch.no_grad",
"numpy.sqrt",
"torch.nn.Linear"
] | [((763, 802), 'torch.nn.Linear', 'nn.Linear', (['in_dims', 'out_dims'], {'bias': 'bias'}), '(in_dims, out_dims, bias=bias)\n', (772, 802), False, 'from torch import nn\n'), ((2379, 2403), 'torch.nn.Sequential', 'nn.Sequential', (['*self.net'], {}), '(*self.net)\n', (2392, 2403), False, 'from torch import nn\n'), ((873, 888), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (886, 888), False, 'import torch\n'), ((1916, 1948), 'torch.nn.Linear', 'nn.Linear', (['hidden_dims', 'out_dims'], {}), '(hidden_dims, out_dims)\n', (1925, 1948), False, 'from torch import nn\n'), ((1967, 1982), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1980, 1982), False, 'import torch\n'), ((1196, 1221), 'numpy.sqrt', 'np.sqrt', (['(6 / self.in_dims)'], {}), '(6 / self.in_dims)\n', (1203, 1221), True, 'import numpy as np\n'), ((2119, 2143), 'numpy.sqrt', 'np.sqrt', (['(6 / hidden_dims)'], {}), '(6 / hidden_dims)\n', (2126, 2143), True, 'import numpy as np\n'), ((1109, 1134), 'numpy.sqrt', 'np.sqrt', (['(6 / self.in_dims)'], {}), '(6 / self.in_dims)\n', (1116, 1134), True, 'import numpy as np\n'), ((2030, 2054), 'numpy.sqrt', 'np.sqrt', (['(6 / hidden_dims)'], {}), '(6 / hidden_dims)\n', (2037, 2054), True, 'import numpy as np\n')] |
import os, pdb
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from data import v2
from layers import *
from layers.modules.feat_pooling import FeatPooling
from torch.nn.parameter import Parameter
# This function is derived from torchvision VGG make_layers()
# https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
def vgg(cfg, i, batch_norm=False):
    """
    Build the VGG backbone layer list from a configuration sequence.

    :param cfg: Sequence mixing output-channel counts with pooling markers:
        'M' inserts a 2x2 max-pool, 'C' a 2x2 max-pool with ceil_mode=True.
    :param i: Number of input channels for the first convolution.
    :param batch_norm: If True, insert BatchNorm2d after each convolution.
    :return: Flat list of nn.Module layers, including the SSD-specific tail
        (stride-1 pool5, dilated conv6, 1x1 conv7).
    """
    layers = []
    channels = i
    for entry in cfg:
        if entry == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        elif entry == 'C':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))
        else:
            layers.append(nn.Conv2d(channels, entry, kernel_size=3, padding=1))
            if batch_norm:
                layers.append(nn.BatchNorm2d(entry))
            layers.append(nn.ReLU())
            channels = entry
    # SSD tail: keep spatial size with a stride-1 pool, then replace VGG's
    # fully-connected layers with a dilated conv6 and a 1x1 conv7.
    layers.append(nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
    layers.append(nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
    layers.append(nn.ReLU())
    layers.append(nn.Conv2d(1024, 1024, kernel_size=1))
    layers.append(nn.ReLU())
    return layers
def add_extras(cfg, i):
    """
    Build the extra SSD feature-scaling layers appended after the backbone.

    :param cfg: Sequence of channel counts; an 'S' entry marks a stride-2
        convolution whose output channels come from the *next* entry.
    :param i: Number of input channels for the first extra layer.
    :return: List of nn.Conv2d layers with kernel sizes alternating 1x1/3x3.
    """
    layers = []
    channels = i
    use_3x3 = False
    for idx, entry in enumerate(cfg):
        if channels != 'S':
            if entry == 'S':
                layers.append(nn.Conv2d(channels, cfg[idx + 1],
                                        kernel_size=(1, 3)[use_3x3], stride=2, padding=1))
            else:
                layers.append(nn.Conv2d(channels, entry, kernel_size=(1, 3)[use_3x3]))
            use_3x3 = not use_3x3
        channels = entry
    return layers
# VGG backbone configuration, keyed by input image size. Entries are conv
# output-channel counts; 'M' marks a 2x2 max-pool and 'C' a ceil-mode
# max-pool (consumed by vgg()). The '512' entry is an empty placeholder.
basemodel = {
    '300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
            512, 512, 512],
    '512': [],
}
# Extra feature-scaling layer configuration (consumed by add_extras()); 'S'
# marks a stride-2 layer whose output channels come from the following entry.
extras = {
    '300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],
    '512': [],
}
class SSD_CORE(nn.Module):
    """
    SSD-style feature extractor producing pooled multi-scale features.

    Runs a (multi-frame) input through a VGG backbone plus extra layers,
    collects six feature maps at decreasing resolutions, and pools each
    through a FeatPooling module for the downstream detection heads.
    """

    def __init__(self, input_frames, size, seq_len =2, kd=3, featmap_fusion='cat'):
        """
        :param input_frames: Frames stacked per input (first conv sees
            input_frames*3 channels).
        :param size: Input image size; selects the backbone/extras config.
        :param seq_len: Number of frames in the input sequence.
        :param kd: Spatial size of the pooled patch (kd x kd).
        :param featmap_fusion: Fusion type forwarded to FeatPooling.
        """
        super(SSD_CORE, self).__init__()
        self.vgg = nn.ModuleList(vgg(basemodel[str(size)], input_frames*3))
        # Layer learns to scale the l2 normalized features from conv4_3
        self.L2Norm = L2Norm(512, 20)
        self.extras = nn.ModuleList(add_extras(extras[str(size)], 1024))
        self.seq_len = seq_len
        self.size = size
        self.kd = kd
        # Feature maps are divided by kd*kd before pooling; presumably to
        # keep magnitudes comparable across patch sizes — TODO confirm.
        self.scale = kd*kd*1.0
        # Channel depths of the six source feature maps.
        self.fmd = [512, 1024, 512, 256, 256, 256]
        # Spatial sizes of the six source feature maps (for size 300).
        self.feature_maps = [38, 19, 10, 5, 3, 1]
        # One pooling module per scale; the identity matrix has one row per
        # spatial location of that feature map.
        self.featPool0 = FeatPooling(self.fmd[0], np.identity(38 ** 2), afthresh=0.9, kd=kd, fusion_type=featmap_fusion, seq_len=seq_len)
        self.featPool1 = FeatPooling(self.fmd[1], np.identity(19 ** 2), afthresh=0.9, kd=kd, fusion_type=featmap_fusion, seq_len=seq_len)
        self.featPool2 = FeatPooling(self.fmd[2], np.identity(10 ** 2), afthresh=0.9, kd=kd, fusion_type=featmap_fusion, seq_len=seq_len)
        self.featPool3 = FeatPooling(self.fmd[3], np.identity(5 ** 2), afthresh=0.9, kd=kd, fusion_type=featmap_fusion, seq_len=seq_len)
        self.featPool4 = FeatPooling(self.fmd[4], np.identity(3 ** 2), afthresh=0.9, kd=kd, fusion_type=featmap_fusion, seq_len=seq_len)
        self.featPool5 = FeatPooling(self.fmd[5], np.identity(1 ** 2), afthresh=0.9, kd=kd, fusion_type=featmap_fusion, seq_len=seq_len)

    def forward(self, x):
        """Return the list of six pooled, scaled feature maps for input x."""
        # Fold the sequence dimension into the batch dimension; assumes x is
        # (batch, seq, C, H, W) — TODO confirm against caller.
        x = x.view(-1, x.size(2), x.size(3), x.size(4))
        _sources = self.baseforward(x)
        # Placeholder list, overwritten below with the scaled feature maps.
        sources = [0,1,2,3,4,5]
        for i, s in enumerate(_sources):
            sources[i] = s/self.scale
        pooled_source = []
        # print(sources[0].size())
        pooled_source.append(self.featPool0(sources[0]))
        pooled_source.append(self.featPool1(sources[1]))
        pooled_source.append(self.featPool2(sources[2]))
        pooled_source.append(self.featPool3(sources[3]))
        pooled_source.append(self.featPool4(sources[4]))
        pooled_source.append(self.featPool5(sources[5]))
        #print('pooled_source size', pooled_source[0].size())
        return pooled_source

    def baseforward(self, x):
        """Run the backbone + extras and return the six raw feature maps."""
        sources = list()
        pooled_source = []
        # apply vgg up to conv4_3 relu
        for k in range(23):
            x = self.vgg[k](x)

        s = self.L2Norm(x)
        sources.append(s)

        # apply vgg up to fc7
        for k in range(23, len(self.vgg)):
            x = self.vgg[k](x)
        sources.append(x)

        # apply extra layers and cache source layer outputs
        # (every second extra layer produces a source feature map)
        for k, v in enumerate(self.extras):
            x = F.relu(v(x))
            if k % 2 == 1:
                sources.append(x)
        return sources

    def load_my_state_dict(self, state_dict, input_frames=1):
        """
        Copy parameters from a (possibly single-frame) pretrained state dict.

        Keys may carry a two-component prefix which is stripped before
        matching. When a pretrained conv has fewer input channels than this
        model (multi-frame input), its weights are tiled along the channel
        axis and rescaled so the initial output magnitude is preserved.
        """
        own_state = self.state_dict()
        # print('\n\n input_frames {:d}\n\n'.format(input_frames))
        # print('OWN KEYS: ', own_state.keys())
        # print('Loaded KEYS: ', state_dict.keys())
        # pdb.set_trace()
        for name, param in state_dict.items():
            name1 = name.split('.')
            # Drop the first two components of the key (e.g. a wrapper prefix).
            name2 = '.'.join(name1[2:])
            # pdb.set_trace()
            if name in own_state.keys() or name2 in own_state.keys():
                if name2 in own_state.keys():
                    name = name2
                # print(name)
                match = False
                own_size = own_state[name].size()
                if isinstance(param, Parameter):
                    param = param.data
                param_size = param.size()
                try:
                    if len(param_size)>2 and param_size[1] != own_size[1]:
                        # Tile pretrained weights across the extra input
                        # channels and divide by the repeat factor.
                        param = param.repeat(1, int(own_size[1]/param_size[1]), 1, 1)/float(own_size[1]/param_size[1])
                        own_state[name].copy_(param)
                    else:
                        own_state[name].copy_(param)
                except Exception:
                    raise RuntimeError('While copying the parameter named {}, '
                                       'whose dimensions in the model are {} and '
                                       'whose dimensions in the checkpoint are {}.'
                                       .format(name, own_state[name].size(), param.size()))
            else:
                print('NAME IS NOT IN OWN STATE::>' + name)
                # pdb.set_trace()
# Number of default (prior) boxes per feature-map location, one entry per
# source feature map; keyed by input image size.
mbox = {
    '300': [4, 6, 6, 6, 4, 4],  # number of boxes per feature map location
    '512': [],
}
def multibox(cfg, num_classes, fusion_num_muliplier, seq_len=2, kd=3):
    """Build the per-scale localization and classification head layers.

    Args:
        cfg: number of default boxes per location at each of the six scales.
        num_classes: number of action/object classes.
        fusion_num_muliplier: multiplier applied to the head input width.
        seq_len: number of frames in the input sequence.
        kd: spatial kernel size of the pooled feature patch.

    Returns:
        (loc_layers, conf_layers): two lists of zero-bias nn.Linear heads.
    """
    channel_depths = [512, 1024, 512, 256, 256, 256]  # feature map depth/channel size
    loc_layers, conf_layers = [], []
    for idx, depth in enumerate(channel_depths):
        in_features = depth * seq_len * kd * kd * fusion_num_muliplier
        print('Feature map size', in_features)
        reg_head = nn.Linear(in_features, cfg[idx] * 4 * seq_len)
        cls_head = nn.Linear(in_features, cfg[idx] * num_classes)
        reg_head.bias.data.fill_(0)
        cls_head.bias.data.fill_(0)
        loc_layers.append(reg_head)
        conf_layers.append(cls_head)
    return loc_layers, conf_layers
class AMTNet(nn.Module):
    """Two-stream SSD-style detector.

    Runs one SSD_CORE feature extractor (two when ``args.fusion`` is set),
    fuses the pooled per-scale features, and applies linear localization /
    classification heads at every scale.
    """
    def __init__(self, args):
        super(AMTNet, self).__init__()
        self.fusion = args.fusion
        self.core_base = SSD_CORE(args.input_frames_base, args.ssd_dim, args.seq_len, kd=args.kd)
        if self.fusion:
            self.core_extra = SSD_CORE(args.input_frames_extra, args.ssd_dim, args.seq_len, kd=args.kd)
            self.fusion_type = args.fusion_type
        self.num_classes = args.num_classes
        self.seq_len = args.seq_len
        loc_heads, conf_heads = multibox(
            mbox[str(args.ssd_dim)], args.num_classes,
            args.fusion_num_muliplier, args.seq_len, args.kd,
        )
        self.loc = nn.ModuleList(loc_heads)
        self.conf = nn.ModuleList(conf_heads)
    def _fuse(self, feat_base, feat_extra):
        """Merge two per-scale feature tensors according to ``fusion_type``."""
        if self.fusion_type == 'cat':
            return torch.cat((feat_base, feat_extra), 2)
        if self.fusion_type == 'sum':
            return feat_base + feat_extra
        if self.fusion_type == 'mean':
            return (feat_base + feat_extra) / 2.0
        raise Exception('Supply correct fusion type')
    def forward(self, x):
        """Return (conf, loc) predictions for the frame sequence(s) in ``x``."""
        pooled_base = self.core_base(x[0])
        loc_outputs = []
        conf_outputs = []
        if self.fusion:
            pooled_extra = self.core_extra(x[1])
            for feat_b, feat_e, loc_head, conf_head in zip(
                    pooled_base, pooled_extra, self.loc, self.conf):
                fused = self._fuse(feat_b, feat_e)
                loc_outputs.append(loc_head(fused).view(fused.size(0), -1))
                conf_outputs.append(conf_head(fused).view(fused.size(0), -1))
        else:
            for feat, loc_head, conf_head in zip(pooled_base, self.loc, self.conf):
                loc_outputs.append(loc_head(feat).view(feat.size(0), -1))
                conf_outputs.append(conf_head(feat).view(feat.size(0), -1))
        loc = torch.cat(loc_outputs, 1)
        conf = torch.cat(conf_outputs, 1)
        return conf.view(conf.size(0), -1, self.num_classes), loc.view(loc.size(0), -1, 4 * self.seq_len),
| [
"torch.nn.ReLU",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.cat",
"numpy.identity",
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d"
] | [((967, 1015), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(kernel_size=3, stride=1, padding=1)\n', (979, 1015), True, 'import torch.nn as nn\n'), ((1028, 1086), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(1024)'], {'kernel_size': '(3)', 'padding': '(6)', 'dilation': '(6)'}), '(512, 1024, kernel_size=3, padding=6, dilation=6)\n', (1037, 1086), True, 'import torch.nn as nn\n'), ((1099, 1135), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(1024)'], {'kernel_size': '(1)'}), '(1024, 1024, kernel_size=1)\n', (1108, 1135), True, 'import torch.nn as nn\n'), ((1165, 1174), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1172, 1174), True, 'import torch.nn as nn\n'), ((1183, 1192), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1190, 1192), True, 'import torch.nn as nn\n'), ((7298, 7326), 'torch.nn.Linear', 'nn.Linear', (['inpd', 'out_dim_reg'], {}), '(inpd, out_dim_reg)\n', (7307, 7326), True, 'import torch.nn as nn\n'), ((7347, 7375), 'torch.nn.Linear', 'nn.Linear', (['inpd', 'out_dim_cls'], {}), '(inpd, out_dim_cls)\n', (7356, 7375), True, 'import torch.nn as nn\n'), ((8227, 8249), 'torch.nn.ModuleList', 'nn.ModuleList', (['head[0]'], {}), '(head[0])\n', (8240, 8249), True, 'import torch.nn as nn\n'), ((8270, 8292), 'torch.nn.ModuleList', 'nn.ModuleList', (['head[1]'], {}), '(head[1])\n', (8283, 8292), True, 'import torch.nn as nn\n'), ((9781, 9798), 'torch.cat', 'torch.cat', (['loc', '(1)'], {}), '(loc, 1)\n', (9790, 9798), False, 'import torch\n'), ((9814, 9832), 'torch.cat', 'torch.cat', (['conf', '(1)'], {}), '(conf, 1)\n', (9823, 9832), False, 'import torch\n'), ((2698, 2718), 'numpy.identity', 'np.identity', (['(38 ** 2)'], {}), '(38 ** 2)\n', (2709, 2718), True, 'import numpy as np\n'), ((2840, 2860), 'numpy.identity', 'np.identity', (['(19 ** 2)'], {}), '(19 ** 2)\n', (2851, 2860), True, 'import numpy as np\n'), ((2982, 3002), 'numpy.identity', 'np.identity', (['(10 ** 2)'], {}), '(10 ** 2)\n', (2993, 
3002), True, 'import numpy as np\n'), ((3124, 3143), 'numpy.identity', 'np.identity', (['(5 ** 2)'], {}), '(5 ** 2)\n', (3135, 3143), True, 'import numpy as np\n'), ((3265, 3284), 'numpy.identity', 'np.identity', (['(3 ** 2)'], {}), '(3 ** 2)\n', (3276, 3284), True, 'import numpy as np\n'), ((3406, 3425), 'numpy.identity', 'np.identity', (['(1 ** 2)'], {}), '(1 ** 2)\n', (3417, 3425), True, 'import numpy as np\n'), ((544, 581), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (556, 581), True, 'import torch.nn as nn\n'), ((719, 770), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'v'], {'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels, v, kernel_size=3, padding=1)\n', (728, 770), True, 'import torch.nn as nn\n'), ((629, 682), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=2, stride=2, ceil_mode=True)\n', (641, 682), True, 'import torch.nn as nn\n'), ((1458, 1543), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'cfg[k + 1]'], {'kernel_size': '(1, 3)[flag]', 'stride': '(2)', 'padding': '(1)'}), '(in_channels, cfg[k + 1], kernel_size=(1, 3)[flag], stride=2,\n padding=1)\n', (1467, 1543), True, 'import torch.nn as nn\n'), ((1613, 1664), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'v'], {'kernel_size': '(1, 3)[flag]'}), '(in_channels, v, kernel_size=(1, 3)[flag])\n', (1622, 1664), True, 'import torch.nn as nn\n'), ((8797, 8819), 'torch.cat', 'torch.cat', (['(x1, x2)', '(2)'], {}), '((x1, x2), 2)\n', (8806, 8819), False, 'import torch\n'), ((833, 850), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['v'], {}), '(v)\n', (847, 850), True, 'import torch.nn as nn\n'), ((852, 861), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (859, 861), True, 'import torch.nn as nn\n'), ((916, 925), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (923, 925), True, 'import torch.nn as nn\n')] |
# plain.py
import numpy
__all__ = ["data_length", "convert_data"]
def data_length(line):
    """Return the number of whitespace-separated tokens in *line*."""
    tokens = line.strip().split()
    return len(tokens)
def tokenize(data):
    """Split *data* into a list of whitespace-separated tokens."""
    words = data.split()
    return words
def to_word_id(data, voc, unk="UNK"):
    """Map token sequences to id sequences using vocabulary *voc*.

    Args:
        data: iterable of token lists.
        voc: mapping from token to integer id; must contain *unk*.
        unk: vocabulary key whose id is used for out-of-vocabulary tokens.

    Returns:
        A list of id lists, parallel to *data*.

    Raises:
        KeyError: if *unk* is not a key of *voc*.
    """
    unkid = voc[unk]
    # dict.get avoids the double lookup of `w in voc` followed by `voc[w]`.
    return [[voc.get(w, unkid) for w in d] for d in data]
def convert_to_array(data, dtype):
    """Pack variable-length id sequences into padded arrays.

    Args:
        data: list of integer id sequences.
        dtype: numpy dtype string for the mask array.

    Returns:
        (seq, mask): int32 array of shape (max_len, batch) padded with
        zeros, and a same-shaped mask holding 1.0 at valid positions.
    """
    lengths = [len(item) for item in data]
    longest = max(lengths)
    width = len(data)
    seq = numpy.zeros((longest, width), "int32")
    mask = numpy.zeros((longest, width), dtype)
    for col, (n, item) in enumerate(zip(lengths, data)):
        seq[:n, col] = item
        mask[:n, col] = 1.0
    return seq, mask
def convert_data(data, voc, unk="UNK", eos="<eos>", reverse=False, dtype="float32"):
    """Tokenize raw lines, append *eos* (reversing token order first when
    *reverse* is set), map tokens to vocabulary ids, and pack the result
    into padded (seq, mask) arrays."""
    if reverse:
        sentences = [tokenize(line)[::-1] + [eos] for line in data]
    else:
        sentences = [tokenize(line) + [eos] for line in data]
    ids = to_word_id(sentences, voc, unk)
    return convert_to_array(ids, dtype)
| [
"numpy.zeros"
] | [((519, 557), 'numpy.zeros', 'numpy.zeros', (['(max_len, batch)', '"""int32"""'], {}), "((max_len, batch), 'int32')\n", (530, 557), False, 'import numpy\n'), ((569, 605), 'numpy.zeros', 'numpy.zeros', (['(max_len, batch)', 'dtype'], {}), '((max_len, batch), dtype)\n', (580, 605), False, 'import numpy\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import bqplot
import bqplot.pyplot as bqpyplot
import pandas as pd
from fastai.data_block import LabelList
from ipywidgets import widgets, Layout, IntSlider
import numpy as np
from utils_cv.common.image import im_width, im_height
from utils_cv.common.data import get_files_in_directory
class AnnotationWidget(object):
    """Interactive ipywidgets UI for assigning class labels to a directory
    of images and persisting the annotations to a tab-separated file."""
    IM_WIDTH = 500  # pixels
    def __init__(
        self,
        labels: list,
        im_dir: str,
        anno_path: str,
        im_filenames: list = None,
    ):
        """Widget class to annotate images.
        Args:
            labels: List of label names, e.g. ["bird", "car", "plane"].
            im_dir: Directory containing the images to be annotated.
            anno_path: path where to write annotations to, and (if exists) load annotations from.
            im_filenames: List of image filenames. If set to None, then will auto-detect all images in the provided image directory.
        """
        self.labels = labels
        self.im_dir = im_dir
        self.anno_path = anno_path
        self.im_filenames = im_filenames
        # Init
        self.vis_image_index = 0
        self.label_to_id = {s: i for i, s in enumerate(self.labels)}
        if not im_filenames:
            # Auto-detect all images with a known suffix in im_dir.
            self.im_filenames = [
                os.path.basename(s)
                for s in get_files_in_directory(
                    im_dir,
                    suffixes=(
                        ".jpg",
                        ".jpeg",
                        ".tif",
                        ".tiff",
                        ".gif",
                        ".giff",
                        ".png",
                        ".bmp",
                    ),
                )
            ]
        assert (
            len(self.im_filenames) > 0
        ), f"Not a single image specified or found in directory {im_dir}."
        # Initialize empty annotations and load previous annotations if file exist
        self.annos = pd.DataFrame()
        for im_filename in self.im_filenames:
            if im_filename not in self.annos:
                self.annos[im_filename] = pd.Series(
                    {"exclude": False, "labels": []}
                )
        if os.path.exists(self.anno_path):
            print(f"Loading existing annotation from {self.anno_path}.")
            with open(self.anno_path, "r") as f:
                # Skip the header row; each data row is: filename \t exclude \t labels
                for line in f.readlines()[1:]:
                    vec = line.strip().split("\t")
                    im_filename = vec[0]
                    self.annos[im_filename].exclude = vec[1] == "True"
                    if len(vec) > 2:
                        self.annos[im_filename].labels = vec[2].split(",")
        # Create UI and "start" widget
        self._create_ui()
    def show(self):
        """Return the root widget for display in a notebook."""
        return self.ui
    def update_ui(self):
        """Refresh the displayed image, name/path fields and checkbox states
        to reflect the image at ``self.vis_image_index``."""
        im_filename = self.im_filenames[self.vis_image_index]
        im_path = os.path.join(self.im_dir, im_filename)
        # Update the image and info
        self.w_img.value = open(im_path, "rb").read()
        self.w_filename.value = im_filename
        self.w_path.value = self.im_dir
        # Fix the width of the image widget and adjust the height
        self.w_img.layout.height = (
            f"{int(self.IM_WIDTH * (im_height(im_path)/im_width(im_path)))}px"
        )
        # Update annotations
        self.exclude_widget.value = self.annos[im_filename].exclude
        for w in self.label_widgets:
            w.value = False
        for label in self.annos[im_filename].labels:
            label_id = self.label_to_id[label]
            self.label_widgets[label_id].value = True
    def _create_ui(self):
        """Create and initialize widgets"""
        # ------------
        # Callbacks + logic
        # ------------
        def skip_image():
            """Return true if image should be skipped, and false otherwise."""
            # See if UI-checkbox to skip images is checked
            if not self.w_skip_annotated.value:
                return False
            # Stop skipping if image index is out of bounds
            if (
                self.vis_image_index <= 0
                or self.vis_image_index >= len(self.im_filenames) - 1
            ):
                return False
            # Skip if image has annotation
            im_filename = self.im_filenames[self.vis_image_index]
            labels = self.annos[im_filename].labels
            exclude = self.annos[im_filename].exclude
            if exclude or len(labels) > 0:
                return True
            return False
        def button_pressed(obj):
            """Next / previous image button callback."""
            # Find next/previous image. Variable step is -1 or +1 depending on which button was pressed.
            step = int(obj.value)
            self.vis_image_index += step
            while skip_image():
                self.vis_image_index += step
            # Clamp the index to the valid range before updating.
            self.vis_image_index = min(
                max(self.vis_image_index, 0), len(self.im_filenames) - 1
            )
            self.w_image_slider.value = self.vis_image_index
            self.update_ui()
        def slider_changed(obj):
            """Image slider callback.
            Need to wrap in try statement to avoid errors when slider value is not a number.
            """
            try:
                self.vis_image_index = int(obj["new"]["value"])
                self.update_ui()
            except Exception:
                pass
        def anno_changed(obj):
            """Label checkbox callback.
            Update annotation file and write to disk
            """
            # Test if call is coming from the user having clicked on a checkbox to change its state,
            # rather than a change of state when e.g. the checkbox value was updated programatically. This is a bit
            # of hack, but necessary since widgets.Checkbox() does not support a on_click() callback or similar.
            if (
                "new" in obj
                and isinstance(obj["new"], dict)
                and len(obj["new"]) == 0
            ):
                # If single-label annotation then unset all checkboxes except the one which the user just clicked
                if not self.w_multi_class.value:
                    for w in self.label_widgets:
                        if w.description != obj["owner"].description:
                            w.value = False
                # Update annotation object
                im_filename = self.im_filenames[self.vis_image_index]
                self.annos[im_filename].labels = [
                    w.description for w in self.label_widgets if w.value
                ]
                self.annos[im_filename].exclude = self.exclude_widget.value
                # Write to disk as tab-separated file.
                with open(self.anno_path, "w") as f:
                    f.write(
                        "{}\t{}\t{}\n".format(
                            "IM_FILENAME", "EXCLUDE", "LABELS"
                        )
                    )
                    # Only rows with a label or an exclude flag are persisted.
                    for k, v in self.annos.items():
                        if v.labels != [] or v.exclude:
                            f.write(
                                "{}\t{}\t{}\n".format(
                                    k, v.exclude, ",".join(v.labels)
                                )
                            )
        # ------------
        # UI - image + controls (left side)
        # ------------
        w_next_image_button = widgets.Button(description="Next")
        w_next_image_button.value = "1"
        w_next_image_button.layout = Layout(width="80px")
        w_next_image_button.on_click(button_pressed)
        w_previous_image_button = widgets.Button(description="Previous")
        w_previous_image_button.value = "-1"
        w_previous_image_button.layout = Layout(width="80px")
        w_previous_image_button.on_click(button_pressed)
        self.w_filename = widgets.Text(
            value="", description="Name:", layout=Layout(width="200px")
        )
        self.w_path = widgets.Text(
            value="", description="Path:", layout=Layout(width="200px")
        )
        self.w_image_slider = IntSlider(
            min=0,
            max=len(self.im_filenames) - 1,
            step=1,
            value=self.vis_image_index,
            continuous_update=False,
        )
        self.w_image_slider.observe(slider_changed)
        self.w_img = widgets.Image()
        self.w_img.layout.width = f"{self.IM_WIDTH}px"
        w_header = widgets.HBox(
            children=[
                w_previous_image_button,
                w_next_image_button,
                self.w_image_slider,
                self.w_filename,
                self.w_path,
            ]
        )
        # ------------
        # UI - info (right side)
        # ------------
        # Options widgets
        self.w_skip_annotated = widgets.Checkbox(
            value=False, description="Skip annotated images."
        )
        self.w_multi_class = widgets.Checkbox(
            value=False, description="Allow multi-class labeling"
        )
        # Label checkboxes widgets
        self.exclude_widget = widgets.Checkbox(
            value=False, description="EXCLUDE IMAGE"
        )
        self.exclude_widget.observe(anno_changed)
        self.label_widgets = [
            widgets.Checkbox(value=False, description=label)
            for label in self.labels
        ]
        for label_widget in self.label_widgets:
            label_widget.observe(anno_changed)
        # Combine UIs into tab widget
        w_info = widgets.VBox(
            children=[
                widgets.HTML(value="Options:"),
                self.w_skip_annotated,
                self.w_multi_class,
                widgets.HTML(value="Annotations:"),
                self.exclude_widget,
                *self.label_widgets,
            ]
        )
        w_info.layout.padding = "20px"
        self.ui = widgets.Tab(
            children=[
                widgets.VBox(
                    children=[
                        w_header,
                        widgets.HBox(children=[self.w_img, w_info]),
                    ]
                )
            ]
        )
        self.ui.set_title(0, "Annotator")
        # Fill UI with content
        self.update_ui()
class ResultsWidget(object):
    """Notebook widget to browse classification predictions alongside the
    ground-truth label and a per-class score bar chart."""
    IM_WIDTH = 500  # pixels
    def __init__(self, dataset: LabelList, y_score: np.ndarray, y_label: iter):
        """Helper class to draw and update Image classification results widgets.
        Args:
            dataset (LabelList): Data used for prediction, containing ImageList x and CategoryList y.
            y_score (np.ndarray): Predicted scores.
            y_label (iterable): Predicted labels. Note, not a true label.
        """
        assert len(y_score) == len(y_label) == len(dataset)
        self.dataset = dataset
        self.pred_scores = y_score
        self.pred_labels = y_label
        # Init
        self.vis_image_index = 0
        self.labels = dataset.classes
        self.label_to_id = {s: i for i, s in enumerate(self.labels)}
        self._create_ui()
    @staticmethod
    def _list_sort(list1d, reverse=False, comparison_fn=lambda x: x):
        """Sorts list1d and returns (sorted list, list of indices)"""
        indices = list(range(len(list1d)))
        tmp = sorted(zip(list1d, indices), key=comparison_fn, reverse=reverse)
        return list(map(list, list(zip(*tmp))))
    def show(self):
        """Return the root widget for display in a notebook."""
        return self.ui
    def update(self):
        """Refresh all widgets to show the prediction at ``self.vis_image_index``."""
        scores = self.pred_scores[self.vis_image_index]
        im = self.dataset.x[self.vis_image_index]  # fastai Image object
        # List the predicted labels sorted by descending score.
        _, sort_order = self._list_sort(scores, reverse=True)
        pred_labels_str = ""
        for i in sort_order:
            pred_labels_str += f"{self.labels[i]} ({scores[i]:3.2f})\n"
        self.w_pred_labels.value = str(pred_labels_str)
        self.w_image_header.value = f"Image index: {self.vis_image_index}"
        self.w_img.value = im._repr_png_()
        # Fix the width of the image widget and adjust the height
        self.w_img.layout.height = (
            f"{int(self.IM_WIDTH * (im.size[0]/im.size[1]))}px"
        )
        self.w_gt_label.value = str(self.dataset.y[self.vis_image_index])
        self.w_filename.value = str(
            self.dataset.items[self.vis_image_index].name
        )
        self.w_path.value = str(
            self.dataset.items[self.vis_image_index].parent
        )
        # Redraw the per-class score bar chart.
        bqpyplot.clear()
        bqpyplot.bar(
            self.labels,
            scores,
            align="center",
            alpha=1.0,
            color=np.abs(scores),
            scales={"color": bqplot.ColorScale(scheme="Blues", min=0)},
        )
    def _create_ui(self):
        """Create and initialize widgets"""
        # ------------
        # Callbacks + logic
        # ------------
        def image_passes_filters(image_index):
            """Return if image should be shown."""
            actual_label = str(self.dataset.y[image_index])
            bo_pred_correct = actual_label == self.pred_labels[image_index]
            if (bo_pred_correct and self.w_filter_correct.value) or (
                not bo_pred_correct and self.w_filter_wrong.value
            ):
                return True
            return False
        def button_pressed(obj):
            """Next / previous image button callback."""
            step = int(obj.value)
            self.vis_image_index += step
            self.vis_image_index = min(
                max(0, self.vis_image_index), int(len(self.pred_labels)) - 1
            )
            # Keep stepping until an image passes the correct/incorrect filters
            # or the index reaches either end of the dataset.
            while not image_passes_filters(self.vis_image_index):
                self.vis_image_index += step
                if (
                    self.vis_image_index <= 0
                    or self.vis_image_index >= int(len(self.pred_labels)) - 1
                ):
                    break
            self.vis_image_index = min(
                max(0, self.vis_image_index), int(len(self.pred_labels)) - 1
            )
            self.w_image_slider.value = self.vis_image_index
            self.update()
        def slider_changed(obj):
            """Image slider callback.
            Need to wrap in try statement to avoid errors when slider value is not a number.
            """
            try:
                self.vis_image_index = int(obj["new"]["value"])
                self.update()
            except Exception:
                pass
        # ------------
        # UI - image + controls (left side)
        # ------------
        w_next_image_button = widgets.Button(description="Next")
        w_next_image_button.value = "1"
        w_next_image_button.layout = Layout(width="80px")
        w_next_image_button.on_click(button_pressed)
        w_previous_image_button = widgets.Button(description="Previous")
        w_previous_image_button.value = "-1"
        w_previous_image_button.layout = Layout(width="80px")
        w_previous_image_button.on_click(button_pressed)
        self.w_filename = widgets.Text(
            value="", description="Name:", layout=Layout(width="200px")
        )
        self.w_path = widgets.Text(
            value="", description="Path:", layout=Layout(width="200px")
        )
        self.w_image_slider = IntSlider(
            min=0,
            max=len(self.pred_labels) - 1,
            step=1,
            value=self.vis_image_index,
            continuous_update=False,
        )
        self.w_image_slider.observe(slider_changed)
        self.w_image_header = widgets.Text("", layout=Layout(width="130px"))
        self.w_img = widgets.Image()
        self.w_img.layout.width = f"{self.IM_WIDTH}px"
        w_header = widgets.HBox(
            children=[
                w_previous_image_button,
                w_next_image_button,
                self.w_image_slider,
                self.w_filename,
                self.w_path,
            ]
        )
        # ------------
        # UI - info (right side)
        # ------------
        w_filter_header = widgets.HTML(
            value="Filters (use Image +1/-1 buttons for navigation):"
        )
        self.w_filter_correct = widgets.Checkbox(
            value=True, description="Correct classifications"
        )
        self.w_filter_wrong = widgets.Checkbox(
            value=True, description="Incorrect classifications"
        )
        w_gt_header = widgets.HTML(value="Ground truth:")
        self.w_gt_label = widgets.Text(value="")
        self.w_gt_label.layout.width = "360px"
        w_pred_header = widgets.HTML(value="Predictions:")
        self.w_pred_labels = widgets.Textarea(value="")
        self.w_pred_labels.layout.height = "200px"
        self.w_pred_labels.layout.width = "360px"
        w_scores_header = widgets.HTML(value="Classification scores:")
        self.w_scores = bqpyplot.figure()
        self.w_scores.layout.height = "250px"
        self.w_scores.layout.width = "370px"
        self.w_scores.fig_margin = {
            "top": 5,
            "bottom": 80,
            "left": 30,
            "right": 5,
        }
        # Combine UIs into tab widget
        w_info = widgets.VBox(
            children=[
                w_filter_header,
                self.w_filter_correct,
                self.w_filter_wrong,
                w_gt_header,
                self.w_gt_label,
                w_pred_header,
                self.w_pred_labels,
                w_scores_header,
                self.w_scores,
            ]
        )
        w_info.layout.padding = "20px"
        self.ui = widgets.Tab(
            children=[
                widgets.VBox(
                    children=[
                        w_header,
                        widgets.HBox(children=[self.w_img, w_info]),
                    ]
                )
            ]
        )
        self.ui.set_title(0, "Results viewer")
        # Fill UI with content
        self.update()
| [
"numpy.abs",
"utils_cv.common.data.get_files_in_directory",
"os.path.join",
"pandas.DataFrame",
"ipywidgets.widgets.Checkbox",
"utils_cv.common.image.im_width",
"os.path.exists",
"ipywidgets.Layout",
"ipywidgets.widgets.HBox",
"os.path.basename",
"ipywidgets.widgets.Button",
"pandas.Series",
... | [((2046, 2060), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2058, 2060), True, 'import pandas as pd\n'), ((2288, 2318), 'os.path.exists', 'os.path.exists', (['self.anno_path'], {}), '(self.anno_path)\n', (2302, 2318), False, 'import os\n'), ((2980, 3018), 'os.path.join', 'os.path.join', (['self.im_dir', 'im_filename'], {}), '(self.im_dir, im_filename)\n', (2992, 3018), False, 'import os\n'), ((7579, 7613), 'ipywidgets.widgets.Button', 'widgets.Button', ([], {'description': '"""Next"""'}), "(description='Next')\n", (7593, 7613), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((7691, 7711), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""80px"""'}), "(width='80px')\n", (7697, 7711), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((7799, 7837), 'ipywidgets.widgets.Button', 'widgets.Button', ([], {'description': '"""Previous"""'}), "(description='Previous')\n", (7813, 7837), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((7924, 7944), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""80px"""'}), "(width='80px')\n", (7930, 7944), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((8527, 8542), 'ipywidgets.widgets.Image', 'widgets.Image', ([], {}), '()\n', (8540, 8542), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((8618, 8743), 'ipywidgets.widgets.HBox', 'widgets.HBox', ([], {'children': '[w_previous_image_button, w_next_image_button, self.w_image_slider, self.\n w_filename, self.w_path]'}), '(children=[w_previous_image_button, w_next_image_button, self.\n w_image_slider, self.w_filename, self.w_path])\n', (8630, 8743), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((8994, 9061), 'ipywidgets.widgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': '"""Skip annotated images."""'}), "(value=False, description='Skip annotated images.')\n", (9010, 9061), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((9113, 9184), 
'ipywidgets.widgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': '"""Allow multi-class labeling"""'}), "(value=False, description='Allow multi-class labeling')\n", (9129, 9184), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((9273, 9331), 'ipywidgets.widgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': '"""EXCLUDE IMAGE"""'}), "(value=False, description='EXCLUDE IMAGE')\n", (9289, 9331), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((12609, 12625), 'bqplot.pyplot.clear', 'bqpyplot.clear', ([], {}), '()\n', (12623, 12625), True, 'import bqplot.pyplot as bqpyplot\n'), ((14723, 14757), 'ipywidgets.widgets.Button', 'widgets.Button', ([], {'description': '"""Next"""'}), "(description='Next')\n", (14737, 14757), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((14835, 14855), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""80px"""'}), "(width='80px')\n", (14841, 14855), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((14943, 14981), 'ipywidgets.widgets.Button', 'widgets.Button', ([], {'description': '"""Previous"""'}), "(description='Previous')\n", (14957, 14981), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((15068, 15088), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""80px"""'}), "(width='80px')\n", (15074, 15088), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((15748, 15763), 'ipywidgets.widgets.Image', 'widgets.Image', ([], {}), '()\n', (15761, 15763), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((15838, 15963), 'ipywidgets.widgets.HBox', 'widgets.HBox', ([], {'children': '[w_previous_image_button, w_next_image_button, self.w_image_slider, self.\n w_filename, self.w_path]'}), '(children=[w_previous_image_button, w_next_image_button, self.\n w_image_slider, self.w_filename, self.w_path])\n', (15850, 15963), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((16182, 16253), 
'ipywidgets.widgets.HTML', 'widgets.HTML', ([], {'value': '"""Filters (use Image +1/-1 buttons for navigation):"""'}), "(value='Filters (use Image +1/-1 buttons for navigation):')\n", (16194, 16253), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((16308, 16375), 'ipywidgets.widgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(True)', 'description': '"""Correct classifications"""'}), "(value=True, description='Correct classifications')\n", (16324, 16375), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((16428, 16497), 'ipywidgets.widgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(True)', 'description': '"""Incorrect classifications"""'}), "(value=True, description='Incorrect classifications')\n", (16444, 16497), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((16543, 16578), 'ipywidgets.widgets.HTML', 'widgets.HTML', ([], {'value': '"""Ground truth:"""'}), "(value='Ground truth:')\n", (16555, 16578), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((16605, 16627), 'ipywidgets.widgets.Text', 'widgets.Text', ([], {'value': '""""""'}), "(value='')\n", (16617, 16627), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((16700, 16734), 'ipywidgets.widgets.HTML', 'widgets.HTML', ([], {'value': '"""Predictions:"""'}), "(value='Predictions:')\n", (16712, 16734), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((16764, 16790), 'ipywidgets.widgets.Textarea', 'widgets.Textarea', ([], {'value': '""""""'}), "(value='')\n", (16780, 16790), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((16919, 16963), 'ipywidgets.widgets.HTML', 'widgets.HTML', ([], {'value': '"""Classification scores:"""'}), "(value='Classification scores:')\n", (16931, 16963), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((16988, 17005), 'bqplot.pyplot.figure', 'bqpyplot.figure', ([], {}), '()\n', (17003, 17005), True, 'import bqplot.pyplot as bqpyplot\n'), ((17296, 17487), 
'ipywidgets.widgets.VBox', 'widgets.VBox', ([], {'children': '[w_filter_header, self.w_filter_correct, self.w_filter_wrong, w_gt_header,\n self.w_gt_label, w_pred_header, self.w_pred_labels, w_scores_header,\n self.w_scores]'}), '(children=[w_filter_header, self.w_filter_correct, self.\n w_filter_wrong, w_gt_header, self.w_gt_label, w_pred_header, self.\n w_pred_labels, w_scores_header, self.w_scores])\n', (17308, 17487), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((9447, 9495), 'ipywidgets.widgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': 'label'}), '(value=False, description=label)\n', (9463, 9495), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((1368, 1387), 'os.path.basename', 'os.path.basename', (['s'], {}), '(s)\n', (1384, 1387), False, 'import os\n'), ((2195, 2238), 'pandas.Series', 'pd.Series', (["{'exclude': False, 'labels': []}"], {}), "({'exclude': False, 'labels': []})\n", (2204, 2238), True, 'import pandas as pd\n'), ((8093, 8114), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""200px"""'}), "(width='200px')\n", (8099, 8114), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((8211, 8232), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""200px"""'}), "(width='200px')\n", (8217, 8232), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((12762, 12776), 'numpy.abs', 'np.abs', (['scores'], {}), '(scores)\n', (12768, 12776), True, 'import numpy as np\n'), ((15237, 15258), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""200px"""'}), "(width='200px')\n", (15243, 15258), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((15355, 15376), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""200px"""'}), "(width='200px')\n", (15361, 15376), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((15704, 15725), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""130px"""'}), "(width='130px')\n", (15710, 15725), False, 'from ipywidgets import widgets, 
Layout, IntSlider\n'), ((1413, 1525), 'utils_cv.common.data.get_files_in_directory', 'get_files_in_directory', (['im_dir'], {'suffixes': "('.jpg', '.jpeg', '.tif', '.tiff', '.gif', '.giff', '.png', '.bmp')"}), "(im_dir, suffixes=('.jpg', '.jpeg', '.tif', '.tiff',\n '.gif', '.giff', '.png', '.bmp'))\n", (1435, 1525), False, 'from utils_cv.common.data import get_files_in_directory\n'), ((9747, 9777), 'ipywidgets.widgets.HTML', 'widgets.HTML', ([], {'value': '"""Options:"""'}), "(value='Options:')\n", (9759, 9777), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((9870, 9904), 'ipywidgets.widgets.HTML', 'widgets.HTML', ([], {'value': '"""Annotations:"""'}), "(value='Annotations:')\n", (9882, 9904), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((12807, 12847), 'bqplot.ColorScale', 'bqplot.ColorScale', ([], {'scheme': '"""Blues"""', 'min': '(0)'}), "(scheme='Blues', min=0)\n", (12824, 12847), False, 'import bqplot\n'), ((3334, 3352), 'utils_cv.common.image.im_height', 'im_height', (['im_path'], {}), '(im_path)\n', (3343, 3352), False, 'from utils_cv.common.image import im_width, im_height\n'), ((3353, 3370), 'utils_cv.common.image.im_width', 'im_width', (['im_path'], {}), '(im_path)\n', (3361, 3370), False, 'from utils_cv.common.image import im_width, im_height\n'), ((10216, 10259), 'ipywidgets.widgets.HBox', 'widgets.HBox', ([], {'children': '[self.w_img, w_info]'}), '(children=[self.w_img, w_info])\n', (10228, 10259), False, 'from ipywidgets import widgets, Layout, IntSlider\n'), ((17871, 17914), 'ipywidgets.widgets.HBox', 'widgets.HBox', ([], {'children': '[self.w_img, w_info]'}), '(children=[self.w_img, w_info])\n', (17883, 17914), False, 'from ipywidgets import widgets, Layout, IntSlider\n')] |
"""
tanh
~~~~
Plots a graph of the tanh function."""
import numpy as np
import matplotlib.pyplot as plt
z = np.arange(-5, 5, .1)
t = np.tanh(z)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(z, t)
ax.set_ylim([-1.0, 1.0])
ax.set_xlim([-5,5])
ax.grid(True)
ax.set_xlabel('z')
ax.set_title('tanh function')
plt.show()
| [
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.tanh",
"matplotlib.pyplot.show"
] | [((120, 141), 'numpy.arange', 'np.arange', (['(-5)', '(5)', '(0.1)'], {}), '(-5, 5, 0.1)\n', (129, 141), True, 'import numpy as np\n'), ((146, 156), 'numpy.tanh', 'np.tanh', (['z'], {}), '(z)\n', (153, 156), True, 'import numpy as np\n'), ((166, 178), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (176, 178), True, 'import matplotlib.pyplot as plt\n'), ((337, 347), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (345, 347), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Import TensorFlow and output the version.
# NOTE: get_ipython() only exists inside IPython/Jupyter; this line fails under plain Python.
get_ipython().system('pip install tensorflow==1.14.0')
import tensorflow as tf
print("\n\nTensorFlow version:", tf.__version__)
# In[2]:
# Network dimensions: 28x28 MNIST pixels in, 10 class logits out.
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
# In[3]:
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
# In[4]:
# By default, the tf.layers.dense() function uses Xavier initialization (with uniform distribution)
hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
# In[5]:
# You can change this to He initialization by using the variance_scaling_initializer() function like this:
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
he_init = tf.contrib.layers.variance_scaling_initializer()
hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu,
                          kernel_initializer=he_init, name="hidden1")
# In[6]:
# TensorFlow offers an elu() function that you can use to build your neural network. Simply set the activation
# argument when calling the dense() function like this:
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.elu, name="hidden1")
# In[7]:
# TensorFlow does not have a predefined function for leaky ReLUs, but it is easy to define:
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
def leaky_relu(z, name=None):
    """Leaky ReLU activation: elementwise max(0.01 * z, z)."""
    return tf.maximum(0.01 * z, z, name=name)
hidden1 = tf.layers.dense(X, n_hidden1, activation=leaky_relu, name="hidden1")
# # Implementing Batch Normalization with TensorFlow
# Another way to reduce the problem of vanishing/exploding gradients is to use **batch normalization**.
# TensorFlow provides a **tf.nn.batch_normalization()** function that simply centers and normalizes the inputs, but
# you must compute the mean and standard deviation yourself and pass them as parameters to this function, and you must also handle the creation of the scaling and offset parameters (and pass them to this function). Another option is to use the **tf.layers.batch_normalization()** function, which handles all this for you as in the following code:
# In[8]:
tf.reset_default_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
# `training` switches BN between batch statistics (True) and moving averages (False).
training = tf.placeholder_with_default(False, shape=(), name="training")
hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1")
bn1 = tf.layers.batch_normalization(hidden1, training=training, momentum=0.9)
bn1_act = tf.nn.elu(bn1)
hidden2 = tf.layers.dense(bn1_act, n_hidden2, name="hidden2")
bn2 = tf.layers.batch_normalization(hidden2, training=training, momentum=0.9)
bn2_act = tf.nn.elu(bn2)
logits_before_bn = tf.layers.dense(bn2_act, n_outputs, name="outputs")
logits = tf.layers.batch_normalization(logits_before_bn, training=training, momentum=0.9)
# ###### The code is quite repetitive, with the same batch normalization parameters appearing over and over again. To avoid this repetition, you can use the partial() function from the functools module. It creates a thin wrapper around a function and allows you to define default values for some parameters. The creation of the network layers in the preceding code can be modified as follows:
# In[9]:
tf.reset_default_graph()
from functools import partial
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
training = tf.placeholder_with_default(False, shape=(), name="training")
# partial() pre-binds the shared BN arguments so each layer call stays short.
my_batch_norm_layer = partial(tf.layers.batch_normalization, training=training, momentum=0.9)
hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1")
bn1 = my_batch_norm_layer(hidden1)
bn1_act = tf.nn.elu(bn1)
hidden2 = tf.layers.dense(bn1_act, n_hidden2, name="hidden2")
bn2 = my_batch_norm_layer(hidden2)
bn2_act = tf.nn.elu(bn2)
logits_before_bn = tf.layers.dense(bn2_act, n_outputs, name="outputs")
logits = my_batch_norm_layer(logits_before_bn)
# #### Let's build a neural net for MNIST, using the ELU activation function and Batch Normalization at each layer:
# In[10]:
tf.reset_default_graph()
from functools import partial
batch_norm_momentum = 0.9
learning_rate = 0.01
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
training = tf.placeholder_with_default(False, shape=(), name="training")
with tf.name_scope("dnn"):
    he_init = tf.contrib.layers.variance_scaling_initializer()
    my_batch_norm_layer = partial(tf.layers.batch_normalization, training=training, momentum=batch_norm_momentum)
    my_dense_layer = partial(tf.layers.dense, kernel_initializer=he_init)
    hidden1 = my_dense_layer(X, n_hidden1, name="hidden1")
    bn1 = tf.nn.elu(my_batch_norm_layer(hidden1))
    hidden2 = my_dense_layer(bn1, n_hidden2, name="hidden2")
    bn2 = tf.nn.elu(my_batch_norm_layer(hidden2))
    logits_before_bn = my_dense_layer(bn2, n_outputs, name="outputs")
    logits = my_batch_norm_layer(logits_before_bn)
with tf.name_scope("loss"):
    # sparse_* variant takes integer class labels directly (no one-hot encoding needed).
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
    # Fraction of samples whose true class is the top-1 prediction.
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# In[11]:
n_epochs = 40
batch_size = 64
# In[6]:
import numpy as np

def _flatten_and_scale(images):
    # 28x28 uint8 images -> (n, 784) float32 vectors scaled into [0, 1].
    return images.astype(np.float32).reshape(-1, 28 * 28) / 255.0

# Download MNIST via Keras, preprocess, and carve the first 5,000
# training samples off as a validation set.
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
X_train, X_test = _flatten_and_scale(X_train), _flatten_and_scale(X_test)
y_train, y_test = y_train.astype(np.int32), y_test.astype(np.int32)
X_valid, X_train = X_train[:5000], X_train[5000:]
y_valid, y_train = y_train[:5000], y_train[5000:]
# In[13]:
def shuffle_batch(X, y, batch_size):
    """Yield (X_batch, y_batch) minibatches covering the data in random order.

    Parameters
    ----------
    X, y : array-likes indexable along axis 0, with the same length.
    batch_size : int
        Target number of samples per batch; np.array_split distributes any
        remainder across batches.

    Yields
    ------
    (X_batch, y_batch) pairs; every sample appears exactly once per pass.
    """
    rnd_idx = np.random.permutation(len(X))
    # Guard against batch_size > len(X): the original len(X) // batch_size
    # would be 0 and np.array_split raises ValueError on 0 sections.
    # With the guard, a too-large batch_size yields a single full batch.
    n_batches = max(1, len(X) // batch_size)
    for batch_idx in np.array_split(rnd_idx, n_batches):
        X_batch, y_batch = X[batch_idx], y[batch_idx]
        yield X_batch, y_batch
# In[14]:
# The BN layers register their moving-average update ops in the UPDATE_OPS
# collection; they must be run explicitly alongside the training op.
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            # training=True makes BN use batch statistics for this step.
            sess.run([training_op, extra_update_ops], feed_dict={training: True, X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")
# # Gradient Clipping
# A popular technique to lessen the exploding gradients problem is to simply clip the gradients during backpropagation so they never exceed some threshold. This is called **Gradient Clipping**. In TensorFlow, the optimizer's minimize() function takes care of both computing the gradients and applying them, so you must instead call the optimizer's **compute_gradients()** method first, then create an operation to clip the gradients using the **clip_by_value()** function, and finally create an operation to apply the clipped gradients using the optimizer's **apply_gradients()** method:
# In[31]:
tf.reset_default_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 50
n_hidden3 = 50
n_hidden4 = 50
n_hidden5 = 50
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
# NOTE(review): shape=None leaves the rank unconstrained here, unlike the
# shape=(None) used for y elsewhere in this file — confirm this is intended.
y = tf.placeholder(tf.int32, shape=None, name= "y")
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2")
    hidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.relu, name="hidden3")
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu, name="hidden4")
    hidden5 = tf.layers.dense(hidden4, n_hidden5, activation=tf.nn.relu, name="hidden5")
    logits = tf.layers.dense(hidden5, n_outputs, name="outputs")
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
# In[32]:
learning_rate = 0.01
threshold = 1.0
with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    grads_and_vars = optimizer.compute_gradients(loss)
    # Clip each gradient elementwise into [-threshold, threshold] before applying.
    capped_gvs = [(tf.clip_by_value(grad, -threshold, threshold), var) for grad, var in grads_and_vars]
    training_op = optimizer.apply_gradients(capped_gvs)
# It will compute the gradients, clip them between -1.0 and 1.0, and apply them.
# In[33]:
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
# In[34]:
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# In[35]:
n_epochs = 40
batch_size = 64
# In[36]:
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")
# # Reusing Pretrained Layers
# ## Reusing a TensorFlow Model
# If the original model was trained using TensorFlow, you can simply restore it and train it on the new task. You can use the **import_meta_graph()** function to import the operations into the default graph. This returns a **Saver** that you can use later to load the model's state (i.e., the variable values):
# In[37]:
tf.reset_default_graph()
saver = tf.train.import_meta_graph("./my_model_final.ckpt.meta")
# In[38]:
# To list all the operations you can use the graph's get_operations() method:
for op in tf.get_default_graph().get_operations():
    print(op.name)
# In[39]:
# Once you know which operations you need, you can get a handle on them using the graph's get_operation_by_name()
# and get_tensor_by_name() methods:
X = tf.get_default_graph().get_tensor_by_name("X:0")
y = tf.get_default_graph().get_tensor_by_name("y:0")
accuracy = tf.get_default_graph().get_tensor_by_name("eval/accuracy:0")
training_op = tf.get_default_graph().get_operation_by_name("train/GradientDescent")
# **Note:** The name of a tensor is the name of the operation that outputs it followed by :0 (or :1 if it is the second output, :2 if it is the third, and so on).
# In[40]:
# If you are the author of the original model, you could make things easier for people who will reuse your model
# by giving operations very clear names and documenting them. Another approach is to create a collection containing
# all the important operations that users will want to get a handle on:
for op in (X, y, accuracy, training_op):
    tf.add_to_collection("my_important_ops", op)
# In[41]:
# This way people who will reuse your model will be able to simply write:
X, y, accuracy, training_op = tf.get_collection("my_important_ops")
# In[42]:
# You can then restore the model's state using the Saver and continue training using your own data:
with tf.Session() as sess:
    saver.restore(sess, "./my_model_final.ckpt")
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_new_model_final.ckpt")
# In general you will want to reuse only the lower layers. If you are using **import_meta_graph()** it will load the whole graph, but you can simply ignore the parts you do not need. In this example, we add a new 4th hidden layer on top of the pretrained 3rd layer (ignoring the old 4th hidden layer). We also build a new output layer, the loss for this new output, and a new optimizer to minimize it. We also need another saver to save the whole graph (containing both the entire old graph plus the new operations), and an initialization operation to initialize all the new variables:
# In[43]:
tf.reset_default_graph()
n_hidden4 = 20
n_outputs = 10
saver = tf.train.import_meta_graph("./my_model_final.ckpt.meta")
X = tf.get_default_graph().get_tensor_by_name("X:0")
y = tf.get_default_graph().get_tensor_by_name("y:0")
# Grab the pretrained 3rd hidden layer's activation output as the new base.
hidden3 = tf.get_default_graph().get_tensor_by_name("dnn/hidden3/Relu:0")
new_hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu, name="new_hidden4")
new_logits = tf.layers.dense(new_hidden4, n_outputs, name="new_outputs")
with tf.name_scope("new_loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=new_logits)
    loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("new_eval"):
    correct = tf.nn.in_top_k(new_logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
with tf.name_scope("new_train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
new_saver = tf.train.Saver()
# In[44]:
# And we can train this model:
with tf.Session() as sess:
    init.run()
    # init first, then restore: the restore overwrites the pretrained variables.
    saver.restore(sess, "./my_model_final.ckpt")
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = new_saver.save(sess, "./my_new_model_final.ckpt")
# If you have access to the Python code that built the original graph, you can just reuse the parts you need and drop the rest:
# In[45]:
tf.reset_default_graph()
n_inputs = 28 * 28
n_hidden1 = 300 # reused
n_hidden2 = 50 # reused
n_hidden3 = 50 # reused
n_hidden4 = 20 # new
n_outputs = 10 # new
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2")
    hidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.relu, name="hidden3")
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu, name="hidden4")
    logits = tf.layers.dense(hidden4, n_outputs, name="outputs")
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
# However, you must create one **Saver** to restore the pretrained model (giving it the list of variables to restore, or else it will complain that the graphs don't match), and another **Saver** to save the new model, once it is trained:
# In[46]:
# scope="hidden[123]" is a regular expression matching hidden1, hidden2, hidden3.
reuse_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="hidden[123]")
restore_saver = tf.train.Saver(reuse_vars) # To restore layers 1-3
init = tf.global_variables_initializer() # To init all variables, old and new
saver = tf.train.Saver()
with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./my_model_final.ckpt")
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_new_model_final.ckpt")
# # Reusing Models from Other Frameworks
# If the model was trained using another framework, you will need to load the model parameters manually, then assign them to the appropriate variables.
# In[47]:
tf.reset_default_graph()
n_inputs = 2
n_hidden1 = 3
# Pretend these weights/biases came from another framework.
original_W = [[1., 2., 3.], [4., 5., 6.]]
original_b = [7., 8., 9.]
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
# [...] Build the rest of the model
# Get a handle on the assignment nodes for the hidden1 variables
graph = tf.get_default_graph()
assign_kernel = graph.get_operation_by_name("hidden1/kernel/Assign")
assign_bias = graph.get_operation_by_name("hidden1/bias/Assign")
# inputs[1] of an Assign op is the value being assigned; feeding it below
# overrides the layer's default initializer values during init.
init_kernel = assign_kernel.inputs[1]
init_bias = assign_bias.inputs[1]
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init, feed_dict={init_kernel: original_W, init_bias: original_b})
    # [...] Train the model on your new task
    print(hidden1.eval(feed_dict={X: [[10.0, 11.0]]}))
# the weights variable created by the tf.layers.dense() function is called "kernel" (instead of "weights" when
# using the tf.contrib.layers.fully_connected(), as in the book), and the biases variable is called bias instead
# of biases.
# # Freezing the Lower Layers
# It is likely that the lower layers of the first DNN have learned to detect low-level features in pictures that will be useful across both image classification tasks, so you can just reuse these layers as they are. It is generally a good idea to "freeze" their weights when training the new DNN: if the lower-layer weights are fixed, then the higher-layer weights will be easier to train.
# In[50]:
# To freeze the lower layers during training, one solution is to give the optimizer the list of variables to
# train, excluding the variables from the lower layers:
tf.reset_default_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 50
n_hidden3 = 50
n_hidden4 = 20
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=None, name="y")
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2")
    hidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.relu, name="hidden3")
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu, name="hidden4")
    logits = tf.layers.dense(hidden4, n_outputs, name="outputs")
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    # Only hidden3, hidden4 and the output layer are optimized; hidden1-2 stay frozen.
    train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="hidden[34]|outputs")
    training_op = optimizer.minimize(loss, var_list=train_vars)
reuse_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                               scope="hidden[123]") # regular expression
restore_saver = tf.train.Saver(reuse_vars) # to restore layers 1-3
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./my_model_final.ckpt")
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_new_model_final.ckpt")
# In[51]:
# Another option is to add a stop_gradient() layer in the graph. Any layer below it will be frozen:
tf.reset_default_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 50
n_hidden3 = 50
n_hidden4 = 20
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=None, name="y")
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2")
    # stop_gradient() blocks backpropagation here, freezing hidden1 and hidden2.
    hidden2_stop = tf.stop_gradient(hidden2)
    hidden3 = tf.layers.dense(hidden2_stop, n_hidden3, activation=tf.nn.relu, name="hidden3")
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu, name="hidden4")
    logits = tf.layers.dense(hidden4, n_outputs, name="outputs")
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
reuse_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="hidden[123]")
restore_saver = tf.train.Saver(reuse_vars) # to restore layers 1-3
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./my_model_final.ckpt")
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_new_model_final.ckpt")
# # Caching the Frozen Layers
# Since the frozen layers won't change during training, instead of building batches of training instances, it would give a huge boost to training to build batches of outputs from hidden layer 2 and feed them to the training operation:
# In[61]:
import numpy as np
tf.reset_default_graph()
n_inputs = 28 * 28  # MNIST
n_hidden1 = 300 # reused
n_hidden2 = 50  # reused
n_hidden3 = 50  # reused
n_hidden4 = 20  # new!
n_outputs = 10  # new!
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1") # reused frozen
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2") # reused frozen & cached
    hidden2_stop = tf.stop_gradient(hidden2)
    hidden3 = tf.layers.dense(hidden2_stop, n_hidden3, activation=tf.nn.relu, name="hidden3") # reused, not frozen
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu, name="hidden4") # new!
    logits = tf.layers.dense(hidden4, n_outputs, name="outputs") # new!
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
reuse_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="hidden[123]") # regular expression
restore_saver = tf.train.Saver(reuse_vars) # to restore layers 1-3
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_batches = len(X_train) // batch_size
with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./my_model_final.ckpt")
    # Cache the frozen hidden2 activations once for the whole training and
    # validation sets, since everything below hidden2 never changes.
    h2_cache = sess.run(hidden2, feed_dict={X: X_train})
    h2_cache_valid = sess.run(hidden2, feed_dict={X: X_valid})
    for epoch in range(n_epochs):
        # BUG FIX: the original referenced an undefined `mnist` object
        # (mnist.train.num_examples / mnist.train.labels), raising NameError.
        # The data was loaded earlier via tf.keras as X_train / y_train.
        shuffled_idx = np.random.permutation(len(X_train))
        hidden2_batches = np.array_split(h2_cache[shuffled_idx], n_batches)
        y_batches = np.array_split(y_train[shuffled_idx], n_batches)
        for hidden2_batch, y_batch in zip(hidden2_batches, y_batches):
            # Feed the cached activations directly into hidden2, skipping layers 1-2.
            sess.run(training_op, feed_dict={hidden2: hidden2_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={hidden2: h2_cache_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_new_model_final.ckpt")
# # Learning Rate Scheduling
# If you start with a high learning rate and then reduce it once it stops making fast progress, you can reach a good solution faster than with the optimal constant learning rate. There are many different strategies to reduce the learning rate during training. These strategies are called learning schedules. We are implementing exponential scheduling here:
# In[85]:
tf.reset_default_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 50
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=None, name="y")
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2")
    logits = tf.layers.dense(hidden2, n_outputs, name="outputs")
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
with tf.name_scope("train"):
    # The learning rate will drop by a factor of 10 (decay_rate) every 10000 (decay_steps) steps.
    initial_learning_rate = 0.1
    decay_steps = 10000
    decay_rate = 1/10
    global_step = tf.Variable(0, trainable=False, name="global_step")
    learning_rate = tf.train.exponential_decay(initial_learning_rate, global_step, decay_steps, decay_rate)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    # Passing global_step makes minimize() increment it on every training step.
    training_op = optimizer.minimize(loss, global_step=global_step)
# After setting the hyperparameter values, we create a nontrainable variable global_step (initialized to 0) to keep
# track of the current training iteration number.
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# In[86]:
n_epochs = 20
batch_size = 64
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")
# # L1 and L2 regularization
# We can also use l1 and l2 regularization to constrain a neural network's connection weights (but typically not its biases).
# In[87]:
# For example, assuming you have just one hidden layer with weights W1 and one output layer with weights W2, then
# you can apply L1 regularization like this:
tf.reset_default_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    logits = tf.layers.dense(hidden1, n_outputs, name="outputs")
# In[89]:
# Grab the layers' weight (kernel) variables by name to build the penalty.
W1 = tf.get_default_graph().get_tensor_by_name("hidden1/kernel:0")
W2 = tf.get_default_graph().get_tensor_by_name("outputs/kernel:0")
scale = 0.001 # L1 regularization parameter
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    base_loss = tf.reduce_mean(xentropy, name="avg_xentropy")
    # L1 penalty: sum of absolute weights of both layers (biases are not penalized).
    reg_losses = tf.reduce_sum(tf.abs(W1)) + tf.reduce_sum(tf.abs(W2))
    loss = tf.add(base_loss, scale * reg_losses, name="loss")
# In[90]:
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
learning_rate = 0.01
with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 20
batch_size = 64
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")
# TensorFlow provides a better option when there are many layers. Many functions that create variables (such as **get_variable()** or **tf.layers.dense()**) accept a ***_regularizer** argument for each created variable. You can pass any function that takes weights as an argument and returns the corresponding regularization loss.
# In[101]:
tf.reset_default_graph()
n_inputs = 28 * 28  # MNIST
n_hidden1 = 300
n_hidden2 = 50
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
# This code creates a neural network with two hidden layers and one output layer, and it also creates nodes in the graph to compute the L1 regularization loss corresponding to each layer's weights. TensorFlow automatically adds these nodes to a special collection containing all the regularization losses.
# In[102]:
scale = 0.001
my_dense_layer = partial(tf.layers.dense, activation=tf.nn.relu,
                         kernel_regularizer=tf.contrib.layers.l1_regularizer(scale))
with tf.name_scope("dnn"):
    hidden1 = my_dense_layer(X, n_hidden1, name="hidden1")
    hidden2 = my_dense_layer(hidden1, n_hidden2, name="hidden2")
    # Output layer: logits must stay linear, so override the partial's default ReLU.
    logits = my_dense_layer(hidden2, n_outputs, activation=None, name="outputs")
# In[103]:
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    base_loss = tf.reduce_mean(xentropy, name="avg_xentropy")
    # The per-layer L1 penalties were auto-collected under REGULARIZATION_LOSSES.
    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    loss = tf.add_n([base_loss] + reg_losses, name="loss")
# In[104]:
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
learning_rate = 0.01
with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 20
batch_size = 64
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")
# # Dropout
# To implement dropout using TensorFlow, you can simply apply the **tf.layers.dropout()** function to the input layer and/or to the output of any hidden layer you want. During training, this function randomly drops some items (setting them to 0) and divides the remaining items by the keep probability. After training, this function does nothing at all. The following code applies dropout regularization to our three-layer neural network:
# In[108]:
# Three-layer network with dropout applied to the input layer and to the
# output of each hidden layer. `training` switches dropout on (True) during
# training and off (False, the default) at inference time.
tf.reset_default_graph()
n_inputs = 28 * 28  # MNIST
n_hidden1 = 300
n_hidden2 = 50
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
# In[109]:
training = tf.placeholder_with_default(False, shape=(), name="training")
dropout_rate = 0.5
X_drop = tf.layers.dropout(X, dropout_rate, training=training)
with tf.name_scope("dnn"):
    # BUG FIX: feed the dropped-out input (X_drop) to the first hidden layer;
    # the original fed the raw X, so the input-layer dropout had no effect.
    hidden1 = tf.layers.dense(X_drop, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden1_drop = tf.layers.dropout(hidden1, dropout_rate, training=training)
    hidden2 = tf.layers.dense(hidden1_drop, n_hidden2, activation=tf.nn.relu, name="hidden2")
    hidden2_drop = tf.layers.dropout(hidden2, dropout_rate, training=training)
    logits = tf.layers.dense(hidden2_drop, n_outputs, name="outputs")
# In[110]:
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("train"):
    # `learning_rate` is defined earlier in the notebook.
    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
    training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# In[111]:
# We need to set training to True only when training, and leave the default False value when testing
# Train with dropout active: feed training=True while training; evaluation
# relies on the placeholder's default of False, disabling dropout.
n_epochs = 20
batch_size = 64
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch, training: True})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")
# **Note1:** If you observe that the model is overfitting, you can increase the dropout rate. Conversely, you should try decreasing the dropout rate if the model underfits the training set. <br>
# **Note2:** Dropconnect is a variant of dropout where individual connections are dropped randomly rather than whole neurons. In general dropout performs better.
# # Max-Norm Regularization
# - Another regularization technique that is quite popular for neural networks is called max-norm regularization: for each neuron, it constrains the weights **w** of the incoming connections such that ||**w**||$_{2}$ $\leq$ r, where r is the max-norm hyperparameter and ||.||$_{2}$ is the l2 norm.<br>
# - Reducing r increases the amount of regularization and helps reduce overfitting. Max-norm regularization can also help alleviate the vanishing/exploding gradients problems (if you are not using Batch Normalization). <br>
#
#
# In[118]:
# Max-norm regularization: same three-layer network as before; the weight
# clipping ops that enforce the constraint are created separately below.
tf.reset_default_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 50
n_outputs = 10
learning_rate = 0.01
momentum = 0.9
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2")
    logits = tf.layers.dense(hidden2, n_outputs, name="outputs")
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("train"):
    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
    training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
# TensorFlow does not provide an off-the-shelf max-norm regularizer, but it is not too hard to implement. The following code gets a handle on the weights of the first and second hidden layer, then it uses the **clip_by_norm()** function to create an operation that will clip the weights along the second axis so that each row vector ends up with a maximum norm of 1.0. The last line creates an assignment operation that will assign the clipped weights to the weights variable:
# In[119]:
# Weight-clipping ops for max-norm regularization: clip each row vector of a
# layer's kernel so its L2 norm is at most `threshold` (axes=1 -> per-row),
# then assign the clipped values back to the variable.
threshold = 1.0
weights = tf.get_default_graph().get_tensor_by_name("hidden1/kernel:0")
clipped_weights = tf.clip_by_norm(weights, clip_norm=threshold, axes=1)
clip_weights = tf.assign(weights, clipped_weights)
# In[120]:
# BUG FIX: the original clipped and re-assigned `weights` (hidden1's kernel)
# here as well, so hidden2's kernel was never actually constrained.
weights2 = tf.get_default_graph().get_tensor_by_name("hidden2/kernel:0")
clipped_weights2 = tf.clip_by_norm(weights2, clip_norm=threshold, axes=1)
clip_weights2 = tf.assign(weights2, clipped_weights2)
# In[121]:
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# In[122]:
n_epochs = 20
batch_size = 64
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
            # Re-clip after every training step so the max-norm constraint
            # holds throughout training.
            clip_weights.eval()
            clip_weights2.eval()
        acc_valid = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", acc_valid)
    save_path = saver.save(sess, "./my_model_final.ckpt")
# When we want to do this for every hidden layer, we can create a **max_norm_regularizer()** function and use it just like the earlier **l1_regularizer()** function:
# In[129]:
# Rebuild the graph, this time applying max-norm through a reusable regularizer.
tf.reset_default_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 50
n_outputs = 10
learning_rate = 0.01
momentum = 0.9
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
# In[130]:
def max_norm_regularizer(threshold, axes=1, name="max_norm", collection="max_norm"):
    """Build a kernel regularizer enforcing a max-norm constraint.

    The returned callable clips the weight matrix so each vector along
    `axes` has an L2 norm of at most `threshold`, registers the resulting
    assignment op in `collection` (to be run after each training step),
    and contributes no loss term.
    """
    def max_norm(weights):
        clipped_kernel = tf.clip_by_norm(weights, clip_norm=threshold, axes=axes)
        assign_clipped = tf.assign(weights, clipped_kernel, name=name)
        tf.add_to_collection(collection, assign_clipped)
        # Max-norm adds nothing to the cost function, only the clip op above.
        return None
    return max_norm
# This function returns a parametrized **max_norm()** function that you can use like any other regularizer:
# In[131]:
# Same network as before, but the max-norm constraint is attached per layer
# via kernel_regularizer instead of hand-built clip ops.
max_norm_reg = max_norm_regularizer(threshold=1.0)
with tf.name_scope("dnn"):
    # BUG FIX: the first hidden layer was sized n_inputs (784) instead of
    # n_hidden1 (300), unlike every other version of this network above.
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, kernel_regularizer=max_norm_reg, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, kernel_regularizer=max_norm_reg, name="hidden2")
    logits = tf.layers.dense(hidden2, n_outputs, name="outputs")
# In[132]:
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("train"):
    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
    training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# In[134]:
n_epochs = 20
batch_size = 64
# In[135]:
# The regularizer registered one clip op per layer in the "max_norm"
# collection; run them all after each training step.
clip_all_weights = tf.get_collection("max_norm")
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
            sess.run(clip_all_weights)
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")
# **Exercise:** Build a DNN with five hidden layers of 100 neurons each, He initialization, and the ELU activation function.
# In[17]:
# Exercise: five hidden layers of 100 neurons each, He initialization, ELU.
tf.reset_default_graph()
n_inputs = 28 * 28
n_hidden1 = 100
n_hidden2 = 100
n_hidden3 = 100
n_hidden4 = 100
n_hidden5 = 100
n_outputs = 5  # digits 0-4 only; 5-9 are kept for the transfer-learning exercise
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=None, name="y")
he_init = tf.variance_scaling_initializer()
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.elu, kernel_initializer=he_init, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.elu, kernel_initializer=he_init, name="hidden2")
    hidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.elu, kernel_initializer=he_init, name="hidden3")
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.elu, kernel_initializer=he_init, name="hidden4")
    hidden5 = tf.layers.dense(hidden4, n_hidden5, activation=tf.nn.elu, kernel_initializer=he_init, name="hidden5")
    logits = tf.layers.dense(hidden5, n_outputs, kernel_initializer=he_init, name="outputs")
    Y_proba = tf.nn.softmax(logits, name="Y_proba")
# **Exercise:** Using Adam optimization and early stopping, try training it on MNIST but only on digits 0 to 4, as we will use transfer learning for digits 5 to 9 in the next exercise. You will need a softmax output layer with five neurons, and as always make sure to save checkpoints at regular intervals and save the final model so you can reuse it later.
# In[18]:
# Loss, Adam optimizer, and evaluation ops for the five-layer network. Ops are
# given explicit names so they can be retrieved after restoring the graph.
learning_rate = 0.01
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss, name="training_op")
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# Now let's create the training set, validation and test set (we need the validation set to implement early stopping):
# In[19]:
# Keep only digits 0-4 (the validation set is needed for early stopping).
X_train1 = X_train[y_train < 5]
y_train1 = y_train[y_train < 5]
X_valid1 = X_valid[y_valid < 5]
y_valid1 = y_valid[y_valid < 5]
X_test1 = X_test[y_test < 5]
y_test1 = y_test[y_test < 5]
# In[20]:
n_epochs = 1000
batch_size = 32
max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty
# Train on digits 0-4 with early stopping: checkpoint whenever validation loss
# improves, and stop after 20 evaluations without improvement.
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        rnd_idx = np.random.permutation(len(X_train1))
        for rnd_indices in np.array_split(rnd_idx, len(X_train1) // batch_size):
            X_batch, y_batch = X_train1[rnd_indices], y_train1[rnd_indices]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        loss_val, acc_val = sess.run([loss, accuracy], feed_dict={X: X_valid1, y: y_valid1})
        if loss_val < best_loss:
            save_path = saver.save(sess, "./my_mnist_model_0_to_4.ckpt")
            best_loss = loss_val
            checks_without_progress = 0
        else:
            checks_without_progress += 1
            if checks_without_progress > max_checks_without_progress:
                print("Early stopping!")
                break
        print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\t Accuracy: {:.2f}%".format(epoch, loss_val, best_loss, acc_val * 100))
# Restore the best checkpoint and measure test accuracy.
with tf.Session() as sess:
    saver.restore(sess, "./my_mnist_model_0_to_4.ckpt")
    acc_test = accuracy.eval(feed_dict={X: X_test1, y: y_test1})
    print("Final test accuracy: {:.2f}%".format(acc_test * 100))
# **Exercise:** Tune the hyperparameters using cross-validation and see what precision you can achieve.
#
# Let's create a DNNClassifier class, compatible with Scikit-Learn's RandomizedSearchCV class, to perform hyperparameter tuning. Here are the key points of this implementation:
# - the **\_\_init\_\_()** method (constructor) does nothing more than create instance variables for each of the hyperparameters.
# - the **fit()** method creates the graph, starts a session and trains the model:
# - it calls the _build_graph() method to build the graph (much like the graph we defined earlier). Once this method is done creating the graph, it saves all the important operations as instance variables for easy access by other methods.
# - the _dnn() method builds the hidden layers, just like the dnn() function above, but also with support for batch normalization and dropout (for the next exercises).
# - if the fit() method is given a validation set (X_valid and y_valid), then it implements early stopping. This implementation does not save the best model to disk, but rather to memory: it uses the _get_model_params() method to get all the graph's variables and their values, and the _restore_model_params() method to restore the variable values (of the best model found). This trick helps speed up training.
# - After the fit() method has finished training the model, it keeps the session open so that predictions can be made quickly, without having to save a model to disk and restore it for every prediction. You can close the session by calling the close_session() method.
# - the **predict_proba()** method uses the trained model to predict the class probabilities.
# - the **predict()** method calls predict_proba() and returns the class with the highest probability, for each instance.
# In[21]:
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.exceptions import NotFittedError
class DNNClassifier(BaseEstimator, ClassifierMixin):
    """Scikit-Learn-compatible DNN classifier built on TensorFlow.

    Supports optional batch normalization and dropout, and early stopping
    when a validation set is passed to fit(). The best parameters are kept
    in memory (not on disk) to speed up early stopping.
    """
    def __init__(self, n_hidden_layers=5, n_neurons=100, optimizer_class=tf.train.AdamOptimizer,
                 learning_rate=0.01, batch_size=32, activation=tf.nn.elu, initializer=he_init,
                 batch_norm_momentum=None, dropout_rate=None, random_state=None):
        """Initialize the DNNClassifier by simply storing all the hyperparameters."""
        self.n_hidden_layers = n_hidden_layers
        self.n_neurons = n_neurons
        self.optimizer_class = optimizer_class
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.activation = activation
        self.initializer = initializer
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.random_state = random_state
        self._session = None  # set by fit(); None means "not fitted yet"

    def _dnn(self, inputs):
        """Build the hidden layers, with support for batch normalization and dropout."""
        for layer in range(self.n_hidden_layers):
            if self.dropout_rate:
                inputs = tf.layers.dropout(inputs, self.dropout_rate, training=self._training)
            inputs = tf.layers.dense(inputs, self.n_neurons,
                                     kernel_initializer=self.initializer,
                                     name="hidden%d" % (layer + 1))
            if self.batch_norm_momentum:
                # BN is inserted before the activation; its update ops are run
                # from fit() via the UPDATE_OPS collection.
                inputs = tf.layers.batch_normalization(inputs, momentum=self.batch_norm_momentum,
                                                       training=self._training)
            inputs = self.activation(inputs, name="hidden%d_out" % (layer + 1))
        return inputs

    def _build_graph(self, n_inputs, n_outputs):
        """Build the same model as earlier"""
        if self.random_state is not None:
            tf.set_random_seed(self.random_state)
            np.random.seed(self.random_state)
        X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
        y = tf.placeholder(tf.int32, shape=(None), name="y")
        # The `training` placeholder is only needed when BN or dropout is used.
        if self.batch_norm_momentum or self.dropout_rate:
            self._training = tf.placeholder_with_default(False, shape=(), name='training')
        else:
            self._training = None
        dnn_outputs = self._dnn(X)
        logits = tf.layers.dense(dnn_outputs, n_outputs, kernel_initializer=he_init, name="logits")
        Y_proba = tf.nn.softmax(logits, name="Y_proba")
        xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                                  logits=logits)
        loss = tf.reduce_mean(xentropy, name="loss")
        optimizer = self.optimizer_class(learning_rate=self.learning_rate)
        training_op = optimizer.minimize(loss)
        correct = tf.nn.in_top_k(logits, y, 1)
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
        # Make the important operations available easily through instance variables
        self._X, self._y = X, y
        self._Y_proba, self._loss = Y_proba, loss
        self._training_op, self._accuracy = training_op, accuracy
        self._init, self._saver = init, saver

    def close_session(self):
        # Release the TF session (if any) kept open for fast predictions.
        if self._session:
            self._session.close()

    def _get_model_params(self):
        """Get all variable values (used for early stopping, faster than saving to disk)"""
        with self._graph.as_default():
            gvars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        return {gvar.op.name: value for gvar, value in zip(gvars, self._session.run(gvars))}

    def _restore_model_params(self, model_params):
        """Set all variables to the given values (for early stopping, faster than loading from disk)"""
        gvar_names = list(model_params.keys())
        # Each variable's auto-generated "/Assign" op is fed its saved value.
        assign_ops = {gvar_name: self._graph.get_operation_by_name(gvar_name + "/Assign")
                      for gvar_name in gvar_names}
        init_values = {gvar_name: assign_op.inputs[1] for gvar_name, assign_op in assign_ops.items()}
        feed_dict = {init_values[gvar_name]: model_params[gvar_name] for gvar_name in gvar_names}
        self._session.run(assign_ops, feed_dict=feed_dict)

    def fit(self, X, y, n_epochs=100, X_valid=None, y_valid=None):
        """Fit the model to the training set. If X_valid and y_valid are provided, use early stopping."""
        self.close_session()
        # infer n_inputs and n_outputs from the training set.
        n_inputs = X.shape[1]
        self.classes_ = np.unique(y)
        n_outputs = len(self.classes_)
        # Translate the labels vector to a vector of sorted class indices, containing
        # integers from 0 to n_outputs - 1.
        # For example, if y is equal to [8, 8, 9, 5, 7, 6, 6, 6], then the sorted class
        # labels (self.classes_) will be equal to [5, 6, 7, 8, 9], and the labels vector
        # will be translated to [3, 3, 4, 0, 2, 1, 1, 1]
        self.class_to_index_ = {label: index
                                for index, label in enumerate(self.classes_)}
        y = np.array([self.class_to_index_[label]
                      for label in y], dtype=np.int32)
        self._graph = tf.Graph()
        with self._graph.as_default():
            self._build_graph(n_inputs, n_outputs)
            # extra ops for batch normalization
            extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        # needed in case of early stopping
        max_checks_without_progress = 20
        checks_without_progress = 0
        best_loss = np.infty
        best_params = None
        # Now train the model!
        self._session = tf.Session(graph=self._graph)
        with self._session.as_default() as sess:
            self._init.run()
            for epoch in range(n_epochs):
                rnd_idx = np.random.permutation(len(X))
                for rnd_indices in np.array_split(rnd_idx, len(X) // self.batch_size):
                    X_batch, y_batch = X[rnd_indices], y[rnd_indices]
                    feed_dict = {self._X: X_batch, self._y: y_batch}
                    if self._training is not None:
                        feed_dict[self._training] = True
                    sess.run(self._training_op, feed_dict=feed_dict)
                    if extra_update_ops:
                        sess.run(extra_update_ops, feed_dict=feed_dict)
                if X_valid is not None and y_valid is not None:
                    loss_val, acc_val = sess.run([self._loss, self._accuracy],
                                                 feed_dict={self._X: X_valid,
                                                            self._y: y_valid})
                    if loss_val < best_loss:
                        best_params = self._get_model_params()
                        best_loss = loss_val
                        checks_without_progress = 0
                    else:
                        checks_without_progress += 1
                    print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(
                        epoch, loss_val, best_loss, acc_val * 100))
                    if checks_without_progress > max_checks_without_progress:
                        print("Early stopping!")
                        break
                else:
                    loss_train, acc_train = sess.run([self._loss, self._accuracy],
                                                     feed_dict={self._X: X_batch,
                                                                self._y: y_batch})
                    print("{}\tLast training batch loss: {:.6f}\tAccuracy: {:.2f}%".format(
                        epoch, loss_train, acc_train * 100))
            # If we used early stopping then rollback to the best model found
            if best_params:
                self._restore_model_params(best_params)
            return self

    def predict_proba(self, X):
        # Class-probability predictions; requires a prior successful fit().
        if not self._session:
            raise NotFittedError("This %s instance is not fitted yet" % self.__class__.__name__)
        with self._session.as_default() as sess:
            return self._Y_proba.eval(feed_dict={self._X: X})

    def predict(self, X):
        # Map the argmax class index back to the original label values.
        class_indices = np.argmax(self.predict_proba(X), axis=1)
        return np.array([[self.classes_[class_index]]
                         for class_index in class_indices], np.int32)

    def save(self, path):
        self._saver.save(self._session, path)
# In[22]:
# Train the sklearn-compatible classifier on digits 0-4 and score it.
tf.reset_default_graph()
dnn_clf = DNNClassifier(random_state=42)
dnn_clf.fit(X_train1, y_train1, n_epochs=1000, X_valid=X_valid1, y_valid=y_valid1)
# The model is trained, let's see if it gets the same accuracy as earlier:
# In[23]:
from sklearn.metrics import accuracy_score
y_pred = dnn_clf.predict(X_test1)
accuracy_score(y_test1, y_pred)
# Yep! Working fine. Now we can use Scikit-Learn's RandomizedSearchCV class to search for better hyperparameters (this may take over an hour, depending on your system):
# In[24]:
from sklearn.model_selection import RandomizedSearchCV
def leaky_relu(alpha=0.01):
    """Return a leaky-ReLU activation function with the given negative slope.

    The returned callable computes max(alpha * z, z), which equals z for
    z >= 0 and alpha * z otherwise (assuming 0 < alpha < 1).
    """
    def parametrized_leaky_relu(z, name=None):
        leaked = alpha * z
        return tf.maximum(leaked, z, name=name)
    return parametrized_leaky_relu
# Hyperparameter search space for RandomizedSearchCV.
param_distribs = {
    "n_neurons": [10, 30, 50, 70, 90, 100, 120, 140, 160],
    "batch_size": [16, 64, 128, 512],
    "learning_rate": [0.01, 0.02, 0.05, 0.1],
    "activation": [tf.nn.relu, tf.nn.elu, leaky_relu(alpha=0.01), leaky_relu(alpha=0.1)],
    # you could also try exploring different numbers of hidden layers, different optimizers, etc.
    #"n_hidden_layers": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    #"optimizer_class": [tf.train.AdamOptimizer, partial(tf.train.MomentumOptimizer, momentum=0.95)],
}
rnd_search = RandomizedSearchCV(DNNClassifier(random_state=42), param_distribs, n_iter=50,
                                cv=3, random_state=42, verbose=2)
rnd_search.fit(X_train1, y_train1, X_valid=X_valid1, y_valid=y_valid1, n_epochs=1000)
# In[25]:
rnd_search.best_params_
# In[26]:
# Score the best model found and save it for the transfer-learning exercise.
y_pred = rnd_search.predict(X_test1)
accuracy_score(y_test1, y_pred)
# In[27]:
# Let's save this model
rnd_search.best_estimator_.save("./my_best_mnist_model_0_to_4")
# **Exercise:** Now try adding Batch Normalization and compare the learning curves: is it converging faster than before? Does it produce a better model?
# In[28]:
# Compare the same configuration with and without batch normalization.
dnn_clf = DNNClassifier(activation=leaky_relu(alpha=0.1), batch_size=500, learning_rate=0.01,
                        n_neurons=140, random_state=42)
dnn_clf.fit(X_train1, y_train1, n_epochs=1000, X_valid=X_valid1, y_valid=y_valid1)
# In[29]:
y_pred = dnn_clf.predict(X_test1)
accuracy_score(y_test1, y_pred)
# In[30]:
dnn_clf_bn = DNNClassifier(activation=leaky_relu(alpha=0.1), batch_size=500, learning_rate=0.01,
                           n_neurons=90, random_state=42,
                           batch_norm_momentum=0.95)
dnn_clf_bn.fit(X_train1, y_train1, n_epochs=1000, X_valid=X_valid1, y_valid=y_valid1)
# In[31]:
y_pred = dnn_clf_bn.predict(X_test1)
accuracy_score(y_test1, y_pred)
# Wow awesome! Batch Normalization improved accuracy! To tweak hyperparameters with batch normalization, you can try RandomizedSearchCV as:
# In[34]:
from sklearn.model_selection import RandomizedSearchCV
# Same search space as before, extended with batch-norm momentum values.
param_distribs = {
    "n_neurons": [10, 30, 50, 70, 90, 100, 120, 140, 160],
    "batch_size": [16, 64, 128, 512],
    "learning_rate": [0.01, 0.02, 0.05, 0.1],
    "activation": [tf.nn.relu, tf.nn.elu, leaky_relu(alpha=0.01), leaky_relu(alpha=0.1)],
    # you could also try exploring different numbers of hidden layers, different optimizers, etc.
    #"n_hidden_layers": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    #"optimizer_class": [tf.train.AdamOptimizer, partial(tf.train.MomentumOptimizer, momentum=0.95)],
    "batch_norm_momentum": [0.9, 0.95, 0.98, 0.99, 0.999],
}
rnd_search_bn = RandomizedSearchCV(DNNClassifier(random_state=42), param_distribs, n_iter=50, cv=3,
                                   random_state=42, verbose=2)
rnd_search_bn.fit(X_train1, y_train1, X_valid=X_valid1, y_valid=y_valid1, n_epochs=1000)
# In[35]:
rnd_search_bn.best_params_
# In[36]:
y_pred = rnd_search_bn.predict(X_test1)
accuracy_score(y_test1, y_pred)
# In[40]:
# Let's save this model
rnd_search_bn.best_estimator_.save("./best_mnist_model_0_to_4")
# # Transfer Learning
# **Exercise:** create a new DNN that reuses all the pretrained hidden layers of the previous model, freezes them, and replaces the softmax output layer with a new one.<br>
# Let's load the model graph:
# In[44]:
# Restore the pretrained 0-4 graph and grab handles on its named tensors.
tf.reset_default_graph()
restore_saver = tf.train.import_meta_graph("./best_mnist_model_0_to_4.meta")
X = tf.get_default_graph().get_tensor_by_name("X:0")
y = tf.get_default_graph().get_tensor_by_name("y:0")
loss = tf.get_default_graph().get_tensor_by_name("loss:0")
Y_proba = tf.get_default_graph().get_tensor_by_name("Y_proba:0")
# The logits tensor is the input of the softmax op that produced Y_proba.
logits = Y_proba.op.inputs[0]
accuracy = tf.get_default_graph().get_tensor_by_name("accuracy:0")
# In[46]:
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
# In[47]:
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="logits")
# To freeze the lower layers, we will exclude their variables from the optimizer's list of trainable variables, keeping only the output layer's trainable variables:
# In[48]:
learning_rate = 0.01
output_layer_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="logits")
# A distinct optimizer name avoids colliding with the restored graph's "Adam".
optimizer = tf.train.AdamOptimizer(learning_rate, name="Adam2")
training_op = optimizer.minimize(loss, var_list=output_layer_vars)
# In[49]:
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
init = tf.global_variables_initializer()
five_frozen_saver = tf.train.Saver()
# **Exercise:** train this new DNN on digits 5 to 9, using only 100 images per digit, and time how long it takes. Despite this small number of examples, can you achieve high precision.<br>
# Let's create the training, validation and test sets. We need to subtract 5 from the labels because TensorFlow expects integers from 0 to n_classes-1.
# In[50]:
# Keep digits 5-9, shifting labels to 0-4 since TF expects classes 0..n-1.
X_train2_full = X_train[y_train >= 5]
y_train2_full = y_train[y_train >= 5] - 5
X_valid2_full = X_valid[y_valid >= 5]
y_valid2_full = y_valid[y_valid >= 5] - 5
X_test2 = X_test[y_test >= 5]
y_test2 = y_test[y_test >= 5] - 5
# In[51]:
def sample_n_instances_per_class(X, y, n=100):
    """Return up to `n` examples of each class found in `y`.

    Classes are taken in np.unique order (sorted), and within each class the
    first `n` rows of X / entries of y are kept, preserving their order.
    """
    labels = np.unique(y)
    X_parts = [X[y == label][:n] for label in labels]
    y_parts = [y[y == label][:n] for label in labels]
    return np.concatenate(X_parts), np.concatenate(y_parts)
# Tiny transfer-learning sets: 100 training / 30 validation images per digit.
X_train2, y_train2 = sample_n_instances_per_class(X_train2_full, y_train2_full, n=100)
X_valid2, y_valid2 = sample_n_instances_per_class(X_valid2_full, y_valid2_full, n=30)
# Now let's train the model:
# In[55]:
import time
n_epochs = 1000
batch_size = 16
max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty
# Train only the new output layer on digits 5-9 (all five hidden layers
# frozen), with early stopping, and time the run.
with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./best_mnist_model_0_to_4")
    t0 = time.time()
    for epoch in range(n_epochs):
        rnd_idx = np.random.permutation(len(X_train2))
        for rnd_indices in np.array_split(rnd_idx, len(X_train2) // batch_size):
            X_batch, y_batch = X_train2[rnd_indices], y_train2[rnd_indices]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        loss_val, acc_val = sess.run([loss, accuracy], feed_dict={X: X_valid2, y: y_valid2})
        if loss_val < best_loss:
            save_path = five_frozen_saver.save(sess, "./my_mnist_model_5_to_9_five_frozen")
            best_loss = loss_val
            checks_without_progress = 0
        else:
            checks_without_progress += 1
            if checks_without_progress > max_checks_without_progress:
                print("Early stopping!")
                break
        print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(epoch, loss_val, best_loss, acc_val * 100))
    t1 = time.time()
    print("Total training time: {:.1f}s".format(t1 - t0))
with tf.Session() as sess:
    five_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_five_frozen")
    acc_test = accuracy.eval(feed_dict={X: X_test2, y: y_test2})
    print("Final test accuracy: {:.2f}%".format(acc_test * 100))
# Well that's not a great accuracy, is it? Of course with such a tiny training set, and with only one layer to tweak, we should not expect miracles.
# **Exercise:** try caching the frozen layers, and train the model again: how much faster is it now?
# Let's start by getting a handle on the output of the last frozen layer:
# In[56]:
# Handle on the output of the last frozen layer, so it can be precomputed.
hidden5_out = tf.get_default_graph().get_tensor_by_name("hidden5_out:0")
# Now let's train the model using roughly the same code as earlier. The difference is that we compute the output of the top frozen layer at the beginning (both for the training set and the validation set), and we cache it. This makes training faster:
# In[61]:
import time
n_epochs = 1000
batch_size = 16
max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty
with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./best_mnist_model_0_to_4")
    t0 = time.time()
    # Precompute the frozen layers' output once; each epoch then feeds the
    # cached activations directly into hidden5_out instead of recomputing them.
    hidden5_train = hidden5_out.eval(feed_dict={X: X_train2, y: y_train2})
    hidden5_valid = hidden5_out.eval(feed_dict={X: X_valid2, y: y_valid2})
    for epoch in range(n_epochs):
        rnd_idx = np.random.permutation(len(X_train2))
        for rnd_indices in np.array_split(rnd_idx, len(X_train2) // batch_size):
            h5_batch, y_batch = hidden5_train[rnd_indices], y_train2[rnd_indices]
            sess.run(training_op, feed_dict={hidden5_out: h5_batch, y: y_batch})
        loss_val, acc_val = sess.run([loss, accuracy], feed_dict={hidden5_out: hidden5_valid, y: y_valid2})
        if loss_val < best_loss:
            save_path = five_frozen_saver.save(sess, "./my_mnist_model_5_to_9_five_frozen")
            best_loss = loss_val
            checks_without_progress = 0
        else:
            checks_without_progress += 1
            if checks_without_progress > max_checks_without_progress:
                print("Early stopping!")
                break
        print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(
            epoch, loss_val, best_loss, acc_val * 100))
    t1 = time.time()
    print("Total training time: {:.1f}s".format(t1 - t0))
with tf.Session() as sess:
    five_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_five_frozen")
    acc_test = accuracy.eval(feed_dict={X: X_test2, y: y_test2})
    print("Final test accuracy: {:.2f}%".format(acc_test * 100))
# **Exercise:** try again reusing just four hidden layers instead of five. Can you achieve a higher precision?<br>
# Let's load the best model again, but this time we will create a new softmax output layer on top of the 4th hidden layer:
#
# In[62]:
# Reuse only four hidden layers: restore the graph and attach a new softmax
# output layer on top of hidden4's activation.
tf.reset_default_graph()
n_outputs = 5
restore_saver = tf.train.import_meta_graph("./best_mnist_model_0_to_4.meta")
X = tf.get_default_graph().get_tensor_by_name("X:0")
y = tf.get_default_graph().get_tensor_by_name("y:0")
hidden4_out = tf.get_default_graph().get_tensor_by_name("hidden4_out:0")
logits = tf.layers.dense(hidden4_out, n_outputs, kernel_initializer=he_init, name="new_logits")
Y_proba = tf.nn.softmax(logits)
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
# And now let's create the training operation. We want to freeze all the layers except for the new output layer:
# In[63]:
learning_rate = 0.01
output_layer_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="new_logits")
optimizer = tf.train.AdamOptimizer(learning_rate, name="Adam2")
training_op = optimizer.minimize(loss, var_list=output_layer_vars)
init = tf.global_variables_initializer()
four_frozen_saver = tf.train.Saver()
# In[65]:
# Train the new output layer (four hidden layers frozen) with early stopping.
n_epochs = 1000
batch_size = 16
max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty
with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./best_mnist_model_0_to_4")
    for epoch in range(n_epochs):
        rnd_idx = np.random.permutation(len(X_train2))
        for rnd_indices in np.array_split(rnd_idx, len(X_train2) // batch_size):
            X_batch, y_batch = X_train2[rnd_indices], y_train2[rnd_indices]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        loss_val, acc_val = sess.run([loss, accuracy], feed_dict={X: X_valid2, y: y_valid2})
        if loss_val < best_loss:
            save_path = four_frozen_saver.save(sess, "./my_mnist_model_5_to_9_four_frozen")
            best_loss = loss_val
            checks_without_progress = 0
        else:
            checks_without_progress += 1
            if checks_without_progress > max_checks_without_progress:
                print("Early stopping!")
                break
        print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(
            epoch, loss_val, best_loss, acc_val * 100))
with tf.Session() as sess:
    four_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_four_frozen")
    acc_test = accuracy.eval(feed_dict={X: X_test2, y: y_test2})
    print("Final test accuracy: {:.2f}%".format(acc_test * 100))
# **Still not good!**
# **Exercise:** now unfreeze the top two hidden layers and continue training: can you get the model to perform even better?
# In[66]:
# Unfreeze hidden layers 3 and 4: train them together with the new output
# layer (the regex scope matches hidden3, hidden4 and new_logits).
learning_rate = 0.01
unfrozen_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="hidden[34]|new_logits")
optimizer = tf.train.AdamOptimizer(learning_rate, name="Adam3")
training_op = optimizer.minimize(loss, var_list=unfrozen_vars)
init = tf.global_variables_initializer()
two_frozen_saver = tf.train.Saver()
# In[67]:
# Continue training from the four-frozen checkpoint with only two layers frozen.
n_epochs = 1000
batch_size = 16
max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty
with tf.Session() as sess:
    init.run()
    four_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_four_frozen")
    for epoch in range(n_epochs):
        rnd_idx = np.random.permutation(len(X_train2))
        for rnd_indices in np.array_split(rnd_idx, len(X_train2) // batch_size):
            X_batch, y_batch = X_train2[rnd_indices], y_train2[rnd_indices]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        loss_val, acc_val = sess.run([loss, accuracy], feed_dict={X: X_valid2, y: y_valid2})
        if loss_val < best_loss:
            save_path = two_frozen_saver.save(sess, "./my_mnist_model_5_to_9_two_frozen")
            best_loss = loss_val
            checks_without_progress = 0
        else:
            checks_without_progress += 1
            if checks_without_progress > max_checks_without_progress:
                print("Early stopping!")
                break
        print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(
            epoch, loss_val, best_loss, acc_val * 100))
with tf.Session() as sess:
    two_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_two_frozen")
    acc_test = accuracy.eval(feed_dict={X: X_test2, y: y_test2})
    print("Final test accuracy: {:.2f}%".format(acc_test * 100))
# Let's check what accuracy we can get by unfreezing all layers:
# In[68]:
# Unfreeze everything: minimize over all trainable variables.
learning_rate = 0.01
optimizer = tf.train.AdamOptimizer(learning_rate, name="Adam4")
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
no_frozen_saver = tf.train.Saver()
# In[69]:
# Fine-tune with every layer unfrozen, using early stopping on the validation
# loss: stop once `max_checks_without_progress` consecutive epochs fail to
# improve on the best validation loss seen so far.
n_epochs = 1000
batch_size = 20
max_checks_without_progress = 20  # early-stopping patience (epochs)
checks_without_progress = 0
# NOTE: np.infty was removed in NumPy 2.0; np.inf is the canonical spelling
# and has been available in every NumPy release, so this is backward-compatible.
best_loss = np.inf
with tf.Session() as sess:
    init.run()
    # Warm-start from the checkpoint trained with two layers frozen.
    two_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_two_frozen")
    for epoch in range(n_epochs):
        # Shuffle the training set and iterate over mini-batches.
        rnd_idx = np.random.permutation(len(X_train2))
        for rnd_indices in np.array_split(rnd_idx, len(X_train2) // batch_size):
            X_batch, y_batch = X_train2[rnd_indices], y_train2[rnd_indices]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        # Evaluate once per epoch on the held-out validation set.
        loss_val, acc_val = sess.run([loss, accuracy], feed_dict={X: X_valid2, y: y_valid2})
        if loss_val < best_loss:
            # New best model: checkpoint it and reset the patience counter.
            save_path = no_frozen_saver.save(sess, "./my_mnist_model_5_to_9_no_frozen")
            best_loss = loss_val
            checks_without_progress = 0
        else:
            checks_without_progress += 1
            if checks_without_progress > max_checks_without_progress:
                print("Early stopping!")
                break
        print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(
            epoch, loss_val, best_loss, acc_val * 100))
# Restore the best fully-unfrozen checkpoint and measure test-set accuracy.
with tf.Session() as sess:
    no_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_no_frozen")
    test_feed = {X: X_test2, y: y_test2}
    acc_test = accuracy.eval(feed_dict=test_feed)
    print("Final test accuracy: {:.2f}%".format(acc_test * 100))
#
#
# Let's compare that to a DNN trained from scratch:
#
# In[70]:
# Baseline for comparison: train the same 4-hidden-layer architecture from
# scratch on the digits 5-9 training set, with no transferred weights.
# DNNClassifier is defined earlier in this file; random_state fixes the seed
# for reproducibility.
dnn_clf_5_to_9 = DNNClassifier(n_hidden_layers=4, random_state=42)
dnn_clf_5_to_9.fit(X_train2, y_train2, n_epochs=1000, X_valid=X_valid2, y_valid=y_valid2)
# In[71]:
# Score the from-scratch model on the test set for comparison with the
# transfer-learning results above.
y_pred = dnn_clf_5_to_9.predict(X_test2)
accuracy_score(y_test2, y_pred)
# Here, because there is so little training data, transfer learning underperforms; we need to make some tweaks to it to improve its accuracy.
| [
"numpy.random.seed",
"tensorflow.clip_by_value",
"tensorflow.get_collection",
"tensorflow.reset_default_graph",
"sklearn.metrics.accuracy_score",
"tensorflow.maximum",
"sklearn.exceptions.NotFittedError",
"tensorflow.assign",
"tensorflow.Variable",
"tensorflow.get_default_graph",
"tensorflow.lay... | [((317, 341), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (339, 341), True, 'import tensorflow as tf\n'), ((346, 406), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (360, 406), True, 'import tensorflow as tf\n'), ((531, 599), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'n_hidden1'], {'activation': 'tf.nn.relu', 'name': '"""hidden1"""'}), "(X, n_hidden1, activation=tf.nn.relu, name='hidden1')\n", (546, 599), True, 'import tensorflow as tf\n'), ((720, 744), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (742, 744), True, 'import tensorflow as tf\n'), ((750, 810), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (764, 810), True, 'import tensorflow as tf\n'), ((821, 869), 'tensorflow.contrib.layers.variance_scaling_initializer', 'tf.contrib.layers.variance_scaling_initializer', ([], {}), '()\n', (867, 869), True, 'import tensorflow as tf\n'), ((880, 981), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'n_hidden1'], {'activation': 'tf.nn.relu', 'kernel_initializer': 'he_init', 'name': '"""hidden1"""'}), "(X, n_hidden1, activation=tf.nn.relu, kernel_initializer=\n he_init, name='hidden1')\n", (895, 981), True, 'import tensorflow as tf\n'), ((1184, 1208), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1206, 1208), True, 'import tensorflow as tf\n'), ((1214, 1274), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (1228, 1274), True, 'import tensorflow as tf\n'), ((1285, 1352), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'n_hidden1'], {'activation': 'tf.nn.elu', 'name': '"""hidden1"""'}), 
"(X, n_hidden1, activation=tf.nn.elu, name='hidden1')\n", (1300, 1352), True, 'import tensorflow as tf\n'), ((1459, 1483), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1481, 1483), True, 'import tensorflow as tf\n'), ((1489, 1549), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (1503, 1549), True, 'import tensorflow as tf\n'), ((1637, 1705), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'n_hidden1'], {'activation': 'leaky_relu', 'name': '"""hidden1"""'}), "(X, n_hidden1, activation=leaky_relu, name='hidden1')\n", (1652, 1705), True, 'import tensorflow as tf\n'), ((2342, 2366), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2364, 2366), True, 'import tensorflow as tf\n'), ((2439, 2499), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (2453, 2499), True, 'import tensorflow as tf\n'), ((2512, 2573), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(False)'], {'shape': '()', 'name': '"""training"""'}), "(False, shape=(), name='training')\n", (2539, 2573), True, 'import tensorflow as tf\n'), ((2585, 2630), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'n_hidden1'], {'name': '"""hidden1"""'}), "(X, n_hidden1, name='hidden1')\n", (2600, 2630), True, 'import tensorflow as tf\n'), ((2637, 2708), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['hidden1'], {'training': 'training', 'momentum': '(0.9)'}), '(hidden1, training=training, momentum=0.9)\n', (2666, 2708), True, 'import tensorflow as tf\n'), ((2719, 2733), 'tensorflow.nn.elu', 'tf.nn.elu', (['bn1'], {}), '(bn1)\n', (2728, 2733), True, 'import tensorflow as tf\n'), ((2744, 2795), 'tensorflow.layers.dense', 'tf.layers.dense', (['bn1_act', 'n_hidden2'], {'name': 
'"""hidden2"""'}), "(bn1_act, n_hidden2, name='hidden2')\n", (2759, 2795), True, 'import tensorflow as tf\n'), ((2802, 2873), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['hidden2'], {'training': 'training', 'momentum': '(0.9)'}), '(hidden2, training=training, momentum=0.9)\n', (2831, 2873), True, 'import tensorflow as tf\n'), ((2884, 2898), 'tensorflow.nn.elu', 'tf.nn.elu', (['bn2'], {}), '(bn2)\n', (2893, 2898), True, 'import tensorflow as tf\n'), ((2918, 2969), 'tensorflow.layers.dense', 'tf.layers.dense', (['bn2_act', 'n_outputs'], {'name': '"""outputs"""'}), "(bn2_act, n_outputs, name='outputs')\n", (2933, 2969), True, 'import tensorflow as tf\n'), ((2979, 3064), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['logits_before_bn'], {'training': 'training', 'momentum': '(0.9)'}), '(logits_before_bn, training=training, momentum=0.9\n )\n', (3008, 3064), True, 'import tensorflow as tf\n'), ((3466, 3490), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3488, 3490), True, 'import tensorflow as tf\n'), ((3594, 3654), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (3608, 3654), True, 'import tensorflow as tf\n'), ((3667, 3728), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(False)'], {'shape': '()', 'name': '"""training"""'}), "(False, shape=(), name='training')\n", (3694, 3728), True, 'import tensorflow as tf\n'), ((3752, 3823), 'functools.partial', 'partial', (['tf.layers.batch_normalization'], {'training': 'training', 'momentum': '(0.9)'}), '(tf.layers.batch_normalization, training=training, momentum=0.9)\n', (3759, 3823), False, 'from functools import partial\n'), ((3835, 3880), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'n_hidden1'], {'name': '"""hidden1"""'}), "(X, n_hidden1, name='hidden1')\n", (3850, 3880), True, 'import 
tensorflow as tf\n'), ((3926, 3940), 'tensorflow.nn.elu', 'tf.nn.elu', (['bn1'], {}), '(bn1)\n', (3935, 3940), True, 'import tensorflow as tf\n'), ((3951, 4002), 'tensorflow.layers.dense', 'tf.layers.dense', (['bn1_act', 'n_hidden2'], {'name': '"""hidden2"""'}), "(bn1_act, n_hidden2, name='hidden2')\n", (3966, 4002), True, 'import tensorflow as tf\n'), ((4048, 4062), 'tensorflow.nn.elu', 'tf.nn.elu', (['bn2'], {}), '(bn2)\n', (4057, 4062), True, 'import tensorflow as tf\n'), ((4082, 4133), 'tensorflow.layers.dense', 'tf.layers.dense', (['bn2_act', 'n_outputs'], {'name': '"""outputs"""'}), "(bn2_act, n_outputs, name='outputs')\n", (4097, 4133), True, 'import tensorflow as tf\n'), ((4312, 4336), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4334, 4336), True, 'import tensorflow as tf\n'), ((4488, 4548), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (4502, 4548), True, 'import tensorflow as tf\n'), ((4553, 4599), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'None', 'name': '"""y"""'}), "(tf.int32, shape=None, name='y')\n", (4567, 4599), True, 'import tensorflow as tf\n'), ((4613, 4674), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(False)'], {'shape': '()', 'name': '"""training"""'}), "(False, shape=(), name='training')\n", (4640, 4674), True, 'import tensorflow as tf\n'), ((5759, 5792), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5790, 5792), True, 'import tensorflow as tf\n'), ((5801, 5817), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5815, 5817), True, 'import tensorflow as tf\n'), ((5934, 5969), 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', ([], {}), '()\n', (5967, 5969), True, 'import tensorflow as tf\n'), ((6571, 6613), 'tensorflow.get_collection', 
'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (6588, 6613), True, 'import tensorflow as tf\n'), ((7695, 7719), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (7717, 7719), True, 'import tensorflow as tf\n'), ((7838, 7898), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (7852, 7898), True, 'import tensorflow as tf\n'), ((7903, 7949), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'None', 'name': '"""y"""'}), "(tf.int32, shape=None, name='y')\n", (7917, 7949), True, 'import tensorflow as tf\n'), ((9296, 9329), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (9327, 9329), True, 'import tensorflow as tf\n'), ((9338, 9354), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (9352, 9354), True, 'import tensorflow as tf\n'), ((10224, 10248), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (10246, 10248), True, 'import tensorflow as tf\n'), ((10258, 10314), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['"""./my_model_final.ckpt.meta"""'], {}), "('./my_model_final.ckpt.meta')\n", (10284, 10314), True, 'import tensorflow as tf\n'), ((11581, 11618), 'tensorflow.get_collection', 'tf.get_collection', (['"""my_important_ops"""'], {}), "('my_important_ops')\n", (11598, 11618), True, 'import tensorflow as tf\n'), ((12789, 12813), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (12811, 12813), True, 'import tensorflow as tf\n'), ((12855, 12911), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['"""./my_model_final.ckpt.meta"""'], {}), "('./my_model_final.ckpt.meta')\n", (12881, 12911), True, 'import tensorflow as tf\n'), ((13109, 13187), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden3', 'n_hidden4'], 
{'activation': 'tf.nn.relu', 'name': '"""new_hidden4"""'}), "(hidden3, n_hidden4, activation=tf.nn.relu, name='new_hidden4')\n", (13124, 13187), True, 'import tensorflow as tf\n'), ((13201, 13260), 'tensorflow.layers.dense', 'tf.layers.dense', (['new_hidden4', 'n_outputs'], {'name': '"""new_outputs"""'}), "(new_hidden4, n_outputs, name='new_outputs')\n", (13216, 13260), True, 'import tensorflow as tf\n'), ((13745, 13778), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (13776, 13778), True, 'import tensorflow as tf\n'), ((13791, 13807), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (13805, 13807), True, 'import tensorflow as tf\n'), ((14476, 14500), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (14498, 14500), True, 'import tensorflow as tf\n'), ((14645, 14705), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (14659, 14705), True, 'import tensorflow as tf\n'), ((14710, 14756), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'None', 'name': '"""y"""'}), "(tf.int32, shape=None, name='y')\n", (14724, 14756), True, 'import tensorflow as tf\n'), ((15932, 16001), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""hidden[123]"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='hidden[123]')\n", (15949, 16001), True, 'import tensorflow as tf\n'), ((16018, 16044), 'tensorflow.train.Saver', 'tf.train.Saver', (['reuse_vars'], {}), '(reuse_vars)\n', (16032, 16044), True, 'import tensorflow as tf\n'), ((16077, 16110), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (16108, 16110), True, 'import tensorflow as tf\n'), ((16156, 16172), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (16170, 16172), True, 'import tensorflow as tf\n'), ((16928, 
16952), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (16950, 16952), True, 'import tensorflow as tf\n'), ((17055, 17115), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (17069, 17115), True, 'import tensorflow as tf\n'), ((17126, 17194), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'n_hidden1'], {'activation': 'tf.nn.relu', 'name': '"""hidden1"""'}), "(X, n_hidden1, activation=tf.nn.relu, name='hidden1')\n", (17141, 17194), True, 'import tensorflow as tf\n'), ((17306, 17328), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (17326, 17328), True, 'import tensorflow as tf\n'), ((17543, 17576), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (17574, 17576), True, 'import tensorflow as tf\n'), ((18634, 18658), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (18656, 18658), True, 'import tensorflow as tf\n'), ((18760, 18820), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (18774, 18820), True, 'import tensorflow as tf\n'), ((18825, 18871), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'None', 'name': '"""y"""'}), "(tf.int32, shape=None, name='y')\n", (18839, 18871), True, 'import tensorflow as tf\n'), ((19951, 20020), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""hidden[123]"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='hidden[123]')\n", (19968, 20020), True, 'import tensorflow as tf\n'), ((20089, 20115), 'tensorflow.train.Saver', 'tf.train.Saver', (['reuse_vars'], {}), '(reuse_vars)\n', (20103, 20115), True, 'import tensorflow as tf\n'), ((20148, 20181), 'tensorflow.global_variables_initializer', 
'tf.global_variables_initializer', ([], {}), '()\n', (20179, 20181), True, 'import tensorflow as tf\n'), ((20190, 20206), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (20204, 20206), True, 'import tensorflow as tf\n'), ((20799, 20823), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (20821, 20823), True, 'import tensorflow as tf\n'), ((20925, 20985), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (20939, 20985), True, 'import tensorflow as tf\n'), ((20990, 21036), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'None', 'name': '"""y"""'}), "(tf.int32, shape=None, name='y')\n", (21004, 21036), True, 'import tensorflow as tf\n'), ((22000, 22069), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""hidden[123]"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='hidden[123]')\n", (22017, 22069), True, 'import tensorflow as tf\n'), ((22087, 22113), 'tensorflow.train.Saver', 'tf.train.Saver', (['reuse_vars'], {}), '(reuse_vars)\n', (22101, 22113), True, 'import tensorflow as tf\n'), ((22146, 22179), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (22177, 22179), True, 'import tensorflow as tf\n'), ((22188, 22204), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (22202, 22204), True, 'import tensorflow as tf\n'), ((22988, 23012), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (23010, 23012), True, 'import tensorflow as tf\n'), ((23168, 23228), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (23182, 23228), True, 'import tensorflow as tf\n'), ((23233, 23279), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'None', 'name': 
'"""y"""'}), "(tf.int32, shape=None, name='y')\n", (23247, 23279), True, 'import tensorflow as tf\n'), ((24317, 24386), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""hidden[123]"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='hidden[123]')\n", (24334, 24386), True, 'import tensorflow as tf\n'), ((24424, 24450), 'tensorflow.train.Saver', 'tf.train.Saver', (['reuse_vars'], {}), '(reuse_vars)\n', (24438, 24450), True, 'import tensorflow as tf\n'), ((24483, 24516), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (24514, 24516), True, 'import tensorflow as tf\n'), ((24525, 24541), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (24539, 24541), True, 'import tensorflow as tf\n'), ((25864, 25888), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (25886, 25888), True, 'import tensorflow as tf\n'), ((25960, 26020), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (25974, 26020), True, 'import tensorflow as tf\n'), ((26025, 26071), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'None', 'name': '"""y"""'}), "(tf.int32, shape=None, name='y')\n", (26039, 26071), True, 'import tensorflow as tf\n'), ((27343, 27376), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (27374, 27376), True, 'import tensorflow as tf\n'), ((27385, 27401), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (27399, 27401), True, 'import tensorflow as tf\n'), ((28201, 28225), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (28223, 28225), True, 'import tensorflow as tf\n'), ((28284, 28344), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), 
name='X')\n", (28298, 28344), True, 'import tensorflow as tf\n'), ((28349, 28395), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'None', 'name': '"""y"""'}), "(tf.int32, shape=None, name='y')\n", (28363, 28395), True, 'import tensorflow as tf\n'), ((29408, 29441), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (29439, 29441), True, 'import tensorflow as tf\n'), ((29450, 29466), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (29464, 29466), True, 'import tensorflow as tf\n'), ((30259, 30283), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (30281, 30283), True, 'import tensorflow as tf\n'), ((30364, 30424), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (30378, 30424), True, 'import tensorflow as tf\n'), ((30429, 30475), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'None', 'name': '"""y"""'}), "(tf.int32, shape=None, name='y')\n", (30443, 30475), True, 'import tensorflow as tf\n'), ((31851, 31884), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (31882, 31884), True, 'import tensorflow as tf\n'), ((31893, 31909), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (31907, 31909), True, 'import tensorflow as tf\n'), ((32823, 32847), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (32845, 32847), True, 'import tensorflow as tf\n'), ((32928, 32988), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (32942, 32988), True, 'import tensorflow as tf\n'), ((32993, 33039), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'None', 'name': '"""y"""'}), "(tf.int32, shape=None, name='y')\n", (33007, 33039), 
True, 'import tensorflow as tf\n'), ((33068, 33129), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(False)'], {'shape': '()', 'name': '"""training"""'}), "(False, shape=(), name='training')\n", (33095, 33129), True, 'import tensorflow as tf\n'), ((33160, 33213), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['X', 'dropout_rate'], {'training': 'training'}), '(X, dropout_rate, training=training)\n', (33177, 33213), True, 'import tensorflow as tf\n'), ((34119, 34152), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (34150, 34152), True, 'import tensorflow as tf\n'), ((34161, 34177), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (34175, 34177), True, 'import tensorflow as tf\n'), ((35689, 35713), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (35711, 35713), True, 'import tensorflow as tf\n'), ((35822, 35882), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (35836, 35882), True, 'import tensorflow as tf\n'), ((35887, 35933), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'None', 'name': '"""y"""'}), "(tf.int32, shape=None, name='y')\n", (35901, 35933), True, 'import tensorflow as tf\n'), ((37242, 37295), 'tensorflow.clip_by_norm', 'tf.clip_by_norm', (['weights'], {'clip_norm': 'threshold', 'axes': '(1)'}), '(weights, clip_norm=threshold, axes=1)\n', (37257, 37295), True, 'import tensorflow as tf\n'), ((37311, 37346), 'tensorflow.assign', 'tf.assign', (['weights', 'clipped_weights'], {}), '(weights, clipped_weights)\n', (37320, 37346), True, 'import tensorflow as tf\n'), ((37454, 37507), 'tensorflow.clip_by_norm', 'tf.clip_by_norm', (['weights'], {'clip_norm': 'threshold', 'axes': '(1)'}), '(weights, clip_norm=threshold, axes=1)\n', (37469, 37507), True, 'import tensorflow as tf\n'), ((37524, 37559), 
'tensorflow.assign', 'tf.assign', (['weights', 'clipped_weights'], {}), '(weights, clipped_weights)\n', (37533, 37559), True, 'import tensorflow as tf\n'), ((37582, 37615), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (37613, 37615), True, 'import tensorflow as tf\n'), ((37624, 37640), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (37638, 37640), True, 'import tensorflow as tf\n'), ((38549, 38573), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (38571, 38573), True, 'import tensorflow as tf\n'), ((38682, 38742), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (38696, 38742), True, 'import tensorflow as tf\n'), ((38747, 38793), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'None', 'name': '"""y"""'}), "(tf.int32, shape=None, name='y')\n", (38761, 38793), True, 'import tensorflow as tf\n'), ((40167, 40200), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (40198, 40200), True, 'import tensorflow as tf\n'), ((40209, 40225), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (40223, 40225), True, 'import tensorflow as tf\n'), ((40305, 40334), 'tensorflow.get_collection', 'tf.get_collection', (['"""max_norm"""'], {}), "('max_norm')\n", (40322, 40334), True, 'import tensorflow as tf\n'), ((40933, 40957), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (40955, 40957), True, 'import tensorflow as tf\n'), ((41077, 41137), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (41091, 41137), True, 'import tensorflow as tf\n'), ((41142, 41188), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'None', 'name': '"""y"""'}), 
"(tf.int32, shape=None, name='y')\n", (41156, 41188), True, 'import tensorflow as tf\n'), ((41200, 41233), 'tensorflow.variance_scaling_initializer', 'tf.variance_scaling_initializer', ([], {}), '()\n', (41231, 41233), True, 'import tensorflow as tf\n'), ((41846, 41925), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden5', 'n_outputs'], {'kernel_initializer': 'he_init', 'name': '"""outputs"""'}), "(hidden5, n_outputs, kernel_initializer=he_init, name='outputs')\n", (41861, 41925), True, 'import tensorflow as tf\n'), ((41936, 41973), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'name': '"""Y_proba"""'}), "(logits, name='Y_proba')\n", (41949, 41973), True, 'import tensorflow as tf\n'), ((42380, 42451), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'logits'}), '(labels=y, logits=logits)\n', (42426, 42451), True, 'import tensorflow as tf\n'), ((42459, 42496), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['xentropy'], {'name': '"""loss"""'}), "(xentropy, name='loss')\n", (42473, 42496), True, 'import tensorflow as tf\n'), ((42510, 42547), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (42532, 42547), True, 'import tensorflow as tf\n'), ((42618, 42646), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'y', '(1)'], {}), '(logits, y, 1)\n', (42632, 42646), True, 'import tensorflow as tf\n'), ((42728, 42761), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (42759, 42761), True, 'import tensorflow as tf\n'), ((42770, 42786), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (42784, 42786), True, 'import tensorflow as tf\n'), ((54916, 54940), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (54938, 54940), True, 'import tensorflow as tf\n'), ((55234, 55265), 'sklearn.metrics.accuracy_score', 'accuracy_score', 
(['y_test1', 'y_pred'], {}), '(y_test1, y_pred)\n', (55248, 55265), False, 'from sklearn.metrics import accuracy_score\n'), ((56515, 56546), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test1', 'y_pred'], {}), '(y_test1, y_pred)\n', (56529, 56546), False, 'from sklearn.metrics import accuracy_score\n'), ((57098, 57129), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test1', 'y_pred'], {}), '(y_test1, y_pred)\n', (57112, 57129), False, 'from sklearn.metrics import accuracy_score\n'), ((57489, 57520), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test1', 'y_pred'], {}), '(y_test1, y_pred)\n', (57503, 57520), False, 'from sklearn.metrics import accuracy_score\n'), ((58653, 58684), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test1', 'y_pred'], {}), '(y_test1, y_pred)\n', (58667, 58684), False, 'from sklearn.metrics import accuracy_score\n'), ((59064, 59088), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (59086, 59088), True, 'import tensorflow as tf\n'), ((59106, 59166), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['"""./best_mnist_model_0_to_4.meta"""'], {}), "('./best_mnist_model_0_to_4.meta')\n", (59132, 59166), True, 'import tensorflow as tf\n'), ((59509, 59560), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {}), '(tf.GraphKeys.TRAINABLE_VARIABLES)\n', (59526, 59560), True, 'import tensorflow as tf\n'), ((59575, 59642), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""logits"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='logits')\n", (59592, 59642), True, 'import tensorflow as tf\n'), ((59865, 59932), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""logits"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='logits')\n", (59882, 59932), True, 'import tensorflow as tf\n'), ((59945, 59996), 
'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {'name': '"""Adam2"""'}), "(learning_rate, name='Adam2')\n", (59967, 59996), True, 'import tensorflow as tf\n'), ((60088, 60116), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'y', '(1)'], {}), '(logits, y, 1)\n', (60102, 60116), True, 'import tensorflow as tf\n'), ((60198, 60231), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (60229, 60231), True, 'import tensorflow as tf\n'), ((60252, 60268), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (60266, 60268), True, 'import tensorflow as tf\n'), ((65493, 65517), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (65515, 65517), True, 'import tensorflow as tf\n'), ((65550, 65610), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['"""./best_mnist_model_0_to_4.meta"""'], {}), "('./best_mnist_model_0_to_4.meta')\n", (65576, 65610), True, 'import tensorflow as tf\n'), ((65801, 65892), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden4_out', 'n_outputs'], {'kernel_initializer': 'he_init', 'name': '"""new_logits"""'}), "(hidden4_out, n_outputs, kernel_initializer=he_init, name=\n 'new_logits')\n", (65816, 65892), True, 'import tensorflow as tf\n'), ((65898, 65919), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (65911, 65919), True, 'import tensorflow as tf\n'), ((65931, 66002), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'logits'}), '(labels=y, logits=logits)\n', (65977, 66002), True, 'import tensorflow as tf\n'), ((66010, 66034), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['xentropy'], {}), '(xentropy)\n', (66024, 66034), True, 'import tensorflow as tf\n'), ((66045, 66073), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'y', '(1)'], {}), '(logits, y, 1)\n', (66059, 66073), True, 'import 
tensorflow as tf\n'), ((66317, 66388), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""new_logits"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='new_logits')\n", (66334, 66388), True, 'import tensorflow as tf\n'), ((66401, 66452), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {'name': '"""Adam2"""'}), "(learning_rate, name='Adam2')\n", (66423, 66452), True, 'import tensorflow as tf\n'), ((66528, 66561), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (66559, 66561), True, 'import tensorflow as tf\n'), ((66582, 66598), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (66596, 66598), True, 'import tensorflow as tf\n'), ((68215, 68302), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""hidden[34]|new_logits"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\n 'hidden[34]|new_logits')\n", (68232, 68302), True, 'import tensorflow as tf\n'), ((68310, 68361), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {'name': '"""Adam3"""'}), "(learning_rate, name='Adam3')\n", (68332, 68361), True, 'import tensorflow as tf\n'), ((68433, 68466), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (68464, 68466), True, 'import tensorflow as tf\n'), ((68486, 68502), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (68500, 68502), True, 'import tensorflow as tf\n'), ((70043, 70094), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {'name': '"""Adam4"""'}), "(learning_rate, name='Adam4')\n", (70065, 70094), True, 'import tensorflow as tf\n'), ((70142, 70175), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (70173, 70175), True, 'import tensorflow as tf\n'), ((70194, 70210), 'tensorflow.train.Saver', 'tf.train.Saver', ([], 
{}), '()\n', (70208, 70210), True, 'import tensorflow as tf\n'), ((71919, 71950), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test2', 'y_pred'], {}), '(y_test2, y_pred)\n', (71933, 71950), False, 'from sklearn.metrics import accuracy_score\n'), ((1591, 1625), 'tensorflow.maximum', 'tf.maximum', (['(0.01 * z)', 'z'], {'name': 'name'}), '(0.01 * z, z, name=name)\n', (1601, 1625), True, 'import tensorflow as tf\n'), ((4681, 4701), 'tensorflow.name_scope', 'tf.name_scope', (['"""dnn"""'], {}), "('dnn')\n", (4694, 4701), True, 'import tensorflow as tf\n'), ((4717, 4765), 'tensorflow.contrib.layers.variance_scaling_initializer', 'tf.contrib.layers.variance_scaling_initializer', ([], {}), '()\n', (4763, 4765), True, 'import tensorflow as tf\n'), ((4792, 4884), 'functools.partial', 'partial', (['tf.layers.batch_normalization'], {'training': 'training', 'momentum': 'batch_norm_momentum'}), '(tf.layers.batch_normalization, training=training, momentum=\n batch_norm_momentum)\n', (4799, 4884), False, 'from functools import partial\n'), ((4901, 4953), 'functools.partial', 'partial', (['tf.layers.dense'], {'kernel_initializer': 'he_init'}), '(tf.layers.dense, kernel_initializer=he_init)\n', (4908, 4953), False, 'from functools import partial\n'), ((5310, 5331), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (5323, 5331), True, 'import tensorflow as tf\n'), ((5348, 5419), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'logits'}), '(labels=y, logits=logits)\n', (5394, 5419), True, 'import tensorflow as tf\n'), ((5431, 5468), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['xentropy'], {'name': '"""loss"""'}), "(xentropy, name='loss')\n", (5445, 5468), True, 'import tensorflow as tf\n'), ((5479, 5501), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (5492, 5501), True, 'import tensorflow as tf\n'), ((5519, 5567), 
'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (5552, 5567), True, 'import tensorflow as tf\n'), ((5621, 5642), 'tensorflow.name_scope', 'tf.name_scope', (['"""eval"""'], {}), "('eval')\n", (5634, 5642), True, 'import tensorflow as tf\n'), ((5658, 5686), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'y', '(1)'], {}), '(logits, y, 1)\n', (5672, 5686), True, 'import tensorflow as tf\n'), ((6417, 6451), 'numpy.array_split', 'np.array_split', (['rnd_idx', 'n_batches'], {}), '(rnd_idx, n_batches)\n', (6431, 6451), True, 'import numpy as np\n'), ((6620, 6632), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (6630, 6632), True, 'import tensorflow as tf\n'), ((7957, 7977), 'tensorflow.name_scope', 'tf.name_scope', (['"""dnn"""'], {}), "('dnn')\n", (7970, 7977), True, 'import tensorflow as tf\n'), ((7993, 8061), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'n_hidden1'], {'activation': 'tf.nn.relu', 'name': '"""hidden1"""'}), "(X, n_hidden1, activation=tf.nn.relu, name='hidden1')\n", (8008, 8061), True, 'import tensorflow as tf\n'), ((8076, 8150), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden1', 'n_hidden2'], {'activation': 'tf.nn.relu', 'name': '"""hidden2"""'}), "(hidden1, n_hidden2, activation=tf.nn.relu, name='hidden2')\n", (8091, 8150), True, 'import tensorflow as tf\n'), ((8165, 8239), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden2', 'n_hidden3'], {'activation': 'tf.nn.relu', 'name': '"""hidden3"""'}), "(hidden2, n_hidden3, activation=tf.nn.relu, name='hidden3')\n", (8180, 8239), True, 'import tensorflow as tf\n'), ((8254, 8328), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden3', 'n_hidden4'], {'activation': 'tf.nn.relu', 'name': '"""hidden4"""'}), "(hidden3, n_hidden4, activation=tf.nn.relu, name='hidden4')\n", (8269, 8328), True, 'import tensorflow as tf\n'), ((8343, 8417), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden4', 
'n_hidden5'], {'activation': 'tf.nn.relu', 'name': '"""hidden5"""'}), "(hidden4, n_hidden5, activation=tf.nn.relu, name='hidden5')\n", (8358, 8417), True, 'import tensorflow as tf\n'), ((8431, 8482), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden5', 'n_outputs'], {'name': '"""outputs"""'}), "(hidden5, n_outputs, name='outputs')\n", (8446, 8482), True, 'import tensorflow as tf\n'), ((8493, 8514), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (8506, 8514), True, 'import tensorflow as tf\n'), ((8531, 8602), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'logits'}), '(labels=y, logits=logits)\n', (8577, 8602), True, 'import tensorflow as tf\n'), ((8614, 8651), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['xentropy'], {'name': '"""loss"""'}), "(xentropy, name='loss')\n", (8628, 8651), True, 'import tensorflow as tf\n'), ((8709, 8731), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (8722, 8731), True, 'import tensorflow as tf\n'), ((8749, 8811), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (8782, 8811), True, 'import tensorflow as tf\n'), ((9132, 9153), 'tensorflow.name_scope', 'tf.name_scope', (['"""eval"""'], {}), "('eval')\n", (9145, 9153), True, 'import tensorflow as tf\n'), ((9169, 9197), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'y', '(1)'], {}), '(logits, y, 1)\n', (9183, 9197), True, 'import tensorflow as tf\n'), ((9418, 9430), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (9428, 9430), True, 'import tensorflow as tf\n'), ((11417, 11461), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""my_important_ops"""', 'op'], {}), "('my_important_ops', op)\n", (11437, 11461), True, 'import tensorflow as tf\n'), ((11740, 11752), 'tensorflow.Session', 'tf.Session', 
([], {}), '()\n', (11750, 11752), True, 'import tensorflow as tf\n'), ((13267, 13292), 'tensorflow.name_scope', 'tf.name_scope', (['"""new_loss"""'], {}), "('new_loss')\n", (13280, 13292), True, 'import tensorflow as tf\n'), ((13309, 13384), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'new_logits'}), '(labels=y, logits=new_logits)\n', (13355, 13384), True, 'import tensorflow as tf\n'), ((13396, 13433), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['xentropy'], {'name': '"""loss"""'}), "(xentropy, name='loss')\n", (13410, 13433), True, 'import tensorflow as tf\n'), ((13444, 13469), 'tensorflow.name_scope', 'tf.name_scope', (['"""new_eval"""'], {}), "('new_eval')\n", (13457, 13469), True, 'import tensorflow as tf\n'), ((13485, 13517), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['new_logits', 'y', '(1)'], {}), '(new_logits, y, 1)\n', (13499, 13517), True, 'import tensorflow as tf\n'), ((13601, 13627), 'tensorflow.name_scope', 'tf.name_scope', (['"""new_train"""'], {}), "('new_train')\n", (13614, 13627), True, 'import tensorflow as tf\n'), ((13645, 13693), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (13678, 13693), True, 'import tensorflow as tf\n'), ((13858, 13870), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (13868, 13870), True, 'import tensorflow as tf\n'), ((14765, 14785), 'tensorflow.name_scope', 'tf.name_scope', (['"""dnn"""'], {}), "('dnn')\n", (14778, 14785), True, 'import tensorflow as tf\n'), ((14801, 14869), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'n_hidden1'], {'activation': 'tf.nn.relu', 'name': '"""hidden1"""'}), "(X, n_hidden1, activation=tf.nn.relu, name='hidden1')\n", (14816, 14869), True, 'import tensorflow as tf\n'), ((14884, 14958), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden1', 'n_hidden2'], {'activation': 'tf.nn.relu', 'name': 
'"""hidden2"""'}), "(hidden1, n_hidden2, activation=tf.nn.relu, name='hidden2')\n", (14899, 14958), True, 'import tensorflow as tf\n'), ((14973, 15047), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden2', 'n_hidden3'], {'activation': 'tf.nn.relu', 'name': '"""hidden3"""'}), "(hidden2, n_hidden3, activation=tf.nn.relu, name='hidden3')\n", (14988, 15047), True, 'import tensorflow as tf\n'), ((15062, 15136), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden3', 'n_hidden4'], {'activation': 'tf.nn.relu', 'name': '"""hidden4"""'}), "(hidden3, n_hidden4, activation=tf.nn.relu, name='hidden4')\n", (15077, 15136), True, 'import tensorflow as tf\n'), ((15150, 15201), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden4', 'n_outputs'], {'name': '"""outputs"""'}), "(hidden4, n_outputs, name='outputs')\n", (15165, 15201), True, 'import tensorflow as tf\n'), ((15212, 15233), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (15225, 15233), True, 'import tensorflow as tf\n'), ((15250, 15321), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'logits'}), '(labels=y, logits=logits)\n', (15296, 15321), True, 'import tensorflow as tf\n'), ((15333, 15370), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['xentropy'], {'name': '"""loss"""'}), "(xentropy, name='loss')\n", (15347, 15370), True, 'import tensorflow as tf\n'), ((15381, 15402), 'tensorflow.name_scope', 'tf.name_scope', (['"""eval"""'], {}), "('eval')\n", (15394, 15402), True, 'import tensorflow as tf\n'), ((15418, 15446), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'y', '(1)'], {}), '(logits, y, 1)\n', (15432, 15446), True, 'import tensorflow as tf\n'), ((15534, 15556), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (15547, 15556), True, 'import tensorflow as tf\n'), ((15574, 15622), 'tensorflow.train.GradientDescentOptimizer', 
'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (15607, 15622), True, 'import tensorflow as tf\n'), ((16179, 16191), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (16189, 16191), True, 'import tensorflow as tf\n'), ((17583, 17595), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (17593, 17595), True, 'import tensorflow as tf\n'), ((18878, 18898), 'tensorflow.name_scope', 'tf.name_scope', (['"""dnn"""'], {}), "('dnn')\n", (18891, 18898), True, 'import tensorflow as tf\n'), ((18914, 18982), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'n_hidden1'], {'activation': 'tf.nn.relu', 'name': '"""hidden1"""'}), "(X, n_hidden1, activation=tf.nn.relu, name='hidden1')\n", (18929, 18982), True, 'import tensorflow as tf\n'), ((18997, 19071), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden1', 'n_hidden2'], {'activation': 'tf.nn.relu', 'name': '"""hidden2"""'}), "(hidden1, n_hidden2, activation=tf.nn.relu, name='hidden2')\n", (19012, 19071), True, 'import tensorflow as tf\n'), ((19086, 19160), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden2', 'n_hidden3'], {'activation': 'tf.nn.relu', 'name': '"""hidden3"""'}), "(hidden2, n_hidden3, activation=tf.nn.relu, name='hidden3')\n", (19101, 19160), True, 'import tensorflow as tf\n'), ((19175, 19249), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden3', 'n_hidden4'], {'activation': 'tf.nn.relu', 'name': '"""hidden4"""'}), "(hidden3, n_hidden4, activation=tf.nn.relu, name='hidden4')\n", (19190, 19249), True, 'import tensorflow as tf\n'), ((19263, 19314), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden4', 'n_outputs'], {'name': '"""outputs"""'}), "(hidden4, n_outputs, name='outputs')\n", (19278, 19314), True, 'import tensorflow as tf\n'), ((19325, 19346), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (19338, 19346), True, 'import tensorflow as tf\n'), ((19363, 19434), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 
'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'logits'}), '(labels=y, logits=logits)\n', (19409, 19434), True, 'import tensorflow as tf\n'), ((19446, 19483), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['xentropy'], {'name': '"""loss"""'}), "(xentropy, name='loss')\n", (19460, 19483), True, 'import tensorflow as tf\n'), ((19490, 19511), 'tensorflow.name_scope', 'tf.name_scope', (['"""eval"""'], {}), "('eval')\n", (19503, 19511), True, 'import tensorflow as tf\n'), ((19527, 19555), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'y', '(1)'], {}), '(logits, y, 1)\n', (19541, 19555), True, 'import tensorflow as tf\n'), ((19639, 19661), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (19652, 19661), True, 'import tensorflow as tf\n'), ((19719, 19767), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (19752, 19767), True, 'import tensorflow as tf\n'), ((19789, 19868), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""hidden[34]|outputs"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='hidden[34]|outputs')\n", (19806, 19868), True, 'import tensorflow as tf\n'), ((20213, 20225), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (20223, 20225), True, 'import tensorflow as tf\n'), ((21043, 21063), 'tensorflow.name_scope', 'tf.name_scope', (['"""dnn"""'], {}), "('dnn')\n", (21056, 21063), True, 'import tensorflow as tf\n'), ((21079, 21147), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'n_hidden1'], {'activation': 'tf.nn.relu', 'name': '"""hidden1"""'}), "(X, n_hidden1, activation=tf.nn.relu, name='hidden1')\n", (21094, 21147), True, 'import tensorflow as tf\n'), ((21162, 21236), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden1', 'n_hidden2'], {'activation': 'tf.nn.relu', 'name': '"""hidden2"""'}), "(hidden1, n_hidden2, activation=tf.nn.relu, 
name='hidden2')\n", (21177, 21236), True, 'import tensorflow as tf\n'), ((21256, 21281), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['hidden2'], {}), '(hidden2)\n', (21272, 21281), True, 'import tensorflow as tf\n'), ((21296, 21375), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden2_stop', 'n_hidden3'], {'activation': 'tf.nn.relu', 'name': '"""hidden3"""'}), "(hidden2_stop, n_hidden3, activation=tf.nn.relu, name='hidden3')\n", (21311, 21375), True, 'import tensorflow as tf\n'), ((21390, 21464), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden3', 'n_hidden4'], {'activation': 'tf.nn.relu', 'name': '"""hidden4"""'}), "(hidden3, n_hidden4, activation=tf.nn.relu, name='hidden4')\n", (21405, 21464), True, 'import tensorflow as tf\n'), ((21478, 21529), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden4', 'n_outputs'], {'name': '"""outputs"""'}), "(hidden4, n_outputs, name='outputs')\n", (21493, 21529), True, 'import tensorflow as tf\n'), ((21540, 21561), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (21553, 21561), True, 'import tensorflow as tf\n'), ((21578, 21649), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'logits'}), '(labels=y, logits=logits)\n', (21624, 21649), True, 'import tensorflow as tf\n'), ((21661, 21698), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['xentropy'], {'name': '"""loss"""'}), "(xentropy, name='loss')\n", (21675, 21698), True, 'import tensorflow as tf\n'), ((21705, 21726), 'tensorflow.name_scope', 'tf.name_scope', (['"""eval"""'], {}), "('eval')\n", (21718, 21726), True, 'import tensorflow as tf\n'), ((21742, 21770), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'y', '(1)'], {}), '(logits, y, 1)\n', (21756, 21770), True, 'import tensorflow as tf\n'), ((21854, 21876), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (21867, 21876), True, 'import tensorflow 
as tf\n'), ((21894, 21942), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (21927, 21942), True, 'import tensorflow as tf\n'), ((22211, 22223), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (22221, 22223), True, 'import tensorflow as tf\n'), ((23288, 23308), 'tensorflow.name_scope', 'tf.name_scope', (['"""dnn"""'], {}), "('dnn')\n", (23301, 23308), True, 'import tensorflow as tf\n'), ((23324, 23392), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'n_hidden1'], {'activation': 'tf.nn.relu', 'name': '"""hidden1"""'}), "(X, n_hidden1, activation=tf.nn.relu, name='hidden1')\n", (23339, 23392), True, 'import tensorflow as tf\n'), ((23423, 23497), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden1', 'n_hidden2'], {'activation': 'tf.nn.relu', 'name': '"""hidden2"""'}), "(hidden1, n_hidden2, activation=tf.nn.relu, name='hidden2')\n", (23438, 23497), True, 'import tensorflow as tf\n'), ((23542, 23567), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['hidden2'], {}), '(hidden2)\n', (23558, 23567), True, 'import tensorflow as tf\n'), ((23582, 23661), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden2_stop', 'n_hidden3'], {'activation': 'tf.nn.relu', 'name': '"""hidden3"""'}), "(hidden2_stop, n_hidden3, activation=tf.nn.relu, name='hidden3')\n", (23597, 23661), True, 'import tensorflow as tf\n'), ((23697, 23771), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden3', 'n_hidden4'], {'activation': 'tf.nn.relu', 'name': '"""hidden4"""'}), "(hidden3, n_hidden4, activation=tf.nn.relu, name='hidden4')\n", (23712, 23771), True, 'import tensorflow as tf\n'), ((23792, 23843), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden4', 'n_outputs'], {'name': '"""outputs"""'}), "(hidden4, n_outputs, name='outputs')\n", (23807, 23843), True, 'import tensorflow as tf\n'), ((23857, 23878), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (23870, 23878), 
True, 'import tensorflow as tf\n'), ((23895, 23966), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'logits'}), '(labels=y, logits=logits)\n', (23941, 23966), True, 'import tensorflow as tf\n'), ((23978, 24015), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['xentropy'], {'name': '"""loss"""'}), "(xentropy, name='loss')\n", (23992, 24015), True, 'import tensorflow as tf\n'), ((24022, 24043), 'tensorflow.name_scope', 'tf.name_scope', (['"""eval"""'], {}), "('eval')\n", (24035, 24043), True, 'import tensorflow as tf\n'), ((24059, 24087), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'y', '(1)'], {}), '(logits, y, 1)\n', (24073, 24087), True, 'import tensorflow as tf\n'), ((24171, 24193), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (24184, 24193), True, 'import tensorflow as tf\n'), ((24211, 24259), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (24244, 24259), True, 'import tensorflow as tf\n'), ((24588, 24600), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (24598, 24600), True, 'import tensorflow as tf\n'), ((26078, 26098), 'tensorflow.name_scope', 'tf.name_scope', (['"""dnn"""'], {}), "('dnn')\n", (26091, 26098), True, 'import tensorflow as tf\n'), ((26114, 26182), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'n_hidden1'], {'activation': 'tf.nn.relu', 'name': '"""hidden1"""'}), "(X, n_hidden1, activation=tf.nn.relu, name='hidden1')\n", (26129, 26182), True, 'import tensorflow as tf\n'), ((26197, 26271), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden1', 'n_hidden2'], {'activation': 'tf.nn.relu', 'name': '"""hidden2"""'}), "(hidden1, n_hidden2, activation=tf.nn.relu, name='hidden2')\n", (26212, 26271), True, 'import tensorflow as tf\n'), ((26285, 26336), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden2', 'n_outputs'], 
{'name': '"""outputs"""'}), "(hidden2, n_outputs, name='outputs')\n", (26300, 26336), True, 'import tensorflow as tf\n'), ((26347, 26368), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (26360, 26368), True, 'import tensorflow as tf\n'), ((26385, 26456), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'logits'}), '(labels=y, logits=logits)\n', (26431, 26456), True, 'import tensorflow as tf\n'), ((26468, 26505), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['xentropy'], {'name': '"""loss"""'}), "(xentropy, name='loss')\n", (26482, 26505), True, 'import tensorflow as tf\n'), ((26516, 26537), 'tensorflow.name_scope', 'tf.name_scope', (['"""eval"""'], {}), "('eval')\n", (26529, 26537), True, 'import tensorflow as tf\n'), ((26553, 26581), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'y', '(1)'], {}), '(logits, y, 1)\n', (26567, 26581), True, 'import tensorflow as tf\n'), ((26669, 26691), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (26682, 26691), True, 'import tensorflow as tf\n'), ((26887, 26938), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)', 'name': '"""global_step"""'}), "(0, trainable=False, name='global_step')\n", (26898, 26938), True, 'import tensorflow as tf\n'), ((26959, 27050), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['initial_learning_rate', 'global_step', 'decay_steps', 'decay_rate'], {}), '(initial_learning_rate, global_step, decay_steps,\n decay_rate)\n', (26985, 27050), True, 'import tensorflow as tf\n'), ((27063, 27100), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (27085, 27100), True, 'import tensorflow as tf\n'), ((27452, 27464), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (27462, 27464), True, 'import tensorflow as tf\n'), ((28404, 28424), 
'tensorflow.name_scope', 'tf.name_scope', (['"""dnn"""'], {}), "('dnn')\n", (28417, 28424), True, 'import tensorflow as tf\n'), ((28440, 28508), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'n_hidden1'], {'activation': 'tf.nn.relu', 'name': '"""hidden1"""'}), "(X, n_hidden1, activation=tf.nn.relu, name='hidden1')\n", (28455, 28508), True, 'import tensorflow as tf\n'), ((28522, 28573), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden1', 'n_outputs'], {'name': '"""outputs"""'}), "(hidden1, n_outputs, name='outputs')\n", (28537, 28573), True, 'import tensorflow as tf\n'), ((28773, 28794), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (28786, 28794), True, 'import tensorflow as tf\n'), ((28811, 28882), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'logits'}), '(labels=y, logits=logits)\n', (28857, 28882), True, 'import tensorflow as tf\n'), ((28899, 28944), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['xentropy'], {'name': '"""avg_xentropy"""'}), "(xentropy, name='avg_xentropy')\n", (28913, 28944), True, 'import tensorflow as tf\n'), ((29027, 29077), 'tensorflow.add', 'tf.add', (['base_loss', '(scale * reg_losses)'], {'name': '"""loss"""'}), "(base_loss, scale * reg_losses, name='loss')\n", (29033, 29077), True, 'import tensorflow as tf\n'), ((29097, 29118), 'tensorflow.name_scope', 'tf.name_scope', (['"""eval"""'], {}), "('eval')\n", (29110, 29118), True, 'import tensorflow as tf\n'), ((29134, 29162), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'y', '(1)'], {}), '(logits, y, 1)\n', (29148, 29162), True, 'import tensorflow as tf\n'), ((29268, 29290), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (29281, 29290), True, 'import tensorflow as tf\n'), ((29308, 29356), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), 
'(learning_rate)\n', (29341, 29356), True, 'import tensorflow as tf\n'), ((29504, 29516), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (29514, 29516), True, 'import tensorflow as tf\n'), ((30971, 30991), 'tensorflow.name_scope', 'tf.name_scope', (['"""dnn"""'], {}), "('dnn')\n", (30984, 30991), True, 'import tensorflow as tf\n'), ((31218, 31239), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (31231, 31239), True, 'import tensorflow as tf\n'), ((31256, 31327), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'logits'}), '(labels=y, logits=logits)\n', (31302, 31327), True, 'import tensorflow as tf\n'), ((31344, 31389), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['xentropy'], {'name': '"""avg_xentropy"""'}), "(xentropy, name='avg_xentropy')\n", (31358, 31389), True, 'import tensorflow as tf\n'), ((31407, 31460), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.REGULARIZATION_LOSSES'], {}), '(tf.GraphKeys.REGULARIZATION_LOSSES)\n', (31424, 31460), True, 'import tensorflow as tf\n'), ((31472, 31519), 'tensorflow.add_n', 'tf.add_n', (['([base_loss] + reg_losses)'], {'name': '"""loss"""'}), "([base_loss] + reg_losses, name='loss')\n", (31480, 31519), True, 'import tensorflow as tf\n'), ((31540, 31561), 'tensorflow.name_scope', 'tf.name_scope', (['"""eval"""'], {}), "('eval')\n", (31553, 31561), True, 'import tensorflow as tf\n'), ((31577, 31605), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'y', '(1)'], {}), '(logits, y, 1)\n', (31591, 31605), True, 'import tensorflow as tf\n'), ((31711, 31733), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (31724, 31733), True, 'import tensorflow as tf\n'), ((31751, 31799), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (31784, 31799), True, 'import tensorflow as 
tf\n'), ((31947, 31959), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (31957, 31959), True, 'import tensorflow as tf\n'), ((33220, 33240), 'tensorflow.name_scope', 'tf.name_scope', (['"""dnn"""'], {}), "('dnn')\n", (33233, 33240), True, 'import tensorflow as tf\n'), ((33256, 33324), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'n_hidden1'], {'activation': 'tf.nn.relu', 'name': '"""hidden1"""'}), "(X, n_hidden1, activation=tf.nn.relu, name='hidden1')\n", (33271, 33324), True, 'import tensorflow as tf\n'), ((33344, 33403), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['hidden1', 'dropout_rate'], {'training': 'training'}), '(hidden1, dropout_rate, training=training)\n', (33361, 33403), True, 'import tensorflow as tf\n'), ((33418, 33497), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden1_drop', 'n_hidden2'], {'activation': 'tf.nn.relu', 'name': '"""hidden2"""'}), "(hidden1_drop, n_hidden2, activation=tf.nn.relu, name='hidden2')\n", (33433, 33497), True, 'import tensorflow as tf\n'), ((33517, 33576), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['hidden2', 'dropout_rate'], {'training': 'training'}), '(hidden2, dropout_rate, training=training)\n', (33534, 33576), True, 'import tensorflow as tf\n'), ((33590, 33646), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden2_drop', 'n_outputs'], {'name': '"""outputs"""'}), "(hidden2_drop, n_outputs, name='outputs')\n", (33605, 33646), True, 'import tensorflow as tf\n'), ((33667, 33688), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (33680, 33688), True, 'import tensorflow as tf\n'), ((33705, 33776), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'logits'}), '(labels=y, logits=logits)\n', (33751, 33776), True, 'import tensorflow as tf\n'), ((33788, 33825), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['xentropy'], {'name': '"""loss"""'}), "(xentropy, name='loss')\n", 
(33802, 33825), True, 'import tensorflow as tf\n'), ((33832, 33854), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (33845, 33854), True, 'import tensorflow as tf\n'), ((33872, 33927), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['learning_rate'], {'momentum': '(0.9)'}), '(learning_rate, momentum=0.9)\n', (33898, 33927), True, 'import tensorflow as tf\n'), ((33981, 34002), 'tensorflow.name_scope', 'tf.name_scope', (['"""eval"""'], {}), "('eval')\n", (33994, 34002), True, 'import tensorflow as tf\n'), ((34018, 34046), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'y', '(1)'], {}), '(logits, y, 1)\n', (34032, 34046), True, 'import tensorflow as tf\n'), ((34330, 34342), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (34340, 34342), True, 'import tensorflow as tf\n'), ((35942, 35962), 'tensorflow.name_scope', 'tf.name_scope', (['"""dnn"""'], {}), "('dnn')\n", (35955, 35962), True, 'import tensorflow as tf\n'), ((35978, 36046), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'n_hidden1'], {'activation': 'tf.nn.relu', 'name': '"""hidden1"""'}), "(X, n_hidden1, activation=tf.nn.relu, name='hidden1')\n", (35993, 36046), True, 'import tensorflow as tf\n'), ((36061, 36135), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden1', 'n_hidden2'], {'activation': 'tf.nn.relu', 'name': '"""hidden2"""'}), "(hidden1, n_hidden2, activation=tf.nn.relu, name='hidden2')\n", (36076, 36135), True, 'import tensorflow as tf\n'), ((36149, 36200), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden2', 'n_outputs'], {'name': '"""outputs"""'}), "(hidden2, n_outputs, name='outputs')\n", (36164, 36200), True, 'import tensorflow as tf\n'), ((36207, 36228), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (36220, 36228), True, 'import tensorflow as tf\n'), ((36245, 36316), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], 
{'labels': 'y', 'logits': 'logits'}), '(labels=y, logits=logits)\n', (36291, 36316), True, 'import tensorflow as tf\n'), ((36328, 36365), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['xentropy'], {'name': '"""loss"""'}), "(xentropy, name='loss')\n", (36342, 36365), True, 'import tensorflow as tf\n'), ((36372, 36394), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (36385, 36394), True, 'import tensorflow as tf\n'), ((36412, 36463), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['learning_rate', 'momentum'], {}), '(learning_rate, momentum)\n', (36438, 36463), True, 'import tensorflow as tf\n'), ((36517, 36538), 'tensorflow.name_scope', 'tf.name_scope', (['"""eval"""'], {}), "('eval')\n", (36530, 36538), True, 'import tensorflow as tf\n'), ((36554, 36582), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'y', '(1)'], {}), '(logits, y, 1)\n', (36568, 36582), True, 'import tensorflow as tf\n'), ((37692, 37704), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (37702, 37704), True, 'import tensorflow as tf\n'), ((39375, 39395), 'tensorflow.name_scope', 'tf.name_scope', (['"""dnn"""'], {}), "('dnn')\n", (39388, 39395), True, 'import tensorflow as tf\n'), ((39411, 39516), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'n_inputs'], {'activation': 'tf.nn.relu', 'kernel_regularizer': 'max_norm_reg', 'name': '"""hidden1"""'}), "(X, n_inputs, activation=tf.nn.relu, kernel_regularizer=\n max_norm_reg, name='hidden1')\n", (39426, 39516), True, 'import tensorflow as tf\n'), ((39526, 39637), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden1', 'n_hidden2'], {'activation': 'tf.nn.relu', 'kernel_regularizer': 'max_norm_reg', 'name': '"""hidden2"""'}), "(hidden1, n_hidden2, activation=tf.nn.relu,\n kernel_regularizer=max_norm_reg, name='hidden2')\n", (39541, 39637), True, 'import tensorflow as tf\n'), ((39647, 39698), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden2', 'n_outputs'], {'name': 
'"""outputs"""'}), "(hidden2, n_outputs, name='outputs')\n", (39662, 39698), True, 'import tensorflow as tf\n'), ((39719, 39740), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (39732, 39740), True, 'import tensorflow as tf\n'), ((39757, 39828), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'logits'}), '(labels=y, logits=logits)\n', (39803, 39828), True, 'import tensorflow as tf\n'), ((39840, 39877), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['xentropy'], {'name': '"""loss"""'}), "(xentropy, name='loss')\n", (39854, 39877), True, 'import tensorflow as tf\n'), ((39884, 39906), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (39897, 39906), True, 'import tensorflow as tf\n'), ((39924, 39975), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['learning_rate', 'momentum'], {}), '(learning_rate, momentum)\n', (39950, 39975), True, 'import tensorflow as tf\n'), ((40029, 40050), 'tensorflow.name_scope', 'tf.name_scope', (['"""eval"""'], {}), "('eval')\n", (40042, 40050), True, 'import tensorflow as tf\n'), ((40066, 40094), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'y', '(1)'], {}), '(logits, y, 1)\n', (40080, 40094), True, 'import tensorflow as tf\n'), ((40341, 40353), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (40351, 40353), True, 'import tensorflow as tf\n'), ((41240, 41260), 'tensorflow.name_scope', 'tf.name_scope', (['"""dnn"""'], {}), "('dnn')\n", (41253, 41260), True, 'import tensorflow as tf\n'), ((41276, 41376), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'n_hidden1'], {'activation': 'tf.nn.elu', 'kernel_initializer': 'he_init', 'name': '"""hidden1"""'}), "(X, n_hidden1, activation=tf.nn.elu, kernel_initializer=\n he_init, name='hidden1')\n", (41291, 41376), True, 'import tensorflow as tf\n'), ((41386, 41491), 'tensorflow.layers.dense', 
'tf.layers.dense', (['hidden1', 'n_hidden2'], {'activation': 'tf.nn.elu', 'kernel_initializer': 'he_init', 'name': '"""hidden2"""'}), "(hidden1, n_hidden2, activation=tf.nn.elu,\n kernel_initializer=he_init, name='hidden2')\n", (41401, 41491), True, 'import tensorflow as tf\n'), ((41502, 41607), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden2', 'n_hidden3'], {'activation': 'tf.nn.elu', 'kernel_initializer': 'he_init', 'name': '"""hidden3"""'}), "(hidden2, n_hidden3, activation=tf.nn.elu,\n kernel_initializer=he_init, name='hidden3')\n", (41517, 41607), True, 'import tensorflow as tf\n'), ((41618, 41723), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden3', 'n_hidden4'], {'activation': 'tf.nn.elu', 'kernel_initializer': 'he_init', 'name': '"""hidden4"""'}), "(hidden3, n_hidden4, activation=tf.nn.elu,\n kernel_initializer=he_init, name='hidden4')\n", (41633, 41723), True, 'import tensorflow as tf\n'), ((41734, 41839), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden4', 'n_hidden5'], {'activation': 'tf.nn.elu', 'kernel_initializer': 'he_init', 'name': '"""hidden5"""'}), "(hidden4, n_hidden5, activation=tf.nn.elu,\n kernel_initializer=he_init, name='hidden5')\n", (41749, 41839), True, 'import tensorflow as tf\n'), ((42673, 42701), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (42680, 42701), True, 'import tensorflow as tf\n'), ((43242, 43254), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (43252, 43254), True, 'import tensorflow as tf\n'), ((44200, 44212), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (44210, 44212), True, 'import tensorflow as tf\n'), ((60143, 60171), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (60150, 60171), True, 'import tensorflow as tf\n'), ((60947, 60959), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (60956, 60959), True, 'import numpy as np\n'), ((61482, 61494), 'tensorflow.Session', 'tf.Session', ([], {}), 
'()\n', (61492, 61494), True, 'import tensorflow as tf\n'), ((61589, 61600), 'time.time', 'time.time', ([], {}), '()\n', (61598, 61600), False, 'import time\n'), ((62548, 62559), 'time.time', 'time.time', ([], {}), '()\n', (62557, 62559), False, 'import time\n'), ((62628, 62640), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (62638, 62640), True, 'import tensorflow as tf\n'), ((63668, 63680), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (63678, 63680), True, 'import tensorflow as tf\n'), ((63775, 63786), 'time.time', 'time.time', ([], {}), '()\n', (63784, 63786), False, 'import time\n'), ((64934, 64945), 'time.time', 'time.time', ([], {}), '()\n', (64943, 64945), False, 'import time\n'), ((65010, 65022), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (65020, 65022), True, 'import tensorflow as tf\n'), ((66100, 66128), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (66107, 66128), True, 'import tensorflow as tf\n'), ((66734, 66746), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (66744, 66746), True, 'import tensorflow as tf\n'), ((67788, 67800), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (67798, 67800), True, 'import tensorflow as tf\n'), ((68638, 68650), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (68648, 68650), True, 'import tensorflow as tf\n'), ((69704, 69716), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (69714, 69716), True, 'import tensorflow as tf\n'), ((70346, 70358), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (70356, 70358), True, 'import tensorflow as tf\n'), ((71408, 71420), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (71418, 71420), True, 'import tensorflow as tf\n'), ((5717, 5745), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (5724, 5745), True, 'import tensorflow as tf\n'), ((9228, 9256), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', 
(9235, 9256), True, 'import tensorflow as tf\n'), ((10417, 10439), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (10437, 10439), True, 'import tensorflow as tf\n'), ((10647, 10669), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (10667, 10669), True, 'import tensorflow as tf\n'), ((10700, 10722), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (10720, 10722), True, 'import tensorflow as tf\n'), ((10760, 10782), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (10780, 10782), True, 'import tensorflow as tf\n'), ((10835, 10857), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (10855, 10857), True, 'import tensorflow as tf\n'), ((12917, 12939), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (12937, 12939), True, 'import tensorflow as tf\n'), ((12970, 12992), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (12990, 12992), True, 'import tensorflow as tf\n'), ((13030, 13052), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (13050, 13052), True, 'import tensorflow as tf\n'), ((13548, 13576), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (13555, 13576), True, 'import tensorflow as tf\n'), ((15477, 15505), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (15484, 15505), True, 'import tensorflow as tf\n'), ((19586, 19614), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (19593, 19614), True, 'import tensorflow as tf\n'), ((21801, 21829), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (21808, 21829), True, 'import tensorflow as tf\n'), ((24118, 24146), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (24125, 24146), True, 'import tensorflow as tf\n'), ((24869, 
24916), 'numpy.random.permutation', 'np.random.permutation', (['mnist.train.num_examples'], {}), '(mnist.train.num_examples)\n', (24890, 24916), True, 'import numpy as np\n'), ((24943, 24992), 'numpy.array_split', 'np.array_split', (['h2_cache[shuffled_idx]', 'n_batches'], {}), '(h2_cache[shuffled_idx], n_batches)\n', (24957, 24992), True, 'import numpy as np\n'), ((25013, 25072), 'numpy.array_split', 'np.array_split', (['mnist.train.labels[shuffled_idx]', 'n_batches'], {}), '(mnist.train.labels[shuffled_idx], n_batches)\n', (25027, 25072), True, 'import numpy as np\n'), ((26612, 26640), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (26619, 26640), True, 'import tensorflow as tf\n'), ((28593, 28615), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (28613, 28615), True, 'import tensorflow as tf\n'), ((28660, 28682), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (28680, 28682), True, 'import tensorflow as tf\n'), ((29193, 29221), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (29200, 29221), True, 'import tensorflow as tf\n'), ((30924, 30963), 'tensorflow.contrib.layers.l1_regularizer', 'tf.contrib.layers.l1_regularizer', (['scale'], {}), '(scale)\n', (30956, 30963), True, 'import tensorflow as tf\n'), ((31636, 31664), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (31643, 31664), True, 'import tensorflow as tf\n'), ((34077, 34105), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (34084, 34105), True, 'import tensorflow as tf\n'), ((36613, 36641), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (36620, 36641), True, 'import tensorflow as tf\n'), ((37162, 37184), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (37182, 37184), True, 'import tensorflow as tf\n'), ((37373, 37395), 
'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (37393, 37395), True, 'import tensorflow as tf\n'), ((38941, 38997), 'tensorflow.clip_by_norm', 'tf.clip_by_norm', (['weights'], {'clip_norm': 'threshold', 'axes': 'axes'}), '(weights, clip_norm=threshold, axes=axes)\n', (38956, 38997), True, 'import tensorflow as tf\n'), ((39021, 39059), 'tensorflow.assign', 'tf.assign', (['weights', 'clipped'], {'name': 'name'}), '(weights, clipped, name=name)\n', (39030, 39059), True, 'import tensorflow as tf\n'), ((39068, 39114), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['collection', 'clip_weights'], {}), '(collection, clip_weights)\n', (39088, 39114), True, 'import tensorflow as tf\n'), ((40125, 40153), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (40132, 40153), True, 'import tensorflow as tf\n'), ((48274, 48334), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, n_inputs)', 'name': '"""X"""'}), "(tf.float32, shape=(None, n_inputs), name='X')\n", (48288, 48334), True, 'import tensorflow as tf\n'), ((48347, 48393), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'None', 'name': '"""y"""'}), "(tf.int32, shape=None, name='y')\n", (48361, 48393), True, 'import tensorflow as tf\n'), ((48648, 48735), 'tensorflow.layers.dense', 'tf.layers.dense', (['dnn_outputs', 'n_outputs'], {'kernel_initializer': 'he_init', 'name': '"""logits"""'}), "(dnn_outputs, n_outputs, kernel_initializer=he_init, name=\n 'logits')\n", (48663, 48735), True, 'import tensorflow as tf\n'), ((48749, 48786), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'name': '"""Y_proba"""'}), "(logits, name='Y_proba')\n", (48762, 48786), True, 'import tensorflow as tf\n'), ((48807, 48878), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'logits'}), '(labels=y, logits=logits)\n', (48853, 48878), 
True, 'import tensorflow as tf\n'), ((48960, 48997), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['xentropy'], {'name': '"""loss"""'}), "(xentropy, name='loss')\n", (48974, 48997), True, 'import tensorflow as tf\n'), ((49140, 49168), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'y', '(1)'], {}), '(logits, y, 1)\n', (49154, 49168), True, 'import tensorflow as tf\n'), ((49266, 49299), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (49297, 49299), True, 'import tensorflow as tf\n'), ((49316, 49332), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (49330, 49332), True, 'import tensorflow as tf\n'), ((50952, 50964), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (50961, 50964), True, 'import numpy as np\n'), ((51512, 51582), 'numpy.array', 'np.array', (['[self.class_to_index_[label] for label in y]'], {'dtype': 'np.int32'}), '([self.class_to_index_[label] for label in y], dtype=np.int32)\n', (51520, 51582), True, 'import numpy as np\n'), ((51636, 51646), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (51644, 51646), True, 'import tensorflow as tf\n'), ((52100, 52129), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self._graph'}), '(graph=self._graph)\n', (52110, 52129), True, 'import tensorflow as tf\n'), ((54720, 54807), 'numpy.array', 'np.array', (['[[self.classes_[class_index]] for class_index in class_indices]', 'np.int32'], {}), '([[self.classes_[class_index]] for class_index in class_indices],\n np.int32)\n', (54728, 54807), True, 'import numpy as np\n'), ((55596, 55631), 'tensorflow.maximum', 'tf.maximum', (['(alpha * z)', 'z'], {'name': 'name'}), '(alpha * z, z, name=name)\n', (55606, 55631), True, 'import tensorflow as tf\n'), ((59172, 59194), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (59192, 59194), True, 'import tensorflow as tf\n'), ((59225, 59247), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (59245, 59247), 
True, 'import tensorflow as tf\n'), ((59281, 59303), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (59301, 59303), True, 'import tensorflow as tf\n'), ((59343, 59365), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (59363, 59365), True, 'import tensorflow as tf\n'), ((59439, 59461), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (59459, 59461), True, 'import tensorflow as tf\n'), ((61091, 61109), 'numpy.concatenate', 'np.concatenate', (['Xs'], {}), '(Xs)\n', (61105, 61109), True, 'import numpy as np\n'), ((61111, 61129), 'numpy.concatenate', 'np.concatenate', (['ys'], {}), '(ys)\n', (61125, 61129), True, 'import numpy as np\n'), ((63209, 63231), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (63229, 63231), True, 'import tensorflow as tf\n'), ((65616, 65638), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (65636, 65638), True, 'import tensorflow as tf\n'), ((65669, 65691), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (65689, 65691), True, 'import tensorflow as tf\n'), ((65733, 65755), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (65753, 65755), True, 'import tensorflow as tf\n'), ((8886, 8931), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['grad', '(-threshold)', 'threshold'], {}), '(grad, -threshold, threshold)\n', (8902, 8931), True, 'import tensorflow as tf\n'), ((28976, 28986), 'tensorflow.abs', 'tf.abs', (['W1'], {}), '(W1)\n', (28982, 28986), True, 'import tensorflow as tf\n'), ((29004, 29014), 'tensorflow.abs', 'tf.abs', (['W2'], {}), '(W2)\n', (29010, 29014), True, 'import tensorflow as tf\n'), ((47524, 47635), 'tensorflow.layers.dense', 'tf.layers.dense', (['inputs', 'self.n_neurons'], {'kernel_initializer': 'self.initializer', 'name': "('hidden%d' % (layer + 1))"}), "(inputs, self.n_neurons, kernel_initializer=self.initializer,\n name='hidden%d' % (layer + 
1))\n", (47539, 47635), True, 'import tensorflow as tf\n'), ((48177, 48214), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['self.random_state'], {}), '(self.random_state)\n', (48195, 48214), True, 'import tensorflow as tf\n'), ((48227, 48260), 'numpy.random.seed', 'np.random.seed', (['self.random_state'], {}), '(self.random_state)\n', (48241, 48260), True, 'import numpy as np\n'), ((48484, 48545), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(False)'], {'shape': '()', 'name': '"""training"""'}), "(False, shape=(), name='training')\n", (48511, 48545), True, 'import tensorflow as tf\n'), ((49203, 49231), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (49210, 49231), True, 'import tensorflow as tf\n'), ((49887, 49935), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (49904, 49935), True, 'import tensorflow as tf\n'), ((51816, 51858), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (51833, 51858), True, 'import tensorflow as tf\n'), ((54423, 54501), 'sklearn.exceptions.NotFittedError', 'NotFittedError', (["('This %s instance is not fitted yet' % self.__class__.__name__)"], {}), "('This %s instance is not fitted yet' % self.__class__.__name__)\n", (54437, 54501), False, 'from sklearn.exceptions import NotFittedError\n'), ((47433, 47502), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['inputs', 'self.dropout_rate'], {'training': 'self._training'}), '(inputs, self.dropout_rate, training=self._training)\n', (47450, 47502), True, 'import tensorflow as tf\n'), ((47772, 47873), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['inputs'], {'momentum': 'self.batch_norm_momentum', 'training': 'self._training'}), '(inputs, momentum=self.batch_norm_momentum,\n training=self._training)\n', (47801, 47873), True, 'import 
tensorflow as tf\n')] |
from tkinter import *
import tkinter
import pyautogui
import PIL.Image, PIL.ImageTk
import cv2
import numpy as np
import time
import os
import keyboard
import datetime
import os
import keyboard
from PIL import ImageTk
import win32clipboard as clip
import win32con
from io import BytesIO
from PIL import ImageGrab
# Ensure an empty screenshots/ directory: create it if missing and
# remove any stale captures from a previous session.
os.makedirs("screenshots", exist_ok=True)
for old_file in os.listdir("screenshots"):
    os.remove(os.path.join("screenshots", old_file))
x = datetime.datetime.now()  # session start time (currently informational only)
a = 0  # running counter used to number the saved screenshot files
def screenshot():
    """Grab the full screen, downscale it to 650x380, and save it as the
    next numbered PNG under screenshots/."""
    global a
    grab = pyautogui.screenshot()
    frame = cv2.cvtColor(np.array(grab), cv2.COLOR_BGR2RGB)
    frame = cv2.resize(frame, (650, 380))
    a += 1
    cv2.imwrite("screenshots/image" + str(a) + ".png", frame)
# Hotkeys: Shift+A captures a screenshot, Shift+D stops capturing.
capturing = True
while capturing:
    if keyboard.is_pressed("shift + a"):
        time.sleep(0.1)  # small debounce so one key press yields one shot
        screenshot()
    if keyboard.is_pressed("shift + d"):
        capturing = False
# Build the viewer window and preload every saved screenshot as a Tk image.
root = tkinter.Tk()
root.title("Screenshot saver")
img_no = 0  # index of the screenshot currently displayed (0-based)
image_list = []  # PhotoImage refs must be kept alive or Tk drops the pixels
for i in os.listdir("screenshots"):
    image = cv2.imread("screenshots/"+i)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # cv2 loads BGR; Tk wants RGB
    photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(image))
    image_list.append(photo)
# print(image_list)
# NOTE(review): this import lands AFTER PIL.Image is first used above; it
# only works because the top-of-file PIL imports already loaded the module.
import PIL.Image
my_label = Label(image=image_list[0])
my_label.grid(row=0, column=0, columnspan=3)
def forward(image_number):
    """Show screenshot ``image_number`` (1-based) and rebuild the nav buttons."""
    global my_label
    global button_forward
    global button_back
    global img_no
    # Remove the currently shown label BEFORE replacing it. The original
    # created the new label first and then called grid_forget() on that
    # never-gridded new widget, so the old label stayed stacked underneath.
    my_label.grid_forget()
    my_label = Label(image=image_list[image_number - 1])
    # Disable ">>" on the last image; use the real count instead of the
    # hard-coded 5 so the viewer works for any number of screenshots.
    if image_number == len(image_list):
        button_forward = Button(root, text=">>", state=DISABLED)
    else:
        button_forward = Button(root, text=">>", command=lambda: forward(image_number + 1))
    button_back = Button(root, text="<<", command=lambda: back(image_number - 1))
    my_label.grid(row=0, column=0, columnspan=3)
    button_back.grid(row=1, column=0)
    button_forward.grid(row=1, column=2)
    img_no += 1
def back(image_number):
    """Show screenshot ``image_number`` (1-based) and rebuild the nav buttons."""
    global my_label
    global button_forward
    global button_back
    global img_no
    # Remove the currently shown label before replacing it.
    my_label.grid_forget()
    my_label = Label(image=image_list[image_number - 1])
    button_forward = Button(root, text=">>", command=lambda: forward(image_number + 1))
    # Build "<<" exactly once: the original always built an enabled button
    # and then discarded it on the first image, leaking a hidden child of root.
    if image_number == 1:
        button_back = Button(root, text="<<", state=DISABLED)
    else:
        button_back = Button(root, text="<<", command=lambda: back(image_number - 1))
    my_label.grid(row=0, column=0, columnspan=3)
    button_back.grid(row=1, column=0)
    button_forward.grid(row=1, column=2)
    img_no -= 1
def copy():
    """Copy the screenshot currently shown in the viewer to the Windows
    clipboard as a device-independent bitmap (CF_DIB)."""
    global img_no
    img_list = os.listdir("screenshots")
    file_path = os.path.join("screenshots", img_list[img_no])
    # Close the image handle when done — the original leaked it.
    with PIL.Image.open(file_path) as image:
        output = BytesIO()
        image.convert('RGB').save(output, 'BMP')
        # A BMP file starts with a 14-byte BITMAPFILEHEADER; the clipboard
        # CF_DIB format expects only the DIB data that follows it.
        data = output.getvalue()[14:]
        output.close()
    clip.OpenClipboard()
    clip.EmptyClipboard()
    clip.SetClipboardData(win32con.CF_DIB, data)
    clip.CloseClipboard()
# Initial button row: "<<" starts disabled (we begin on the first image),
# "Copy" puts the current shot on the clipboard, ">>" advances to image 2.
button_back = Button(root, text="<<", command=back, state=DISABLED,)
button_exit = Button(root, text="Copy", command=copy)
button_forward = Button(root, text=">>", command=lambda: forward(2))
button_back.grid(row=1, column=0)
button_exit.grid(row=1, column=1)
button_forward.grid(row=1, column=2)
root.mainloop()
| [
"os.listdir",
"os.mkdir",
"os.remove",
"io.BytesIO",
"win32clipboard.SetClipboardData",
"win32clipboard.CloseClipboard",
"cv2.cvtColor",
"os.path.exists",
"pyautogui.screenshot",
"win32clipboard.EmptyClipboard",
"time.sleep",
"keyboard.is_pressed",
"cv2.imread",
"win32clipboard.OpenClipboa... | [((340, 369), 'os.path.exists', 'os.path.exists', (['"""screenshots"""'], {}), "('screenshots')\n", (354, 369), False, 'import os\n'), ((427, 452), 'os.listdir', 'os.listdir', (['"""screenshots"""'], {}), "('screenshots')\n", (437, 452), False, 'import os\n'), ((508, 531), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (529, 531), False, 'import datetime\n'), ((1058, 1070), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (1068, 1070), False, 'import tkinter\n'), ((1146, 1171), 'os.listdir', 'os.listdir', (['"""screenshots"""'], {}), "('screenshots')\n", (1156, 1171), False, 'import os\n'), ((394, 417), 'os.mkdir', 'os.mkdir', (['"""screenshots"""'], {}), "('screenshots')\n", (402, 417), False, 'import os\n'), ((475, 504), 'os.remove', 'os.remove', (["('screenshots/' + i)"], {}), "('screenshots/' + i)\n", (484, 504), False, 'import os\n'), ((586, 608), 'pyautogui.screenshot', 'pyautogui.screenshot', ([], {}), '()\n', (606, 608), False, 'import pyautogui\n'), ((619, 631), 'numpy.array', 'np.array', (['ss'], {}), '(ss)\n', (627, 631), True, 'import numpy as np\n'), ((646, 681), 'cv2.cvtColor', 'cv2.cvtColor', (['ss', 'cv2.COLOR_BGR2RGB'], {}), '(ss, cv2.COLOR_BGR2RGB)\n', (658, 681), False, 'import cv2\n'), ((695, 725), 'cv2.resize', 'cv2.resize', (['cv_img', '(650, 380)'], {}), '(cv_img, (650, 380))\n', (705, 725), False, 'import cv2\n'), ((906, 938), 'keyboard.is_pressed', 'keyboard.is_pressed', (['"""shift + a"""'], {}), "('shift + a')\n", (925, 938), False, 'import keyboard\n'), ((995, 1027), 'keyboard.is_pressed', 'keyboard.is_pressed', (['"""shift + d"""'], {}), "('shift + d')\n", (1014, 1027), False, 'import keyboard\n'), ((1184, 1214), 'cv2.imread', 'cv2.imread', (["('screenshots/' + i)"], {}), "('screenshots/' + i)\n", (1194, 1214), False, 'import cv2\n'), ((1223, 1261), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (1235, 1261), False, 'import cv2\n'), 
((2674, 2699), 'os.listdir', 'os.listdir', (['"""screenshots"""'], {}), "('screenshots')\n", (2684, 2699), False, 'import os\n'), ((2793, 2802), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (2800, 2802), False, 'from io import BytesIO\n'), ((2897, 2917), 'win32clipboard.OpenClipboard', 'clip.OpenClipboard', ([], {}), '()\n', (2915, 2917), True, 'import win32clipboard as clip\n'), ((2920, 2941), 'win32clipboard.EmptyClipboard', 'clip.EmptyClipboard', ([], {}), '()\n', (2939, 2941), True, 'import win32clipboard as clip\n'), ((2944, 2988), 'win32clipboard.SetClipboardData', 'clip.SetClipboardData', (['win32con.CF_DIB', 'data'], {}), '(win32con.CF_DIB, data)\n', (2965, 2988), True, 'import win32clipboard as clip\n'), ((2991, 3012), 'win32clipboard.CloseClipboard', 'clip.CloseClipboard', ([], {}), '()\n', (3010, 3012), True, 'import win32clipboard as clip\n'), ((949, 964), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (959, 964), False, 'import time\n')] |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
DATA_DIR = os.path.join('..', 'data')
def load_data(path):
    """Read a pickled pandas object from ``path`` and return it."""
    data = pd.read_pickle(path)
    return data
def split_intervals(data_intervals, h_split):
    """
    Split each series in ``data_intervals`` into a leading and a trailing part.

    :param data_intervals: List of time series, all of the same length.
    :param h_split: Fraction of each series assigned to the first part (float in (0, 1)).
    :return:
        - output_train: list with the first ``int(len * h_split)`` samples of each series.
        - output_label: list with the remaining samples of each series.
    """
    interval_length = np.shape(data_intervals[0])[0]
    split_idx = int(h_split * interval_length)
    # Slicing keeps each element's original type (list, ndarray, Series, ...).
    output_train = [interval[:split_idx] for interval in data_intervals]
    output_label = [interval[split_idx:] for interval in data_intervals]
    return output_train, output_label
def get_performance(cluster_indexes, labels):
    """
    Aggregate element performances per cluster.

    Each cluster's performance is the sum of the labels of its elements.
    (The original docstring claimed this prints; it never did.)

    :param cluster_indexes: Array-like with the cluster index of each element.
    :param labels: List with the performance of each element.
    :return:
        - output_performance: list with the summed labels of each cluster,
          ordered by ascending cluster index.
        - output_labels: list with the array of labels of each cluster.
    """
    clusters = np.sort(np.unique(cluster_indexes))
    output_performance = []
    output_labels = []
    labels_arr = np.asarray(labels)  # hoisted: was rebuilt on every iteration
    for c in clusters:
        members = labels_arr[np.where(cluster_indexes == c)[0]]
        output_labels.append(members)
        output_performance.append(np.sum(members))
    return output_performance, output_labels
def plot_hist(data, hists, title):
    """If ``hists`` is truthy, plot a 30-bin histogram of ``data`` titled ``title``."""
    if hists:
        # New figure so successive calls do not draw over each other.
        # (The figure handle was previously bound to an unused local.)
        plt.figure()
        plt.hist(data, bins=30)
        plt.grid()
        plt.title(title)
def normalize_intervals(train_intervals):
    """
    Center and rescale each time series in the given list.

    Each series is shifted to zero mean and divided by its peak-to-peak
    range, so every output series has range exactly 1 and lies within
    [-1, 1]. (The original docstring overstated this as "ranging between
    1 and -1".)

    :param train_intervals: List of numeric time series.
    :return: List with the normalized series.
    """
    output = []
    for train in train_intervals:
        centered = train - np.mean(train)
        output.append(centered / np.ptp(centered))
    return output
def get_cluster_indexes(normalized_train_intervals, n_clusters):
    """Cluster the series with K-means and return each element's cluster id."""
    model = KMeans(n_clusters=n_clusters, random_state=0)
    model.fit(normalized_train_intervals)
    return model.labels_
def get_labels(label_intervals):
    """
    Compute each interval's performance.

    :param label_intervals: List of time series.
    :return: List with the last-minus-first value of each series.
    """
    return [data_[-1] - data_[0] for data_ in label_intervals]
def get_intervals(date_init, date_end, interval):
    """
    Build one intra-day minute range per calendar day.

    :param date_init: Initial date. (For example: 2010-01-01)
    :param date_end: End date, inclusive. (For example: 2018-01-01)
    :param interval: List of two integers, the daily hour window to keep.
        (For example: [6, 15])
    :return: List of DatetimeIndex objects, one per day, restricted to the
        minutes in [h_init:00, h_end:00).
    """
    h_init, h_end = interval
    output = []
    # 'min' is the non-deprecated spelling of the old 'T' minute alias.
    for day in pd.date_range(date_init, date_end, freq='D'):
        minutes = pd.date_range(day, periods=1440, freq='min')
        output.append(minutes[h_init * 60:h_end * 60])
    return output
def data_to_intervals(data, intervals):
    """
    Restrict ``data`` to each datetime interval in ``intervals``.

    Intervals whose selection contains NaNs or does not have the full
    expected length (taken from the first interval) are skipped.

    :param data: Time series indexed by datetime.
    :param intervals: List of datetime ranges (e.g. DatetimeIndex objects).
    :return: List with ``data`` restricted to each complete, NaN-free interval.
    """
    expected_length = np.shape(intervals[0])[0]
    output = []
    for interval in intervals:
        chunk = data[interval]
        has_nan = np.isnan(chunk).any()
        complete = np.shape(chunk)[0] == expected_length
        if complete and not has_nan:
            output.append(chunk)
    return output
| [
"matplotlib.pyplot.title",
"pandas.date_range",
"numpy.sum",
"matplotlib.pyplot.hist",
"numpy.amin",
"sklearn.cluster.KMeans",
"numpy.asarray",
"numpy.isnan",
"numpy.shape",
"numpy.amax",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.mean",
"pandas.read_pickle",
"os.path.join",
"nu... | [((130, 156), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""'], {}), "('..', 'data')\n", (142, 156), False, 'import os\n'), ((217, 237), 'pandas.read_pickle', 'pd.read_pickle', (['path'], {}), '(path)\n', (231, 237), True, 'import pandas as pd\n'), ((4132, 4176), 'pandas.date_range', 'pd.date_range', (['date_init', 'date_end'], {'freq': '"""D"""'}), "(date_init, date_end, freq='D')\n", (4145, 4176), True, 'import pandas as pd\n'), ((1010, 1037), 'numpy.shape', 'np.shape', (['data_intervals[0]'], {}), '(data_intervals[0])\n', (1018, 1037), True, 'import numpy as np\n'), ((1850, 1876), 'numpy.unique', 'np.unique', (['cluster_indexes'], {}), '(cluster_indexes)\n', (1859, 1876), True, 'import numpy as np\n'), ((2328, 2340), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2338, 2340), True, 'import matplotlib.pyplot as plt\n'), ((2349, 2372), 'matplotlib.pyplot.hist', 'plt.hist', (['data'], {'bins': '(30)'}), '(data, bins=30)\n', (2357, 2372), True, 'import matplotlib.pyplot as plt\n'), ((2381, 2391), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2389, 2391), True, 'import matplotlib.pyplot as plt\n'), ((2400, 2416), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2409, 2416), True, 'import matplotlib.pyplot as plt\n'), ((4216, 4256), 'pandas.date_range', 'pd.date_range', (['i'], {'periods': '(1440)', 'freq': '"""T"""'}), "(i, periods=1440, freq='T')\n", (4229, 4256), True, 'import pandas as pd\n'), ((4694, 4716), 'numpy.shape', 'np.shape', (['intervals[0]'], {}), '(intervals[0])\n', (4702, 4716), True, 'import numpy as np\n'), ((1970, 2000), 'numpy.where', 'np.where', (['(cluster_indexes == c)'], {}), '(cluster_indexes == c)\n', (1978, 2000), True, 'import numpy as np\n'), ((2022, 2040), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (2032, 2040), True, 'import numpy as np\n'), ((2120, 2135), 'numpy.sum', 'np.sum', (['labels_'], {}), '(labels_)\n', (2126, 2135), True, 'import numpy as 
np\n'), ((2735, 2749), 'numpy.mean', 'np.mean', (['train'], {}), '(train)\n', (2742, 2749), True, 'import numpy as np\n'), ((3046, 3091), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'random_state': '(0)'}), '(n_clusters=n_clusters, random_state=0)\n', (3052, 3091), False, 'from sklearn.cluster import KMeans\n'), ((2773, 2786), 'numpy.amax', 'np.amax', (['temp'], {}), '(temp)\n', (2780, 2786), True, 'import numpy as np\n'), ((2789, 2802), 'numpy.amin', 'np.amin', (['temp'], {}), '(temp)\n', (2796, 2802), True, 'import numpy as np\n'), ((4861, 4876), 'numpy.shape', 'np.shape', (['data_'], {}), '(data_)\n', (4869, 4876), True, 'import numpy as np\n'), ((4832, 4847), 'numpy.isnan', 'np.isnan', (['data_'], {}), '(data_)\n', (4840, 4847), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Quadtree Image Segmentation
<NAME>
Split the image into four quadrants.
Find the quadrant with the highest error.
Split that quadrant into four quadrants.
Repeat N times.
"""
import heapq
import argparse
import imageio
import imageio_ffmpeg
import numpy as np
from tqdm import tqdm
def border(image):
    """Paint a 1-pixel black frame around the given quadrant, in place."""
    image[0, :] = 0
    image[:, 0] = 0
    image[-1, :] = 0
    image[:, -1] = 0
def error(image, avg):
    """Mean squared deviation of a grayscale quadrant from the luma of ``avg``.

    ``avg`` is an (R, G, B) triple; it is collapsed to a single luma value
    with the Rec. 601 weights before comparing against ``image``.
    """
    height, width = image.shape
    luma = 0.299 * avg[0] + 0.587 * avg[1] + 0.114 * avg[2]
    return np.sum((image - luma) ** 2) / (height * width)
def quad(image, edited, iterations, quadrants=None, min_width=10, min_height=10, set_border=True):
    """
    Performs the quadtree segmentation algorithm on the image.
    The resulting quadtree image is stored in "edited".

    Each iteration splits the current quadrant into four, fills each
    sub-quadrant of ``edited`` with its mean color (computed in O(1) from a
    summed-area table), pushes the sub-quadrants onto a max-heap keyed by
    error (stored negated, since heapq is a min-heap), and then pops the
    worst quadrant to split next.

    :param image: Source RGB image (H x W x 3 array); not modified.
    :param edited: Output array of the same shape, written in place.
    :param iterations: Number of split iterations to perform.
    :param quadrants: Optional pre-existing heap of (neg_error, x, y, w, h)
        entries, reused across calls (e.g. between video frames).
    :param min_width: Quadrants narrower than this are not split further.
    :param min_height: Quadrants shorter than this are not split further.
    :param set_border: If True, draw a black border around each sub-quadrant.
    """
    if quadrants is None:
        quadrants = []
    # Rec. 601 luma of the source, used only for the error metric.
    gray = (image * np.array([0.299, 0.587, 0.114])).sum(axis=2, dtype=np.uint8)
    h, w = image.shape[:2]
    # Create the integral image, edge padded by one to the top and left.
    # uint32 is enough: 255 * H * W stays below 2**32 for typical frames
    # — assumes images smaller than ~16 megapixels; TODO confirm for 4K+.
    I = np.pad(image.astype(np.uint32), ((1, 0), (1, 0), (0, 0)), mode='edge')
    np.cumsum(I, axis=0, out=I)
    np.cumsum(I, axis=1, out=I)
    # Top left quadrant x and y coordinates.
    x, y = 0, 0
    for _ in range(iterations):
        if h > min_height and w > min_width:
            hw, hh = w // 2, h // 2
            # For each sub-quadrant, A/B/C/D are the four integral-image
            # corners; sum over the rectangle is D + A - B - C.
            tlA, tlB, tlC, tlD = I[y, x], I[y, x+hw], I[y+hh, x], I[y+hh, x+hw]
            trA, trB, trC, trD = I[y, x+hw], I[y, x+w], I[y+hh, x+hw], I[y+hh, x+w]
            blA, blB, blC, blD = I[y+hh, x], I[y+hh, x+hw], I[y+h, x], I[y+h, x+hw]
            brA, brB, brC, brD = I[y+hh, x+hw], I[y+hh, x+w], I[y+h, x+hw], I[y+h, x+w]
            tl_avg = (tlD + tlA - tlB - tlC) / (hw * hh)
            tr_avg = (trD + trA - trB - trC) / ((w - hw) * hh)
            bl_avg = (blD + blA - blB - blC) / (hw * (h - hh))
            br_avg = (brD + brA - brB - brC) / ((w - hw) * (h - hh))
            edited[y:y+hh, x:x+hw] = tl_avg # Top Left
            edited[y:y+hh, x+hw:x+w] = tr_avg # Top Right
            edited[y+hh:y+h, x:x+hw] = bl_avg # Bottom Left
            edited[y+hh:y+h, x+hw:x+w] = br_avg # Bottom Right
            if set_border:
                border(edited[y:y+hh, x:x+hw])
                border(edited[y:y+hh, x+hw:x+w])
                border(edited[y+hh:y+h, x:x+hw])
                border(edited[y+hh:y+h, x+hw:x+w])
            # Negate the error so the min-heap pops the worst quadrant first.
            heapq.heappush(quadrants, (-error(gray[y:y+hh, x:x+hw], tl_avg), x, y, hw, hh))
            heapq.heappush(quadrants, (-error(gray[y:y+hh, x+hw:x+w], tr_avg), x + hw, y, w - hw, hh))
            heapq.heappush(quadrants, (-error(gray[y+hh:y+h, x:x+hw], bl_avg), x, y + hh, hw, h - hh))
            heapq.heappush(quadrants, (-error(gray[y+hh:y+h, x+hw:x+w], br_avg), x + hw, y + hh, w - hw, h - hh))
        if quadrants:
            _, x, y, w, h = heapq.heappop(quadrants)
        else:
            break
def parse_args():
    """Build the command-line interface for the segmenter and parse sys.argv."""
    cli = argparse.ArgumentParser(description="Quadtree Image Segmentation.")
    # Required positionals: source, destination, and how many splits to run.
    cli.add_argument("input", type=str, help="Image to segment.")
    cli.add_argument("output", type=str, help="Output filename.")
    cli.add_argument("iterations", type=int, help="Number of segmentation iterations.")
    # Optional tuning flags.
    cli.add_argument("-q", "--quality", type=int, default=5, help="Quality of the output video. (0-10), 0 worst, 10 best.")
    cli.add_argument("-b", "--border", action="store_true", help="Add borders to subimages.")
    cli.add_argument("-a", "--audio", action="store_true", help="Add audio from the input file to the output file.")
    cli.add_argument("-mw", "--minwidth", type=int, default=10, help="Minimum width of the smallest image quadrant.")
    cli.add_argument("-mh", "--minheight", type=int, default=10, help="Minimum height of the smallest image quadrant.")
    return cli.parse_args()
def quadtree_video(args):
    """Run quadtree segmentation on every frame of the input video and write the result."""
    with imageio.read(args.input) as video:
        meta = video.get_meta_data()
        writer_opts = {
            "fps": meta["fps"],
            # Clamp the requested quality into ffmpeg's accepted 0-10 range.
            "quality": min(max(args.quality, 0), 10),
        }
        if args.audio:
            writer_opts["audio_path"] = args.input
        writer = imageio_ffmpeg.write_frames(args.output, meta["source_size"], **writer_opts)
        writer.send(None)  # prime the generator-based writer
        heap = []
        # Reusable output buffer; source_size is reversed to row-major (h, w) order.
        out_frame = np.empty(meta['source_size'][::-1] + (3,), dtype=np.uint8)
        frame_total = int(meta["fps"] * meta["duration"] + 0.5)
        for frame in tqdm(video, total=frame_total):
            np.copyto(out_frame, frame)
            quad(
                frame,
                out_frame,
                args.iterations,
                quadrants=heap,
                min_width=args.minwidth,
                min_height=args.minheight,
                set_border=args.border,
            )
            writer.send(out_frame)
            heap.clear()  # each frame starts from a fresh quadrant heap
        writer.close()
def quadtree_image(args, image):
    """Segment a single still image and save the quadtree rendering to args.output."""
    rendered = image.copy()
    quad(
        image,
        rendered,
        args.iterations,
        min_width=args.minwidth,
        min_height=args.minheight,
        set_border=args.border,
    )
    imageio.imsave(args.output, rendered)
def main():
    """Entry point: choose image or video mode by probing the input file."""
    args = parse_args()
    try:
        # imread succeeds only for still images; drop any alpha channel.
        frame = imageio.imread(args.input)[..., :3]
    except Exception:
        # Not readable as an image -- assume the input is a video.
        quadtree_video(args)
    else:
        quadtree_image(args, frame)


if __name__ == "__main__":
    main()
| [
"numpy.sum",
"argparse.ArgumentParser",
"imageio.read",
"numpy.empty",
"imageio.imread",
"heapq.heappop",
"numpy.cumsum",
"imageio_ffmpeg.write_frames",
"numpy.array",
"numpy.copyto",
"imageio.imsave"
] | [((1226, 1253), 'numpy.cumsum', 'np.cumsum', (['I'], {'axis': '(0)', 'out': 'I'}), '(I, axis=0, out=I)\n', (1235, 1253), True, 'import numpy as np\n'), ((1258, 1285), 'numpy.cumsum', 'np.cumsum', (['I'], {'axis': '(1)', 'out': 'I'}), '(I, axis=1, out=I)\n', (1267, 1285), True, 'import numpy as np\n'), ((3091, 3158), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Quadtree Image Segmentation."""'}), "(description='Quadtree Image Segmentation.')\n", (3114, 3158), False, 'import argparse\n'), ((5285, 5318), 'imageio.imsave', 'imageio.imsave', (['args.output', 'copy'], {}), '(args.output, copy)\n', (5299, 5318), False, 'import imageio\n'), ((637, 664), 'numpy.sum', 'np.sum', (['((image - mean) ** 2)'], {}), '((image - mean) ** 2)\n', (643, 664), True, 'import numpy as np\n'), ((4131, 4155), 'imageio.read', 'imageio.read', (['args.input'], {}), '(args.input)\n', (4143, 4155), False, 'import imageio\n'), ((4371, 4442), 'imageio_ffmpeg.write_frames', 'imageio_ffmpeg.write_frames', (['args.output', "data['source_size']"], {}), "(args.output, data['source_size'], **kwargs)\n", (4398, 4442), False, 'import imageio_ffmpeg\n'), ((4510, 4568), 'numpy.empty', 'np.empty', (["(data['source_size'][::-1] + (3,))"], {'dtype': 'np.uint8'}), "(data['source_size'][::-1] + (3,), dtype=np.uint8)\n", (4518, 4568), True, 'import numpy as np\n'), ((3001, 3025), 'heapq.heappop', 'heapq.heappop', (['quadrants'], {}), '(quadrants)\n', (3014, 3025), False, 'import heapq\n'), ((4664, 4688), 'numpy.copyto', 'np.copyto', (['buffer', 'frame'], {}), '(buffer, frame)\n', (4673, 4688), True, 'import numpy as np\n'), ((5468, 5494), 'imageio.imread', 'imageio.imread', (['args.input'], {}), '(args.input)\n', (5482, 5494), False, 'import imageio\n'), ((981, 1012), 'numpy.array', 'np.array', (['[0.299, 0.587, 0.114]'], {}), '([0.299, 0.587, 0.114])\n', (989, 1012), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from utils import *


# Load the marker mask as a single-channel image and force it to be binary.
leaf_mask = cv2.imread('/home/yared/Documents/leaf images/Apple___healthy_markers/0bb2ddc5-d1f4-4fc2-be6b-6b63c60790df___RS_HL 7550.JPG', 0)
leaf_mask[leaf_mask > 0] = 255

inverted = cv2.bitwise_not(leaf_mask)
debug(inverted, 'img_inv')
print('type', inverted.dtype)

# Label the 8-connected components of the inverted mask.
nb_components, labels, stats, centroids = \
    cv2.connectedComponentsWithStats(inverted, connectivity=8, ltype=cv2.CV_32S)

# stats column -1 is the pixel area; row 0 is the background, so skip it.
component_sizes = stats[1:, -1]
nb_components = nb_components - 1

# Keep only components of at least min_size pixels.
min_size = 300
filtered = np.zeros(labels.shape)
for label, size in enumerate(component_sizes, start=1):
    if size >= min_size:
        filtered[labels == label] = 255

cv2.imshow('ImageWindow', filtered)
# cv2.waitKey(0)
"cv2.bitwise_not",
"numpy.zeros",
"cv2.connectedComponentsWithStats",
"cv2.imread",
"cv2.imshow"
] | [((57, 195), 'cv2.imread', 'cv2.imread', (['"""/home/yared/Documents/leaf images/Apple___healthy_markers/0bb2ddc5-d1f4-4fc2-be6b-6b63c60790df___RS_HL 7550.JPG"""', '(0)'], {}), "(\n '/home/yared/Documents/leaf images/Apple___healthy_markers/0bb2ddc5-d1f4-4fc2-be6b-6b63c60790df___RS_HL 7550.JPG'\n , 0)\n", (67, 195), False, 'import cv2\n'), ((215, 235), 'cv2.bitwise_not', 'cv2.bitwise_not', (['img'], {}), '(img)\n', (230, 235), False, 'import cv2\n'), ((339, 414), 'cv2.connectedComponentsWithStats', 'cv2.connectedComponentsWithStats', (['img_inv'], {'connectivity': '(8)', 'ltype': 'cv2.CV_32S'}), '(img_inv, connectivity=8, ltype=cv2.CV_32S)\n', (371, 414), False, 'import cv2\n'), ((617, 639), 'numpy.zeros', 'np.zeros', (['output.shape'], {}), '(output.shape)\n', (625, 639), True, 'import numpy as np\n'), ((743, 774), 'cv2.imshow', 'cv2.imshow', (['"""ImageWindow"""', 'img2'], {}), "('ImageWindow', img2)\n", (753, 774), False, 'import cv2\n')] |
from typing import List, Tuple, Union
import geopandas as gpd
import numpy as np
from scipy.stats import norm, poisson, uniform
from scipy.stats._distn_infrastructure import rv_frozen
from shapely.geometry import Point
from .area import Area
from .feature import Feature
from .utils import clip_points
class Layer:
    """A container for `Feature` objects
    The `Layer` class is mostly useful as a way to create groups of similar features.
    Parameters
    ----------
    name : str
        Unique name for the layer
    area : Area
        Containing area
    input_features : List[Feature]
        List of features that originally made up the Layer (before clipping)
    Attributes
    ----------
    name : str
        Name of the layer
    input_features : List[Feature]
        List of features that make up the layer
    df : geopandas GeoDataFrame
        `GeoDataFrame` with a row for each feature in the layer
    """
    def __init__(
        self,
        name: str,
        area: Area,
        input_features: List[Feature],
    ):
        """Create a `Layer` instance."""
        self.name = name
        self.input_features = input_features
        self.df = gpd.GeoDataFrame(
            [feature.to_dict() for feature in self.input_features],
            geometry="shape",
        )
        # clip by area
        # Only layers made entirely of points are clipped; layers containing any
        # line/polygon geometry keep their full extent.
        if all(self.df.geom_type == "Point"):
            tmp_area = area
            self.df = clip_points(self.df, tmp_area.df)
    @classmethod
    def from_shapefile(
        cls,
        path: str,
        name: str,
        area: Area,
        time_penalty: Union[float, rv_frozen] = 0.0,
        ideal_obs_rate: Union[float, rv_frozen] = 1.0,
        **kwargs,
    ) -> "Layer":
        """Create a `Layer` instance from a shapefile.
        Parameters
        ----------
        path : str
            Filepath to the shapefile
        name : str
            Unique name for the layer
        area : Area
            Containing area
        time_penalty : Union[float, rv_frozen], optional
            Minimum amount of time it takes to record a feature (the default is 0.0, which indicates no time cost for feature recording)
        ideal_obs_rate : Union[float, rv_frozen], optional
            Ideal observation rate: the frequency with which an artifact or feature will be recorded, assuming the following ideal conditions:
            - It lies inside or intersects the Coverage
            - Surface visibility is 100%
            - The surveyor is highly skilled
            The default is 1.0, which indicates that when visibility and surveyor skill allow, the feature will always be recorded.
        Returns
        -------
        Layer
        """
        tmp_gdf = gpd.read_file(path, **kwargs)
        shape_list = tmp_gdf.geometry.tolist()
        # One Feature per shapefile geometry, named "<layer>_<index>".
        feature_list = [
            Feature(
                name=f"{name}_{i}",
                layer_name=name,
                shape=shape_list[i],
                time_penalty=time_penalty,
                ideal_obs_rate=ideal_obs_rate,
            )
            for i in range(len(shape_list))
        ]
        return cls(
            name=name,
            area=area,
            input_features=feature_list,
        )
    @classmethod
    def from_pseudorandom_points(
        cls,
        n: int,
        name: str,
        area: Area,
        time_penalty: Union[float, rv_frozen] = 0.0,
        ideal_obs_rate: Union[float, rv_frozen] = 1.0,
    ) -> "Layer":
        """Create a `Layer` instance of pseudorandom points
        Parameters
        ----------
        n : int
            Number of points to generate
        name : str
            Unique name for the layer
        area : Area
            Containing area
        time_penalty : Union[float, rv_frozen], optional
            Minimum amount of time it takes to record a feature (the default is 0.0, which indicates no time cost for feature recording)
        ideal_obs_rate : Union[float, rv_frozen], optional
            Ideal observation rate: the frequency with which an artifact or feature will be recorded, assuming the following ideal conditions:
            - It lies inside or intersects the Coverage
            - Surface visibility is 100%
            - The surveyor is highly skilled
            The default is 1.0, which indicates that when visibility and surveyor skill allow, the feature will always be recorded.
        Returns
        -------
        Layer
        See Also
        --------
        from_poisson_points : simple Poisson points `Layer`
        from_thomas_points : good for clusters with centers from Poisson points
        from_matern_points : good for clusters with centers from Poisson points
        """
        tmp_area = area
        bounds = tmp_area.df.total_bounds
        n_pts: int = 0
        feature_list: List[Feature] = []
        # Rejection sampling: draw candidate points uniformly in the bounding box
        # and keep only those that survive clipping to the area, until n accepted.
        while n_pts < n:
            xs = (np.random.random(1) * (bounds[2] - bounds[0])) + bounds[0]
            ys = (np.random.random(1) * (bounds[3] - bounds[1])) + bounds[1]
            points_gds = gpd.GeoSeries([Point(xy) for xy in zip(xs, ys)])
            shape_list = points_gds.geometry.tolist()
            feature = Feature(
                name=f"{name}_{n_pts}",
                layer_name=name,
                shape=shape_list[0],
                time_penalty=time_penalty,
                ideal_obs_rate=ideal_obs_rate,
            )
            tmp_df = gpd.GeoDataFrame([feature.to_dict()], geometry="shape")
            # clip by area
            clipped_df = clip_points(tmp_df, tmp_area.df)
            if clipped_df.shape[0] > 0:
                feature_list.append(feature)
                n_pts += 1
        return cls(
            name=name,
            area=area,
            input_features=feature_list,
        )
    @classmethod
    def from_poisson_points(
        cls,
        rate: float,
        name: str,
        area: Area,
        time_penalty: Union[float, rv_frozen] = 0.0,
        ideal_obs_rate: Union[float, rv_frozen] = 1.0,
    ) -> "Layer":
        """Create a `Layer` instance of points with a Poisson point process
        Parameters
        ----------
        rate : float
            Theoretical events per unit area across the whole space. See Notes in `poisson_points()` for more details
        name : str
            Unique name for the layer
        area : Area
            Containing area
        time_penalty : Union[float, rv_frozen], optional
            Minimum amount of time it takes to record a feature (the default is 0.0, which indicates no time cost for feature recording)
        ideal_obs_rate : Union[float, rv_frozen], optional
            Ideal observation rate: the frequency with which an artifact or feature will be recorded, assuming the following ideal conditions:
            - It lies inside or intersects the Coverage
            - Surface visibility is 100%
            - The surveyor is highly skilled
            The default is 1.0, which indicates that when visibility and surveyor skill allow, the feature will always be recorded.
        Returns
        -------
        Layer
        See Also
        --------
        poisson_points : includes details on Poisson point process
        from_pseudorandom_points : faster, naive point creation
        from_thomas_points : good for clusters with centers from Poisson points
        from_matern_points : good for clusters with centers from Poisson points
        Notes
        -----
        The generated point coordinates are not guaranteed to fall within the given area, only within its bounding box. The generated GeoDataFrame, `df`, is clipped by the actual area bounds *after* they are generated, which can result in fewer points than expected. All points will remain in the `input_features`.
        """
        tmp_area = area
        points = cls.poisson_points(tmp_area, rate)
        points_gds = gpd.GeoSeries([Point(xy) for xy in points])
        shape_list = points_gds.geometry.tolist()
        # check to see that some points were created
        assert len(shape_list) > 0, "Parameters resulted in zero points"
        feature_list = [
            Feature(
                name=f"{name}_{i}",
                layer_name=name,
                shape=shape_list[i],
                time_penalty=time_penalty,
                ideal_obs_rate=ideal_obs_rate,
            )
            for i in range(len(shape_list))
        ]
        return cls(
            name=name,
            area=area,
            input_features=feature_list,
        )
    @classmethod
    def from_thomas_points(
        cls,
        parent_rate: float,
        child_rate: float,
        gauss_var: float,
        name: str,
        area: Area,
        time_penalty: Union[float, rv_frozen] = 0.0,
        ideal_obs_rate: Union[float, rv_frozen] = 1.0,
    ) -> "Layer":
        """Create a `Layer` instance with a Thomas point process.
        It has a Poisson number of clusters, each with a Poisson number of points distributed with an isotropic Gaussian distribution of a given variance.
        Parameters
        ----------
        parent_rate : float
            Theoretical clusters per unit area across the whole space. See Notes in `poisson_points()` for more details
        child_rate : float
            Theoretical child points per unit area per cluster across the whole space.
        gauss_var : float
            Variance of the isotropic Gaussian distributions around the cluster centers
        name : str
            Unique name for the layer
        area : Area
            Containing area
        time_penalty : Union[float, rv_frozen], optional
            Minimum amount of time it takes to record a feature (the default is 0.0, which indicates no time cost for feature recording)
        ideal_obs_rate : Union[float, rv_frozen], optional
            Ideal observation rate: the frequency with which an artifact or feature will be recorded, assuming the following ideal conditions:
            - It lies inside or intersects the Coverage
            - Surface visibility is 100%
            - The surveyor is highly skilled
            The default is 1.0, which indicates that when visibility and surveyor skill allow, the feature will always be recorded.
        Returns
        -------
        Layer
        See Also
        --------
        poisson_points : includes details on Poisson point process
        from_pseudorandom_points : faster, naive point creation
        from_poisson_points : simple Poisson points `Layer`
        from_matern_points : similar process, good for clusters with centers from Poisson points
        Notes
        -----
        1. Parents (cluster centers) are NOT created as points in the output
        2. The generated point coordinates are not guaranteed to fall within the given area, only within its bounding box. The generated GeoDataFrame, `df`, is clipped by the actual area bounds *after* they are generated, which can result in fewer points than expected. All points will remain in the `input_features`.
        """
        tmp_area = area
        parents = cls.poisson_points(tmp_area, parent_rate)
        M = parents.shape[0]
        points = list()
        for i in range(M):
            N = poisson(child_rate).rvs()
            for __ in range(N):
                # NOTE(review): scipy's `norm` interprets `scale` as the standard
                # deviation, but `gauss_var` is documented above as a variance --
                # confirm whether `scale=np.sqrt(gauss_var)` was intended.
                pdf = norm(loc=parents[i, :2], scale=(gauss_var, gauss_var))
                points.append(list(pdf.rvs(2)))
        points = np.array(points)
        points_gds = gpd.GeoSeries([Point(xy) for xy in points])
        shape_list = points_gds.geometry.tolist()
        # check to see that some points were created
        assert len(shape_list) > 0, "Parameters resulted in zero points"
        feature_list = [
            Feature(
                name=f"{name}_{i}",
                layer_name=name,
                shape=shape_list[i],
                time_penalty=time_penalty,
                ideal_obs_rate=ideal_obs_rate,
            )
            for i in range(len(shape_list))
        ]
        return cls(
            name=name,
            area=area,
            input_features=feature_list,
        )
    @classmethod
    def from_matern_points(
        cls,
        parent_rate: float,
        child_rate: float,
        radius: float,
        name: str,
        area: Area,
        time_penalty: Union[float, rv_frozen] = 0.0,
        ideal_obs_rate: Union[float, rv_frozen] = 1.0,
    ) -> "Layer":
        """Create a `Layer` instance with a Matérn point process.
        It has a Poisson number of clusters, each with a Poisson number of points distributed uniformly across a disk of a given radius.
        Parameters
        ----------
        parent_rate : float
            Theoretical clusters per unit area across the whole space. See Notes in `poisson_points()` for more details
        child_rate : float
            Theoretical child points per unit area per cluster across the whole space.
        radius : float
            Radius of the disk around the cluster centers
        name : str
            Unique name for the layer
        area : Area
            Containing area
        time_penalty : Union[float, rv_frozen], optional
            Minimum amount of time it takes to record a feature (the default is 0.0, which indicates no time cost for feature recording)
        ideal_obs_rate : Union[float, rv_frozen], optional
            Ideal observation rate: the frequency with which an artifact or feature will be recorded, assuming the following ideal conditions:
            - It lies inside or intersects the Coverage (see below)
            - Surface visibility is 100%
            - The surveyor is highly skilled
            The default is 1.0, which indicates that when visibility and surveyor skill allow, the feature will always be recorded.
        Returns
        -------
        Layer
        See Also
        --------
        poisson_points : includes details on Poisson point process
        from_pseudorandom_points : faster, naive point creation
        from_poisson_points : simple Poisson points `Layer`
        from_thomas_points : similar process, good for clusters with centers from Poisson points
        uniform_disk : function used to specify point locations around parents
        Notes
        -----
        1. Parents (cluster centers) are NOT created as points in the output
        2. The generated point coordinates are not guaranteed to fall within the given area, only within its bounding box. The generated GeoDataFrame, `df`, is clipped by the actual area bounds *after* they are generated, which can result in fewer points than expected. All points will remain in the `input_features`.
        """
        tmp_area = area
        parents = cls.poisson_points(tmp_area, parent_rate)
        M = parents.shape[0]
        points = list()
        for i in range(M):
            N = poisson(child_rate).rvs()
            for __ in range(N):
                x, y = cls.uniform_disk(parents[i, 0], parents[i, 1], radius)
                points.append([x, y])
        points = np.array(points)
        points_gds = gpd.GeoSeries([Point(xy) for xy in points])
        shape_list = points_gds.geometry.tolist()
        # check to see that some points were created
        assert len(shape_list) > 0, "Parameters resulted in zero points"
        feature_list = [
            Feature(
                name=f"{name}_{i}",
                layer_name=name,
                shape=shape_list[i],
                time_penalty=time_penalty,
                ideal_obs_rate=ideal_obs_rate,
            )
            for i in range(len(shape_list))
        ]
        return cls(
            name=name,
            area=area,
            input_features=feature_list,
        )
    @staticmethod
    def poisson_points(area: Area, rate: float) -> np.ndarray:
        """Create point coordinates from a Poisson process.
        Parameters
        ----------
        area : Area
            Bounding area
        rate : float
            Theoretical events per unit area across the whole space. See Notes for more details
        Returns
        -------
        np.ndarray
        See Also
        --------
        from_poisson_points : creates `Layer` with Poisson process
        from_pseudorandom_points : faster, naive point creation
        from_thomas_points : good for clusters with centers from Poisson points
        from_matern_points : good for clusters with centers from Poisson points
        Notes
        -----
        1. A Poisson point process is usually said to be more "purely" random than most random number generators (like the one used in `from_pseudorandom_points()`)
        2. The rate (usually called "lambda") of the Poisson point process represents the number of events per unit of area per unit of time across some theoretical space of which our `Area` is some subset. In this case, we only have one unit of time, so the rate really represents a theoretical number of events per unit area. For example, if the specified rate is 5, in any 1x1 square, the number of points observed will be drawn randomly from a Poisson distribution with a shape parameter of 5. In practical terms, this means that over many 1x1 areas (or many observations of the same area), the mean number of points observed in that area will approximate 5.
        """
        bounds = area.df.total_bounds
        dx = bounds[2] - bounds[0]
        dy = bounds[3] - bounds[1]
        # N ~ Poisson(rate * bounding-box area); then place N points uniformly.
        N = poisson(rate * dx * dy).rvs()
        # uniform.rvs(loc, scale, size): N draws over [loc, loc + scale).
        xs = uniform.rvs(0, dx, ((N, 1))) + bounds[0]
        ys = uniform.rvs(0, dy, ((N, 1))) + bounds[1]
        return np.hstack((xs, ys))
    @staticmethod
    def uniform_disk(x: float, y: float, r: float) -> Tuple[float, float]:
        """Randomly locate a point within a disk of specified radius
        Parameters
        ----------
        x, y : float
            Coordinates of disk center
        r : float
            Radius of the disk
        Returns
        -------
        Tuple[float, float]
            Random point within the disk
        """
        # Drawing radius^2 uniformly (then taking the square root below) gives
        # uniform density over the disk's area rather than over its radius.
        r = uniform(0, r ** 2.0).rvs()
        theta = uniform(0, 2 * np.pi).rvs()
        xt = np.sqrt(r) * np.cos(theta)
        yt = np.sqrt(r) * np.sin(theta)
        return x + xt, y + yt
    # TODO: this.
    @classmethod
    def from_rectangles(cls, area: Area, n: int):
        # random centroid?
        # random rotation?
        # n_polygons?
        # TODO: centroid options: from Poisson, from pseudorandom
        # TODO: rotation: pseudorandom
        #
        #
        # create centroid coords from Poisson
        # create rectangle of given dimensions around centroids
        # rotate
        raise NotImplementedError(
            "`from_rectangles()` will be available in a future version of prospect"
        )
| [
"shapely.geometry.Point",
"scipy.stats.norm",
"scipy.stats.poisson",
"scipy.stats.uniform.rvs",
"scipy.stats.uniform",
"numpy.hstack",
"numpy.sin",
"numpy.array",
"geopandas.read_file",
"numpy.cos",
"numpy.random.random",
"numpy.sqrt"
] | [((2730, 2759), 'geopandas.read_file', 'gpd.read_file', (['path'], {}), '(path, **kwargs)\n', (2743, 2759), True, 'import geopandas as gpd\n'), ((11467, 11483), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (11475, 11483), True, 'import numpy as np\n'), ((15091, 15107), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (15099, 15107), True, 'import numpy as np\n'), ((17646, 17665), 'numpy.hstack', 'np.hstack', (['(xs, ys)'], {}), '((xs, ys))\n', (17655, 17665), True, 'import numpy as np\n'), ((17536, 17562), 'scipy.stats.uniform.rvs', 'uniform.rvs', (['(0)', 'dx', '(N, 1)'], {}), '(0, dx, (N, 1))\n', (17547, 17562), False, 'from scipy.stats import norm, poisson, uniform\n'), ((17590, 17616), 'scipy.stats.uniform.rvs', 'uniform.rvs', (['(0)', 'dy', '(N, 1)'], {}), '(0, dy, (N, 1))\n', (17601, 17616), False, 'from scipy.stats import norm, poisson, uniform\n'), ((18188, 18198), 'numpy.sqrt', 'np.sqrt', (['r'], {}), '(r)\n', (18195, 18198), True, 'import numpy as np\n'), ((18201, 18214), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (18207, 18214), True, 'import numpy as np\n'), ((18228, 18238), 'numpy.sqrt', 'np.sqrt', (['r'], {}), '(r)\n', (18235, 18238), True, 'import numpy as np\n'), ((18241, 18254), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (18247, 18254), True, 'import numpy as np\n'), ((7925, 7934), 'shapely.geometry.Point', 'Point', (['xy'], {}), '(xy)\n', (7930, 7934), False, 'from shapely.geometry import Point\n'), ((11347, 11401), 'scipy.stats.norm', 'norm', ([], {'loc': 'parents[i, :2]', 'scale': '(gauss_var, gauss_var)'}), '(loc=parents[i, :2], scale=(gauss_var, gauss_var))\n', (11351, 11401), False, 'from scipy.stats import norm, poisson, uniform\n'), ((11520, 11529), 'shapely.geometry.Point', 'Point', (['xy'], {}), '(xy)\n', (11525, 11529), False, 'from shapely.geometry import Point\n'), ((15144, 15153), 'shapely.geometry.Point', 'Point', (['xy'], {}), '(xy)\n', (15149, 15153), False, 'from shapely.geometry 
import Point\n'), ((17493, 17516), 'scipy.stats.poisson', 'poisson', (['(rate * dx * dy)'], {}), '(rate * dx * dy)\n', (17500, 17516), False, 'from scipy.stats import norm, poisson, uniform\n'), ((18104, 18124), 'scipy.stats.uniform', 'uniform', (['(0)', '(r ** 2.0)'], {}), '(0, r ** 2.0)\n', (18111, 18124), False, 'from scipy.stats import norm, poisson, uniform\n'), ((18147, 18168), 'scipy.stats.uniform', 'uniform', (['(0)', '(2 * np.pi)'], {}), '(0, 2 * np.pi)\n', (18154, 18168), False, 'from scipy.stats import norm, poisson, uniform\n'), ((4903, 4922), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (4919, 4922), True, 'import numpy as np\n'), ((4980, 4999), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (4996, 4999), True, 'import numpy as np\n'), ((5079, 5088), 'shapely.geometry.Point', 'Point', (['xy'], {}), '(xy)\n', (5084, 5088), False, 'from shapely.geometry import Point\n'), ((11267, 11286), 'scipy.stats.poisson', 'poisson', (['child_rate'], {}), '(child_rate)\n', (11274, 11286), False, 'from scipy.stats import norm, poisson, uniform\n'), ((14900, 14919), 'scipy.stats.poisson', 'poisson', (['child_rate'], {}), '(child_rate)\n', (14907, 14919), False, 'from scipy.stats import norm, poisson, uniform\n')] |
import numpy as np
from datetime import datetime, timedelta
import talib
from scipy.signal import argrelmin, argrelmax
from binance.client import Client
from binance_client.configs import get_binance_client
from binance_client.constants import SignalDirection
from binance_client.kline import get_kline_dataframe
from signals.divergence import long_divergence, short_divergence
from utils.string import extract_number
from utils.time import calculate_time_delta
def rsi_13(close_prices):
    """Return the 13-period RSI series for the given sequence of close prices."""
    closes = np.array(close_prices)
    return talib.RSI(closes, timeperiod=13)
def check_divergence(prices, indicators):
    """Look for a price/indicator divergence anchored on the two latest pivots.

    The most recent pivot must sit exactly 3 bars from the end of the series,
    both pivot pairs must align between price and indicator, and the pivots
    must be more than 4 bars apart. Returns SignalDirection.SHORT for a bearish
    divergence, SignalDirection.LONG for a bullish one, otherwise None.
    """
    price_arr = np.array(prices)
    ind_arr = np.array(indicators)
    p_lows = argrelmin(price_arr)[0]
    p_highs = argrelmax(price_arr)[0]
    i_lows = argrelmin(ind_arr)[0]
    i_highs = argrelmax(ind_arr)[0]
    anchor = len(price_arr) - 3

    # Bearish: a higher price high paired with a lower indicator high.
    if len(p_highs) >= 2 and len(i_highs) >= 2:
        aligned = (
            p_highs[-1] == i_highs[-1] == anchor
            and p_highs[-2] == i_highs[-2]
            and i_highs[-1] - i_highs[-2] > 4
        )
        if aligned \
                and price_arr[p_highs[-1]] > price_arr[p_highs[-2]] \
                and ind_arr[p_highs[-1]] < ind_arr[p_highs[-2]]:
            return SignalDirection.SHORT

    # Bullish: a lower price low paired with a higher indicator low.
    if len(p_lows) >= 2 and len(i_lows) >= 2:
        aligned = (
            p_lows[-1] == i_lows[-1] == anchor
            and p_lows[-2] == i_lows[-2]
            and i_lows[-1] - i_lows[-2] > 4
        )
        if aligned \
                and price_arr[p_lows[-1]] < price_arr[p_lows[-2]] \
                and ind_arr[p_lows[-1]] > ind_arr[p_lows[-2]]:
            return SignalDirection.LONG
def check_symbol_divergence(symbol, interval, count, to_datetime=None):
    """Fetch the last `count` candles of `symbol` at `interval` and scan for
    RSI(13) divergences against the candle bodies.

    Parameters
    ----------
    symbol : str
        Trading pair, e.g. 'BTCUSDT'.
    interval : str
        Binance kline interval constant (e.g. Client.KLINE_INTERVAL_1HOUR).
    count : int
        Number of candles in the look-back window.
    to_datetime : datetime, optional
        End of the window; defaults to the current UTC time.
    """
    to_datetime = to_datetime or datetime.utcnow()
    to_timestamp = to_datetime.timestamp()
    delta = calculate_time_delta(interval, count)
    from_datetime = to_datetime - delta
    from_timestamp = from_datetime.timestamp()
    # BUG FIX: the `interval` argument was previously ignored here -- the kline
    # request was hard-coded to Client.KLINE_INTERVAL_1HOUR, so every interval
    # other than 1h produced a window/candle-size mismatch.
    df = get_kline_dataframe(symbol, interval, str(from_timestamp), str(to_timestamp))
    # Candle body extremes: min/max of open vs close (wicks excluded).
    df["candle_low"] = df[["open", "close"]].min(axis=1)
    df["candle_high"] = df[["open", "close"]].max(axis=1)
    df["rsi13"] = talib.RSI(df['close'], timeperiod=13)
    newest_index, long_start_indexes = long_divergence(df["candle_low"], df["rsi13"])
    newest_index, short_start_indexes = short_divergence(df["candle_high"], df["rsi13"])
    # TODO: signal emission below is unfinished -- both branches are stubs.
    if long_start_indexes and not short_start_indexes:
        pass
    elif short_start_indexes and not long_start_indexes:
        pass
if __name__ == '__main__':
    # BUG FIX: the previous ad-hoc test code referenced an undefined `count`
    # variable and a nonexistent `get_close_array` helper, so running this
    # module always raised NameError. Run the symbol-level check with an
    # explicit candle count instead.
    check_symbol_divergence('BTCUSDT', Client.KLINE_INTERVAL_1HOUR, 100)
| [
"scipy.signal.argrelmin",
"signals.divergence.short_divergence",
"signals.divergence.long_divergence",
"datetime.datetime.utcnow",
"scipy.signal.argrelmax",
"numpy.array",
"talib.RSI",
"utils.time.calculate_time_delta"
] | [((608, 624), 'numpy.array', 'np.array', (['prices'], {}), '(prices)\n', (616, 624), True, 'import numpy as np\n'), ((642, 662), 'numpy.array', 'np.array', (['indicators'], {}), '(indicators)\n', (650, 662), True, 'import numpy as np\n'), ((691, 708), 'scipy.signal.argrelmin', 'argrelmin', (['prices'], {}), '(prices)\n', (700, 708), False, 'from scipy.signal import argrelmin, argrelmax\n'), ((736, 753), 'scipy.signal.argrelmax', 'argrelmax', (['prices'], {}), '(prices)\n', (745, 753), False, 'from scipy.signal import argrelmin, argrelmax\n'), ((780, 801), 'scipy.signal.argrelmin', 'argrelmin', (['indicators'], {}), '(indicators)\n', (789, 801), False, 'from scipy.signal import argrelmin, argrelmax\n'), ((827, 848), 'scipy.signal.argrelmax', 'argrelmax', (['indicators'], {}), '(indicators)\n', (836, 848), False, 'from scipy.signal import argrelmin, argrelmax\n'), ((2139, 2176), 'utils.time.calculate_time_delta', 'calculate_time_delta', (['interval', 'count'], {}), '(interval, count)\n', (2159, 2176), False, 'from utils.time import calculate_time_delta\n'), ((2504, 2541), 'talib.RSI', 'talib.RSI', (["df['close']"], {'timeperiod': '(13)'}), "(df['close'], timeperiod=13)\n", (2513, 2541), False, 'import talib\n'), ((2582, 2628), 'signals.divergence.long_divergence', 'long_divergence', (["df['candle_low']", "df['rsi13']"], {}), "(df['candle_low'], df['rsi13'])\n", (2597, 2628), False, 'from signals.divergence import long_divergence, short_divergence\n'), ((2670, 2718), 'signals.divergence.short_divergence', 'short_divergence', (["df['candle_high']", "df['rsi13']"], {}), "(df['candle_high'], df['rsi13'])\n", (2686, 2718), False, 'from signals.divergence import long_divergence, short_divergence\n'), ((512, 534), 'numpy.array', 'np.array', (['close_prices'], {}), '(close_prices)\n', (520, 534), True, 'import numpy as np\n'), ((2066, 2083), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2081, 2083), False, 'from datetime import datetime, 
timedelta\n'), ((3086, 3108), 'numpy.array', 'np.array', (['close_prices'], {}), '(close_prices)\n', (3094, 3108), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.